From bb998747d6c5213e3a366936c482e149dce62720 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Wed, 1 Jul 2020 10:33:56 -0400 Subject: cmd/go: populate the Module field for test packages Fixes #39974 Change-Id: I52bb13e887fde52bf789198059c39fd6aacd96f0 Reviewed-on: https://go-review.googlesource.com/c/go/+/240678 Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Michael Matloob Reviewed-by: Jay Conrod --- src/cmd/go/internal/load/test.go | 2 ++ src/cmd/go/testdata/script/mod_list_test.txt | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/src/cmd/go/internal/load/test.go b/src/cmd/go/internal/load/test.go index a0e275095b..e0f13323df 100644 --- a/src/cmd/go/internal/load/test.go +++ b/src/cmd/go/internal/load/test.go @@ -191,6 +191,7 @@ func TestPackagesAndErrors(ctx context.Context, p *Package, cover *TestCover) (p GoFiles: p.XTestGoFiles, Imports: p.XTestImports, ForTest: p.ImportPath, + Module: p.Module, Error: pxtestErr, }, Internal: PackageInternal{ @@ -222,6 +223,7 @@ func TestPackagesAndErrors(ctx context.Context, p *Package, cover *TestCover) (p ImportPath: p.ImportPath + ".test", Root: p.Root, Imports: str.StringList(TestMainDeps), + Module: p.Module, }, Internal: PackageInternal{ Build: &build.Package{Name: "main"}, diff --git a/src/cmd/go/testdata/script/mod_list_test.txt b/src/cmd/go/testdata/script/mod_list_test.txt index a99e4f36cd..f697af6c92 100644 --- a/src/cmd/go/testdata/script/mod_list_test.txt +++ b/src/cmd/go/testdata/script/mod_list_test.txt @@ -3,9 +3,19 @@ env GO111MODULE=on # go list -compiled -test must handle test-only packages # golang.org/issue/27097. go list -compiled -test +stdout -count=4 '^.' # 4 lines stdout '^m$' stdout '^m\.test$' stdout '^m \[m\.test\]$' +stdout '^m_test \[m\.test\]$' + +# https://golang.org/issue/39974: test packages should have the Module field populated. +go list -test -f '{{.ImportPath}}{{with .Module}}: {{.Path}}{{end}}' +stdout -count=4 '^.' 
# 4 lines +stdout '^m: m$' +stdout '^m\.test: m$' +stdout '^m \[m\.test\]: m$' +stdout '^m_test \[m\.test\]: m$' -- go.mod -- module m @@ -14,3 +24,7 @@ module m package x import "testing" func Test(t *testing.T) {} +-- x_x_test.go -- +package x_test +import "testing" +func Test(t *testing.T) {} -- cgit v1.2.3-54-g00ecf From 94953d3e5928c8a577bad7911aabbf627269ef77 Mon Sep 17 00:00:00 2001 From: Changkun Ou Date: Mon, 24 Aug 2020 13:45:27 +0200 Subject: sync: delete dirty keys inside Map.LoadAndDelete Fixes #40999 Change-Id: Ie32427e5cb5ed512b976b554850f50be156ce9f2 Reviewed-on: https://go-review.googlesource.com/c/go/+/250197 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. Mills --- src/sync/map.go | 1 + src/sync/map_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/src/sync/map.go b/src/sync/map.go index a61e2ebdd6..9ad25353ff 100644 --- a/src/sync/map.go +++ b/src/sync/map.go @@ -274,6 +274,7 @@ func (m *Map) LoadAndDelete(key interface{}) (value interface{}, loaded bool) { e, ok = read.m[key] if !ok && read.amended { e, ok = m.dirty[key] + delete(m.dirty, key) // Regardless of whether the entry was present, record a miss: this key // will take the slow path until the dirty map is promoted to the read // map. diff --git a/src/sync/map_test.go b/src/sync/map_test.go index 4ae989a6d5..7f163caa5c 100644 --- a/src/sync/map_test.go +++ b/src/sync/map_test.go @@ -9,6 +9,7 @@ import ( "reflect" "runtime" "sync" + "sync/atomic" "testing" "testing/quick" ) @@ -171,3 +172,26 @@ func TestConcurrentRange(t *testing.T) { } } } + +func TestIssue40999(t *testing.T) { + var m sync.Map + + // Since the miss-counting in missLocked (via Delete) + // compares the miss count with len(m.dirty), + // add an initial entry to bias len(m.dirty) above the miss count. + m.Store(nil, struct{}{}) + + var finalized uint32 + + // Set finalizers that count for collected keys. 
A non-zero count + // indicates that keys have not been leaked. + for atomic.LoadUint32(&finalized) == 0 { + p := new(int) + runtime.SetFinalizer(p, func(*int) { + atomic.AddUint32(&finalized, 1) + }) + m.Store(p, struct{}{}) + m.Delete(p) + runtime.GC() + } +} -- cgit v1.2.3-54-g00ecf From 5c76382762cfc34b7a7678668460f127fec4a35b Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Fri, 13 Mar 2020 16:46:51 -0400 Subject: cmd/go/internal/modload: add a "v" prefix to the indexed go version This allows semver-based comparisons of the version without additional allocations. Also comment on the reason for the loops that iterate over modFile instead. (I was reading the vendor code in order to add the lazy-loading version check, and this section was a bit unclear to me.) For #36460 Change-Id: I11559d81ffb4eba0e4e10e6fa3c01990b11f9180 Reviewed-on: https://go-review.googlesource.com/c/go/+/240622 Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod Reviewed-by: Michael Matloob --- src/cmd/go/internal/modload/init.go | 6 +++--- src/cmd/go/internal/modload/modfile.go | 14 ++++++++------ src/cmd/go/internal/modload/vendor.go | 4 +++- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 93027c44c4..71c7b158b8 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -483,15 +483,15 @@ func setDefaultBuildMod() { if fi, err := os.Stat(filepath.Join(modRoot, "vendor")); err == nil && fi.IsDir() { modGo := "unspecified" - if index.goVersion != "" { - if semver.Compare("v"+index.goVersion, "v1.14") >= 0 { + if index.goVersionV != "" { + if semver.Compare(index.goVersionV, "v1.14") >= 0 { // The Go version is at least 1.14, and a vendor directory exists. // Set -mod=vendor by default. cfg.BuildMod = "vendor" cfg.BuildModReason = "Go version in go.mod is at least 1.14 and vendor directory exists." 
return } else { - modGo = index.goVersion + modGo = index.goVersionV[1:] } } diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go index 9f4ec5a49f..9a166cae54 100644 --- a/src/cmd/go/internal/modload/modfile.go +++ b/src/cmd/go/internal/modload/modfile.go @@ -20,7 +20,7 @@ type modFileIndex struct { data []byte dataNeedsFix bool // true if fixVersion applied a change while parsing data module module.Version - goVersion string + goVersionV string // GoVersion with "v" prefix require map[module.Version]requireMeta replace map[module.Version]module.Version exclude map[module.Version]bool @@ -66,9 +66,11 @@ func indexModFile(data []byte, modFile *modfile.File, needsFix bool) *modFileInd i.module = modFile.Module.Mod } - i.goVersion = "" + i.goVersionV = "" if modFile.Go != nil { - i.goVersion = modFile.Go.Version + // We're going to use the semver package to compare Go versions, so go ahead + // and add the "v" prefix it expects once instead of every time. + i.goVersionV = "v" + modFile.Go.Version } i.require = make(map[module.Version]requireMeta, len(modFile.Require)) @@ -114,11 +116,11 @@ func (i *modFileIndex) modFileIsDirty(modFile *modfile.File) bool { } if modFile.Go == nil { - if i.goVersion != "" { + if i.goVersionV != "" { return true } - } else if modFile.Go.Version != i.goVersion { - if i.goVersion == "" && cfg.BuildMod == "readonly" { + } else if "v"+modFile.Go.Version != i.goVersionV { + if i.goVersionV == "" && cfg.BuildMod == "readonly" { // go.mod files did not always require a 'go' version, so do not error out // if one is missing — we may be inside an older module in the module // cache, and should bias toward providing useful behavior. 
diff --git a/src/cmd/go/internal/modload/vendor.go b/src/cmd/go/internal/modload/vendor.go index 71f68efbcc..9f34b829fc 100644 --- a/src/cmd/go/internal/modload/vendor.go +++ b/src/cmd/go/internal/modload/vendor.go @@ -133,7 +133,7 @@ func checkVendorConsistency() { readVendorList() pre114 := false - if modFile.Go == nil || semver.Compare("v"+modFile.Go.Version, "v1.14") < 0 { + if semver.Compare(index.goVersionV, "v1.14") < 0 { // Go versions before 1.14 did not include enough information in // vendor/modules.txt to check for consistency. // If we know that we're on an earlier version, relax the consistency check. @@ -150,6 +150,8 @@ func checkVendorConsistency() { } } + // Iterate over the Require directives in their original (not indexed) order + // so that the errors match the original file. for _, r := range modFile.Require { if !vendorMeta[r.Mod].Explicit { if pre114 { -- cgit v1.2.3-54-g00ecf From 5a691927659d1057bb3be80087732b5df5889aca Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Fri, 24 Jul 2020 17:43:55 -0400 Subject: cmd/go/internal/modload: drop requirements on excluded versions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, when we encountered an excluded version in any module's requirements, we would resolve it to the next higher version. Unfortunately, the meaning of “the next higher version” can change over time. Moreover, users who use 'exclude' directives normally either already require some higher version (using the 'exclude' directive to prune out invalid requirements from some intermediate version), or already require some lower version (using the 'exclude' directive to prevent 'go get -u' from upgrading to a known-bad version). In both of these cases, resolving an upgrade for the excluded version is needless work even in the best case: it adds work for the 'go' command when there is already a perfectly usable selected version of the module in the requirement graph. 
Instead, we now interpret the 'exclude' directive as dropping all references to the excluded version. This implements the approach described in https://golang.org/issue/36465#issuecomment-572694990. Fixes #36465 Updates #36460 Change-Id: Ibf0187daced417b4cc23b97125826778658e4b0f Reviewed-on: https://go-review.googlesource.com/c/go/+/244773 Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Michael Matloob Reviewed-by: Jay Conrod --- src/cmd/go/internal/modload/init.go | 12 ++++- src/cmd/go/internal/modload/mvs.go | 19 +++---- src/cmd/go/testdata/script/mod_require_exclude.txt | 62 +++++++++++++++++++--- 3 files changed, 72 insertions(+), 21 deletions(-) diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 71c7b158b8..7f493104b1 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -383,8 +383,8 @@ func InitMod(ctx context.Context) { legacyModInit() } - modFileToBuildList() setDefaultBuildMod() + modFileToBuildList() if cfg.BuildMod == "vendor" { readVendorList() checkVendorConsistency() @@ -459,7 +459,15 @@ func modFileToBuildList() { list := []module.Version{Target} for _, r := range modFile.Require { - list = append(list, r.Mod) + if index != nil && index.exclude[r.Mod] { + if cfg.BuildMod == "mod" { + fmt.Fprintf(os.Stderr, "go: dropping requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version) + } else { + fmt.Fprintf(os.Stderr, "go: ignoring requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version) + } + } else { + list = append(list, r.Mod) + } } buildList = list } diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go index 67eb2c2e19..39d0d69524 100644 --- a/src/cmd/go/internal/modload/mvs.go +++ b/src/cmd/go/internal/modload/mvs.go @@ -54,20 +54,15 @@ func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) { if err != nil { return cached{nil, err} } - for i, mv := range list { - if index != nil 
{ - for index.exclude[mv] { - mv1, err := r.next(mv) - if err != nil { - return cached{nil, err} - } - if mv1.Version == "none" { - return cached{nil, fmt.Errorf("%s(%s) depends on excluded %s(%s) with no newer version available", mod.Path, mod.Version, mv.Path, mv.Version)} - } - mv = mv1 + if index != nil && len(index.exclude) > 0 { + // Drop requirements on excluded versions. + nonExcluded := list[:0] + for _, r := range list { + if !index.exclude[r] { + nonExcluded = append(nonExcluded, r) } } - list[i] = mv + list = nonExcluded } return cached{list, nil} diff --git a/src/cmd/go/testdata/script/mod_require_exclude.txt b/src/cmd/go/testdata/script/mod_require_exclude.txt index 60f7e3fa91..1a0fc3097b 100644 --- a/src/cmd/go/testdata/script/mod_require_exclude.txt +++ b/src/cmd/go/testdata/script/mod_require_exclude.txt @@ -1,16 +1,51 @@ # build with no newer version to satisfy exclude env GO111MODULE=on -! go list -m all -stderr 'no newer version available' +cp go.mod go.mod.orig + +# With the selected version excluded, commands that query that version without +# updating go.mod should fail. + +! go list -mod=readonly -m all +stderr '^go: ignoring requirement on excluded version rsc.io/sampler v1\.99\.99$' +stderr '^go: updates to go.mod needed, disabled by -mod=readonly$' +! stdout '^rsc.io/sampler v1.99.99' +cmp go.mod go.mod.orig + +! go list -mod=vendor -m rsc.io/sampler +stderr '^go: ignoring requirement on excluded version rsc.io/sampler v1\.99\.99$' +stderr '^go list -m: module rsc.io/sampler: can''t resolve module using the vendor directory\n\t\(Use -mod=mod or -mod=readonly to bypass\.\)$' +! stdout '^rsc.io/sampler v1.99.99' +cmp go.mod go.mod.orig + +# With the selected version excluded, commands that load only modules should +# drop the excluded module. + +go list -m all +stderr '^go: dropping requirement on excluded version rsc.io/sampler v1\.99\.99$' +stdout '^x$' +! 
stdout '^rsc.io/sampler' +cmp go.mod go.moddrop + +# With the latest version excluded, 'go list' should resolve needed packages +# from the next-highest version. + +cp go.mod.orig go.mod +go list -f '{{with .Module}}{{.Path}} {{.Version}}{{end}}' all +stderr '^go: dropping requirement on excluded version rsc.io/sampler v1\.99\.99$' +stdout '^x $' +! stdout '^rsc.io/sampler v1.99.99' +stdout '^rsc.io/sampler v1.3.0' # build with newer version available cp go.mod2 go.mod -go list -m all +go list -f '{{with .Module}}{{.Path}} {{.Version}}{{end}}' all +stderr '^go: dropping requirement on excluded version rsc.io/quote v1\.5\.1$' stdout 'rsc.io/quote v1.5.2' # build with excluded newer version cp go.mod3 go.mod -go list -m all +go list -f '{{with .Module}}{{.Path}} {{.Version}}{{end}}' all +! stderr '^go: dropping requirement' stdout 'rsc.io/quote v1.5.1' -- x.go -- @@ -19,15 +54,28 @@ import _ "rsc.io/quote" -- go.mod -- module x -exclude rsc.io/sampler latest -require rsc.io/sampler latest +go 1.13 + +exclude rsc.io/sampler v1.99.99 +require rsc.io/sampler v1.99.99 +-- go.moddrop -- +module x + +go 1.13 + +exclude rsc.io/sampler v1.99.99 -- go.mod2 -- module x + +go 1.13 + exclude rsc.io/quote v1.5.1 require rsc.io/quote v1.5.1 - -- go.mod3 -- module x + +go 1.13 + exclude rsc.io/quote v1.5.2 require rsc.io/quote v1.5.1 -- cgit v1.2.3-54-g00ecf From 9bcc5d20b6f2574e5b98822e0986a1cfa14032f6 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Thu, 23 Jul 2020 22:24:04 -0400 Subject: cmd/go/internal/mvs: reverse the order of BuildListError.stack When we print the stack from a BuildListError, we print the main module first and the error last. That was the opposite of the order in which in was stored in memory, leading to (arguably) more complex code and (definitely) my own inability to reason about the contents of the slice. For now, it's still more convenient to construct the stack reversed, so we do that and then reverse it before packing it into the error. 
For #36460 Change-Id: I6312fb67b2ad9bf9b64071fe829854833208bad7 Reviewed-on: https://go-review.googlesource.com/c/go/+/244759 Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod --- src/cmd/go/internal/mvs/mvs.go | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/src/cmd/go/internal/mvs/mvs.go b/src/cmd/go/internal/mvs/mvs.go index 1f8eaa1f60..1056a500ff 100644 --- a/src/cmd/go/internal/mvs/mvs.go +++ b/src/cmd/go/internal/mvs/mvs.go @@ -83,7 +83,7 @@ func (e *BuildListError) Module() module.Version { if len(e.stack) == 0 { return module.Version{} } - return e.stack[0].m + return e.stack[len(e.stack)-1].m } func (e *BuildListError) Error() string { @@ -93,22 +93,22 @@ func (e *BuildListError) Error() string { // Don't print modules at the beginning of the chain without a // version. These always seem to be the main module or a // synthetic module ("target@"). - for len(stack) > 0 && stack[len(stack)-1].m.Version == "" { - stack = stack[:len(stack)-1] + for len(stack) > 0 && stack[0].m.Version == "" { + stack = stack[1:] } - for i := len(stack) - 1; i >= 1; i-- { - fmt.Fprintf(b, "%s@%s %s\n\t", stack[i].m.Path, stack[i].m.Version, stack[i].nextReason) - } if len(stack) == 0 { b.WriteString(e.Err.Error()) } else { + for _, elem := range stack[:len(stack)-1] { + fmt.Fprintf(b, "%s@%s %s\n\t", elem.m.Path, elem.m.Version, elem.nextReason) + } // Ensure that the final module path and version are included as part of the // error message. 
if _, ok := e.Err.(*module.ModuleError); ok { fmt.Fprintf(b, "%v", e.Err) } else { - fmt.Fprintf(b, "%v", module.VersionError(stack[0].m, e.Err)) + fmt.Fprintf(b, "%v", module.VersionError(stack[len(stack)-1].m, e.Err)) } } return b.String() @@ -202,16 +202,27 @@ func buildList(target module.Version, reqs Reqs, upgrade func(module.Version) (m q = q[1:] if node.err != nil { - err := &BuildListError{ - Err: node.err, - stack: []buildListErrorElem{{m: node.m}}, - } + // Construct the stack reversed (from the error to the main module), + // then reverse it to obtain the usual order (from the main module to + // the error). + stack := []buildListErrorElem{{m: node.m}} for n, prev := neededBy[node], node; n != nil; n, prev = neededBy[n], n { reason := "requires" if n.upgrade == prev.m { reason = "updating to" } - err.stack = append(err.stack, buildListErrorElem{m: n.m, nextReason: reason}) + stack = append(stack, buildListErrorElem{m: n.m, nextReason: reason}) + } + i, j := 0, len(stack)-1 + for i < j { + stack[i], stack[j] = stack[j], stack[i] + i++ + j-- + } + + err := &BuildListError{ + Err: node.err, + stack: stack, } return nil, err } -- cgit v1.2.3-54-g00ecf From 6a718175a6b5532bb49160047731181a4ecec2a1 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Tue, 4 Aug 2020 23:27:18 -0400 Subject: cmd/go/internal/mvs: export a NewBuildListError function Also factor out BuildListError to a separate file. 
For #36460 Change-Id: Ibd1143893b09a2bbef659bea1e8c5dd35184a7ef Reviewed-on: https://go-review.googlesource.com/c/go/+/247764 Reviewed-by: Jay Conrod --- src/cmd/go/internal/mvs/errors.go | 96 +++++++++++++++++++++++++++++++++++++++ src/cmd/go/internal/mvs/mvs.go | 77 +++++-------------------------- 2 files changed, 108 insertions(+), 65 deletions(-) create mode 100644 src/cmd/go/internal/mvs/errors.go diff --git a/src/cmd/go/internal/mvs/errors.go b/src/cmd/go/internal/mvs/errors.go new file mode 100644 index 0000000000..8577902878 --- /dev/null +++ b/src/cmd/go/internal/mvs/errors.go @@ -0,0 +1,96 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mvs + +import ( + "fmt" + "strings" + + "golang.org/x/mod/module" +) + +// BuildListError decorates an error that occurred gathering requirements +// while constructing a build list. BuildListError prints the chain +// of requirements to the module where the error occurred. +type BuildListError struct { + Err error + stack []buildListErrorElem +} + +type buildListErrorElem struct { + m module.Version + + // nextReason is the reason this module depends on the next module in the + // stack. Typically either "requires", or "updating to". + nextReason string +} + +// NewBuildListError returns a new BuildListError wrapping an error that +// occurred at a module found along the given path of requirements and/or +// upgrades, which must be non-empty. +// +// The isUpgrade function reports whether a path step is due to an upgrade. +// A nil isUpgrade function indicates that none of the path steps are due to upgrades. 
+func NewBuildListError(err error, path []module.Version, isUpgrade func(from, to module.Version) bool) *BuildListError { + stack := make([]buildListErrorElem, 0, len(path)) + for len(path) > 1 { + reason := "requires" + if isUpgrade != nil && isUpgrade(path[0], path[1]) { + reason = "updating to" + } + stack = append(stack, buildListErrorElem{ + m: path[0], + nextReason: reason, + }) + path = path[1:] + } + stack = append(stack, buildListErrorElem{m: path[0]}) + + return &BuildListError{ + Err: err, + stack: stack, + } +} + +// Module returns the module where the error occurred. If the module stack +// is empty, this returns a zero value. +func (e *BuildListError) Module() module.Version { + if len(e.stack) == 0 { + return module.Version{} + } + return e.stack[len(e.stack)-1].m +} + +func (e *BuildListError) Error() string { + b := &strings.Builder{} + stack := e.stack + + // Don't print modules at the beginning of the chain without a + // version. These always seem to be the main module or a + // synthetic module ("target@"). + for len(stack) > 0 && stack[0].m.Version == "" { + stack = stack[1:] + } + + if len(stack) == 0 { + b.WriteString(e.Err.Error()) + } else { + for _, elem := range stack[:len(stack)-1] { + fmt.Fprintf(b, "%s@%s %s\n\t", elem.m.Path, elem.m.Version, elem.nextReason) + } + // Ensure that the final module path and version are included as part of the + // error message. + m := stack[len(stack)-1].m + if _, ok := e.Err.(*module.ModuleError); ok { + // TODO(bcmills): Also ensure that the module path and version match. + // (Otherwise, we may be reporting an error from a replacement without + // indicating the replacement path.) 
+ fmt.Fprintf(b, "%v", e.Err) + } else { + fmt.Fprintf(b, "%v", module.VersionError(m, e.Err)) + } + } + return b.String() +} diff --git a/src/cmd/go/internal/mvs/mvs.go b/src/cmd/go/internal/mvs/mvs.go index 1056a500ff..ea23a9f45e 100644 --- a/src/cmd/go/internal/mvs/mvs.go +++ b/src/cmd/go/internal/mvs/mvs.go @@ -9,7 +9,6 @@ package mvs import ( "fmt" "sort" - "strings" "sync" "sync/atomic" @@ -61,59 +60,6 @@ type Reqs interface { Previous(m module.Version) (module.Version, error) } -// BuildListError decorates an error that occurred gathering requirements -// while constructing a build list. BuildListError prints the chain -// of requirements to the module where the error occurred. -type BuildListError struct { - Err error - stack []buildListErrorElem -} - -type buildListErrorElem struct { - m module.Version - - // nextReason is the reason this module depends on the next module in the - // stack. Typically either "requires", or "upgraded to". - nextReason string -} - -// Module returns the module where the error occurred. If the module stack -// is empty, this returns a zero value. -func (e *BuildListError) Module() module.Version { - if len(e.stack) == 0 { - return module.Version{} - } - return e.stack[len(e.stack)-1].m -} - -func (e *BuildListError) Error() string { - b := &strings.Builder{} - stack := e.stack - - // Don't print modules at the beginning of the chain without a - // version. These always seem to be the main module or a - // synthetic module ("target@"). - for len(stack) > 0 && stack[0].m.Version == "" { - stack = stack[1:] - } - - if len(stack) == 0 { - b.WriteString(e.Err.Error()) - } else { - for _, elem := range stack[:len(stack)-1] { - fmt.Fprintf(b, "%s@%s %s\n\t", elem.m.Path, elem.m.Version, elem.nextReason) - } - // Ensure that the final module path and version are included as part of the - // error message. 
- if _, ok := e.Err.(*module.ModuleError); ok { - fmt.Fprintf(b, "%v", e.Err) - } else { - fmt.Fprintf(b, "%v", module.VersionError(stack[len(stack)-1].m, e.Err)) - } - } - return b.String() -} - // BuildList returns the build list for the target module. // // target is the root vertex of a module requirement graph. For cmd/go, this is @@ -202,29 +148,30 @@ func buildList(target module.Version, reqs Reqs, upgrade func(module.Version) (m q = q[1:] if node.err != nil { - // Construct the stack reversed (from the error to the main module), + pathUpgrade := map[module.Version]module.Version{} + + // Construct the error path reversed (from the error to the main module), // then reverse it to obtain the usual order (from the main module to // the error). - stack := []buildListErrorElem{{m: node.m}} + errPath := []module.Version{node.m} for n, prev := neededBy[node], node; n != nil; n, prev = neededBy[n], n { - reason := "requires" if n.upgrade == prev.m { - reason = "updating to" + pathUpgrade[n.m] = prev.m } - stack = append(stack, buildListErrorElem{m: n.m, nextReason: reason}) + errPath = append(errPath, n.m) } - i, j := 0, len(stack)-1 + i, j := 0, len(errPath)-1 for i < j { - stack[i], stack[j] = stack[j], stack[i] + errPath[i], errPath[j] = errPath[j], errPath[i] i++ j-- } - err := &BuildListError{ - Err: node.err, - stack: stack, + isUpgrade := func(from, to module.Version) bool { + return pathUpgrade[from] == to } - return nil, err + + return nil, NewBuildListError(node.err, errPath, isUpgrade) } neighbors := node.required -- cgit v1.2.3-54-g00ecf From c777863f77bb77be2e6c336b78dee2089647ff0e Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Tue, 4 Aug 2020 23:53:01 -0400 Subject: cmd/go/internal/mvs: indicate the actual version when printing a mismatched ModuleError Previously, we suppressed the module version annotation if the last error in the stack was a *module.ModuleError, regardless of its path. 
However, if the error is for a replacement module, that produces a confusing error message: the error is attributed to the last module in the error path, but actually originates in the replacement (which is not otherwise indicated). Now, we print both the original and the replacement modules when they differ, which may add some unfortunate redundancy in the output but at least doesn't drop the very relevant information about replacements. Fixes #35039 Change-Id: I631a7398033602b1bd5656150a4fad4945a87ade Reviewed-on: https://go-review.googlesource.com/c/go/+/247765 Reviewed-by: Jay Conrod --- src/cmd/go/internal/mvs/errors.go | 17 +++++++++++------ .../go/testdata/script/mod_load_replace_mismatch.txt | 2 +- src/cmd/go/testdata/script/mod_replace_gopkgin.txt | 2 +- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/src/cmd/go/internal/mvs/errors.go b/src/cmd/go/internal/mvs/errors.go index 8577902878..5564965fb5 100644 --- a/src/cmd/go/internal/mvs/errors.go +++ b/src/cmd/go/internal/mvs/errors.go @@ -78,16 +78,21 @@ func (e *BuildListError) Error() string { b.WriteString(e.Err.Error()) } else { for _, elem := range stack[:len(stack)-1] { - fmt.Fprintf(b, "%s@%s %s\n\t", elem.m.Path, elem.m.Version, elem.nextReason) + fmt.Fprintf(b, "%s %s\n\t", elem.m, elem.nextReason) } // Ensure that the final module path and version are included as part of the // error message. m := stack[len(stack)-1].m - if _, ok := e.Err.(*module.ModuleError); ok { - // TODO(bcmills): Also ensure that the module path and version match. - // (Otherwise, we may be reporting an error from a replacement without - // indicating the replacement path.) 
- fmt.Fprintf(b, "%v", e.Err) + if mErr, ok := e.Err.(*module.ModuleError); ok { + actual := module.Version{Path: mErr.Path, Version: mErr.Version} + if v, ok := mErr.Err.(*module.InvalidVersionError); ok { + actual.Version = v.Version + } + if actual == m { + fmt.Fprintf(b, "%v", e.Err) + } else { + fmt.Fprintf(b, "%s (replaced by %s): %v", m, actual, mErr.Err) + } } else { fmt.Fprintf(b, "%v", module.VersionError(m, e.Err)) } diff --git a/src/cmd/go/testdata/script/mod_load_replace_mismatch.txt b/src/cmd/go/testdata/script/mod_load_replace_mismatch.txt index 74dbb34b8a..067e209b01 100644 --- a/src/cmd/go/testdata/script/mod_load_replace_mismatch.txt +++ b/src/cmd/go/testdata/script/mod_load_replace_mismatch.txt @@ -18,6 +18,6 @@ package use import _ "rsc.io/quote" -- want -- -go: example.com/quote@v1.5.2: parsing go.mod: +go: rsc.io/quote@v1.5.2 (replaced by example.com/quote@v1.5.2): parsing go.mod: module declares its path as: rsc.io/Quote but was required as: rsc.io/quote diff --git a/src/cmd/go/testdata/script/mod_replace_gopkgin.txt b/src/cmd/go/testdata/script/mod_replace_gopkgin.txt index 28c1196284..674c99cb0c 100644 --- a/src/cmd/go/testdata/script/mod_replace_gopkgin.txt +++ b/src/cmd/go/testdata/script/mod_replace_gopkgin.txt @@ -34,7 +34,7 @@ go list -m gopkg.in/src-d/go-git.v4 # A mismatched gopkg.in path should not be able to replace a different major version. cd ../3-to-gomod-4 ! 
go list -m gopkg.in/src-d/go-git.v3 -stderr '^go: gopkg\.in/src-d/go-git\.v3@v3.0.0-20190801152248-0d1a009cbb60: invalid version: go\.mod has non-\.\.\.\.v3 module path "gopkg\.in/src-d/go-git\.v4" at revision 0d1a009cbb60$' +stderr '^go: gopkg\.in/src-d/go-git\.v3@v3\.2\.0 \(replaced by gopkg\.in/src-d/go-git\.v3@v3\.0\.0-20190801152248-0d1a009cbb60\): version "v3\.0\.0-20190801152248-0d1a009cbb60" invalid: go\.mod has non-\.\.\.\.v3 module path "gopkg\.in/src-d/go-git\.v4" at revision 0d1a009cbb60$' -- 4-to-4/go.mod -- module golang.org/issue/34254 -- cgit v1.2.3-54-g00ecf From 2a9636dc2bdbb2865dde686352de528c6953c7bf Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Tue, 21 Jul 2020 16:50:59 -0400 Subject: cmd/go/internal/modload: cache the Go language version for each module globally Previously, this cache was a member of the (ephemeral) modload.loader struct. However, the Go language version for a given module version does not vary based on the build list, the set of loaded packages, the build tags in use, the meaning of the "all" pattern, or anything else that can be configured for an instance of the package loader. The map containing that information is therefore not appropriate as a field of the (configurable, package-list-dependent) loader struct. The Go language version mapping could, in theory, be read from the go.mod file in the module cache (or replacement directory) every time it is needed: this map is just a cache, and as such it belongs alongside the other caches and indexes in the modload package, which are currently found in modfile.go. We may want to do the same sort of global caching for the mapping from each module.Version to its list of direct requirements (which are similarly idempotent), but for now that is left for a future change. 
For #36460 For #36876 Change-Id: I90ac176ffea97f30c47d6540c3dfb874dc9cfa4f Reviewed-on: https://go-review.googlesource.com/c/go/+/244078 Reviewed-by: Jay Conrod Reviewed-by: Michael Matloob --- src/cmd/go/internal/modload/build.go | 15 ++++++++++----- src/cmd/go/internal/modload/load.go | 10 +--------- src/cmd/go/internal/modload/modfile.go | 7 +++++++ src/cmd/go/internal/modload/mvs.go | 8 +++----- 4 files changed, 21 insertions(+), 19 deletions(-) diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index a101681a1f..b6f955d591 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -126,8 +126,8 @@ func moduleInfo(ctx context.Context, m module.Version, fromBuildList bool) *modi Version: m.Version, Indirect: fromBuildList && loaded != nil && !loaded.direct[m.Path], } - if loaded != nil { - info.GoVersion = loaded.goVersion[m.Path] + if v, ok := rawGoVersion.Load(m); ok { + info.GoVersion = v.(string) } // completeFromModCache fills in the extra fields in m using the module cache. @@ -155,6 +155,8 @@ func moduleInfo(ctx context.Context, m module.Version, fromBuildList bool) *modi } if !fromBuildList { + // If this was an explicitly-versioned argument to 'go mod download' or + // 'go list -m', report the actual requested version, not its replacement. completeFromModCache(info) // Will set m.Error in vendor mode. return info } @@ -178,9 +180,12 @@ func moduleInfo(ctx context.Context, m module.Version, fromBuildList bool) *modi // worth the cost, and we're going to overwrite the GoMod and Dir from the // replacement anyway. See https://golang.org/issue/27859. 
info.Replace = &modinfo.ModulePublic{ - Path: r.Path, - Version: r.Version, - GoVersion: info.GoVersion, + Path: r.Path, + Version: r.Version, + } + if goV, ok := rawGoVersion.Load(r); ok { + info.Replace.GoVersion = goV.(string) + info.GoVersion = info.Replace.GoVersion } if r.Version == "" { if filepath.IsAbs(r.Path) { diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 686d491219..2a37f1d874 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -627,8 +627,7 @@ type loader struct { pkgCache *par.Cache // map from string to *loadPkg // computed at end of iterations - direct map[string]bool // imported directly by main module - goVersion map[string]string // go version recorded in each module + direct map[string]bool // imported directly by main module } // LoadTests controls whether the loaders load tests of the root packages. @@ -754,13 +753,6 @@ func (ld *loader) load(roots func() []string) { } } - // Add Go versions, computed during walk. - ld.goVersion = make(map[string]string) - for _, m := range buildList { - v, _ := reqs.(*mvsReqs).versions.Load(m) - ld.goVersion[m.Path], _ = v.(string) - } - // Mix in direct markings (really, lack of indirect markings) // from go.mod, unless we scanned the whole module // and can therefore be sure we know better than go.mod. diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go index 9a166cae54..9ff00e9b5c 100644 --- a/src/cmd/go/internal/modload/modfile.go +++ b/src/cmd/go/internal/modload/modfile.go @@ -7,6 +7,7 @@ package modload import ( "cmd/go/internal/base" "cmd/go/internal/cfg" + "sync" "golang.org/x/mod/modfile" "golang.org/x/mod/module" @@ -164,3 +165,9 @@ func (i *modFileIndex) modFileIsDirty(modFile *modfile.File) bool { return false } + +// rawGoVersion records the Go version parsed from each module's go.mod file. 
+// +// If a module is replaced, the version of the replacement is keyed by the +// replacement module.Version, not the version being replaced. +var rawGoVersion sync.Map // map[module.Version]string diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go index 39d0d69524..873f5891c9 100644 --- a/src/cmd/go/internal/modload/mvs.go +++ b/src/cmd/go/internal/modload/mvs.go @@ -11,7 +11,6 @@ import ( "os" "path/filepath" "sort" - "sync" "cmd/go/internal/base" "cmd/go/internal/cfg" @@ -30,7 +29,6 @@ import ( type mvsReqs struct { buildList []module.Version cache par.Cache - versions sync.Map } // Reqs returns the current module requirement graph. @@ -83,7 +81,7 @@ func (r *mvsReqs) modFileToList(f *modfile.File) []module.Version { func (r *mvsReqs) required(mod module.Version) ([]module.Version, error) { if mod == Target { if modFile != nil && modFile.Go != nil { - r.versions.LoadOrStore(mod, modFile.Go.Version) + rawGoVersion.LoadOrStore(mod, modFile.Go.Version) } return append([]module.Version(nil), r.buildList[1:]...), nil } @@ -113,7 +111,7 @@ func (r *mvsReqs) required(mod module.Version) ([]module.Version, error) { return nil, fmt.Errorf("parsing %s: %v", base.ShortPath(gomod), err) } if f.Go != nil { - r.versions.LoadOrStore(mod, f.Go.Version) + rawGoVersion.LoadOrStore(repl, f.Go.Version) } return r.modFileToList(f), nil } @@ -147,7 +145,7 @@ func (r *mvsReqs) required(mod module.Version) ([]module.Version, error) { but was required as: %s`, mpath, origPath)) } if f.Go != nil { - r.versions.LoadOrStore(mod, f.Go.Version) + rawGoVersion.LoadOrStore(mod, f.Go.Version) } return r.modFileToList(f), nil -- cgit v1.2.3-54-g00ecf From a9146a49d0db666a7efd5f5d4555cf6117405cf5 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Thu, 23 Jul 2020 00:45:27 -0400 Subject: cmd/go/internal/modload: cache parsed go.mod files globally Previously they were cached per mvsReqs instance. 
However, the contents of the go.mod file of a given dependency version can only vary if the 'replace' directives that apply to that version have changed, and the only time we change 'replace' directives is in 'go mod edit' (which does not care about the build list or MVS). This not only simplifies the mvsReqs implementation, but also makes more of the underlying logic independent of mvsReqs. For #36460 Change-Id: Ieac20c2fcd56f64d847ac8a1b40f9361ece78663 Reviewed-on: https://go-review.googlesource.com/c/go/+/244774 Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod --- src/cmd/go/internal/modload/build.go | 15 +- src/cmd/go/internal/modload/modfile.go | 171 +++++++++++++++++++++ src/cmd/go/internal/modload/mvs.go | 109 +------------ src/cmd/go/testdata/script/mod_invalid_version.txt | 4 +- 4 files changed, 190 insertions(+), 109 deletions(-) diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index b6f955d591..7e182b4a4d 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -132,6 +132,8 @@ func moduleInfo(ctx context.Context, m module.Version, fromBuildList bool) *modi // completeFromModCache fills in the extra fields in m using the module cache. 
completeFromModCache := func(m *modinfo.ModulePublic) { + mod := module.Version{Path: m.Path, Version: m.Version} + if m.Version != "" { if q, err := Query(ctx, m.Path, m.Version, "", nil); err != nil { m.Error = &modinfo.ModuleError{Err: err.Error()} @@ -140,7 +142,6 @@ func moduleInfo(ctx context.Context, m module.Version, fromBuildList bool) *modi m.Time = &q.Time } - mod := module.Version{Path: m.Path, Version: m.Version} gomod, err := modfetch.CachePath(mod, "mod") if err == nil { if info, err := os.Stat(gomod); err == nil && info.Mode().IsRegular() { @@ -152,6 +153,12 @@ func moduleInfo(ctx context.Context, m module.Version, fromBuildList bool) *modi m.Dir = dir } } + + if m.GoVersion == "" { + if summary, err := rawGoModSummary(mod); err == nil && summary.goVersionV != "" { + m.GoVersion = summary.goVersionV[1:] + } + } } if !fromBuildList { @@ -183,9 +190,8 @@ func moduleInfo(ctx context.Context, m module.Version, fromBuildList bool) *modi Path: r.Path, Version: r.Version, } - if goV, ok := rawGoVersion.Load(r); ok { - info.Replace.GoVersion = goV.(string) - info.GoVersion = info.Replace.GoVersion + if v, ok := rawGoVersion.Load(m); ok { + info.Replace.GoVersion = v.(string) } if r.Version == "" { if filepath.IsAbs(r.Path) { @@ -200,6 +206,7 @@ func moduleInfo(ctx context.Context, m module.Version, fromBuildList bool) *modi info.Dir = info.Replace.Dir info.GoMod = info.Replace.GoMod } + info.GoVersion = info.Replace.GoVersion return info } diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go index 9ff00e9b5c..c04e2add13 100644 --- a/src/cmd/go/internal/modload/modfile.go +++ b/src/cmd/go/internal/modload/modfile.go @@ -7,10 +7,17 @@ package modload import ( "cmd/go/internal/base" "cmd/go/internal/cfg" + "cmd/go/internal/lockedfile" + "cmd/go/internal/modfetch" + "cmd/go/internal/par" + "errors" + "fmt" + "path/filepath" "sync" "golang.org/x/mod/modfile" "golang.org/x/mod/module" + "golang.org/x/mod/semver" ) var 
modFile *modfile.File @@ -171,3 +178,167 @@ func (i *modFileIndex) modFileIsDirty(modFile *modfile.File) bool { // If a module is replaced, the version of the replacement is keyed by the // replacement module.Version, not the version being replaced. var rawGoVersion sync.Map // map[module.Version]string + +// A modFileSummary is a summary of a go.mod file for which we do not need to +// retain complete information — for example, the go.mod file of a dependency +// module. +type modFileSummary struct { + module module.Version + goVersionV string // GoVersion with "v" prefix + require []module.Version +} + +// goModSummary returns a summary of the go.mod file for module m, +// taking into account any replacements for m, exclusions of its dependencies, +// and or vendoring. +// +// goModSummary cannot be used on the Target module, as its requirements +// may change. +// +// The caller must not modify the returned summary. +func goModSummary(m module.Version) (*modFileSummary, error) { + if m == Target { + panic("internal error: goModSummary called on the Target module") + } + + type cached struct { + summary *modFileSummary + err error + } + c := goModSummaryCache.Do(m, func() interface{} { + if cfg.BuildMod == "vendor" { + summary := &modFileSummary{ + module: module.Version{Path: m.Path}, + } + if vendorVersion[m.Path] != m.Version { + // This module is not vendored, so packages cannot be loaded from it and + // it cannot be relevant to the build. + return cached{summary, nil} + } + + // For every module other than the target, + // return the full list of modules from modules.txt. + readVendorList() + + // TODO(#36876): Load the "go" version from vendor/modules.txt and store it + // in rawGoVersion with the appropriate key. + + // We don't know what versions the vendored module actually relies on, + // so assume that it requires everything. 
+ summary.require = vendorList + return cached{summary, nil} + } + + actual := Replacement(m) + if actual.Path == "" { + actual = m + } + summary, err := rawGoModSummary(actual) + if err != nil { + return cached{nil, err} + } + + if actual.Version == "" { + // The actual module is a filesystem-local replacement, for which we have + // unfortunately not enforced any sort of invariants about module lines or + // matching module paths. Anything goes. + // + // TODO(bcmills): Remove this special-case, update tests, and add a + // release note. + } else { + if summary.module.Path == "" { + return cached{nil, module.VersionError(actual, errors.New("parsing go.mod: missing module line"))} + } + + // In theory we should only allow mpath to be unequal to mod.Path here if the + // version that we fetched lacks an explicit go.mod file: if the go.mod file + // is explicit, then it should match exactly (to ensure that imports of other + // packages within the module are interpreted correctly). Unfortunately, we + // can't determine that information from the module proxy protocol: we'll have + // to leave that validation for when we load actual packages from within the + // module. + if mpath := summary.module.Path; mpath != m.Path && mpath != actual.Path { + return cached{nil, module.VersionError(actual, fmt.Errorf(`parsing go.mod: + module declares its path as: %s + but was required as: %s`, mpath, m.Path))} + } + } + + if index != nil && len(index.exclude) > 0 { + // Drop any requirements on excluded versions. 
+ nonExcluded := summary.require[:0] + for _, r := range summary.require { + if !index.exclude[r] { + nonExcluded = append(nonExcluded, r) + } + } + summary.require = nonExcluded + } + return cached{summary, nil} + }).(cached) + + return c.summary, c.err +} + +var goModSummaryCache par.Cache // module.Version → goModSummary result + +// rawGoModSummary returns a new summary of the go.mod file for module m, +// ignoring all replacements that may apply to m and excludes that may apply to +// its dependencies. +// +// rawGoModSummary cannot be used on the Target module. +func rawGoModSummary(m module.Version) (*modFileSummary, error) { + if m == Target { + panic("internal error: rawGoModSummary called on the Target module") + } + + summary := new(modFileSummary) + var f *modfile.File + if m.Version == "" { + // m is a replacement module with only a file path. + dir := m.Path + if !filepath.IsAbs(dir) { + dir = filepath.Join(ModRoot(), dir) + } + gomod := filepath.Join(dir, "go.mod") + + data, err := lockedfile.Read(gomod) + if err != nil { + return nil, module.VersionError(m, fmt.Errorf("reading %s: %v", base.ShortPath(gomod), err)) + } + f, err = modfile.ParseLax(gomod, data, nil) + if err != nil { + return nil, module.VersionError(m, fmt.Errorf("parsing %s: %v", base.ShortPath(gomod), err)) + } + } else { + if !semver.IsValid(m.Version) { + // Disallow the broader queries supported by fetch.Lookup. 
+ base.Fatalf("go: internal error: %s@%s: unexpected invalid semantic version", m.Path, m.Version) + } + + data, err := modfetch.GoMod(m.Path, m.Version) + if err != nil { + return nil, err + } + f, err = modfile.ParseLax("go.mod", data, nil) + if err != nil { + return nil, module.VersionError(m, fmt.Errorf("parsing go.mod: %v", err)) + } + } + + if f.Module != nil { + summary.module = f.Module.Mod + } + if f.Go != nil && f.Go.Version != "" { + rawGoVersion.LoadOrStore(m, f.Go.Version) + summary.goVersionV = "v" + f.Go.Version + } + if len(f.Require) > 0 { + summary.require = make([]module.Version, 0, len(f.Require)) + for _, req := range f.Require { + summary.require = append(summary.require, req.Mod) + } + } + + return summary, nil +} diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go index 873f5891c9..6b6ad945e4 100644 --- a/src/cmd/go/internal/modload/mvs.go +++ b/src/cmd/go/internal/modload/mvs.go @@ -6,20 +6,15 @@ package modload import ( "context" - "errors" "fmt" "os" "path/filepath" "sort" - "cmd/go/internal/base" - "cmd/go/internal/cfg" - "cmd/go/internal/lockedfile" "cmd/go/internal/modfetch" "cmd/go/internal/mvs" "cmd/go/internal/par" - "golang.org/x/mod/modfile" "golang.org/x/mod/module" "golang.org/x/mod/semver" ) @@ -28,7 +23,7 @@ import ( // with any exclusions or replacements applied internally. type mvsReqs struct { buildList []module.Version - cache par.Cache + cache par.Cache // module.Version → Required method results } // Reqs returns the current module requirement graph. @@ -42,113 +37,21 @@ func Reqs() mvs.Reqs { } func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) { - type cached struct { - list []module.Version - err error - } - - c := r.cache.Do(mod, func() interface{} { - list, err := r.required(mod) - if err != nil { - return cached{nil, err} - } - if index != nil && len(index.exclude) > 0 { - // Drop requirements on excluded versions. 
- nonExcluded := list[:0] - for _, r := range list { - if !index.exclude[r] { - nonExcluded = append(nonExcluded, r) - } - } - list = nonExcluded - } - - return cached{list, nil} - }).(cached) - - return c.list, c.err -} - -func (r *mvsReqs) modFileToList(f *modfile.File) []module.Version { - list := make([]module.Version, 0, len(f.Require)) - for _, r := range f.Require { - list = append(list, r.Mod) - } - return list -} - -// required returns a unique copy of the requirements of mod. -func (r *mvsReqs) required(mod module.Version) ([]module.Version, error) { if mod == Target { - if modFile != nil && modFile.Go != nil { - rawGoVersion.LoadOrStore(mod, modFile.Go.Version) - } - return append([]module.Version(nil), r.buildList[1:]...), nil - } - - if cfg.BuildMod == "vendor" { - // For every module other than the target, - // return the full list of modules from modules.txt. - readVendorList() - return append([]module.Version(nil), vendorList...), nil - } - - origPath := mod.Path - if repl := Replacement(mod); repl.Path != "" { - if repl.Version == "" { - // TODO: need to slip the new version into the tags list etc. - dir := repl.Path - if !filepath.IsAbs(dir) { - dir = filepath.Join(ModRoot(), dir) - } - gomod := filepath.Join(dir, "go.mod") - data, err := lockedfile.Read(gomod) - if err != nil { - return nil, fmt.Errorf("parsing %s: %v", base.ShortPath(gomod), err) - } - f, err := modfile.ParseLax(gomod, data, nil) - if err != nil { - return nil, fmt.Errorf("parsing %s: %v", base.ShortPath(gomod), err) - } - if f.Go != nil { - rawGoVersion.LoadOrStore(repl, f.Go.Version) - } - return r.modFileToList(f), nil - } - mod = repl + // Use the build list as it existed when r was constructed, not the current + // global build list. + return r.buildList[1:], nil } if mod.Version == "none" { return nil, nil } - if !semver.IsValid(mod.Version) { - // Disallow the broader queries supported by fetch.Lookup. 
- base.Fatalf("go: internal error: %s@%s: unexpected invalid semantic version", mod.Path, mod.Version) - } - - data, err := modfetch.GoMod(mod.Path, mod.Version) + summary, err := goModSummary(mod) if err != nil { return nil, err } - f, err := modfile.ParseLax("go.mod", data, nil) - if err != nil { - return nil, module.VersionError(mod, fmt.Errorf("parsing go.mod: %v", err)) - } - - if f.Module == nil { - return nil, module.VersionError(mod, errors.New("parsing go.mod: missing module line")) - } - if mpath := f.Module.Mod.Path; mpath != origPath && mpath != mod.Path { - return nil, module.VersionError(mod, fmt.Errorf(`parsing go.mod: - module declares its path as: %s - but was required as: %s`, mpath, origPath)) - } - if f.Go != nil { - rawGoVersion.LoadOrStore(mod, f.Go.Version) - } - - return r.modFileToList(f), nil + return summary.require, nil } // Max returns the maximum of v1 and v2 according to semver.Compare. diff --git a/src/cmd/go/testdata/script/mod_invalid_version.txt b/src/cmd/go/testdata/script/mod_invalid_version.txt index 7e1bc9ea4f..6dddd4b036 100644 --- a/src/cmd/go/testdata/script/mod_invalid_version.txt +++ b/src/cmd/go/testdata/script/mod_invalid_version.txt @@ -18,7 +18,7 @@ cp go.mod.orig go.mod go mod edit -require golang.org/x/text@14c0d48ead0c cd outside ! go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0: parsing ../go.mod: '$WORK'/gopath/src/go.mod:5: require golang.org/x/text: version "14c0d48ead0c" invalid: must be of the form v1.2.3' +stderr 'go: example.com@v0.0.0 \(replaced by \./\..\): parsing ../go.mod: '$WORK'/gopath/src/go.mod:5: require golang.org/x/text: version "14c0d48ead0c" invalid: must be of the form v1.2.3' cd .. go list -m golang.org/x/text stdout 'golang.org/x/text v0.1.1-0.20170915032832-14c0d48ead0c' @@ -46,7 +46,7 @@ cp go.mod.orig go.mod go mod edit -require golang.org/x/text@v2.1.1-0.20170915032832-14c0d48ead0c cd outside ! 
go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0: parsing ../go.mod: '$WORK'/gopath/src/go.mod:5: require golang.org/x/text: version "v2.1.1-0.20170915032832-14c0d48ead0c" invalid: should be v0 or v1, not v2' +stderr 'go: example.com@v0.0.0 \(replaced by \./\.\.\): parsing ../go.mod: '$WORK'/gopath/src/go.mod:5: require golang.org/x/text: version "v2.1.1-0.20170915032832-14c0d48ead0c" invalid: should be v0 or v1, not v2' cd .. ! go list -m golang.org/x/text stderr $WORK'/gopath/src/go.mod:5: require golang.org/x/text: version "v2.1.1-0.20170915032832-14c0d48ead0c" invalid: should be v0 or v1, not v2' -- cgit v1.2.3-54-g00ecf From 865d72f962fffbba326067e803ca30193b63bd3a Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Fri, 21 Feb 2020 12:23:47 -0500 Subject: cmd/go: add baseline test cases for non-lazy module loading For #36460 For #40799 Change-Id: Id55934cc4d66743a4087b4c2644b6c3b95e7d2ce Reviewed-on: https://go-review.googlesource.com/c/go/+/222341 Reviewed-by: Jay Conrod Reviewed-by: Michael Matloob --- src/cmd/go/testdata/script/mod_all.txt | 296 +++++++++++++++++++++ .../go/testdata/script/mod_lazy_import_allmod.txt | 155 +++++++++++ .../testdata/script/mod_lazy_import_test_dep.txt | 159 +++++++++++ src/cmd/go/testdata/script/mod_lazy_new_import.txt | 89 +++++++ src/cmd/go/testdata/script/mod_lazy_test_all.txt | 125 +++++++++ .../go/testdata/script/mod_lazy_test_horizon.txt | 115 ++++++++ .../testdata/script/mod_lazy_test_of_test_dep.txt | 118 ++++++++ src/cmd/go/testdata/script/mod_notall.txt | 98 +++++++ 8 files changed, 1155 insertions(+) create mode 100644 src/cmd/go/testdata/script/mod_all.txt create mode 100644 src/cmd/go/testdata/script/mod_lazy_import_allmod.txt create mode 100644 src/cmd/go/testdata/script/mod_lazy_import_test_dep.txt create mode 100644 src/cmd/go/testdata/script/mod_lazy_new_import.txt create mode 100644 src/cmd/go/testdata/script/mod_lazy_test_all.txt create mode 100644 
src/cmd/go/testdata/script/mod_lazy_test_horizon.txt create mode 100644 src/cmd/go/testdata/script/mod_lazy_test_of_test_dep.txt create mode 100644 src/cmd/go/testdata/script/mod_notall.txt diff --git a/src/cmd/go/testdata/script/mod_all.txt b/src/cmd/go/testdata/script/mod_all.txt new file mode 100644 index 0000000000..a219913094 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_all.txt @@ -0,0 +1,296 @@ +# This test illustrates the relationship between the 'all' pattern and +# the dependencies of the main module. + +env PKGFMT='{{if .Module}}{{.ImportPath}}{{end}}' +env MODFMT='{{.Path}}' + + +# 'go list -deps' lists packages and tests in the main module, +# along with their transitive dependencies. + +go list -f $PKGFMT -deps ./... +stdout -count=4 '^.' +stdout '^example.com/a$' +stdout '^example.com/b$' +stdout '^example.com/main$' +stdout '^example.com/main/testonly' + + +# 'go list -deps -test' lists transitive imports of tests and non-tests in the +# main module. + +go list -f $PKGFMT -deps -test ./... +stdout -count=13 '^.' +stdout '^example.com/a$' +stdout '^example.com/b$' +stdout '^example.com/main$' +stdout '^example.com/main.test$' +stdout '^example.com/main \[example.com/main.test\]$' +stdout '^example.com/main_test \[example.com/main.test\]$' +stdout '^example.com/main/testonly$' +stdout '^example.com/main/testonly.test$' +stdout '^example.com/main/testonly_test \[example.com/main/testonly.test\]$' +stdout '^example.com/q$' +stdout '^example.com/r$' +stdout '^example.com/t$' +stdout '^example.com/u$' + + +# 'go list all' lists the fixpoint of iterating 'go list -deps -test' starting +# with the packages in the main module, then reducing to only the non-test +# variants of those packages. + +go list -f $PKGFMT all +stdout -count=11 '^.' 
+stdout '^example.com/a$' +stdout '^example.com/b$' +stdout '^example.com/c$' +stdout '^example.com/main$' +stdout '^example.com/main/testonly$' +stdout '^example.com/q$' +stdout '^example.com/r$' +stdout '^example.com/s$' +stdout '^example.com/t$' +stdout '^example.com/u$' +stdout '^example.com/w$' + + +# 'go list -test all' is equivalent to 'go list -test $(go list all)' +# and both should include tests for every package in 'all'. + +go list -test -f $PKGFMT example.com/a example.com/b example.com/c example.com/main example.com/main/testonly example.com/q example.com/r example.com/s example.com/t example.com/u example.com/w +cp stdout list-test-explicit.txt + +go list -test -f $PKGFMT all +cmp stdout list-test-explicit.txt +stdout -count=34 '^.' +stdout '^example.com/a$' +stdout '^example.com/b$' +stdout '^example.com/c$' +stdout '^example.com/main$' +stdout '^example.com/main/testonly$' +stdout '^example.com/q$' +stdout '^example.com/r$' +stdout '^example.com/s$' +stdout '^example.com/t$' +stdout '^example.com/u$' +stdout '^example.com/w$' +stdout '^example.com/a.test$' +stdout '^example.com/a_test \[example.com/a.test\]$' +stdout '^example.com/b.test$' +stdout '^example.com/b_test \[example.com/b.test\]$' +stdout '^example.com/c.test$' +stdout '^example.com/c_test \[example.com/c.test\]$' +stdout '^example.com/main.test$' +stdout '^example.com/main \[example.com/main.test\]$' +stdout '^example.com/main_test \[example.com/main.test\]$' +stdout '^example.com/main/testonly.test$' +stdout '^example.com/main/testonly_test \[example.com/main/testonly.test\]$' +stdout '^example.com/q.test$' +stdout '^example.com/q_test \[example.com/q.test\]$' +stdout '^example.com/r.test$' +stdout '^example.com/r_test \[example.com/r.test\]$' +stdout '^example.com/s.test$' +stdout '^example.com/s_test \[example.com/s.test\]$' +stdout '^example.com/t.test$' +stdout '^example.com/t_test \[example.com/t.test\]$' +stdout '^example.com/u.test$' +stdout '^example.com/u_test 
\[example.com/u.test\]$' +stdout '^example.com/w.test$' +stdout '^example.com/w_test \[example.com/w.test\]$' + + +# 'go list -m all' covers the packages in 'go list -test -deps all'. + +go list -m -f $MODFMT all +stdout -count=10 '^.' +stdout '^example.com/a$' +stdout '^example.com/b$' +stdout '^example.com/c$' +stdout '^example.com/main$' +stdout '^example.com/q$' +stdout '^example.com/r$' +stdout '^example.com/s$' +stdout '^example.com/t$' +stdout '^example.com/u$' +stdout '^example.com/w$' + + +# 'go mod vendor' copies in only the packages transitively imported by the main +# module, and omits their tests. As a result, the 'all' and '...' patterns +# report fewer packages when using '-mod=vendor'. + +go mod vendor + +go list -f $PKGFMT -mod=vendor all +stdout -count=8 '^.' +stdout '^example.com/a$' +stdout '^example.com/b$' +stdout '^example.com/main$' +stdout '^example.com/main/testonly$' +stdout '^example.com/q$' +stdout '^example.com/r$' +stdout '^example.com/t$' +stdout '^example.com/u$' + +go list -test -f $PKGFMT -mod=vendor all +stdout -count=13 '^.' +stdout '^example.com/a$' +stdout '^example.com/b$' +stdout '^example.com/main$' +stdout '^example.com/main/testonly$' +stdout '^example.com/q$' +stdout '^example.com/r$' +stdout '^example.com/t$' +stdout '^example.com/u$' +stdout '^example.com/main.test$' +stdout '^example.com/main \[example.com/main.test\]$' +stdout '^example.com/main_test \[example.com/main.test\]$' +stdout '^example.com/main/testonly.test$' +stdout '^example.com/main/testonly_test \[example.com/main/testonly.test\]$' + +# TODO(#36460): + +# With lazy loading, 'go list all' without -mod=vendor should match +# 'go mod vendor'. + +# 'go list -test all' should expand that to cover test dependencies +# of packages imported by the main module. + +# 'go list -m all' should cover the packages in 'go list -test all'. 
+ + +-- go.mod -- +module example.com/main + +go 1.15 + +require ( + example.com/a v0.1.0 + example.com/b v0.1.0 + example.com/q v0.1.0 + example.com/t v0.1.0 +) + +replace ( + example.com/a v0.1.0 => ./a + example.com/b v0.1.0 => ./b + example.com/c v0.1.0 => ./c + example.com/q v0.1.0 => ./q + example.com/r v0.1.0 => ./r + example.com/s v0.1.0 => ./s + example.com/t v0.1.0 => ./t + example.com/u v0.1.0 => ./u + example.com/w v0.1.0 => ./w +) +-- main.go -- +package main + +import _ "example.com/a" + +func main() {} +-- main_test.go -- +package main_test + +import _ "example.com/t" +-- testonly/testonly_test.go -- +package testonly_test + +import _ "example.com/q" +-- a/go.mod -- +module example.com/a + +go 1.15 + +require ( + example.com/b v0.1.0 + example.com/c v0.1.0 +) +-- a/a.go -- +package x + +import _ "example.com/b" +-- a/a_test.go -- +package x_test + +import _ "example.com/c" +-- b/go.mod -- +module example.com/b + +go 1.15 +-- b/b.go -- +package b +-- b/b_test.go -- +package b_test +-- c/go.mod -- +module example.com/c + +go 1.15 +-- c/c.go -- +package c +-- c/c_test.go -- +package c_test +-- q/go.mod -- +module example.com/q + +go 1.15 + +require ( + example.com/r v0.1.0 + example.com/s v0.1.0 +) +-- q/q.go -- +package q +import _ "example.com/r" +-- q/q_test.go -- +package q_test +import _ "example.com/s" +-- r/go.mod -- +module example.com/r + +go 1.15 +-- r/r.go -- +package r +-- r/r_test.go -- +package r_test +-- s/go.mod -- +module example.com/s + +go 1.15 +-- s/s.go -- +package s +-- s/s_test.go -- +package s_test +-- t/go.mod -- +module example.com/t + +go 1.15 + +require ( + example.com/u v0.1.0 + example.com/w v0.1.0 +) +-- t/t.go -- +package t + +import _ "example.com/u" +-- t/t_test.go -- +package t_test + +import _ "example.com/w" +-- u/go.mod -- +module example.com/u + +go 1.15 +-- u/u.go -- +package u +-- u/u_test.go -- +package u_test +-- w/go.mod -- +module example.com/w + +go 1.15 +-- w/w.go -- +package w +-- w/w_test.go -- +package 
w_test diff --git a/src/cmd/go/testdata/script/mod_lazy_import_allmod.txt b/src/cmd/go/testdata/script/mod_lazy_import_allmod.txt new file mode 100644 index 0000000000..aade00d602 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_lazy_import_allmod.txt @@ -0,0 +1,155 @@ +# This file demonstrates dependency resolution when the main module imports a +# new package from a previously-test-only dependency. +# +# When lazy loading is active, the loader will not load dependencies of any +# module whose packages are *only* imported by tests outside the main module. If +# the main module is changed to import a package from such a module, the +# dependencies of that module will need to be reloaded. + +# Control case: in Go 1.14, the original go.mod is tidy, +# and the dependency on c is eagerly loaded. + +cp go.mod go.mod.orig +go mod tidy +cmp go.mod.orig go.mod + +go list -m all +stdout '^a v0.1.0 ' +stdout '^b v0.1.0 ' +stdout '^c v0.1.0 ' + +# After adding a new import of b/y, +# the import of c from b/y should resolve to the version required by b. + +cp m.go m.go.orig +cp m.go.new m.go +go mod tidy +cmp go.mod.new go.mod + +go list -m all +stdout '^a v0.1.0 ' +stdout '^b v0.1.0 ' +stdout '^c v0.1.0 ' + +# With lazy loading, the go.mod requirements are the same, +# but the dependency on c is initially pruned out. + +cp m.go.orig m.go +cp go.mod.orig go.mod +go mod edit -go=1.16 +go mod edit -go=1.16 go.mod.new + +cp go.mod go.mod.orig +go mod tidy +cmp go.mod.orig go.mod + +go list -m all +stdout '^a v0.1.0 ' +stdout '^b v0.1.0 ' +stdout '^c v0.1.0 ' # TODO(#36460): This should be pruned out. + +# After adding a new import of b/y, +# the import of c from b/y should again resolve to the version required by b. + +cp m.go.new m.go +go mod tidy +cmp go.mod.new go.mod + +go list -m all +stdout '^a v0.1.0 ' +stdout '^b v0.1.0 ' +stdout '^c v0.1.0 ' + +-- m.go -- +package main + +import ( + "fmt" + + _ "a" // a_test imports b/x. 
+) + +func main() { +} +-- m.go.new -- +package main + +import ( + "fmt" + + _ "a" // a_test imports b/x. + "b/y" // This is a new import, not yet reflected in the go.mod file. +) + +func main() { + fmt.Println(b.CVersion()) +} +-- go.mod -- +module m + +go 1.14 + +require a v0.1.0 + +replace ( + a v0.1.0 => ./a1 + b v0.1.0 => ./b1 + c v0.1.0 => ./c1 + c v0.2.0 => ./c2 +) +-- go.mod.new -- +module m + +go 1.14 + +require ( + a v0.1.0 + b v0.1.0 +) + +replace ( + a v0.1.0 => ./a1 + b v0.1.0 => ./b1 + c v0.1.0 => ./c1 + c v0.2.0 => ./c2 +) +-- a1/go.mod -- +module a + +go 1.16 + +require b v0.1.0 +-- a1/a.go -- +package a +-- a1/a_test.go -- +package a_test + +import _ "b/x" +-- b1/go.mod -- +module b + +go 1.16 + +require c v0.1.0 +-- b1/x/x.go -- +package x +-- b1/y/y.go -- +package y + +import "c" + +func CVersion() string { + return c.Version +} +-- c1/go.mod -- +module c + +go 1.16 +-- c1/c.go -- +package c + +const Version = "v0.1.0" +-- c2/go.mod -- +This file should be unused. +-- c2/c.go -- +This file should be unused. diff --git a/src/cmd/go/testdata/script/mod_lazy_import_test_dep.txt b/src/cmd/go/testdata/script/mod_lazy_import_test_dep.txt new file mode 100644 index 0000000000..b7e3e6cb08 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_lazy_import_test_dep.txt @@ -0,0 +1,159 @@ +# This file demonstrates the go.mod changes needed to ensure reproducibility +# when running 'go test' on a sequence of packages for which each package in the +# sequence is a test-only dependency of the previous package, as a user might do +# if they encounter a test failure while fixing a bug found in one of their +# dependencies. + +cp go.mod go.mod.old +cp lazy.go lazy.go.old +go mod tidy +cmp go.mod go.mod.old + +# Before adding a new import, the go.mod file should +# enumerate modules for all packages already imported. 
+go list -m all +stdout '^example.com/d v0.1.0' # not v0.2.0 as would be resolved by 'latest' +cp stdout list.old +cmp go.mod go.mod.old + +# Following the chain of dependencies by listing test dependencies +# or running tests should not change the go.mod file. +go list -test -deps example.com/a +stdout '^example.com/a' +stdout '^example.com/b' +! stdout '^example.com/c' +[!short] go test -c example.com/a +cmp go.mod go.mod.old + +go list -test -deps example.com/b +stdout '^example.com/b' +stdout '^example.com/c' +! stdout '^example.com/d' +[!short] go test -c example.com/b +cmp go.mod go.mod.old + +go list -test -deps example.com/c +stdout '^example.com/c' +stdout '^example.com/d' +[!short] go test -c example.com/c +cmp go.mod go.mod.old + +# When we add a new import of a package already imported by a test of a test of +# a dependency, and that dependency is already tidy, its transitive dependencies +# should already be present. +cp lazy.go.new lazy.go +go list all +go list -m all +cmp stdout list.old +cmp go.mod go.mod.new # Indirect dependency promoted to direct. + +# TODO(#36460): + +cp lazy.go.old lazy.go +cp go.mod.old go.mod +go mod edit -go=1.16 + +# If we reach d by running successive tests, we should end up with exactly the +# version required by c, with an update to the go.mod file as soon as we load a +# dependency not found in the deepening scan. + +# However, if we skip directly to adding a new import of d, the dependency is +# too far away for a deepening scan to find, which is fine because the package +# whose test imported it wasn't even in "all". It should resolve from the latest +# version of its module. 
+ +-- go.mod -- +module example.com/lazy + +go 1.14 + +require example.com/a v0.1.0 + +replace ( + example.com/a v0.1.0 => ./a + example.com/b v0.1.0 => ./b + example.com/c v0.1.0 => ./c + example.com/d v0.1.0 => ./d1 + example.com/d v0.2.0 => ./d2 +) +-- go.mod.new -- +module example.com/lazy + +go 1.14 + +require ( + example.com/a v0.1.0 + example.com/d v0.1.0 +) + +replace ( + example.com/a v0.1.0 => ./a + example.com/b v0.1.0 => ./b + example.com/c v0.1.0 => ./c + example.com/d v0.1.0 => ./d1 + example.com/d v0.2.0 => ./d2 +) +-- lazy.go -- +package lazy + +import ( + _ "example.com/a" +) + +func main() {} +-- lazy.go.new -- +package lazy + +import ( + _ "example.com/a" + "example.com/d" +) + +func main() { + println(d.Version) +} +-- a/go.mod -- +module example.com/a + +go 1.14 + +require example.com/b v0.1.0 +-- a/a.go -- +package a +import _ "example.com/b" +-- b/go.mod -- +module example.com/b + +go 1.16 + +require example.com/c v0.1.0 +-- b/b.go -- +package b +-- b/b_test.go -- +package b +import _ "example.com/c" +-- c/go.mod -- +module example.com/c + +go 1.16 + +require example.com/d v0.1.0 +-- c/c.go -- +package c +-- c/c_test.go -- +package c +import _ "example.com/d" +-- d1/go.mod -- +module example.com/d + +go 1.16 +-- d1/d.go -- +package d +const Version = "v0.1.0" +-- d2/go.mod -- +module example.com/d + +go 1.16 +-- d2/d.go -- +package d +const Version = "v0.2.0" diff --git a/src/cmd/go/testdata/script/mod_lazy_new_import.txt b/src/cmd/go/testdata/script/mod_lazy_new_import.txt new file mode 100644 index 0000000000..76b915afaa --- /dev/null +++ b/src/cmd/go/testdata/script/mod_lazy_new_import.txt @@ -0,0 +1,89 @@ +cp go.mod go.mod.old +cp lazy.go lazy.go.old +go mod tidy +cmp go.mod go.mod.old + +# Before adding a new import, the go.mod file should +# enumerate modules for all packages already imported. 
+go list all +cmp go.mod go.mod.old + +# When we add a new import of a package in an existing dependency, +# and that dependency is already tidy, its transitive dependencies +# should already be present. +cp lazy.go.new lazy.go +go list all +go list -m all +stdout '^example.com/c v0.1.0' # not v0.2.0 as would be be resolved by 'latest' +cmp go.mod go.mod.old + +# TODO(#36460): +cp lazy.go.old lazy.go +cp go.mod.old go.mod +go mod edit -go=1.16 + +# When a new import is found, we should perform a deepening scan of the existing +# dependencies and add a requirement on the version required by those +# dependencies — not re-resolve 'latest'. + + +-- go.mod -- +module example.com/lazy + +go 1.14 + +require example.com/a v0.1.0 + +replace ( + example.com/a v0.1.0 => ./a + example.com/b v0.1.0 => ./b + example.com/c v0.1.0 => ./c1 + example.com/c v0.2.0 => ./c2 +) +-- lazy.go -- +package lazy + +import ( + _ "example.com/a/x" +) +-- lazy.go.new -- +package lazy + +import ( + _ "example.com/a/x" + _ "example.com/a/y" +) +-- a/go.mod -- +module example.com/a + +go 1.14 + +require ( + example.com/b v0.1.0 + example.com/c v0.1.0 +) +-- a/x/x.go -- +package x +import _ "example.com/b" +-- a/y/y.go -- +package y +import _ "example.com/c" +-- b/go.mod -- +module example.com/b + +go 1.14 +-- b/b.go -- +package b +-- c1/go.mod -- +module example.com/c + +go 1.14 +-- c1/c.go -- +package c +-- c2/go.mod -- +module example.com/c + +go 1.14 +-- c2/c.go -- +package c +This file should not be used, so this syntax error should be ignored. diff --git a/src/cmd/go/testdata/script/mod_lazy_test_all.txt b/src/cmd/go/testdata/script/mod_lazy_test_all.txt new file mode 100644 index 0000000000..4ce9fb167b --- /dev/null +++ b/src/cmd/go/testdata/script/mod_lazy_test_all.txt @@ -0,0 +1,125 @@ +cp go.mod go.mod.old +go mod tidy +cmp go.mod go.mod.old + +# 'go list -m all' includes modules that cover the test dependencies of +# the packages imported by the main module. 
+ +go list -m all +stdout 'example.com/b v0.1.0' +stdout 'example.com/c v0.1.0' +cmp go.mod go.mod.old + +# 'go test' (or equivalent) of any package in 'all' should use its existing +# dependencies without updating the go.mod file. + +go list all # Control case: example.com/b really is in 'all'. +stdout '^example.com/b$' +cmp go.mod go.mod.old # Already tidy, so dependencies shouldn't change. + +go list -test -deps example.com/b +stdout '^example.com/b$' +stdout '^example.com/c$' +! stdout '^example.com/d$' + +[!short] go test -c example.com/b + +cmp go.mod go.mod.old # Should have resolved the above without modifying go.mod. + + +# TODO(#36460): + +# 'go list -m all' should include modules that cover the test dependencies of +# the packages imported by the main module, found via a deepening scan. + +# 'go test' of any package in 'all' should use its existing dependencies without +# updating the go.mod file. This requires that we consider _dependencies of_ the +# explicit dependencies of the main module, and that we not record those +# dependencies explicitly after loading them. 
+ + +-- go.mod -- +module example.com/lazy + +go 1.14 + +require example.com/a v0.1.0 + +replace ( + example.com/a v0.1.0 => ./a + example.com/b v0.1.0 => ./b1 + example.com/b v0.2.0 => ./b2 + example.com/c v0.1.0 => ./c + example.com/d v0.1.0 => ./d +) +-- lazy.go -- +package lazy + +import ( + _ "example.com/a/x" +) +-- a/go.mod -- +module example.com/a + +go 1.14 + +require example.com/b v0.1.0 +-- a/x/x.go -- +package x +-- a/x/x_test.go -- +package x + +import ( + "testing" + + _ "example.com/b" +) + +func TestUsingB(t *testing.T) { + // … +} +-- b1/go.mod -- +module example.com/b + +go 1.14 + +require example.com/c v0.1.0 +-- b1/b.go -- +package b +-- b1/b_test.go -- +package b + +import _ "example.com/c" +-- b2/go.mod -- +module example.com/b + +go 1.14 + +require example.com/c v0.1.0 +-- b2/b.go -- +package b +-- b2/b_test.go -- +package b + +import _ "example.com/c" + +This file should not be used, so this syntax error should be ignored. +-- c/go.mod -- +module example.com/c + +go 1.14 + +require example.com/d v0.1.0 +-- c/c.go -- +package c +-- c/c_test.go -- +package c +import _ "example.com/d" +This file should not be used, so this syntax error should be ignored. +-- d/go.mod -- +module example.com/d + +go 1.14 +-- d/d.go -- +package d +This file should not be used, so this syntax error should be ignored. diff --git a/src/cmd/go/testdata/script/mod_lazy_test_horizon.txt b/src/cmd/go/testdata/script/mod_lazy_test_horizon.txt new file mode 100644 index 0000000000..29fc0aaa74 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_lazy_test_horizon.txt @@ -0,0 +1,115 @@ +# This file demonstrates the effect of lazy loading on the selected +# versions of test dependencies. + +# Control case: in Go 1.14, the version of c imported by 'go test x' is the +# version required by module b, even though b_test is not relevant to the main +# module. 
(The main module imports a, and a_test imports b, but all of the +# packages and tests in the main module can be built without b.) + +go list -m c +stdout '^c v0.2.0 ' + +[!short] go test -v x +[!short] stdout ' c v0.2.0$' + +# With lazy loading, the go.mod requirements are the same, +# but the irrelevant dependency on c v0.2.0 should be pruned out, +# leaving only the relevant dependency on c v0.1.0. + +go mod edit -go=1.16 +go list -m c +stdout '^c v0.2.0' # TODO(#36460): v0.1.0 + +[!short] go test -v x +[!short] stdout ' c v0.2.0$' # TODO(#36460): v0.1.0 + +-- m.go -- +package m + +import ( + _ "a" + _ "x" +) +-- go.mod -- +module m + +go 1.14 + +require ( + a v0.1.0 + x v0.1.0 +) + +replace ( + a v0.1.0 => ./a1 + b v0.1.0 => ./b1 + c v0.1.0 => ./c1 + c v0.2.0 => ./c2 + x v0.1.0 => ./x1 +) +-- a1/go.mod -- +module a + +go 1.16 + +require b v0.1.0 +-- a1/a.go -- +package a +-- a1/a_test.go -- +package a_test + +import _ "b" +-- b1/go.mod -- +module b + +go 1.16 + +require c v0.2.0 +-- b1/b.go -- +package b +-- b1/b_test.go -- +package b_test + +import ( + "c" + "testing" +) + +func TestCVersion(t *testing.T) { + t.Log(c.Version) +} +-- c1/go.mod -- +module c + +go 1.16 +-- c1/c.go -- +package c + +const Version = "v0.1.0" +-- c2/go.mod -- +module c + +go 1.16 +-- c2/c.go -- +package c + +const Version = "v0.2.0" +-- x1/go.mod -- +module x + +go 1.16 + +require c v0.1.0 +-- x1/x.go -- +package x +-- x1/x_test.go -- +package x_test + +import ( + "c" + "testing" +) + +func TestCVersion(t *testing.T) { + t.Log("c", c.Version) +} diff --git a/src/cmd/go/testdata/script/mod_lazy_test_of_test_dep.txt b/src/cmd/go/testdata/script/mod_lazy_test_of_test_dep.txt new file mode 100644 index 0000000000..bbb0772303 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_lazy_test_of_test_dep.txt @@ -0,0 +1,118 @@ +cp go.mod go.mod.old +go mod tidy +cmp go.mod go.mod.old + +# In Go 1.14 mode, 'go list -m all' includes modules needed by the +# transitive closure of tests of 
dependencies of tests of dependencies of …. + +go list -m all +stdout 'example.com/b v0.1.0' +stdout 'example.com/c v0.1.0' +cmp go.mod go.mod.old + +# 'go test' (or equivalent) of any such dependency, no matter how remote, does +# not update the go.mod file. + +go list all +stdout example.com/a/x +stdout example.com/b # Test dependency of example.com/a/x. +stdout example.com/c # Test dependency of example.com/b. + +go list -test -deps all +stdout example.com/b +stdout example.com/c +cmp go.mod go.mod.old + +[!short] go test example.com/a/x +[!short] cmp go.mod go.mod.old + +[!short] go test example.com/b +[!short] cmp go.mod go.mod.old + +# TODO(#36460): + +# After changing to 'go 1.15` uniformly, 'go list -m all' should prune out +# example.com/c, because it is not imported by any package (or test of a package) +# transitively imported by the main module. +# example.com/a/x is transitively imported, +# and example.com/b is needed in order to run 'go test example.com/a/x', +# but example.com/c is not needed because we don't expect the user to need to run +# 'go test example.com/b'. + +-- go.mod -- +module example.com/lazy + +go 1.14 + +require example.com/a v0.1.0 + +replace ( + example.com/a v0.1.0 => ./a + example.com/b v0.1.0 => ./b1 + example.com/b v0.2.0 => ./b2 + example.com/c v0.1.0 => ./c1 + example.com/c v0.2.0 => ./c2 +) +-- lazy.go -- +package lazy + +import ( + _ "example.com/a/x" +) +-- a/go.mod -- +module example.com/a + +go 1.14 + +require example.com/b v0.1.0 +-- a/x/x.go -- +package x +-- a/x/x_test.go -- +package x + +import ( + "testing" + + _ "example.com/b" +) + +func TestUsingB(t *testing.T) { + // … +} +-- b1/go.mod -- +module example.com/b + +go 1.14 + +require example.com/c v0.1.0 +-- b1/b.go -- +package b +-- b1/b_test.go -- +package b + +import _ "example.com/c" +-- b2/go.mod -- +module example.com/b + +go 1.14 + +require example.com/c v0.1.0 +-- b2/b.go -- +package b +This file should not be used, so this syntax error should be ignored. 
+-- b2/b_test.go -- +package b +This file should not be used, so this syntax error should be ignored. +-- c1/go.mod -- +module example.com/c + +go 1.14 +-- c1/c.go -- +package c +-- c2/go.mod -- +module example.com/c + +go 1.14 +-- c2/c.go -- +package c +This file should not be used, so this syntax error should be ignored. diff --git a/src/cmd/go/testdata/script/mod_notall.txt b/src/cmd/go/testdata/script/mod_notall.txt new file mode 100644 index 0000000000..72a02485a4 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_notall.txt @@ -0,0 +1,98 @@ +# This test demonstrates go commands that combine the 'all' pattern +# with packages outside of 'all'. + +# With -deps, 'all' should include test dependencies of packages in the main +# module, but not should not include test dependencies of packages imported only +# by other root patterns. + +cp go.mod go.mod.orig + +go list -deps all x/otherroot + +stdout '^x/inall$' +stdout '^x/inall/fromtest$' +stdout '^x/inall/fromtestinall$' +stdout '^x/otherroot$' +stdout '^x/otherdep$' + +! stdout '^x/fromotherroottest$' +! stdout '^y/fromotherdeptest$' + +# TODO(#40799): cmp go.mod go.mod.orig + +# With -deps -test, test dependencies of other roots should be included, +# but test dependencies of non-roots should not. + +go list -deps -test all x/otherroot +stdout '^x/inall$' +stdout '^x/inall/fromtest$' +stdout '^x/inall/fromtestinall$' +stdout '^x/otherroot$' +stdout '^x/otherdep$' + +stdout '^x/fromotherroottest$' +! 
stdout '^y/fromotherdeptest$' + +# TODO(#40799): cmp go.mod go.mod.orig + +-- m.go -- +package m + +import _ "x/inall" +-- m_test.go -- +package m_test + +import _ "x/inall/fromtest" +-- go.mod -- +module m + +go 1.15 + +require x v0.1.0 + +replace ( + x v0.1.0 => ./x + y v0.1.0 => ./y +) +-- x/go.mod -- +module x + +go 1.15 +-- x/inall/inall.go -- +package inall +-- x/inall/inall_test.go -- +package inall_test + +import _ "x/inall/fromtestinall" +-- x/inall/fromtest/fromtest.go -- +package fromtest +-- x/inall/fromtestinall/fromtestinall.go -- +package fromtestinall +-- x/otherroot/otherroot.go -- +package otherroot + +import _ "x/otherdep" +-- x/otherroot/otherroot_test.go -- +package otherroot_test + +import _ "x/fromotherroottest" +-- x/fromotherroottest/fromotherroottest.go -- +package fromotherroottest +-- x/otherdep/otherdep.go -- +package otherdep +-- x/otherdep/otherdep_test.go -- +package otherdep_test + +import _ "y/fromotherdeptest" +-- x/otherroot/testonly/testonly.go -- +package testonly +-- y/go.mod -- +module y + +go 1.15 +-- y/fromotherdeptest/fromotherdeptest.go -- +// Package fromotherdeptest is a test dependency of x/otherdep that is +// not declared in x/go.mod. If the loader resolves this package, +// it will add this module to the main module's go.mod file, +// and we can detect the mistake. +package fromotherdeptest -- cgit v1.2.3-54-g00ecf From 95df156e6ac53f98efd6c57e4586c1dfb43066dd Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Mon, 10 Aug 2020 15:11:07 -0400 Subject: cmd/go/internal/par: add Queue as a simpler alternative to Work par.Work performs two different tasks: deduplicating work (a task which overlaps with par.Cache), and executing limited active work in parallel. It also requires the caller to re-invoke Do whenever the workqueue transititions from empty to non-empty. 
The new par.Queue only performs the second of those two tasks, and presents a simpler API: it starts and stops its own goroutines as needed (indicating its idle state via a channel), rather than expecting the caller to drive the transitions explicitly. For #36460 Change-Id: I5c38657dda63ab55718497467d05d41744ff59f2 Reviewed-on: https://go-review.googlesource.com/c/go/+/247766 Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod --- src/cmd/go/internal/par/queue.go | 88 +++++++++++++++++++++++++++++++++++ src/cmd/go/internal/par/queue_test.go | 79 +++++++++++++++++++++++++++++++ 2 files changed, 167 insertions(+) create mode 100644 src/cmd/go/internal/par/queue.go create mode 100644 src/cmd/go/internal/par/queue_test.go diff --git a/src/cmd/go/internal/par/queue.go b/src/cmd/go/internal/par/queue.go new file mode 100644 index 0000000000..180bc75e34 --- /dev/null +++ b/src/cmd/go/internal/par/queue.go @@ -0,0 +1,88 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package par + +import "fmt" + +// Queue manages a set of work items to be executed in parallel. The number of +// active work items is limited, and excess items are queued sequentially. +type Queue struct { + maxActive int + st chan queueState +} + +type queueState struct { + active int // number of goroutines processing work; always nonzero when len(backlog) > 0 + backlog []func() + idle chan struct{} // if non-nil, closed when active becomes 0 +} + +// NewQueue returns a Queue that executes up to maxActive items in parallel. +// +// maxActive must be positive. +func NewQueue(maxActive int) *Queue { + if maxActive < 1 { + panic(fmt.Sprintf("par.NewQueue called with nonpositive limit (%d)", maxActive)) + } + + q := &Queue{ + maxActive: maxActive, + st: make(chan queueState, 1), + } + q.st <- queueState{} + return q +} + +// Add adds f as a work item in the queue. 
+// +// Add returns immediately, but the queue will be marked as non-idle until after +// f (and any subsequently-added work) has completed. +func (q *Queue) Add(f func()) { + st := <-q.st + if st.active == q.maxActive { + st.backlog = append(st.backlog, f) + q.st <- st + return + } + if st.active == 0 { + // Mark q as non-idle. + st.idle = nil + } + st.active++ + q.st <- st + + go func() { + for { + f() + + st := <-q.st + if len(st.backlog) == 0 { + if st.active--; st.active == 0 && st.idle != nil { + close(st.idle) + } + q.st <- st + return + } + f, st.backlog = st.backlog[0], st.backlog[1:] + q.st <- st + } + }() +} + +// Idle returns a channel that will be closed when q has no (active or enqueued) +// work outstanding. +func (q *Queue) Idle() <-chan struct{} { + st := <-q.st + defer func() { q.st <- st }() + + if st.idle == nil { + st.idle = make(chan struct{}) + if st.active == 0 { + close(st.idle) + } + } + + return st.idle +} diff --git a/src/cmd/go/internal/par/queue_test.go b/src/cmd/go/internal/par/queue_test.go new file mode 100644 index 0000000000..1331e65f98 --- /dev/null +++ b/src/cmd/go/internal/par/queue_test.go @@ -0,0 +1,79 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package par + +import ( + "sync" + "testing" +) + +func TestQueueIdle(t *testing.T) { + q := NewQueue(1) + select { + case <-q.Idle(): + default: + t.Errorf("NewQueue(1) is not initially idle.") + } + + started := make(chan struct{}) + unblock := make(chan struct{}) + q.Add(func() { + close(started) + <-unblock + }) + + <-started + idle := q.Idle() + select { + case <-idle: + t.Errorf("NewQueue(1) is marked idle while processing work.") + default: + } + + close(unblock) + <-idle // Should be closed as soon as the Add callback returns. 
+} + +func TestQueueBacklog(t *testing.T) { + const ( + maxActive = 2 + totalWork = 3 * maxActive + ) + + q := NewQueue(maxActive) + t.Logf("q = NewQueue(%d)", maxActive) + + var wg sync.WaitGroup + wg.Add(totalWork) + started := make([]chan struct{}, totalWork) + unblock := make(chan struct{}) + for i := range started { + started[i] = make(chan struct{}) + i := i + q.Add(func() { + close(started[i]) + <-unblock + wg.Done() + }) + } + + for i, c := range started { + if i < maxActive { + <-c // Work item i should be started immediately. + } else { + select { + case <-c: + t.Errorf("Work item %d started before previous items finished.", i) + default: + } + } + } + + close(unblock) + for _, c := range started[maxActive:] { + <-c + } + wg.Wait() +} -- cgit v1.2.3-54-g00ecf From d9a6bdf7ef4d0dd15608427b0f7ba3c45c221a3c Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Fri, 21 Aug 2020 20:20:12 -0700 Subject: cmd/compile: don't allow go:notinheap on the heap or stack Right now we just prevent such types from being on the heap. This CL makes it so they cannot appear on the stack either. The distinction between heap and stack is pretty vague at the language level (e.g. it is affected by -N), and we don't need the flexibility anyway. Once go:notinheap types cannot be in either place, we don't need to consider pointers to such types to be pointers, at least according to the garbage collector and stack copying. (This is the big win of this CL, in my opinion.) The distinction between HasPointers and HasHeapPointer no longer exists. There is only HasPointers. This CL is cleanup before possible use of go:notinheap to fix #40954. 
Update #13386 Change-Id: Ibd895aadf001c0385078a6d4809c3f374991231a Reviewed-on: https://go-review.googlesource.com/c/go/+/249917 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Matthew Dempsky Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/gc/escape.go | 3 +++ src/cmd/compile/internal/gc/pgen_test.go | 10 +--------- src/cmd/compile/internal/gc/plive.go | 14 +++++++------- src/cmd/compile/internal/gc/range.go | 2 +- src/cmd/compile/internal/gc/walk.go | 23 +++++++++++------------ src/cmd/compile/internal/ssa/decompose.go | 2 +- src/cmd/compile/internal/ssa/gen/dec.rules | 4 ++-- src/cmd/compile/internal/ssa/nilcheck.go | 2 +- src/cmd/compile/internal/ssa/rewritedec.go | 7 ++++--- src/cmd/compile/internal/ssa/writebarrier.go | 2 +- src/cmd/compile/internal/types/type.go | 24 ++++++------------------ src/runtime/export_test.go | 7 +++---- src/runtime/mgcmark.go | 3 ++- src/runtime/mgcstack.go | 2 -- src/runtime/runtime2.go | 5 +---- test/notinheap2.go | 12 ++++++++++-- 16 files changed, 54 insertions(+), 68 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index ddf89f6159..d5cca4a38b 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -1029,6 +1029,9 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation { if e.curfn == nil { Fatalf("e.curfn isn't set") } + if n != nil && n.Type != nil && n.Type.NotInHeap() { + yyerrorl(n.Pos, "%v is go:notinheap; stack allocation disallowed", n.Type) + } n = canonicalNode(n) loc := &EscLocation{ diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index 41f0808a1c..b1db29825c 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -20,7 +20,7 @@ func typeWithoutPointers() *types.Type { func typeWithPointers() *types.Type { t := types.New(TSTRUCT) - f := &types.Field{Type: types.New(TPTR)} + f := 
&types.Field{Type: types.NewPtr(types.New(TINT))} t.SetFields([]*types.Field{f}) return t } @@ -181,14 +181,6 @@ func TestStackvarSort(t *testing.T) { nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO), nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO), } - // haspointers updates Type.Haspointers as a side effect, so - // exercise this function on all inputs so that reflect.DeepEqual - // doesn't produce false positives. - for i := range want { - want[i].Type.HasPointers() - inp[i].Type.HasPointers() - } - sort.Sort(byStackVar(inp)) if !reflect.DeepEqual(want, inp) { t.Error("sort failed") diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index 398bfe5baa..8976ed657a 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -436,7 +436,7 @@ func (lv *Liveness) regEffects(v *ssa.Value) (uevar, kill liveRegMask) { case ssa.LocalSlot: return mask case *ssa.Register: - if ptrOnly && !v.Type.HasHeapPointer() { + if ptrOnly && !v.Type.HasPointers() { return mask } regs[0] = loc @@ -451,7 +451,7 @@ func (lv *Liveness) regEffects(v *ssa.Value) (uevar, kill liveRegMask) { if loc1 == nil { continue } - if ptrOnly && !v.Type.FieldType(i).HasHeapPointer() { + if ptrOnly && !v.Type.FieldType(i).HasPointers() { continue } regs[nreg] = loc1.(*ssa.Register) @@ -568,13 +568,13 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { if t.Align > 0 && off&int64(t.Align-1) != 0 { Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off) } + if !t.HasPointers() { + // Note: this case ensures that pointers to go:notinheap types + // are not considered pointers by garbage collection and stack copying. 
+ return + } switch t.Etype { - case TINT8, TUINT8, TINT16, TUINT16, - TINT32, TUINT32, TINT64, TUINT64, - TINT, TUINT, TUINTPTR, TBOOL, - TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128: - case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP: if off&int64(Widthptr-1) != 0 { Fatalf("onebitwalktype1: invalid alignment, %v", t) diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index d78a5f0d8d..5434b0167a 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -586,7 +586,7 @@ func arrayClear(n, v1, v2, a *Node) bool { n.Nbody.Append(nod(OAS, hn, tmp)) var fn *Node - if a.Type.Elem().HasHeapPointer() { + if a.Type.Elem().HasPointers() { // memclrHasPointers(hp, hn) Curfn.Func.setWBPos(stmt.Pos) fn = mkcall("memclrHasPointers", nil, nil, hp, hn) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 77f88d8996..90ecb50d6a 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1156,6 +1156,9 @@ opswitch: } case ONEW: + if n.Type.Elem().NotInHeap() { + yyerror("%v is go:notinheap; heap allocation disallowed", n.Type.Elem()) + } if n.Esc == EscNone { if n.Type.Elem().Width >= maxImplicitStackVarSize { Fatalf("large ONEW with EscNone: %v", n) @@ -1324,6 +1327,9 @@ opswitch: l = r } t := n.Type + if t.Elem().NotInHeap() { + yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem()) + } if n.Esc == EscNone { if !isSmallMakeSlice(n) { Fatalf("non-small OMAKESLICE with EscNone: %v", n) @@ -1365,10 +1371,6 @@ opswitch: // When len and cap can fit into int, use makeslice instead of // makeslice64, which is faster and shorter on 32 bit platforms. 
- if t.Elem().NotInHeap() { - yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem()) - } - len, cap := l, r fnname := "makeslice64" @@ -1403,7 +1405,7 @@ opswitch: t := n.Type if t.Elem().NotInHeap() { - Fatalf("%v is go:notinheap; heap allocation disallowed", t.Elem()) + yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem()) } length := conv(n.Left, types.Types[TINT]) @@ -2012,9 +2014,6 @@ func walkprint(nn *Node, init *Nodes) *Node { } func callnew(t *types.Type) *Node { - if t.NotInHeap() { - yyerror("%v is go:notinheap; heap allocation disallowed", t) - } dowidth(t) n := nod(ONEWOBJ, typename(t), nil) n.Type = types.NewPtr(t) @@ -2589,7 +2588,7 @@ func mapfast(t *types.Type) int { } switch algtype(t.Key()) { case AMEM32: - if !t.Key().HasHeapPointer() { + if !t.Key().HasPointers() { return mapfast32 } if Widthptr == 4 { @@ -2597,7 +2596,7 @@ func mapfast(t *types.Type) int { } Fatalf("small pointer %v", t.Key()) case AMEM64: - if !t.Key().HasHeapPointer() { + if !t.Key().HasPointers() { return mapfast64 } if Widthptr == 8 { @@ -2744,7 +2743,7 @@ func appendslice(n *Node, init *Nodes) *Node { nodes.Append(nod(OAS, s, nt)) var ncopy *Node - if elemtype.HasHeapPointer() { + if elemtype.HasPointers() { // copy(s[len(l1):], l2) nptr1 := nod(OSLICE, s, nil) nptr1.Type = s.Type @@ -3082,7 +3081,7 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node { // Also works if b is a string. 
// func copyany(n *Node, init *Nodes, runtimecall bool) *Node { - if n.Left.Type.Elem().HasHeapPointer() { + if n.Left.Type.Elem().HasPointers() { Curfn.Func.setWBPos(n.Pos) fn := writebarrierfn("typedslicecopy", n.Left.Type.Elem(), n.Right.Type.Elem()) n.Left = cheapexpr(n.Left, init) diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go index c59ec4c77d..6e72e3825c 100644 --- a/src/cmd/compile/internal/ssa/decompose.go +++ b/src/cmd/compile/internal/ssa/decompose.go @@ -139,7 +139,7 @@ func decomposeStringPhi(v *Value) { func decomposeSlicePhi(v *Value) { types := &v.Block.Func.Config.Types - ptrType := types.BytePtr + ptrType := v.Type.Elem().PtrTo() lenType := types.Int ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType) diff --git a/src/cmd/compile/internal/ssa/gen/dec.rules b/src/cmd/compile/internal/ssa/gen/dec.rules index 3fd2be409f..4c677f8418 100644 --- a/src/cmd/compile/internal/ssa/gen/dec.rules +++ b/src/cmd/compile/internal/ssa/gen/dec.rules @@ -66,14 +66,14 @@ (Load (OffPtr [2*config.PtrSize] ptr) mem)) -(Store dst (SliceMake ptr len cap) mem) => +(Store {t} dst (SliceMake ptr len cap) mem) => (Store {typ.Int} (OffPtr [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr [config.PtrSize] dst) len - (Store {typ.BytePtr} dst ptr mem))) + (Store {t.Elem().PtrTo()} dst ptr mem))) // interface ops (ITab (IMake itab _)) => itab diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go index 6b24371ac7..d1bad529e7 100644 --- a/src/cmd/compile/internal/ssa/nilcheck.go +++ b/src/cmd/compile/internal/ssa/nilcheck.go @@ -235,7 +235,7 @@ func nilcheckelim2(f *Func) { continue } if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() { - if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(GCNode).Typ().HasHeapPointer()) { + if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(GCNode).Typ().HasPointers()) { // These ops don't 
really change memory. continue // Note: OpVarDef requires that the defined variable not have pointers. diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go index cef781ffaa..e0fa9768d9 100644 --- a/src/cmd/compile/internal/ssa/rewritedec.go +++ b/src/cmd/compile/internal/ssa/rewritedec.go @@ -328,9 +328,10 @@ func rewriteValuedec_OpStore(v *Value) bool { v.AddArg3(v0, len, v1) return true } - // match: (Store dst (SliceMake ptr len cap) mem) - // result: (Store {typ.Int} (OffPtr [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr [config.PtrSize] dst) len (Store {typ.BytePtr} dst ptr mem))) + // match: (Store {t} dst (SliceMake ptr len cap) mem) + // result: (Store {typ.Int} (OffPtr [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr [config.PtrSize] dst) len (Store {t.Elem().PtrTo()} dst ptr mem))) for { + t := auxToType(v.Aux) dst := v_0 if v_1.Op != OpSliceMake { break @@ -350,7 +351,7 @@ func rewriteValuedec_OpStore(v *Value) bool { v2.AuxInt = int64ToAuxInt(config.PtrSize) v2.AddArg(dst) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) - v3.Aux = typeToAux(typ.BytePtr) + v3.Aux = typeToAux(t.Elem().PtrTo()) v3.AddArg3(dst, ptr, mem) v1.AddArg3(v2, len, v3) v.AddArg3(v0, cap, v1) diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index c7fb059475..214798a1ab 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -31,7 +31,7 @@ func needwb(v *Value, zeroes map[ID]ZeroRegion) bool { if !ok { v.Fatalf("store aux is not a type: %s", v.LongString()) } - if !t.HasHeapPointer() { + if !t.HasPointers() { return false } if IsStackAddr(v.Args[0]) { diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 20ae856bba..e4b3d885d9 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -1398,14 +1398,9 @@ func (t *Type) IsUntyped() 
bool { return false } -// TODO(austin): We probably only need HasHeapPointer. See -// golang.org/cl/73412 for discussion. - +// HasPointers reports whether t contains a heap pointer. +// Note that this function ignores pointers to go:notinheap types. func (t *Type) HasPointers() bool { - return t.hasPointers1(false) -} - -func (t *Type) hasPointers1(ignoreNotInHeap bool) bool { switch t.Etype { case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL, TSSA: @@ -1415,34 +1410,27 @@ func (t *Type) hasPointers1(ignoreNotInHeap bool) bool { if t.NumElem() == 0 { // empty array has no pointers return false } - return t.Elem().hasPointers1(ignoreNotInHeap) + return t.Elem().HasPointers() case TSTRUCT: for _, t1 := range t.Fields().Slice() { - if t1.Type.hasPointers1(ignoreNotInHeap) { + if t1.Type.HasPointers() { return true } } return false case TPTR, TSLICE: - return !(ignoreNotInHeap && t.Elem().NotInHeap()) + return !t.Elem().NotInHeap() case TTUPLE: ttup := t.Extra.(*Tuple) - return ttup.first.hasPointers1(ignoreNotInHeap) || ttup.second.hasPointers1(ignoreNotInHeap) + return ttup.first.HasPointers() || ttup.second.HasPointers() } return true } -// HasHeapPointer reports whether t contains a heap pointer. -// This is used for write barrier insertion, so it ignores -// pointers to go:notinheap types. 
-func (t *Type) HasHeapPointer() bool { - return t.hasPointers1(true) -} - func (t *Type) Symbol() *obj.LSym { return TypeLinkSym(t) } diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index d591fdc4e9..3307000c51 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -981,9 +981,8 @@ func MapHashCheck(m interface{}, k interface{}) (uintptr, uintptr) { } func MSpanCountAlloc(bits []byte) int { - s := mspan{ - nelems: uintptr(len(bits) * 8), - gcmarkBits: (*gcBits)(unsafe.Pointer(&bits[0])), - } + s := (*mspan)(mheap_.spanalloc.alloc()) + s.nelems = uintptr(len(bits) * 8) + s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0])) return s.countAlloc() } diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index 2b84945471..79df59d6d6 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -837,7 +837,8 @@ func scanstack(gp *g, gcw *gcWork) { x := state.head state.head = x.next if stackTraceDebug { - for _, obj := range x.obj[:x.nobj] { + for i := 0; i < x.nobj; i++ { + obj := &x.obj[i] if obj.typ == nil { // reachable continue } diff --git a/src/runtime/mgcstack.go b/src/runtime/mgcstack.go index 211d882fa6..8eb941a328 100644 --- a/src/runtime/mgcstack.go +++ b/src/runtime/mgcstack.go @@ -167,8 +167,6 @@ func (obj *stackObject) setType(typ *_type) { // A stackScanState keeps track of the state used during the GC walk // of a goroutine. -// -//go:notinheap type stackScanState struct { cache pcvalueCache diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index b7d0739e54..64c6cc7198 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -909,15 +909,12 @@ type _defer struct { // A _panic holds information about an active panic. // -// This is marked go:notinheap because _panic values must only ever -// live on the stack. +// A _panic value must only ever live on the stack. 
// // The argp and link fields are stack pointers, but don't need special // handling during stack growth: because they are pointer-typed and // _panic values only live on the stack, regular stack pointer // adjustment takes care of them. -// -//go:notinheap type _panic struct { argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink arg interface{} // argument to panic diff --git a/test/notinheap2.go b/test/notinheap2.go index 944f2993ab..de1e6db1d3 100644 --- a/test/notinheap2.go +++ b/test/notinheap2.go @@ -13,12 +13,14 @@ type nih struct { next *nih } -// Globals and stack variables are okay. +// Global variables are okay. var x nih +// Stack variables are not okay. + func f() { - var y nih + var y nih // ERROR "nih is go:notinheap; stack allocation disallowed" x = y } @@ -26,11 +28,17 @@ func f() { var y *nih var z []nih +var w []nih +var n int func g() { y = new(nih) // ERROR "heap allocation disallowed" z = make([]nih, 1) // ERROR "heap allocation disallowed" z = append(z, x) // ERROR "heap allocation disallowed" + // Test for special case of OMAKESLICECOPY + x := make([]nih, n) // ERROR "heap allocation disallowed" + copy(x, z) + z = x } // Writes don't produce write barriers. -- cgit v1.2.3-54-g00ecf From c78d215ce38288afe382d38af11b6692ce44c368 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Fri, 26 Jun 2020 11:48:37 -0400 Subject: go/build: ignore symlinks to directories when matching source files Fixes #39841 Change-Id: Icbdc37d40e9c10179d6eb704d04482175b139f57 Reviewed-on: https://go-review.googlesource.com/c/go/+/240120 Run-TryBot: Bryan C. 
Mills TryBot-Result: Gobot Gobot Reviewed-by: Michael Matloob Reviewed-by: Jay Conrod --- src/cmd/go/testdata/script/mod_symlink_dotgo.txt | 17 +++++++++++++++++ src/go/build/build.go | 6 ++++++ 2 files changed, 23 insertions(+) create mode 100644 src/cmd/go/testdata/script/mod_symlink_dotgo.txt diff --git a/src/cmd/go/testdata/script/mod_symlink_dotgo.txt b/src/cmd/go/testdata/script/mod_symlink_dotgo.txt new file mode 100644 index 0000000000..d4cc143a36 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_symlink_dotgo.txt @@ -0,0 +1,17 @@ +env GO111MODULE=on +[!symlink] skip + +symlink dir.go -> dir + +# Issue #39841: symlinks to directories should be ignored, not treated as source files. +go list -f '{{range .GoFiles}}{{.}}{{"\n"}}{{end}}' . +stdout 'p\.go$' +! stdout 'dir\.go$' + +-- go.mod -- +module example.com +go 1.15 +-- p.go -- +package p +-- dir/README.txt -- +This file exists to ensure that dir is a directory. diff --git a/src/go/build/build.go b/src/go/build/build.go index 4a5da308a0..39bc3591a7 100644 --- a/src/go/build/build.go +++ b/src/go/build/build.go @@ -793,6 +793,12 @@ Found: if d.IsDir() { continue } + if (d.Mode() & os.ModeSymlink) != 0 { + if fi, err := os.Stat(filepath.Join(p.Dir, d.Name())); err == nil && fi.IsDir() { + // Symlinks to directories are not source files. + continue + } + } name := d.Name() ext := nameExt(name) -- cgit v1.2.3-54-g00ecf From b3d9cf7a07518020c6ec5032474aafef9345cdd5 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Wed, 13 May 2020 14:59:29 -0400 Subject: os: return a *PathError from Readdirnames and Readdir on POSIX platforms Previously, Readdirnames returned a *PathError on Windows and Plan 9, but a *SyscallError on POSIX systems. In contrast, similar methods (such as Stat) return a *PathError on all platforms. Fixes #38923 Change-Id: I26395905b1e723933f07b792c7aeee7c335949cd Reviewed-on: https://go-review.googlesource.com/c/go/+/233917 Run-TryBot: Bryan C. 
Mills TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/os/dir_darwin.go | 10 +++++----- src/os/dir_unix.go | 2 +- src/os/os_test.go | 4 ++++ 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/os/dir_darwin.go b/src/os/dir_darwin.go index 87797e2dda..476af6862e 100644 --- a/src/os/dir_darwin.go +++ b/src/os/dir_darwin.go @@ -28,7 +28,7 @@ func (f *File) readdirnames(n int) (names []string, err error) { if f.dirinfo == nil { dir, call, errno := f.pfd.OpenDir() if errno != nil { - return nil, wrapSyscallError(call, errno) + return nil, &PathError{call, f.name, errno} } f.dirinfo = &dirInfo{ dir: dir, @@ -46,11 +46,11 @@ func (f *File) readdirnames(n int) (names []string, err error) { var dirent syscall.Dirent var entptr *syscall.Dirent for len(names) < size || n == -1 { - if res := readdir_r(d.dir, &dirent, &entptr); res != 0 { - if syscall.Errno(res) == syscall.EINTR { + if errno := readdir_r(d.dir, &dirent, &entptr); errno != 0 { + if errno == syscall.EINTR { continue } - return names, wrapSyscallError("readdir", syscall.Errno(res)) + return names, &PathError{"readdir", f.name, errno} } if entptr == nil { // EOF break @@ -84,4 +84,4 @@ func (f *File) readdirnames(n int) (names []string, err error) { func closedir(dir uintptr) (err error) //go:linkname readdir_r syscall.readdir_r -func readdir_r(dir uintptr, entry *syscall.Dirent, result **syscall.Dirent) (res int) +func readdir_r(dir uintptr, entry *syscall.Dirent, result **syscall.Dirent) (res syscall.Errno) diff --git a/src/os/dir_unix.go b/src/os/dir_unix.go index e0c4989756..58ec406ab8 100644 --- a/src/os/dir_unix.go +++ b/src/os/dir_unix.go @@ -50,7 +50,7 @@ func (f *File) readdirnames(n int) (names []string, err error) { d.nbuf, errno = f.pfd.ReadDirent(d.buf) runtime.KeepAlive(f) if errno != nil { - return names, wrapSyscallError("readdirent", errno) + return names, &PathError{"readdirent", f.name, errno} } if d.nbuf <= 0 { break // EOF diff --git a/src/os/os_test.go 
b/src/os/os_test.go index e8c64510f5..520916d880 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -688,6 +688,10 @@ func TestReaddirOfFile(t *testing.T) { if err == nil { t.Error("Readdirnames succeeded; want non-nil error") } + var pe *PathError + if !errors.As(err, &pe) || pe.Path != f.Name() { + t.Errorf("Readdirnames returned %q; want a PathError with path %q", err, f.Name()) + } if len(names) > 0 { t.Errorf("unexpected dir names in regular file: %q", names) } -- cgit v1.2.3-54-g00ecf From bb54a855a9b5733569f40ac19a2c338b87c23d14 Mon Sep 17 00:00:00 2001 From: Andrew Gerrand Date: Wed, 13 May 2020 10:39:11 +1000 Subject: net/http: handle Request.URL.RawPath in StripPrefix The StripPrefix wrapper strips a prefix string from the request's URL.Path field, but doesn't touch the RawPath field. This leads to the confusing situation when StripPrefix handles a request with URL.RawPath populated (due to some escaped characters in the request path) and the wrapped request's RawPath contains the prefix but Path does not. This change modifies StripPrefix to strip the prefix from both Path and RawPath. If there are escaped characters in the prefix part of the request URL the stripped handler serves a 404 instead of invoking the underlying handler with a mismatched Path/RawPath pair. This is a backward incompatible change for a very small minority of requests; I would be surprised if anyone is depending on this behavior, but it is possible. If that's the case, we could make a more conservative change where the RawPath is trimmed if possible, but when the prefix contains escaped characters then we don't 404 but rather send through the invalid Path/RawPath pair as before. Fixes #24366 Change-Id: I7030b8c183a3dfce307bc0272bba9a18df4cfe08 Reviewed-on: https://go-review.googlesource.com/c/go/+/233637 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Emmanuel Odeke Reviewed-by: Bryan C. 
Mills --- doc/go1.16.html | 14 +++++++++++++ src/net/http/serve_test.go | 52 +++++++++++++++++++++++++++++++--------------- src/net/http/server.go | 16 ++++++++------ 3 files changed, 59 insertions(+), 23 deletions(-) diff --git a/doc/go1.16.html b/doc/go1.16.html index 4753cf914d..09e974d07c 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -112,3 +112,17 @@ Do not send CLs removing the interior tags from such phrases.

TODO

+ +

+ In the net/http package, the + behavior of StripPrefix + has been changed to strip the prefix from the request URL's + RawPath field in addition to its Path field. + In past releases, only the Path field was trimmed, and so if the + request URL contained any escaped characters the URL would be modified to + have mismatched Path and RawPath fields. + In Go 1.16, StripPrefix trims both fields. + If there are escaped characters in the prefix part of the request URL the + handler serves a 404 instead of its previous behavior of invoking the + underlying handler with a mismatched Path/RawPath pair. +

diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go index 5f56932778..635bf5dfc9 100644 --- a/src/net/http/serve_test.go +++ b/src/net/http/serve_test.go @@ -2849,29 +2849,47 @@ func TestStripPrefix(t *testing.T) { defer afterTest(t) h := HandlerFunc(func(w ResponseWriter, r *Request) { w.Header().Set("X-Path", r.URL.Path) + w.Header().Set("X-RawPath", r.URL.RawPath) }) - ts := httptest.NewServer(StripPrefix("/foo", h)) + ts := httptest.NewServer(StripPrefix("/foo/bar", h)) defer ts.Close() c := ts.Client() - res, err := c.Get(ts.URL + "/foo/bar") - if err != nil { - t.Fatal(err) - } - if g, e := res.Header.Get("X-Path"), "/bar"; g != e { - t.Errorf("test 1: got %s, want %s", g, e) - } - res.Body.Close() - - res, err = Get(ts.URL + "/bar") - if err != nil { - t.Fatal(err) - } - if g, e := res.StatusCode, 404; g != e { - t.Errorf("test 2: got status %v, want %v", g, e) + cases := []struct { + reqPath string + path string // If empty we want a 404. + rawPath string + }{ + {"/foo/bar/qux", "/qux", ""}, + {"/foo/bar%2Fqux", "/qux", "%2Fqux"}, + {"/foo%2Fbar/qux", "", ""}, // Escaped prefix does not match. + {"/bar", "", ""}, // No prefix match. + } + for _, tc := range cases { + t.Run(tc.reqPath, func(t *testing.T) { + res, err := c.Get(ts.URL + tc.reqPath) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + if tc.path == "" { + if res.StatusCode != StatusNotFound { + t.Errorf("got %q, want 404 Not Found", res.Status) + } + return + } + if res.StatusCode != StatusOK { + t.Fatalf("got %q, want 200 OK", res.Status) + } + if g, w := res.Header.Get("X-Path"), tc.path; g != w { + t.Errorf("got Path %q, want %q", g, w) + } + if g, w := res.Header.Get("X-RawPath"), tc.rawPath; g != w { + t.Errorf("got RawPath %q, want %q", g, w) + } + }) } - res.Body.Close() } // https://golang.org/issue/18952. 
diff --git a/src/net/http/server.go b/src/net/http/server.go index d41b5f6f48..ed5de350a9 100644 --- a/src/net/http/server.go +++ b/src/net/http/server.go @@ -2062,22 +2062,26 @@ func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", Sta // that replies to each request with a ``404 page not found'' reply. func NotFoundHandler() Handler { return HandlerFunc(NotFound) } -// StripPrefix returns a handler that serves HTTP requests -// by removing the given prefix from the request URL's Path -// and invoking the handler h. StripPrefix handles a -// request for a path that doesn't begin with prefix by -// replying with an HTTP 404 not found error. +// StripPrefix returns a handler that serves HTTP requests by removing the +// given prefix from the request URL's Path (and RawPath if set) and invoking +// the handler h. StripPrefix handles a request for a path that doesn't begin +// with prefix by replying with an HTTP 404 not found error. The prefix must +// match exactly: if the prefix in the request contains escaped characters +// the reply is also an HTTP 404 not found error. func StripPrefix(prefix string, h Handler) Handler { if prefix == "" { return h } return HandlerFunc(func(w ResponseWriter, r *Request) { - if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { + p := strings.TrimPrefix(r.URL.Path, prefix) + rp := strings.TrimPrefix(r.URL.RawPath, prefix) + if len(p) < len(r.URL.Path) && (r.URL.RawPath == "" || len(rp) < len(r.URL.RawPath)) { r2 := new(Request) *r2 = *r r2.URL = new(url.URL) *r2.URL = *r.URL r2.URL.Path = p + r2.URL.RawPath = rp h.ServeHTTP(w, r2) } else { NotFound(w, r) -- cgit v1.2.3-54-g00ecf From 02f445258c284f295d8648078d6cc15836670756 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 25 Aug 2020 15:19:50 +0200 Subject: cmd/link: remove superfluous check in TestIssue34788Android386TLSSequence err != nil is already checked in the if condition one line above. 
Change-Id: If36cdb41016f7be98a65be0a7211d85cd6017f87 Reviewed-on: https://go-review.googlesource.com/c/go/+/250477 Run-TryBot: Tobias Klauser TryBot-Result: Gobot Gobot Reviewed-by: Cherry Zhang --- src/cmd/link/link_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go index 3b5efdf7a3..72ff01c932 100644 --- a/src/cmd/link/link_test.go +++ b/src/cmd/link/link_test.go @@ -455,9 +455,7 @@ func TestIssue34788Android386TLSSequence(t *testing.T) { cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-o", obj, src) cmd.Env = append(os.Environ(), "GOARCH=386", "GOOS=android") if out, err := cmd.CombinedOutput(); err != nil { - if err != nil { - t.Fatalf("failed to compile blah.go: %v, output: %s\n", err, out) - } + t.Fatalf("failed to compile blah.go: %v, output: %s\n", err, out) } // Run objdump on the resulting object. -- cgit v1.2.3-54-g00ecf From bca0b44629f1317cc177072560d9a1486620e48f Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 24 Aug 2020 22:57:01 -0400 Subject: cmd/compile: also check package.function for GOSSAFUNC match Old behavior is still enabled because it doesn't hurt to leave it in and existing users of this feature (there are dozens of us!) will not be surprised. Adding this finer control allows users to avoid writing ssa.html where they can't, shouldn't, or just don't want to. Example, both ways: $ GOSSAFUNC="(*Reader).Reset" go test -c -o ./a compress/gzip dumped SSA to bytes/ssa.html dumped SSA to strings/ssa.html dumped SSA to bufio/ssa.html dumped SSA to compress/gzip/ssa.html $ GOSSAFUNC="compress/gzip.(*Reader).Reset" go test -c -o ./a compress/gzip dumped SSA to compress/gzip/ssa.html Updates #40919. 
Change-Id: I06b77c3c1d326372a32651570b5dd6e56dfb1d7f Reviewed-on: https://go-review.googlesource.com/c/go/+/250340 Run-TryBot: David Chase TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/ssa.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index c8fb013ad0..104dd403ea 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -295,7 +295,10 @@ func (s *state) emitOpenDeferInfo() { // worker indicates which of the backend workers is doing the processing. func buildssa(fn *Node, worker int) *ssa.Func { name := fn.funcname() - printssa := name == ssaDump + printssa := false + if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset" + printssa = name == ssaDump || myimportpath+"."+name == ssaDump + } var astBuf *bytes.Buffer if printssa { astBuf = &bytes.Buffer{} -- cgit v1.2.3-54-g00ecf From 27136419d4784d3f81af3ec1faf0962a60c69b7e Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Fri, 21 Aug 2020 10:02:44 -0700 Subject: encoding/binary: replace constant literals with named constant (cleanup) Follow-up on https://golang.org/cl/247120. Brought to my attention by Luke McCoy. 
Change-Id: I4530c96fb164d23b0ce5311f2cecb1964f2dea74 Reviewed-on: https://go-review.googlesource.com/c/go/+/249837 Reviewed-by: Katie Hockman --- src/encoding/binary/varint.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/encoding/binary/varint.go b/src/encoding/binary/varint.go index 38af61075c..1fa325dec7 100644 --- a/src/encoding/binary/varint.go +++ b/src/encoding/binary/varint.go @@ -62,7 +62,7 @@ func Uvarint(buf []byte) (uint64, int) { var s uint for i, b := range buf { if b < 0x80 { - if i > 9 || i == 9 && b > 1 { + if i >= MaxVarintLen64 || i == MaxVarintLen64-1 && b > 1 { return 0, -(i + 1) // overflow } return x | uint64(b)< 1 { + if i == MaxVarintLen64-1 && b > 1 { return x, overflow } return x | uint64(b)< Date: Mon, 24 Aug 2020 15:23:27 +0700 Subject: cmd/compile: report error for unexported name only once Fixes #22921 Change-Id: If29bd962335ac7676ea4f379727db3d55ae1bf8e Reviewed-on: https://go-review.googlesource.com/c/go/+/250177 Run-TryBot: Cuong Manh Le TryBot-Result: Gobot Gobot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/dcl.go | 10 ++++++++++ src/cmd/compile/internal/gc/noder.go | 8 ++++---- src/cmd/compile/internal/gc/subr.go | 7 ------- test/fixedbugs/bug229.go | 8 ++++---- test/fixedbugs/issue22921.go | 18 ++++++++++++++++++ test/runtime.go | 2 +- 6 files changed, 37 insertions(+), 16 deletions(-) create mode 100644 test/fixedbugs/issue22921.go diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index cd64d9a7bf..4f6fddd089 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -297,6 +297,16 @@ func oldname(s *types.Sym) *Node { return n } +// importName is like oldname, but it reports an error if sym is from another package and not exported. 
+func importName(sym *types.Sym) *Node { + n := oldname(sym) + if !types.IsExported(sym.Name) && sym.Pkg != localpkg { + n.SetDiag(true) + yyerror("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name) + } + return n +} + // := declarations func colasname(n *Node) bool { switch n.Op { diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 802aab2268..590c1a16de 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -653,7 +653,7 @@ func (p *noder) expr(expr syntax.Expr) *Node { obj := p.expr(expr.X) if obj.Op == OPACK { obj.Name.SetUsed(true) - return oldname(restrictlookup(expr.Sel.Value, obj.Name.Pkg)) + return importName(obj.Name.Pkg.Lookup(expr.Sel.Value)) } n := nodSym(OXDOT, obj, p.name(expr.Sel)) n.Pos = p.pos(expr) // lineno may have been changed by p.expr(expr.X) @@ -857,7 +857,7 @@ func (p *noder) interfaceType(expr *syntax.InterfaceType) *Node { p.setlineno(method) var n *Node if method.Name == nil { - n = p.nodSym(method, ODCLFIELD, oldname(p.packname(method.Type)), nil) + n = p.nodSym(method, ODCLFIELD, importName(p.packname(method.Type)), nil) } else { mname := p.name(method.Name) sig := p.typeExpr(method.Type) @@ -896,7 +896,7 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym { def.Name.SetUsed(true) pkg = def.Name.Pkg } - return restrictlookup(expr.Sel.Value, pkg) + return pkg.Lookup(expr.Sel.Value) } panic(fmt.Sprintf("unexpected packname: %#v", expr)) } @@ -911,7 +911,7 @@ func (p *noder) embedded(typ syntax.Expr) *Node { } sym := p.packname(typ) - n := p.nodSym(typ, ODCLFIELD, oldname(sym), lookup(sym.Name)) + n := p.nodSym(typ, ODCLFIELD, importName(sym), lookup(sym.Name)) n.SetEmbedded(true) if isStar { diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 9362c74288..9c6cd24eb7 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -271,13 +271,6 @@ func autolabel(prefix 
string) *types.Sym { return lookupN(prefix, int(n)) } -func restrictlookup(name string, pkg *types.Pkg) *types.Sym { - if !types.IsExported(name) && pkg != localpkg { - yyerror("cannot refer to unexported name %s.%s", pkg.Name, name) - } - return pkg.Lookup(name) -} - // find all the exported symbols in package opkg // and make them available in the current package func importdot(opkg *types.Pkg, pack *Node) { diff --git a/test/fixedbugs/bug229.go b/test/fixedbugs/bug229.go index 4baf65e48b..a30202fa2c 100644 --- a/test/fixedbugs/bug229.go +++ b/test/fixedbugs/bug229.go @@ -10,11 +10,11 @@ import "testing" func main() { var t testing.T - + // make sure error mentions that // name is unexported, not just "name not found". - t.common.name = nil // ERROR "unexported" - - println(testing.anyLowercaseName("asdf")) // ERROR "unexported" "undefined: testing.anyLowercaseName" + t.common.name = nil // ERROR "unexported" + + println(testing.anyLowercaseName("asdf")) // ERROR "unexported" } diff --git a/test/fixedbugs/issue22921.go b/test/fixedbugs/issue22921.go new file mode 100644 index 0000000000..04f78b2c08 --- /dev/null +++ b/test/fixedbugs/issue22921.go @@ -0,0 +1,18 @@ +// errorcheck + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "bytes" + +type _ struct{ bytes.nonexist } // ERROR "unexported" + +type _ interface{ bytes.nonexist } // ERROR "unexported" + +func main() { + var _ bytes.Buffer + var _ bytes.buffer // ERROR "unexported" +} diff --git a/test/runtime.go b/test/runtime.go index 0cf781b814..bccc9b53af 100644 --- a/test/runtime.go +++ b/test/runtime.go @@ -17,5 +17,5 @@ package main import "runtime" func main() { - runtime.printbool(true) // ERROR "unexported" "undefined" + runtime.printbool(true) // ERROR "unexported" } -- cgit v1.2.3-54-g00ecf From 41bc0a1713b9436e96c2d64211ad94e42cafd591 Mon Sep 17 00:00:00 2001 From: SparrowLii Date: Mon, 24 Aug 2020 14:43:32 +0800 Subject: math/big: fix TestShiftOverlap for test -count arguments > 1 Don't overwrite incoming test data. The change uses copy instead of assigning statement to avoid this. Change-Id: Ib907101822d811de5c45145cb9d7961907e212c3 Reviewed-on: https://go-review.googlesource.com/c/go/+/250137 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Robert Griesemer --- src/math/big/arith_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/math/big/arith_test.go b/src/math/big/arith_test.go index 05136f1895..e2b982c89c 100644 --- a/src/math/big/arith_test.go +++ b/src/math/big/arith_test.go @@ -241,20 +241,20 @@ var argshrVU = []argVU{ } func testShiftFunc(t *testing.T, f func(z, x []Word, s uint) Word, a argVU) { - // save a.d for error message, or it will be overwritten. + // work on copy of a.d to preserve the original data. 
b := make([]Word, len(a.d)) copy(b, a.d) - z := a.d[a.zp : a.zp+a.l] - x := a.d[a.xp : a.xp+a.l] + z := b[a.zp : a.zp+a.l] + x := b[a.xp : a.xp+a.l] c := f(z, x, a.s) for i, zi := range z { if zi != a.r[i] { - t.Errorf("d := %v, %s(d[%d:%d], d[%d:%d], %d)\n\tgot z[%d] = %#x; want %#x", b, a.m, a.zp, a.zp+a.l, a.xp, a.xp+a.l, a.s, i, zi, a.r[i]) + t.Errorf("d := %v, %s(d[%d:%d], d[%d:%d], %d)\n\tgot z[%d] = %#x; want %#x", a.d, a.m, a.zp, a.zp+a.l, a.xp, a.xp+a.l, a.s, i, zi, a.r[i]) break } } if c != a.c { - t.Errorf("d := %v, %s(d[%d:%d], d[%d:%d], %d)\n\tgot c = %#x; want %#x", b, a.m, a.zp, a.zp+a.l, a.xp, a.xp+a.l, a.s, c, a.c) + t.Errorf("d := %v, %s(d[%d:%d], d[%d:%d], %d)\n\tgot c = %#x; want %#x", a.d, a.m, a.zp, a.zp+a.l, a.xp, a.xp+a.l, a.s, c, a.c) } } -- cgit v1.2.3-54-g00ecf From 91a52de5274a13fcaab68c0a78115eff632f68fc Mon Sep 17 00:00:00 2001 From: Katie Hockman Date: Tue, 25 Aug 2020 11:30:32 -0400 Subject: crypto/x509: fix duplicate import Updates dave/dst#45. Change-Id: I165e6b3d002407a33908bf90a66ad01f8003b260 Reviewed-on: https://go-review.googlesource.com/c/go/+/250497 TryBot-Result: Gobot Gobot Reviewed-by: Filippo Valsorda --- src/crypto/x509/x509.go | 1 - 1 file changed, 1 deletion(-) diff --git a/src/crypto/x509/x509.go b/src/crypto/x509/x509.go index 8ce57fb1ec..49ac059a0e 100644 --- a/src/crypto/x509/x509.go +++ b/src/crypto/x509/x509.go @@ -14,7 +14,6 @@ import ( "crypto/elliptic" "crypto/rsa" "crypto/sha1" - _ "crypto/sha1" _ "crypto/sha256" _ "crypto/sha512" "crypto/x509/pkix" -- cgit v1.2.3-54-g00ecf From 5e1e8c4c9f99da52419c99e618425794102c9769 Mon Sep 17 00:00:00 2001 From: Steven Hartland Date: Thu, 7 May 2020 21:12:21 +0000 Subject: net/http: fix data race due to writeLoop goroutine left running Fix a data race for clients that mutate requests after receiving a response error which is caused by the writeLoop goroutine left running, this can be seen on canceled requests. 
Fixes #37669 Change-Id: I0e0e4fd63266326b32587d8596456760bf848b13 Reviewed-on: https://go-review.googlesource.com/c/go/+/232799 Reviewed-by: Bryan C. Mills Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot --- src/net/http/transport.go | 10 ++++- src/net/http/transport_test.go | 92 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+), 1 deletion(-) diff --git a/src/net/http/transport.go b/src/net/http/transport.go index d37b52b13d..05ff3ba1c2 100644 --- a/src/net/http/transport.go +++ b/src/net/http/transport.go @@ -1963,6 +1963,15 @@ func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritte return nil } + // Wait for the writeLoop goroutine to terminate to avoid data + // races on callers who mutate the request on failure. + // + // When resc in pc.roundTrip and hence rc.ch receives a responseAndError + // with a non-nil error it implies that the persistConn is either closed + // or closing. Waiting on pc.writeLoopDone is hence safe as all callers + // close closech which in turn ensures writeLoop returns. + <-pc.writeLoopDone + // If the request was canceled, that's better than network // failures that were likely the result of tearing down the // connection. @@ -1988,7 +1997,6 @@ func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritte return err } if pc.isBroken() { - <-pc.writeLoopDone if pc.nwrite == startBytesWritten { return nothingWrittenError{err} } diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go index 2d9ca10bf0..29d1ec3f46 100644 --- a/src/net/http/transport_test.go +++ b/src/net/http/transport_test.go @@ -25,6 +25,7 @@ import ( "io" "io/ioutil" "log" + mrand "math/rand" "net" . 
"net/http" "net/http/httptest" @@ -6284,3 +6285,94 @@ func TestTransportRejectsSignInContentLength(t *testing.T) { t.Fatalf("Error mismatch\nGot: %q\nWanted substring: %q", got, want) } } + +// dumpConn is a net.Conn which writes to Writer and reads from Reader +type dumpConn struct { + io.Writer + io.Reader +} + +func (c *dumpConn) Close() error { return nil } +func (c *dumpConn) LocalAddr() net.Addr { return nil } +func (c *dumpConn) RemoteAddr() net.Addr { return nil } +func (c *dumpConn) SetDeadline(t time.Time) error { return nil } +func (c *dumpConn) SetReadDeadline(t time.Time) error { return nil } +func (c *dumpConn) SetWriteDeadline(t time.Time) error { return nil } + +// delegateReader is a reader that delegates to another reader, +// once it arrives on a channel. +type delegateReader struct { + c chan io.Reader + r io.Reader // nil until received from c +} + +func (r *delegateReader) Read(p []byte) (int, error) { + if r.r == nil { + r.r = <-r.c + } + return r.r.Read(p) +} + +func testTransportRace(req *Request) { + save := req.Body + pr, pw := io.Pipe() + defer pr.Close() + defer pw.Close() + dr := &delegateReader{c: make(chan io.Reader)} + + t := &Transport{ + Dial: func(net, addr string) (net.Conn, error) { + return &dumpConn{pw, dr}, nil + }, + } + defer t.CloseIdleConnections() + + quitReadCh := make(chan struct{}) + // Wait for the request before replying with a dummy response: + go func() { + defer close(quitReadCh) + + req, err := ReadRequest(bufio.NewReader(pr)) + if err == nil { + // Ensure all the body is read; otherwise + // we'll get a partial dump. + io.Copy(ioutil.Discard, req.Body) + req.Body.Close() + } + select { + case dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\nConnection: close\r\n\r\n"): + case quitReadCh <- struct{}{}: + } + }() + + t.RoundTrip(req) + + // Ensure the reader returns before we reset req.Body to prevent + // a data race on req.Body. 
+ pw.Close() + <-quitReadCh + + req.Body = save +} + +// Issue 37669 +// Test that a cancellation doesn't result in a data race due to the writeLoop +// goroutine being left running, if the caller mutates the processed Request +// upon completion. +func TestErrorWriteLoopRace(t *testing.T) { + for i := 0; i < 1000; i++ { + ctx, cancel := context.WithCancel(context.Background()) + r := bytes.NewBuffer(make([]byte, 10000)) + delay := time.Duration(mrand.Intn(5)) * time.Millisecond + go func() { + time.Sleep(delay) + cancel() + }() + req, err := NewRequestWithContext(ctx, MethodPost, "http://example.com", r) + if err != nil { + t.Fatal(err) + } + + testTransportRace(req) + } +} -- cgit v1.2.3-54-g00ecf From 8d31ca255bd6b00d04f1673d26110b702e96662b Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Tue, 25 Aug 2020 18:01:27 +0000 Subject: Revert "net/http: fix data race due to writeLoop goroutine left running" This reverts CL 232799. Reason for revert: net/http test is failing on all longtest builders. Change-Id: I4694e34f35419bab2d0b45fa6d8c3ac2aa1f51a0 Reviewed-on: https://go-review.googlesource.com/c/go/+/250597 Run-TryBot: Bryan C. Mills Reviewed-by: Dmitri Shuralyov Reviewed-by: Katie Hockman TryBot-Result: Gobot Gobot --- src/net/http/transport.go | 10 +---- src/net/http/transport_test.go | 92 ------------------------------------------ 2 files changed, 1 insertion(+), 101 deletions(-) diff --git a/src/net/http/transport.go b/src/net/http/transport.go index 05ff3ba1c2..d37b52b13d 100644 --- a/src/net/http/transport.go +++ b/src/net/http/transport.go @@ -1963,15 +1963,6 @@ func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritte return nil } - // Wait for the writeLoop goroutine to terminate to avoid data - // races on callers who mutate the request on failure. - // - // When resc in pc.roundTrip and hence rc.ch receives a responseAndError - // with a non-nil error it implies that the persistConn is either closed - // or closing. 
Waiting on pc.writeLoopDone is hence safe as all callers - // close closech which in turn ensures writeLoop returns. - <-pc.writeLoopDone - // If the request was canceled, that's better than network // failures that were likely the result of tearing down the // connection. @@ -1997,6 +1988,7 @@ func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritte return err } if pc.isBroken() { + <-pc.writeLoopDone if pc.nwrite == startBytesWritten { return nothingWrittenError{err} } diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go index 29d1ec3f46..2d9ca10bf0 100644 --- a/src/net/http/transport_test.go +++ b/src/net/http/transport_test.go @@ -25,7 +25,6 @@ import ( "io" "io/ioutil" "log" - mrand "math/rand" "net" . "net/http" "net/http/httptest" @@ -6285,94 +6284,3 @@ func TestTransportRejectsSignInContentLength(t *testing.T) { t.Fatalf("Error mismatch\nGot: %q\nWanted substring: %q", got, want) } } - -// dumpConn is a net.Conn which writes to Writer and reads from Reader -type dumpConn struct { - io.Writer - io.Reader -} - -func (c *dumpConn) Close() error { return nil } -func (c *dumpConn) LocalAddr() net.Addr { return nil } -func (c *dumpConn) RemoteAddr() net.Addr { return nil } -func (c *dumpConn) SetDeadline(t time.Time) error { return nil } -func (c *dumpConn) SetReadDeadline(t time.Time) error { return nil } -func (c *dumpConn) SetWriteDeadline(t time.Time) error { return nil } - -// delegateReader is a reader that delegates to another reader, -// once it arrives on a channel. 
-type delegateReader struct { - c chan io.Reader - r io.Reader // nil until received from c -} - -func (r *delegateReader) Read(p []byte) (int, error) { - if r.r == nil { - r.r = <-r.c - } - return r.r.Read(p) -} - -func testTransportRace(req *Request) { - save := req.Body - pr, pw := io.Pipe() - defer pr.Close() - defer pw.Close() - dr := &delegateReader{c: make(chan io.Reader)} - - t := &Transport{ - Dial: func(net, addr string) (net.Conn, error) { - return &dumpConn{pw, dr}, nil - }, - } - defer t.CloseIdleConnections() - - quitReadCh := make(chan struct{}) - // Wait for the request before replying with a dummy response: - go func() { - defer close(quitReadCh) - - req, err := ReadRequest(bufio.NewReader(pr)) - if err == nil { - // Ensure all the body is read; otherwise - // we'll get a partial dump. - io.Copy(ioutil.Discard, req.Body) - req.Body.Close() - } - select { - case dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\nConnection: close\r\n\r\n"): - case quitReadCh <- struct{}{}: - } - }() - - t.RoundTrip(req) - - // Ensure the reader returns before we reset req.Body to prevent - // a data race on req.Body. - pw.Close() - <-quitReadCh - - req.Body = save -} - -// Issue 37669 -// Test that a cancellation doesn't result in a data race due to the writeLoop -// goroutine being left running, if the caller mutates the processed Request -// upon completion. 
-func TestErrorWriteLoopRace(t *testing.T) { - for i := 0; i < 1000; i++ { - ctx, cancel := context.WithCancel(context.Background()) - r := bytes.NewBuffer(make([]byte, 10000)) - delay := time.Duration(mrand.Intn(5)) * time.Millisecond - go func() { - time.Sleep(delay) - cancel() - }() - req, err := NewRequestWithContext(ctx, MethodPost, "http://example.com", r) - if err != nil { - t.Fatal(err) - } - - testTransportRace(req) - } -} -- cgit v1.2.3-54-g00ecf From 00a053bd4b2c19b2d9680f78f4c8657fcc6f1c88 Mon Sep 17 00:00:00 2001 From: Michał Łowicki Date: Sun, 23 Aug 2020 23:53:04 +0100 Subject: testing: fix Cleanup race with Logf and Errorf Fixes #40908 Change-Id: I25561a3f18e730a50e6fbf85aa7bd85bf1b73b6e Reviewed-on: https://go-review.googlesource.com/c/go/+/250078 Reviewed-by: Tobias Klauser Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot --- src/cmd/go/testdata/script/testing_issue40908.txt | 21 +++++++++++++++++++++ src/testing/testing.go | 4 ++++ 2 files changed, 25 insertions(+) create mode 100644 src/cmd/go/testdata/script/testing_issue40908.txt diff --git a/src/cmd/go/testdata/script/testing_issue40908.txt b/src/cmd/go/testdata/script/testing_issue40908.txt new file mode 100644 index 0000000000..4939de080c --- /dev/null +++ b/src/cmd/go/testdata/script/testing_issue40908.txt @@ -0,0 +1,21 @@ +[short] skip +[!race] skip + +go test -race testrace + +-- testrace/race_test.go -- +package testrace + +import "testing" + +func TestRace(t *testing.T) { + helperDone := make(chan struct{}) + go func() { + t.Logf("Something happened before cleanup.") + close(helperDone) + }() + + t.Cleanup(func() { + <-helperDone + }) +} diff --git a/src/testing/testing.go b/src/testing/testing.go index 6fc8c4fa9f..bf83df8863 100644 --- a/src/testing/testing.go +++ b/src/testing/testing.go @@ -860,11 +860,15 @@ func (c *common) Cleanup(f func()) { c.cleanup = func() { if oldCleanup != nil { defer func() { + c.mu.Lock() c.cleanupPc = oldCleanupPc + c.mu.Unlock() oldCleanup() }() } 
+ c.mu.Lock() c.cleanupName = callerName(0) + c.mu.Unlock() f() } var pc [maxStackLen]uintptr -- cgit v1.2.3-54-g00ecf From e3d608a8664b2cb0054f4d4706cdf911fd699b82 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Sun, 12 Jul 2020 16:20:02 -0400 Subject: go/types: factor out some methods that compute a single error In order to generate more accurate or informative error messages from the type checker, it can be helpful to interpret error messages in context. This is currently achieved in a number of ways: + Return a boolean value, and then reverse-engineer the error at the callsite (as in representable->representableConst). + Return a value causing the error (as in Checker.missingMethod), and add the error at the callsite. + Pass a "reason" string pointer to capture the error (as in Checker.assignableTo), and add the error at the callsite. + Pass a "context" string pointer, and use this when writing errors in the delegated method. In all cases, it is the responsibility of whatever code calls Checker.error* to set the operand mode to invalid. These methods are used as appropriate, depending on whether multiple errors are generated, whether additional context is needed, and whether the mere presence of an error needs to be interpreted at the callsite. However, this practice has some downsides: the plurality of error handling techniques can be a barrier to readability and composability. In this CL, we introduce Yet Another Pattern, with the hope that it can replace some or all of the existing techniques: factor out side-effect free functions that evaluate a single error, and add helpers for recording this error in the Checker. As a proof of concept this is done for Checker.representable and Checker.convertUntyped. If the general pattern does not seem appropriate for replacing some or all of the error-handling techniques listed above, we should revert to an established technique. 
Some internal error APIs are refactored to operate on an error, rather than a types.Error, with internal error metadata extracted using errors.As. This seemed to have negligible impact on performance, but we should be careful about actually wrapping errors: I expect that many users will expect err to be a types.Error. Change-Id: Ic5c6edcdc02768cd84e04638fad648934bcf3c17 Reviewed-on: https://go-review.googlesource.com/c/go/+/242082 Run-TryBot: Robert Findley TryBot-Result: Gobot Gobot Reviewed-by: Robert Griesemer --- src/go/types/errors.go | 39 +++++++++++++++++++++++++++++++++------ src/go/types/expr.go | 36 ++++++++++++++++++++++++------------ 2 files changed, 57 insertions(+), 18 deletions(-) diff --git a/src/go/types/errors.go b/src/go/types/errors.go index 91b077163c..88e41c5713 100644 --- a/src/go/types/errors.go +++ b/src/go/types/errors.go @@ -7,6 +7,7 @@ package types import ( + "errors" "fmt" "go/ast" "go/token" @@ -72,22 +73,33 @@ func (check *Checker) dump(format string, args ...interface{}) { fmt.Println(check.sprintf(format, args...)) } -func (check *Checker) err(pos token.Pos, msg string, soft bool) { +func (check *Checker) err(err error) { + if err == nil { + return + } + var e Error + isInternal := errors.As(err, &e) // Cheap trick: Don't report errors with messages containing // "invalid operand" or "invalid type" as those tend to be // follow-on errors which don't add useful information. Only // exclude them if these strings are not at the beginning, // and only if we have at least one error already reported. 
- if check.firstErr != nil && (strings.Index(msg, "invalid operand") > 0 || strings.Index(msg, "invalid type") > 0) { + isInvalidErr := isInternal && (strings.Index(e.Msg, "invalid operand") > 0 || strings.Index(e.Msg, "invalid type") > 0) + if check.firstErr != nil && isInvalidErr { return } - err := Error{check.fset, pos, msg, soft} if check.firstErr == nil { check.firstErr = err } if trace { + pos := e.Pos + msg := e.Msg + if !isInternal { + msg = err.Error() + pos = token.NoPos + } check.trace(pos, "ERROR: %s", msg) } @@ -99,15 +111,30 @@ func (check *Checker) err(pos token.Pos, msg string, soft bool) { } func (check *Checker) error(pos token.Pos, msg string) { - check.err(pos, msg, false) + check.err(Error{Fset: check.fset, Pos: pos, Msg: msg}) +} + +// newErrorf creates a new Error, but does not handle it. +func (check *Checker) newErrorf(pos token.Pos, format string, args ...interface{}) error { + return Error{ + Fset: check.fset, + Pos: pos, + Msg: check.sprintf(format, args...), + Soft: false, + } } func (check *Checker) errorf(pos token.Pos, format string, args ...interface{}) { - check.err(pos, check.sprintf(format, args...), false) + check.error(pos, check.sprintf(format, args...)) } func (check *Checker) softErrorf(pos token.Pos, format string, args ...interface{}) { - check.err(pos, check.sprintf(format, args...), true) + check.err(Error{ + Fset: check.fset, + Pos: pos, + Msg: check.sprintf(format, args...), + Soft: true, + }) } func (check *Checker) invalidAST(pos token.Pos, format string, args ...interface{}) { diff --git a/src/go/types/expr.go b/src/go/types/expr.go index d1e892a9b7..8503a521f6 100644 --- a/src/go/types/expr.go +++ b/src/go/types/expr.go @@ -329,8 +329,16 @@ func representableConst(x constant.Value, check *Checker, typ *Basic, rounded *c return false } -// representable checks that a constant operand is representable in the given basic type. 
+// representable checks that a constant operand is representable in the given +// basic type. func (check *Checker) representable(x *operand, typ *Basic) { + if err := check.isRepresentable(x, typ); err != nil { + x.mode = invalid + check.err(err) + } +} + +func (check *Checker) isRepresentable(x *operand, typ *Basic) error { assert(x.mode == constant_) if !representableConst(x.val, check, typ, &x.val) { var msg string @@ -350,9 +358,9 @@ func (check *Checker) representable(x *operand, typ *Basic) { } else { msg = "cannot convert %s to %s" } - check.errorf(x.pos(), msg, x, typ) - x.mode = invalid + return check.newErrorf(x.pos(), msg, x, typ) } + return nil } // updateExprType updates the type of x to typ and invokes itself @@ -488,10 +496,16 @@ func (check *Checker) updateExprVal(x ast.Expr, val constant.Value) { // convertUntyped attempts to set the type of an untyped value to the target type. func (check *Checker) convertUntyped(x *operand, target Type) { - if x.mode == invalid || isTyped(x.typ) || target == Typ[Invalid] { - return + if err := check.canConvertUntyped(x, target); err != nil { + x.mode = invalid + check.err(err) } +} +func (check *Checker) canConvertUntyped(x *operand, target Type) error { + if x.mode == invalid || isTyped(x.typ) || target == Typ[Invalid] { + return nil + } // TODO(gri) Sloppy code - clean up. This function is central // to assignment and expression checking. 
@@ -507,16 +521,15 @@ func (check *Checker) convertUntyped(x *operand, target Type) { } else if xkind != tkind { goto Error } - return + return nil } // typed target switch t := target.Underlying().(type) { case *Basic: if x.mode == constant_ { - check.representable(x, t) - if x.mode == invalid { - return + if err := check.isRepresentable(x, t); err != nil { + return err } // expression value may have been rounded - update if needed check.updateExprVal(x.expr, x.val) @@ -576,11 +589,10 @@ func (check *Checker) convertUntyped(x *operand, target Type) { x.typ = target check.updateExprType(x.expr, target, true) // UntypedNils are final - return + return nil Error: - check.errorf(x.pos(), "cannot convert %s to %s", x, target) - x.mode = invalid + return check.newErrorf(x.pos(), "cannot convert %s to %s", x, target) } func (check *Checker) comparison(x, y *operand, op token.Token) { -- cgit v1.2.3-54-g00ecf From 3d774611feee49aa44b4f5ed65d40497a47ad4c8 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Sat, 22 Aug 2020 05:47:06 -0700 Subject: src/go.mod, net/http: update bundled and latest golang.org/x/net Updates x/net/http2 to git rev c89045814202410a2d67ec20ecf177ec77ceae7f http2: perform connection health check https://golang.org/cl/198040 (fixes #31643) http2: use ASCII space trimming for parsing Trailer header https://golang.org/cl/231437 all: update golang.org/x/crypto to v0.0.0-20200622213623-75b288015ac9 https://golang.org/cl/239700 (updates #30965) net/http2: fix erringRoundTripper https://golang.org/cl/243257 (updates #40213) also updates the vendored version of golang.org/x/net as per $ go get golang.org/x/net@c890458142 $ go mod tidy $ go mod vendor $ go generate -run bundle std Change-Id: Iea2473ef086df760144d9656f03a0218eb9da91f Reviewed-on: https://go-review.googlesource.com/c/go/+/249842 Run-TryBot: Emmanuel Odeke Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. 
Mills --- src/go.mod | 2 +- src/go.sum | 4 +-- src/net/http/h2_bundle.go | 70 +++++++++++++++++++++++++++++++++++++++++++---- src/vendor/modules.txt | 2 +- 4 files changed, 68 insertions(+), 10 deletions(-) diff --git a/src/go.mod b/src/go.mod index b002f8e516..c75f74b916 100644 --- a/src/go.mod +++ b/src/go.mod @@ -4,7 +4,7 @@ go 1.15 require ( golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 - golang.org/x/net v0.0.0-20200707034311-ab3426394381 + golang.org/x/net v0.0.0-20200822124328-c89045814202 golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3 // indirect golang.org/x/text v0.3.3-0.20200430171850-afb9336c4530 // indirect ) diff --git a/src/go.sum b/src/go.sum index 528f7e460e..dc9641be1a 100644 --- a/src/go.sum +++ b/src/go.sum @@ -2,8 +2,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go index 
81c3671f85..463e7e8ce9 100644 --- a/src/net/http/h2_bundle.go +++ b/src/net/http/h2_bundle.go @@ -5629,7 +5629,7 @@ func (sc *http2serverConn) newWriterAndRequestNoBody(st *http2stream, rp http2re var trailer Header for _, v := range rp.header["Trailer"] { for _, key := range strings.Split(v, ",") { - key = CanonicalHeaderKey(strings.TrimSpace(key)) + key = CanonicalHeaderKey(textproto.TrimString(key)) switch key { case "Transfer-Encoding", "Trailer", "Content-Length": // Bogus. (copy of http1 rules) @@ -6606,6 +6606,19 @@ type http2Transport struct { // waiting for their turn. StrictMaxConcurrentStreams bool + // ReadIdleTimeout is the timeout after which a health check using ping + // frame will be carried out if no frame is received on the connection. + // Note that a ping response will is considered a received frame, so if + // there is no other traffic on the connection, the health check will + // be performed every ReadIdleTimeout interval. + // If zero, no health check is performed. + ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to Ping is not received. + // Defaults to 15s. + PingTimeout time.Duration + // t1, if non-nil, is the standard library Transport using // this transport. Its settings are used (but not its // RoundTrip method, etc). @@ -6629,6 +6642,14 @@ func (t *http2Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } +func (t *http2Transport) pingTimeout() time.Duration { + if t.PingTimeout == 0 { + return 15 * time.Second + } + return t.PingTimeout + +} + // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. 
func http2ConfigureTransport(t1 *Transport) error { @@ -7174,6 +7195,20 @@ func (t *http2Transport) newClientConn(c net.Conn, singleUse bool) (*http2Client return cc, nil } +func (cc *http2ClientConn) healthCheck() { + pingTimeout := cc.t.pingTimeout() + // We don't need to periodically ping in the health check, because the readLoop of ClientConn will + // trigger the healthCheck again if there is no frame received. + ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + defer cancel() + err := cc.Ping(ctx) + if err != nil { + cc.closeForLostPing() + cc.t.connPool().MarkDead(cc) + return + } +} + func (cc *http2ClientConn) setGoAway(f *http2GoAwayFrame) { cc.mu.Lock() defer cc.mu.Unlock() @@ -7345,14 +7380,12 @@ func (cc *http2ClientConn) sendGoAway() error { return nil } -// Close closes the client connection immediately. -// -// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. -func (cc *http2ClientConn) Close() error { +// closes the client connection immediately. In-flight requests are interrupted. +// err is sent to streams. +func (cc *http2ClientConn) closeForError(err error) error { cc.mu.Lock() defer cc.cond.Broadcast() defer cc.mu.Unlock() - err := errors.New("http2: client connection force closed via ClientConn.Close") for id, cs := range cc.streams { select { case cs.resc <- http2resAndError{err: err}: @@ -7365,6 +7398,20 @@ func (cc *http2ClientConn) Close() error { return cc.tconn.Close() } +// Close closes the client connection immediately. +// +// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. +func (cc *http2ClientConn) Close() error { + err := errors.New("http2: client connection force closed via ClientConn.Close") + return cc.closeForError(err) +} + +// closes the client connection immediately. In-flight requests are interrupted. 
+func (cc *http2ClientConn) closeForLostPing() error { + err := errors.New("http2: client connection lost") + return cc.closeForError(err) +} + const http2maxAllocFrameSize = 512 << 10 // frameBuffer returns a scratch buffer suitable for writing DATA frames. @@ -8236,8 +8283,17 @@ func (rl *http2clientConnReadLoop) run() error { rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse gotReply := false // ever saw a HEADERS reply gotSettings := false + readIdleTimeout := cc.t.ReadIdleTimeout + var t *time.Timer + if readIdleTimeout != 0 { + t = time.AfterFunc(readIdleTimeout, cc.healthCheck) + defer t.Stop() + } for { f, err := cc.fr.ReadFrame() + if t != nil { + t.Reset(readIdleTimeout) + } if err != nil { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } @@ -8968,6 +9024,8 @@ func http2strSliceContains(ss []string, s string) bool { type http2erringRoundTripper struct{ err error } +func (rt http2erringRoundTripper) RoundTripErr() error { return rt.err } + func (rt http2erringRoundTripper) RoundTrip(*Request) (*Response, error) { return nil, rt.err } // gzipReader wraps a response body so it can lazily diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index e687d77b4d..d1e4f28e21 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -8,7 +8,7 @@ golang.org/x/crypto/curve25519 golang.org/x/crypto/hkdf golang.org/x/crypto/internal/subtle golang.org/x/crypto/poly1305 -# golang.org/x/net v0.0.0-20200707034311-ab3426394381 +# golang.org/x/net v0.0.0-20200822124328-c89045814202 ## explicit golang.org/x/net/dns/dnsmessage golang.org/x/net/http/httpguts -- cgit v1.2.3-54-g00ecf From 8381408048018aa2b6eec874f3161b4641191522 Mon Sep 17 00:00:00 2001 From: Michael Fraenkel Date: Thu, 16 Jul 2020 21:30:12 -0600 Subject: net/http: fix detection of Roundtrippers that always error CL 220905 added code to identify alternate transports that always error by using http2erringRoundTripper. 
This does not work when the transport is from another package, e.g., http2.erringRoundTripper. Expose a new method that allow detection of such a RoundTripper. Switch to an interface that is both a RoundTripper and can return the underlying error. Fixes #40213 Change-Id: I170739857ab9e99dffb5fa55c99b24b23c2f9c54 Reviewed-on: https://go-review.googlesource.com/c/go/+/243258 Reviewed-by: Emmanuel Odeke Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot --- src/net/http/omithttp2.go | 4 ---- src/net/http/transport.go | 8 ++++++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/net/http/omithttp2.go b/src/net/http/omithttp2.go index 7e2f492579..c8f5c28a59 100644 --- a/src/net/http/omithttp2.go +++ b/src/net/http/omithttp2.go @@ -32,10 +32,6 @@ type http2Transport struct { func (*http2Transport) RoundTrip(*Request) (*Response, error) { panic(noHTTP2) } func (*http2Transport) CloseIdleConnections() {} -type http2erringRoundTripper struct{ err error } - -func (http2erringRoundTripper) RoundTrip(*Request) (*Response, error) { panic(noHTTP2) } - type http2noDialH2RoundTripper struct{} func (http2noDialH2RoundTripper) RoundTrip(*Request) (*Response, error) { panic(noHTTP2) } diff --git a/src/net/http/transport.go b/src/net/http/transport.go index d37b52b13d..c23042b1e3 100644 --- a/src/net/http/transport.go +++ b/src/net/http/transport.go @@ -1528,6 +1528,10 @@ func (pconn *persistConn) addTLS(name string, trace *httptrace.ClientTrace) erro return nil } +type erringRoundTripper interface { + RoundTripErr() error +} + func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (pconn *persistConn, err error) { pconn = &persistConn{ t: t, @@ -1694,9 +1698,9 @@ func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (pconn *pers if s := pconn.tlsState; s != nil && s.NegotiatedProtocolIsMutual && s.NegotiatedProtocol != "" { if next, ok := t.TLSNextProto[s.NegotiatedProtocol]; ok { alt := next(cm.targetAddr, pconn.conn.(*tls.Conn)) - if 
e, ok := alt.(http2erringRoundTripper); ok { + if e, ok := alt.(erringRoundTripper); ok { // pconn.conn was closed by next (http2configureTransport.upgradeFn). - return nil, e.err + return nil, e.RoundTripErr() } return &persistConn{t: t, cacheKey: pconn.cacheKey, alt: alt}, nil } -- cgit v1.2.3-54-g00ecf From 758ac371ab930734053ed226ac62681e62ab8eea Mon Sep 17 00:00:00 2001 From: Richard Musiol Date: Sat, 15 Aug 2020 21:15:35 +0200 Subject: misc/wasm: make wasm_exec more robust against uncommon environments JavaScript environments are quite unpredictable because bundlers add mocks for compatibility and libraries can polute the global namespace. Detect more of such situations: - Add check that require("fs") returns an object. - Fix check that require("fs") returns an non-empty object. - Add check that "module" is defined. Fixes #40730 Change-Id: I2ce65fc7db64bbbb0b60eec79a4cfe5c3fec99c0 Reviewed-on: https://go-review.googlesource.com/c/go/+/248758 Run-TryBot: Richard Musiol TryBot-Result: Gobot Gobot Reviewed-by: Dmitri Shuralyov Reviewed-by: Cherry Zhang --- misc/wasm/wasm_exec.js | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/misc/wasm/wasm_exec.js b/misc/wasm/wasm_exec.js index 8501ae7cd8..ef97c4e311 100644 --- a/misc/wasm/wasm_exec.js +++ b/misc/wasm/wasm_exec.js @@ -11,6 +11,7 @@ // - Node.js // - Electron // - Parcel + // - Webpack if (typeof global !== "undefined") { // global already exists @@ -28,7 +29,7 @@ if (!global.fs && global.require) { const fs = require("fs"); - if (Object.keys(fs) !== 0) { + if (typeof fs === "object" && fs !== null && Object.keys(fs).length !== 0) { global.fs = fs; } } @@ -556,6 +557,7 @@ } if ( + typeof module !== "undefined" && global.require && global.require.main === module && global.process && -- cgit v1.2.3-54-g00ecf From 03eb7e20e4d4d0a9cc3d34787049f101c9f72761 Mon Sep 17 00:00:00 2001 From: Michael Munday Date: Tue, 25 Aug 2020 06:43:27 -0700 Subject: cmd/compile: apply strong typing to all remaining 
s390x rewrite rules This CL applies strong aux typing to the remaining s390x rewrite rules in preparation for strong aux typing becoming the default. Passes toolstash-check on s390x. Change-Id: Id585b0db492780737818024e1b22b4837435b525 Reviewed-on: https://go-review.googlesource.com/c/go/+/250558 Run-TryBot: Michael Munday TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/S390X.rules | 677 +++-- src/cmd/compile/internal/ssa/rewriteS390X.go | 3723 +++++++++++++------------- 2 files changed, 2189 insertions(+), 2211 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules index 5e4c436ca1..f7d391cf3a 100644 --- a/src/cmd/compile/internal/ssa/gen/S390X.rules +++ b/src/cmd/compile/internal/ssa/gen/S390X.rules @@ -641,41 +641,37 @@ (BRC {c} (CMPWUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CIJ {c} x [ int8(y)] yes no) // Fold constants into instructions. 
-(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x) -(ADDW x (MOVDconst [c])) -> (ADDWconst [int64(int32(c))] x) +(ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [int32(c)] x) +(ADDW x (MOVDconst [c])) => (ADDWconst [int32(c)] x) -(SUB x (MOVDconst [c])) && is32Bit(c) -> (SUBconst x [c]) -(SUB (MOVDconst [c]) x) && is32Bit(c) -> (NEG (SUBconst x [c])) -(SUBW x (MOVDconst [c])) -> (SUBWconst x [int64(int32(c))]) -(SUBW (MOVDconst [c]) x) -> (NEGW (SUBWconst x [int64(int32(c))])) +(SUB x (MOVDconst [c])) && is32Bit(c) => (SUBconst x [int32(c)]) +(SUB (MOVDconst [c]) x) && is32Bit(c) => (NEG (SUBconst x [int32(c)])) +(SUBW x (MOVDconst [c])) => (SUBWconst x [int32(c)]) +(SUBW (MOVDconst [c]) x) => (NEGW (SUBWconst x [int32(c)])) -(MULLD x (MOVDconst [c])) && is32Bit(c) -> (MULLDconst [c] x) -(MULLW x (MOVDconst [c])) -> (MULLWconst [int64(int32(c))] x) +(MULLD x (MOVDconst [c])) && is32Bit(c) => (MULLDconst [int32(c)] x) +(MULLW x (MOVDconst [c])) => (MULLWconst [int32(c)] x) // NILF instructions leave the high 32 bits unchanged which is // equivalent to the leftmost 32 bits being set. // TODO(mundaym): modify the assembler to accept 64-bit values // and use isU32Bit(^c). 
(AND x (MOVDconst [c])) && is32Bit(c) && c < 0 => (ANDconst [c] x) -(AND x (MOVDconst [c])) && is32Bit(c) && c >= 0 -> (MOVWZreg (ANDWconst [int64(int32(c))] x)) -(ANDW x (MOVDconst [c])) -> (ANDWconst [int64(int32(c))] x) +(AND x (MOVDconst [c])) && is32Bit(c) && c >= 0 => (MOVWZreg (ANDWconst [int32(c)] x)) +(ANDW x (MOVDconst [c])) => (ANDWconst [int32(c)] x) -(ANDWconst [c] (ANDWconst [d] x)) => (ANDWconst [c & d] x) -(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c & d] x) +((AND|ANDW)const [c] ((AND|ANDW)const [d] x)) => ((AND|ANDW)const [c&d] x) -(OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x) -(ORW x (MOVDconst [c])) -> (ORWconst [int64(int32(c))] x) - -(XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x) -(XORW x (MOVDconst [c])) -> (XORWconst [int64(int32(c))] x) +((OR|XOR) x (MOVDconst [c])) && isU32Bit(c) => ((OR|XOR)const [c] x) +((OR|XOR)W x (MOVDconst [c])) => ((OR|XOR)Wconst [int32(c)] x) // Constant shifts. (S(LD|RD|RAD|LW|RW|RAW) x (MOVDconst [c])) - -> (S(LD|RD|RAD|LW|RW|RAW)const x [c&63]) + => (S(LD|RD|RAD|LW|RW|RAW)const x [int8(c&63)]) // Shifts only use the rightmost 6 bits of the shift value. 
(S(LD|RD|RAD|LW|RW|RAW) x (AND (MOVDconst [c]) y)) - -> (S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst [c&63] y)) + => (S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst [int32(c&63)] y)) (S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst [c] y)) && c&63 == 63 => (S(LD|RD|RAD|LW|RW|RAW) x y) (SLD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLD x y) @@ -686,8 +682,8 @@ (SRAW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAW x y) // Constant rotate generation -(RLL x (MOVDconst [c])) -> (RLLconst x [c&31]) -(RLLG x (MOVDconst [c])) -> (RLLGconst x [c&63]) +(RLL x (MOVDconst [c])) => (RLLconst x [int8(c&31)]) +(RLLG x (MOVDconst [c])) => (RLLGconst x [int8(c&63)]) (ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (RLLGconst [c] x) ( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (RLLGconst [c] x) @@ -697,14 +693,17 @@ ( ORW (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (RLLconst [c] x) (XORW (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (RLLconst [c] x) -(CMP x (MOVDconst [c])) && is32Bit(c) -> (CMPconst x [c]) -(CMP (MOVDconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPconst x [c])) -(CMPW x (MOVDconst [c])) -> (CMPWconst x [int64(int32(c))]) -(CMPW (MOVDconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int32(c))])) -(CMPU x (MOVDconst [c])) && isU32Bit(c) -> (CMPUconst x [int64(int32(c))]) -(CMPU (MOVDconst [c]) x) && isU32Bit(c) -> (InvertFlags (CMPUconst x [int64(int32(c))])) -(CMPWU x (MOVDconst [c])) -> (CMPWUconst x [int64(int32(c))]) -(CMPWU (MOVDconst [c]) x) -> (InvertFlags (CMPWUconst x [int64(int32(c))])) +// Signed 64-bit comparison with immediate. +(CMP x (MOVDconst [c])) && is32Bit(c) => (CMPconst x [int32(c)]) +(CMP (MOVDconst [c]) x) && is32Bit(c) => (InvertFlags (CMPconst x [int32(c)])) + +// Unsigned 64-bit comparison with immediate. +(CMPU x (MOVDconst [c])) && isU32Bit(c) => (CMPUconst x [int32(c)]) +(CMPU (MOVDconst [c]) x) && isU32Bit(c) => (InvertFlags (CMPUconst x [int32(c)])) + +// Signed and unsigned 32-bit comparison with immediate. 
+(CMP(W|WU) x (MOVDconst [c])) => (CMP(W|WU)const x [int32(c)]) +(CMP(W|WU) (MOVDconst [c]) x) => (InvertFlags (CMP(W|WU)const x [int32(c)])) // Canonicalize the order of arguments to comparisons - helps with CSE. ((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x)) @@ -752,14 +751,14 @@ (SL(D|W)const x [int8(log32(-c+(-c&^(-c-1))))])) // Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them). -(ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x) -(ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is20Bit(c+d) -> (MOVDaddr [c+d] {s} x) +(ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x) +(ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is20Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x) (ADD idx (MOVDaddr [c] {s} ptr)) && ptr.Op != OpSB && idx.Op != OpSB => (MOVDaddridx [c] {s} ptr idx) // fold ADDconst into MOVDaddrx -(ADDconst [c] (MOVDaddridx [d] {s} x y)) && is20Bit(c+d) -> (MOVDaddridx [c+d] {s} x y) -(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is20Bit(c+d) && x.Op != OpSB -> (MOVDaddridx [c+d] {s} x y) -(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is20Bit(c+d) && y.Op != OpSB -> (MOVDaddridx [c+d] {s} x y) +(ADDconst [c] (MOVDaddridx [d] {s} x y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y) +(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is20Bit(int64(c)+int64(d)) && x.Op != OpSB => (MOVDaddridx [c+d] {s} x y) +(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is20Bit(int64(c)+int64(d)) && y.Op != OpSB => (MOVDaddridx [c+d] {s} x y) // reverse ordering of compare instruction (LOCGR {c} x y (InvertFlags cmp)) => (LOCGR {c.ReverseComparison()} x y cmp) @@ -799,11 +798,11 @@ // detect copysign (OR (SLDconst [63] (SRDconst [63] (LGDR x))) (LGDR (LPDFR y))) => (LGDR (CPSDR y x)) -(OR (SLDconst [63] (SRDconst [63] (LGDR x))) (MOVDconst [c])) && c & -1<<63 == 0 -> (LGDR 
(CPSDR (FMOVDconst [c]) x)) +(OR (SLDconst [63] (SRDconst [63] (LGDR x))) (MOVDconst [c])) && c & -1<<63 == 0 => (LGDR (CPSDR (FMOVDconst [math.Float64frombits(uint64(c))]) x)) (OR (AND (MOVDconst [-1<<63]) (LGDR x)) (LGDR (LPDFR y))) => (LGDR (CPSDR y x)) -(OR (AND (MOVDconst [-1<<63]) (LGDR x)) (MOVDconst [c])) && c & -1<<63 == 0 -> (LGDR (CPSDR (FMOVDconst [c]) x)) -(CPSDR y (FMOVDconst [c])) && c & -1<<63 == 0 -> (LPDFR y) -(CPSDR y (FMOVDconst [c])) && c & -1<<63 != 0 -> (LNDFR y) +(OR (AND (MOVDconst [-1<<63]) (LGDR x)) (MOVDconst [c])) && c & -1<<63 == 0 => (LGDR (CPSDR (FMOVDconst [math.Float64frombits(uint64(c))]) x)) +(CPSDR y (FMOVDconst [c])) && !math.Signbit(c) => (LPDFR y) +(CPSDR y (FMOVDconst [c])) && math.Signbit(c) => (LNDFR y) // absorb negations into set/clear sign bit (FNEG (LPDFR x)) => (LNDFR x) @@ -832,150 +831,150 @@ // the ADDconst get eliminated, we still have to compute the ADDconst and we now // have potentially two live values (ptr and (ADDconst [off] ptr)) instead of one. // Nevertheless, let's do it! 
-(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (MOVDload [off1+off2] {sym} ptr mem) -(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem) -(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (MOVHload [off1+off2] {sym} ptr mem) -(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem) -(MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} ptr mem) -(MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} ptr mem) -(MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} ptr mem) -(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem) -(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem) - -(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} ptr val mem) -(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem) -(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} ptr val mem) -(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem) -(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem) -(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem) - -(ADDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (ADDload [off1+off2] {sym} x ptr mem) -(ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) 
-> (ADDWload [off1+off2] {sym} x ptr mem) -(MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (MULLDload [off1+off2] {sym} x ptr mem) -(MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (MULLWload [off1+off2] {sym} x ptr mem) -(SUBload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (SUBload [off1+off2] {sym} x ptr mem) -(SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (SUBWload [off1+off2] {sym} x ptr mem) - -(ANDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (ANDload [off1+off2] {sym} x ptr mem) -(ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (ANDWload [off1+off2] {sym} x ptr mem) -(ORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (ORload [off1+off2] {sym} x ptr mem) -(ORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (ORWload [off1+off2] {sym} x ptr mem) -(XORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (XORload [off1+off2] {sym} x ptr mem) -(XORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (XORWload [off1+off2] {sym} x ptr mem) +(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDload [off1+off2] {sym} ptr mem) +(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWload [off1+off2] {sym} ptr mem) +(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHload [off1+off2] {sym} ptr mem) +(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBload [off1+off2] {sym} ptr mem) +(MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWZload 
[off1+off2] {sym} ptr mem) +(MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHZload [off1+off2] {sym} ptr mem) +(MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBZload [off1+off2] {sym} ptr mem) +(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSload [off1+off2] {sym} ptr mem) +(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDload [off1+off2] {sym} ptr mem) + +(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDstore [off1+off2] {sym} ptr val mem) +(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWstore [off1+off2] {sym} ptr val mem) +(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHstore [off1+off2] {sym} ptr val mem) +(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBstore [off1+off2] {sym} ptr val mem) +(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSstore [off1+off2] {sym} ptr val mem) +(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDstore [off1+off2] {sym} ptr val mem) + +(ADDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDload [off1+off2] {sym} x ptr mem) +(ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDWload [off1+off2] {sym} x ptr mem) +(MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLDload [off1+off2] {sym} x ptr mem) +(MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLWload [off1+off2] {sym} x ptr mem) +(SUBload [off1] {sym} x 
(ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBload [off1+off2] {sym} x ptr mem) +(SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBWload [off1+off2] {sym} x ptr mem) + +(ANDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDload [off1+off2] {sym} x ptr mem) +(ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDWload [off1+off2] {sym} x ptr mem) +(ORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORload [off1+off2] {sym} x ptr mem) +(ORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORWload [off1+off2] {sym} x ptr mem) +(XORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORload [off1+off2] {sym} x ptr mem) +(XORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORWload [off1+off2] {sym} x ptr mem) // Fold constants into stores. 
-(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(off) && ptr.Op != OpSB -> - (MOVDstoreconst [makeValAndOff(c,off)] {sym} ptr mem) -(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(off) && ptr.Op != OpSB -> - (MOVWstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) -(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && isU12Bit(off) && ptr.Op != OpSB -> - (MOVHstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) -(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && is20Bit(off) && ptr.Op != OpSB -> - (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) +(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB => + (MOVDstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB => + (MOVWstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) +(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && isU12Bit(int64(off)) && ptr.Op != OpSB => + (MOVHstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem) +(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && is20Bit(int64(off)) && ptr.Op != OpSB => + (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem) // Fold address offsets into constant stores. 
-(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(ValAndOff(sc).Off()+off) -> - (MOVDstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) -(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(ValAndOff(sc).Off()+off) -> - (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) -(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(ValAndOff(sc).Off()+off) -> - (MOVHstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) -(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && is20Bit(ValAndOff(sc).Off()+off) -> - (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) +(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off()+int64(off)) => + (MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem) +(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off()+int64(off)) => + (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem) +(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off()+int64(off)) => + (MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem) +(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && is20Bit(sc.Off()+int64(off)) => + (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem) // Merge address calculations into loads and stores. // Offsets from SB must not be merged into unaligned memory accesses because // loads/stores using PC-relative addressing directly must be aligned to the // size of the target. 
-(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) -> - (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) -> - (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) -> - (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) - -(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) -> - (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) -> - (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) - -(MOVDstore [off1] {sym1} 
(MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) -> - (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) -(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) -> - (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) -(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) -> - (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) -(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) -(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) -(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) - -(ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) -(ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) -(MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) -(MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) -(SUBload [o1] {s1} x 
(MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem) -(SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) - -(ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) -(ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) -(ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem) -(ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) -(XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem) -(XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) => + (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) +(MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) => + (MOVWZload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) +(MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && 
t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) => + (MOVHZload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) +(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVBZload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) +(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (FMOVSload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) +(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (FMOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) + +(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) => + (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) +(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) => + (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) +(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) + +(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) => + (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) +(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) => + (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) 
+(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) => + (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) +(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) +(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (FMOVSstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) +(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (FMOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) + +(ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) +(ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) +(MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLDload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) +(MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) +(SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) +(SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) + +(ANDload [o1] {s1} x 
(MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) +(ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) +(ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) +(ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) +(XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) +(XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) // Cannot store constant to SB directly (no 'move relative long immediate' instructions). 
-(MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> - (MOVDstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) -(MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> - (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) -(MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> - (MOVHstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) -(MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> - (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) +(MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) => + (MOVDstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) +(MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) => + (MOVWstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) +(MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) => + (MOVHstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) +(MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) => + (MOVBstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) // generating indexed loads and stores -(MOVBZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (MOVBZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) -(MOVBload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && 
canMergeSym(sym1, sym2) -> - (MOVBloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) -(MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (MOVHZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) -(MOVHload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (MOVHloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) -(MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (MOVWZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) -(MOVWload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (MOVWloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) -(MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (MOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) -(FMOVSload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (FMOVSloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) -(FMOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (FMOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - -(MOVBstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (MOVBstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) -(MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (MOVHstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) -(MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (MOVWstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) -(MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) 
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (MOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) -(FMOVSstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (FMOVSstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) -(FMOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (FMOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) +(MOVBZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVBZloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) +(MOVBload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVBloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) +(MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVHZloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) +(MOVHload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVHloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) +(MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVWZloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) +(MOVWload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVWloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) +(MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVDloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) +(FMOVSload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && 
is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (FMOVSloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) +(FMOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (FMOVDloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) + +(MOVBstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVBstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) +(MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVHstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) +(MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVWstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) +(MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVDstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) +(FMOVSstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (FMOVSstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) +(FMOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (FMOVDstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) (MOVBZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB => (MOVBZloadidx [off] {sym} ptr idx mem) (MOVBload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB => (MOVBloadidx [off] {sym} ptr idx mem) @@ -995,53 +994,53 @@ (FMOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB => (FMOVDstoreidx [off] {sym} ptr idx val mem) // combine ADD into indexed loads and stores -(MOVBZloadidx [c] {sym} 
(ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (MOVBZloadidx [c+d] {sym} ptr idx mem) -(MOVBloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (MOVBloadidx [c+d] {sym} ptr idx mem) -(MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (MOVHZloadidx [c+d] {sym} ptr idx mem) -(MOVHloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (MOVHloadidx [c+d] {sym} ptr idx mem) -(MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (MOVWZloadidx [c+d] {sym} ptr idx mem) -(MOVWloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (MOVWloadidx [c+d] {sym} ptr idx mem) -(MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (MOVDloadidx [c+d] {sym} ptr idx mem) -(FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (FMOVSloadidx [c+d] {sym} ptr idx mem) -(FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (FMOVDloadidx [c+d] {sym} ptr idx mem) - -(MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(c+d) -> (MOVBstoreidx [c+d] {sym} ptr idx val mem) -(MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(c+d) -> (MOVHstoreidx [c+d] {sym} ptr idx val mem) -(MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(c+d) -> (MOVWstoreidx [c+d] {sym} ptr idx val mem) -(MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(c+d) -> (MOVDstoreidx [c+d] {sym} ptr idx val mem) -(FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(c+d) -> (FMOVSstoreidx [c+d] {sym} ptr idx val mem) -(FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(c+d) -> (FMOVDstoreidx [c+d] {sym} ptr idx val mem) - -(MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (MOVBZloadidx [c+d] {sym} ptr idx mem) -(MOVBloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (MOVBloadidx [c+d] {sym} ptr idx mem) -(MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (MOVHZloadidx [c+d] 
{sym} ptr idx mem) -(MOVHloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (MOVHloadidx [c+d] {sym} ptr idx mem) -(MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (MOVWZloadidx [c+d] {sym} ptr idx mem) -(MOVWloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (MOVWloadidx [c+d] {sym} ptr idx mem) -(MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (MOVDloadidx [c+d] {sym} ptr idx mem) -(FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (FMOVSloadidx [c+d] {sym} ptr idx mem) -(FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (FMOVDloadidx [c+d] {sym} ptr idx mem) - -(MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(c+d) -> (MOVBstoreidx [c+d] {sym} ptr idx val mem) -(MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(c+d) -> (MOVHstoreidx [c+d] {sym} ptr idx val mem) -(MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(c+d) -> (MOVWstoreidx [c+d] {sym} ptr idx val mem) -(MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(c+d) -> (MOVDstoreidx [c+d] {sym} ptr idx val mem) -(FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(c+d) -> (FMOVSstoreidx [c+d] {sym} ptr idx val mem) -(FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(c+d) -> (FMOVDstoreidx [c+d] {sym} ptr idx val mem) +(MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (MOVBZloadidx [c+d] {sym} ptr idx mem) +(MOVBloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (MOVBloadidx [c+d] {sym} ptr idx mem) +(MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (MOVHZloadidx [c+d] {sym} ptr idx mem) +(MOVHloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (MOVHloadidx [c+d] {sym} ptr idx mem) +(MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (MOVWZloadidx 
[c+d] {sym} ptr idx mem) +(MOVWloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (MOVWloadidx [c+d] {sym} ptr idx mem) +(MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (MOVDloadidx [c+d] {sym} ptr idx mem) +(FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (FMOVSloadidx [c+d] {sym} ptr idx mem) +(FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (FMOVDloadidx [c+d] {sym} ptr idx mem) + +(MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(int64(c)+int64(d)) => (MOVBstoreidx [c+d] {sym} ptr idx val mem) +(MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(int64(c)+int64(d)) => (MOVHstoreidx [c+d] {sym} ptr idx val mem) +(MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(int64(c)+int64(d)) => (MOVWstoreidx [c+d] {sym} ptr idx val mem) +(MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(int64(c)+int64(d)) => (MOVDstoreidx [c+d] {sym} ptr idx val mem) +(FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(int64(c)+int64(d)) => (FMOVSstoreidx [c+d] {sym} ptr idx val mem) +(FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(int64(c)+int64(d)) => (FMOVDstoreidx [c+d] {sym} ptr idx val mem) + +(MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (MOVBZloadidx [c+d] {sym} ptr idx mem) +(MOVBloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (MOVBloadidx [c+d] {sym} ptr idx mem) +(MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (MOVHZloadidx [c+d] {sym} ptr idx mem) +(MOVHloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (MOVHloadidx [c+d] {sym} ptr idx mem) +(MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (MOVWZloadidx [c+d] {sym} ptr idx mem) +(MOVWloadidx [c] {sym} ptr (ADDconst [d] idx) mem) 
&& is20Bit(int64(c)+int64(d)) => (MOVWloadidx [c+d] {sym} ptr idx mem) +(MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (MOVDloadidx [c+d] {sym} ptr idx mem) +(FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (FMOVSloadidx [c+d] {sym} ptr idx mem) +(FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (FMOVDloadidx [c+d] {sym} ptr idx mem) + +(MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(int64(c)+int64(d)) => (MOVBstoreidx [c+d] {sym} ptr idx val mem) +(MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(int64(c)+int64(d)) => (MOVHstoreidx [c+d] {sym} ptr idx val mem) +(MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(int64(c)+int64(d)) => (MOVWstoreidx [c+d] {sym} ptr idx val mem) +(MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(int64(c)+int64(d)) => (MOVDstoreidx [c+d] {sym} ptr idx val mem) +(FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(int64(c)+int64(d)) => (FMOVSstoreidx [c+d] {sym} ptr idx val mem) +(FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(int64(c)+int64(d)) => (FMOVDstoreidx [c+d] {sym} ptr idx val mem) // MOVDaddr into MOVDaddridx -(MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB -> - (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) -(MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB -> - (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) +(MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => + (MOVDaddridx [off1+off2] {mergeSymTyped(sym1,sym2)} x y) +(MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB => + (MOVDaddridx 
[off1+off2] {mergeSymTyped(sym1,sym2)} x y) // Absorb InvertFlags into branches. (BRC {c} (InvertFlags cmp) yes no) => (BRC {c.ReverseComparison()} cmp yes no) // Constant comparisons. -(CMPconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ) -(CMPconst (MOVDconst [x]) [y]) && x (FlagLT) -(CMPconst (MOVDconst [x]) [y]) && x>y -> (FlagGT) +(CMPconst (MOVDconst [x]) [y]) && x==int64(y) => (FlagEQ) +(CMPconst (MOVDconst [x]) [y]) && x (FlagLT) +(CMPconst (MOVDconst [x]) [y]) && x>int64(y) => (FlagGT) (CMPUconst (MOVDconst [x]) [y]) && uint64(x)==uint64(y) => (FlagEQ) (CMPUconst (MOVDconst [x]) [y]) && uint64(x) (FlagLT) (CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT) @@ -1158,31 +1157,31 @@ // Convert constant subtracts to constant adds. (SUBconst [c] x) && c != -(1<<31) => (ADDconst [-c] x) -(SUBWconst [c] x) -> (ADDWconst [int64(int32(-c))] x) +(SUBWconst [c] x) => (ADDWconst [-int32(c)] x) // generic constant folding // TODO: more of this -(ADDconst [c] (MOVDconst [d])) -> (MOVDconst [c+d]) -(ADDWconst [c] (MOVDconst [d])) -> (MOVDconst [int64(int32(c+d))]) -(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x) -(ADDWconst [c] (ADDWconst [d] x)) -> (ADDWconst [int64(int32(c+d))] x) -(SUBconst (MOVDconst [d]) [c]) -> (MOVDconst [d-c]) -(SUBconst (SUBconst x [d]) [c]) && is32Bit(-c-d) -> (ADDconst [-c-d] x) +(ADDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d]) +(ADDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d]) +(ADDconst [c] (ADDconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDconst [c+d] x) +(ADDWconst [c] (ADDWconst [d] x)) => (ADDWconst [int32(c+d)] x) +(SUBconst (MOVDconst [d]) [c]) => (MOVDconst [d-int64(c)]) +(SUBconst (SUBconst x [d]) [c]) && is32Bit(-int64(c)-int64(d)) => (ADDconst [-c-d] x) (SRADconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)]) (SRAWconst [c] (MOVDconst [d])) => (MOVDconst [int64(int32(d))>>uint64(c)]) (NEG (MOVDconst [c])) => (MOVDconst [-c]) (NEGW (MOVDconst [c])) => (MOVDconst 
[int64(int32(-c))]) -(MULLDconst [c] (MOVDconst [d])) -> (MOVDconst [c*d]) -(MULLWconst [c] (MOVDconst [d])) -> (MOVDconst [int64(int32(c*d))]) +(MULLDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)*d]) +(MULLWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c*int32(d))]) (AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d]) (ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d]) -(ANDWconst [c] (MOVDconst [d])) -> (MOVDconst [c&d]) +(ANDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)&d]) (OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d]) (ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d]) -(ORWconst [c] (MOVDconst [d])) -> (MOVDconst [c|d]) +(ORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)|d]) (XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d]) (XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d]) -(XORWconst [c] (MOVDconst [d])) -> (MOVDconst [c^d]) +(XORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)^d]) (LoweredRound32F x:(FMOVSconst)) => x (LoweredRound64F x:(FMOVDconst)) => x @@ -1199,19 +1198,19 @@ (XOR x x) => (MOVDconst [0]) (XORW x x) => (MOVDconst [0]) (NEG (ADDconst [c] (NEG x))) && c != -(1<<31) => (ADDconst [-c] x) -(MOVBZreg (ANDWconst [m] x)) -> (MOVWZreg (ANDWconst [int64( uint8(m))] x)) -(MOVHZreg (ANDWconst [m] x)) -> (MOVWZreg (ANDWconst [int64(uint16(m))] x)) -(MOVBreg (ANDWconst [m] x)) && int8(m) >= 0 -> (MOVWZreg (ANDWconst [int64( uint8(m))] x)) -(MOVHreg (ANDWconst [m] x)) && int16(m) >= 0 -> (MOVWZreg (ANDWconst [int64(uint16(m))] x)) +(MOVBZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst [int32( uint8(m))] x)) +(MOVHZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst [int32(uint16(m))] x)) +(MOVBreg (ANDWconst [m] x)) && int8(m) >= 0 => (MOVWZreg (ANDWconst [int32( uint8(m))] x)) +(MOVHreg (ANDWconst [m] x)) && int16(m) >= 0 => (MOVWZreg (ANDWconst [int32(uint16(m))] x)) // carry flag generation // (only constant fold carry of zero) (Select1 (ADDCconst (MOVDconst [c]) [d])) - && uint64(c+d) >= uint64(c) && c+d == 0 
- -> (FlagEQ) + && uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0 + => (FlagEQ) (Select1 (ADDCconst (MOVDconst [c]) [d])) - && uint64(c+d) >= uint64(c) && c+d != 0 - -> (FlagLT) + && uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0 + => (FlagLT) // borrow flag generation // (only constant fold borrow of zero) @@ -1225,8 +1224,8 @@ // add with carry (ADDE x y (FlagEQ)) => (ADDC x y) (ADDE x y (FlagLT)) => (ADDC x y) -(ADDC x (MOVDconst [c])) && is16Bit(c) -> (ADDCconst x [c]) -(Select0 (ADDCconst (MOVDconst [c]) [d])) -> (MOVDconst [c+d]) +(ADDC x (MOVDconst [c])) && is16Bit(c) => (ADDCconst x [int16(c)]) +(Select0 (ADDCconst (MOVDconst [c]) [d])) => (MOVDconst [c+int64(d)]) // subtract with borrow (SUBE x y (FlagGT)) => (SUBC x y) @@ -1256,14 +1255,12 @@ (C(G|LG)IJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow} borrow) // fused multiply-add -(Select0 (F(ADD|SUB) (FMUL y z) x)) -> (FM(ADD|SUB) x y z) -(Select0 (F(ADDS|SUBS) (FMULS y z) x)) -> (FM(ADDS|SUBS) x y z) +(Select0 (F(ADD|SUB) (FMUL y z) x)) => (FM(ADD|SUB) x y z) +(Select0 (F(ADDS|SUBS) (FMULS y z) x)) => (FM(ADDS|SUBS) x y z) // Convert floating point comparisons against zero into 'load and test' instructions. -(FCMP x (FMOVDconst [c])) && auxTo64F(c) == 0 -> (LTDBR x) -(FCMPS x (FMOVSconst [c])) && auxTo32F(c) == 0 -> (LTEBR x) -(FCMP (FMOVDconst [c]) x) && auxTo64F(c) == 0 -> (InvertFlags (LTDBR x)) -(FCMPS (FMOVSconst [c]) x) && auxTo32F(c) == 0 -> (InvertFlags (LTEBR x)) +(F(CMP|CMPS) x (FMOV(D|S)const [0.0])) => (LT(D|E)BR x) +(F(CMP|CMPS) (FMOV(D|S)const [0.0]) x) => (InvertFlags (LT(D|E)BR x)) // FSUB, FSUBS, FADD, FADDS now produce a condition code representing the // comparison of the result with 0.0. 
If a compare with zero instruction @@ -1274,30 +1271,30 @@ // but moving the flag generating value to a different block seems to // increase the likelihood that the flags value will have to be regenerated // by flagalloc which is not what we want. -(LTDBR (Select0 x:(F(ADD|SUB) _ _))) && b == x.Block -> (Select1 x) -(LTEBR (Select0 x:(F(ADDS|SUBS) _ _))) && b == x.Block -> (Select1 x) +(LTDBR (Select0 x:(F(ADD|SUB) _ _))) && b == x.Block => (Select1 x) +(LTEBR (Select0 x:(F(ADDS|SUBS) _ _))) && b == x.Block => (Select1 x) // Fold memory operations into operations. // Exclude global data (SB) because these instructions cannot handle relative addresses. // TODO(mundaym): indexed versions of these? ((ADD|SUB|MULLD|AND|OR|XOR) x g:(MOVDload [off] {sym} ptr mem)) && ptr.Op != OpSB - && is20Bit(off) + && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) - -> ((ADD|SUB|MULLD|AND|OR|XOR)load [off] {sym} x ptr mem) + => ((ADD|SUB|MULLD|AND|OR|XOR)load [off] {sym} x ptr mem) ((ADD|SUB|MULL|AND|OR|XOR)W x g:(MOVWload [off] {sym} ptr mem)) && ptr.Op != OpSB - && is20Bit(off) + && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) - -> ((ADD|SUB|MULL|AND|OR|XOR)Wload [off] {sym} x ptr mem) + => ((ADD|SUB|MULL|AND|OR|XOR)Wload [off] {sym} x ptr mem) ((ADD|SUB|MULL|AND|OR|XOR)W x g:(MOVWZload [off] {sym} ptr mem)) && ptr.Op != OpSB - && is20Bit(off) + && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) - -> ((ADD|SUB|MULL|AND|OR|XOR)Wload [off] {sym} x ptr mem) + => ((ADD|SUB|MULL|AND|OR|XOR)Wload [off] {sym} x ptr mem) // Combine constant stores into larger (unaligned) stores. 
// Avoid SB because constant stores to relative offsets are @@ -1305,21 +1302,21 @@ (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) && p.Op != OpSB && x.Uses == 1 - && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() + && a.Off() + 1 == c.Off() && clobber(x) - -> (MOVHstoreconst [makeValAndOff(ValAndOff(c).Val()&0xff | ValAndOff(a).Val()<<8, ValAndOff(a).Off())] {s} p mem) + => (MOVHstoreconst [makeValAndOff32(c.Val32()&0xff | a.Val32()<<8, a.Off32())] {s} p mem) (MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem)) && p.Op != OpSB && x.Uses == 1 - && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() + && a.Off() + 2 == c.Off() && clobber(x) - -> (MOVWstore [ValAndOff(a).Off()] {s} p (MOVDconst [int64(int32(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16))]) mem) + => (MOVWstore [a.Off32()] {s} p (MOVDconst [int64(c.Val32()&0xffff | a.Val32()<<16)]) mem) (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) && p.Op != OpSB && x.Uses == 1 - && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() + && a.Off() + 4 == c.Off() && clobber(x) - -> (MOVDstore [ValAndOff(a).Off()] {s} p (MOVDconst [ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32]) mem) + => (MOVDstore [a.Off32()] {s} p (MOVDconst [c.Val()&0xffffffff | a.Val()<<32]) mem) // Combine stores into larger (unaligned) stores. 
// It doesn't work on global data (based on SB) because stores with relative addressing @@ -1328,93 +1325,93 @@ && p.Op != OpSB && x.Uses == 1 && clobber(x) - -> (MOVHstore [i-1] {s} p w mem) + => (MOVHstore [i-1] {s} p w mem) (MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem)) && p.Op != OpSB && x.Uses == 1 && clobber(x) - -> (MOVHstore [i-1] {s} p w0 mem) + => (MOVHstore [i-1] {s} p w0 mem) (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRWconst [8] w) mem)) && p.Op != OpSB && x.Uses == 1 && clobber(x) - -> (MOVHstore [i-1] {s} p w mem) + => (MOVHstore [i-1] {s} p w mem) (MOVBstore [i] {s} p w0:(SRWconst [j] w) x:(MOVBstore [i-1] {s} p (SRWconst [j+8] w) mem)) && p.Op != OpSB && x.Uses == 1 && clobber(x) - -> (MOVHstore [i-1] {s} p w0 mem) + => (MOVHstore [i-1] {s} p w0 mem) (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem)) && p.Op != OpSB && x.Uses == 1 && clobber(x) - -> (MOVWstore [i-2] {s} p w mem) + => (MOVWstore [i-2] {s} p w mem) (MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem)) && p.Op != OpSB && x.Uses == 1 && clobber(x) - -> (MOVWstore [i-2] {s} p w0 mem) + => (MOVWstore [i-2] {s} p w0 mem) (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRWconst [16] w) mem)) && p.Op != OpSB && x.Uses == 1 && clobber(x) - -> (MOVWstore [i-2] {s} p w mem) + => (MOVWstore [i-2] {s} p w mem) (MOVHstore [i] {s} p w0:(SRWconst [j] w) x:(MOVHstore [i-2] {s} p (SRWconst [j+16] w) mem)) && p.Op != OpSB && x.Uses == 1 && clobber(x) - -> (MOVWstore [i-2] {s} p w0 mem) + => (MOVWstore [i-2] {s} p w0 mem) (MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem)) && p.Op != OpSB && x.Uses == 1 && clobber(x) - -> (MOVDstore [i-4] {s} p w mem) + => (MOVDstore [i-4] {s} p w mem) (MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem)) && p.Op != OpSB && x.Uses == 1 && clobber(x) - -> (MOVDstore [i-4] {s} p w0 mem) + => (MOVDstore 
[i-4] {s} p w0 mem) (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [8] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVHstoreidx [i-1] {s} p idx w mem) + => (MOVHstoreidx [i-1] {s} p idx w mem) (MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVHstoreidx [i-1] {s} p idx w0 mem) + => (MOVHstoreidx [i-1] {s} p idx w0 mem) (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [8] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVHstoreidx [i-1] {s} p idx w mem) + => (MOVHstoreidx [i-1] {s} p idx w mem) (MOVBstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [j+8] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVHstoreidx [i-1] {s} p idx w0 mem) + => (MOVHstoreidx [i-1] {s} p idx w0 mem) (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVWstoreidx [i-2] {s} p idx w mem) + => (MOVWstoreidx [i-2] {s} p idx w mem) (MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVWstoreidx [i-2] {s} p idx w0 mem) + => (MOVWstoreidx [i-2] {s} p idx w0 mem) (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [16] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVWstoreidx [i-2] {s} p idx w mem) + => (MOVWstoreidx [i-2] {s} p idx w mem) (MOVHstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [j+16] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVWstoreidx [i-2] {s} p idx w0 mem) + => (MOVWstoreidx [i-2] {s} p idx w0 mem) (MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVDstoreidx [i-4] {s} p idx w mem) + => (MOVDstoreidx [i-4] {s} p idx w mem) (MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) 
mem)) && x.Uses == 1 && clobber(x) - -> (MOVDstoreidx [i-4] {s} p idx w0 mem) + => (MOVDstoreidx [i-4] {s} p idx w0 mem) // Combine stores into larger (unaligned) stores with the bytes reversed (little endian). // Store-with-bytes-reversed instructions do not support relative memory addresses, @@ -1423,87 +1420,87 @@ && p.Op != OpSB && x.Uses == 1 && clobber(x) - -> (MOVHBRstore [i-1] {s} p w mem) + => (MOVHBRstore [i-1] {s} p w mem) (MOVBstore [i] {s} p (SRDconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRDconst [j-8] w) mem)) && p.Op != OpSB && x.Uses == 1 && clobber(x) - -> (MOVHBRstore [i-1] {s} p w0 mem) + => (MOVHBRstore [i-1] {s} p w0 mem) (MOVBstore [i] {s} p (SRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) && p.Op != OpSB && x.Uses == 1 && clobber(x) - -> (MOVHBRstore [i-1] {s} p w mem) + => (MOVHBRstore [i-1] {s} p w mem) (MOVBstore [i] {s} p (SRWconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRWconst [j-8] w) mem)) && p.Op != OpSB && x.Uses == 1 && clobber(x) - -> (MOVHBRstore [i-1] {s} p w0 mem) + => (MOVHBRstore [i-1] {s} p w0 mem) (MOVHBRstore [i] {s} p (SRDconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem)) && x.Uses == 1 && clobber(x) - -> (MOVWBRstore [i-2] {s} p w mem) + => (MOVWBRstore [i-2] {s} p w mem) (MOVHBRstore [i] {s} p (SRDconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRDconst [j-16] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVWBRstore [i-2] {s} p w0 mem) + => (MOVWBRstore [i-2] {s} p w0 mem) (MOVHBRstore [i] {s} p (SRWconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem)) && x.Uses == 1 && clobber(x) - -> (MOVWBRstore [i-2] {s} p w mem) + => (MOVWBRstore [i-2] {s} p w mem) (MOVHBRstore [i] {s} p (SRWconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRWconst [j-16] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVWBRstore [i-2] {s} p w0 mem) + => (MOVWBRstore [i-2] {s} p w0 mem) (MOVWBRstore [i] {s} p (SRDconst [32] w) x:(MOVWBRstore [i-4] {s} p w mem)) && x.Uses == 1 && clobber(x) - -> (MOVDBRstore [i-4] {s} p w mem) + => (MOVDBRstore [i-4] {s} p w mem) 
(MOVWBRstore [i] {s} p (SRDconst [j] w) x:(MOVWBRstore [i-4] {s} p w0:(SRDconst [j-32] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVDBRstore [i-4] {s} p w0 mem) + => (MOVDBRstore [i-4] {s} p w0 mem) (MOVBstoreidx [i] {s} p idx (SRDconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) && x.Uses == 1 && clobber(x) - -> (MOVHBRstoreidx [i-1] {s} p idx w mem) + => (MOVHBRstoreidx [i-1] {s} p idx w mem) (MOVBstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRDconst [j-8] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVHBRstoreidx [i-1] {s} p idx w0 mem) + => (MOVHBRstoreidx [i-1] {s} p idx w0 mem) (MOVBstoreidx [i] {s} p idx (SRWconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) && x.Uses == 1 && clobber(x) - -> (MOVHBRstoreidx [i-1] {s} p idx w mem) + => (MOVHBRstoreidx [i-1] {s} p idx w mem) (MOVBstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRWconst [j-8] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVHBRstoreidx [i-1] {s} p idx w0 mem) + => (MOVHBRstoreidx [i-1] {s} p idx w0 mem) (MOVHBRstoreidx [i] {s} p idx (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem)) && x.Uses == 1 && clobber(x) - -> (MOVWBRstoreidx [i-2] {s} p idx w mem) + => (MOVWBRstoreidx [i-2] {s} p idx w mem) (MOVHBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRDconst [j-16] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVWBRstoreidx [i-2] {s} p idx w0 mem) + => (MOVWBRstoreidx [i-2] {s} p idx w0 mem) (MOVHBRstoreidx [i] {s} p idx (SRWconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem)) && x.Uses == 1 && clobber(x) - -> (MOVWBRstoreidx [i-2] {s} p idx w mem) + => (MOVWBRstoreidx [i-2] {s} p idx w mem) (MOVHBRstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRWconst [j-16] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVWBRstoreidx [i-2] {s} p idx w0 mem) + => (MOVWBRstoreidx [i-2] {s} p idx w0 mem) (MOVWBRstoreidx [i] {s} p idx (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] 
{s} p idx w mem)) && x.Uses == 1 && clobber(x) - -> (MOVDBRstoreidx [i-4] {s} p idx w mem) + => (MOVDBRstoreidx [i-4] {s} p idx w mem) (MOVWBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVWBRstoreidx [i-4] {s} p idx w0:(SRDconst [j-32] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVDBRstoreidx [i-4] {s} p idx w0 mem) + => (MOVDBRstoreidx [i-4] {s} p idx w0 mem) // Combining byte loads into larger (unaligned) loads. @@ -1518,7 +1515,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) + => @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) (OR x1:(MOVBZload [i1] {s} p mem) sh:(SLDconst [8] x0:(MOVBZload [i0] {s} p mem))) @@ -1529,7 +1526,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) + => @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) (ORW x1:(MOVHZload [i1] {s} p mem) sh:(SLWconst [16] x0:(MOVHZload [i0] {s} p mem))) @@ -1540,7 +1537,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) + => @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) (OR x1:(MOVHZload [i1] {s} p mem) sh:(SLDconst [16] x0:(MOVHZload [i0] {s} p mem))) @@ -1551,7 +1548,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) + => @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) (OR x1:(MOVWZload [i1] {s} p mem) sh:(SLDconst [32] x0:(MOVWZload [i0] {s} p mem))) @@ -1562,7 +1559,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVDload [i0] {s} p mem) + => @mergePoint(b,x0,x1) (MOVDload [i0] {s} p mem) (ORW s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) @@ -1579,7 +1576,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZload [i0] {s} p mem)) y) + => @mergePoint(b,x0,x1,y) 
(ORW (SLWconst [j1] (MOVHZload [i0] {s} p mem)) y) (OR s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) @@ -1596,7 +1593,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZload [i0] {s} p mem)) y) + => @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZload [i0] {s} p mem)) y) (OR s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem)) @@ -1613,7 +1610,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZload [i0] {s} p mem)) y) + => @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZload [i0] {s} p mem)) y) // Big-endian indexed loads @@ -1626,7 +1623,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) + => @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) (OR x1:(MOVBZloadidx [i1] {s} p idx mem) sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem))) @@ -1637,7 +1634,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) + => @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) (ORW x1:(MOVHZloadidx [i1] {s} p idx mem) sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) @@ -1648,7 +1645,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) + => @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) (OR x1:(MOVHZloadidx [i1] {s} p idx mem) sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) @@ -1659,7 +1656,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) + => @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) (OR x1:(MOVWZloadidx [i1] {s} p idx mem) sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} p idx mem))) @@ -1670,7 +1667,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != 
nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) + => @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) @@ -1687,7 +1684,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + => @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) @@ -1704,7 +1701,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + => @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem)) @@ -1721,7 +1718,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) + => @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) // Little-endian loads @@ -1734,7 +1731,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) + => @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) (OR x0:(MOVBZload [i0] {s} p mem) sh:(SLDconst [8] x1:(MOVBZload [i1] {s} p mem))) @@ -1745,7 +1742,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) + => @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) (ORW r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))) @@ -1757,7 +1754,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) - -> @mergePoint(b,x0,x1) (MOVWBRload [i0] {s} p mem) + => @mergePoint(b,x0,x1) (MOVWBRload [i0] 
{s} p mem) (OR r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))) @@ -1769,7 +1766,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) - -> @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem)) + => @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem)) (OR r0:(MOVWZreg x0:(MOVWBRload [i0] {s} p mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRload [i1] {s} p mem)))) @@ -1781,7 +1778,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) - -> @mergePoint(b,x0,x1) (MOVDBRload [i0] {s} p mem) + => @mergePoint(b,x0,x1) (MOVDBRload [i0] {s} p mem) (ORW s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) @@ -1799,7 +1796,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) + => @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) (OR s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) @@ -1817,7 +1814,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) + => @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))) @@ -1836,7 +1833,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y) + => @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y) // Little-endian indexed loads @@ -1849,7 +1846,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) + => @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) (OR x0:(MOVBZloadidx [i0] {s} 
p idx mem) sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) @@ -1860,7 +1857,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) + => @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) (ORW r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) @@ -1872,7 +1869,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) - -> @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) + => @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) (OR r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) @@ -1884,7 +1881,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) - -> @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) + => @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) (OR r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} p idx mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} p idx mem)))) @@ -1896,7 +1893,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) - -> @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) + => @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) @@ -1914,7 +1911,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + => @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) @@ -1932,7 +1929,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + => 
@mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) @@ -1951,53 +1948,53 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + => @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) // Combine stores into store multiples. // 32-bit (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem)) && p.Op != OpSB && x.Uses == 1 - && is20Bit(i-4) + && is20Bit(int64(i)-4) && clobber(x) - -> (STM2 [i-4] {s} p w0 w1 mem) + => (STM2 [i-4] {s} p w0 w1 mem) (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem)) && x.Uses == 1 - && is20Bit(i-8) + && is20Bit(int64(i)-8) && clobber(x) - -> (STM3 [i-8] {s} p w0 w1 w2 mem) + => (STM3 [i-8] {s} p w0 w1 w2 mem) (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem)) && x.Uses == 1 - && is20Bit(i-12) + && is20Bit(int64(i)-12) && clobber(x) - -> (STM4 [i-12] {s} p w0 w1 w2 w3 mem) + => (STM4 [i-12] {s} p w0 w1 w2 w3 mem) (STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem)) && x.Uses == 1 - && is20Bit(i-8) + && is20Bit(int64(i)-8) && clobber(x) - -> (STM4 [i-8] {s} p w0 w1 w2 w3 mem) + => (STM4 [i-8] {s} p w0 w1 w2 w3 mem) // 64-bit (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem)) && p.Op != OpSB && x.Uses == 1 - && is20Bit(i-8) + && is20Bit(int64(i)-8) && clobber(x) - -> (STMG2 [i-8] {s} p w0 w1 mem) + => (STMG2 [i-8] {s} p w0 w1 mem) (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem)) && x.Uses == 1 - && is20Bit(i-16) + && is20Bit(int64(i)-16) && clobber(x) - -> (STMG3 [i-16] {s} p w0 w1 w2 mem) + => (STMG3 [i-16] {s} p w0 w1 w2 mem) (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem)) && x.Uses == 1 - && is20Bit(i-24) + && is20Bit(int64(i)-24) && clobber(x) - -> (STMG4 [i-24] {s} p w0 w1 w2 w3 mem) + => (STMG4 [i-24] {s} p 
w0 w1 w2 w3 mem) (STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem)) && x.Uses == 1 - && is20Bit(i-16) + && is20Bit(int64(i)-16) && clobber(x) - -> (STMG4 [i-16] {s} p w0 w1 w2 w3 mem) + => (STMG4 [i-16] {s} p w0 w1 w2 w3 mem) // Convert 32-bit store multiples into 64-bit stores. -(STM2 [i] {s} p (SRDconst [32] x) x mem) -> (MOVDstore [i] {s} p x mem) +(STM2 [i] {s} p (SRDconst [32] x) x mem) => (MOVDstore [i] {s} p x mem) diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index 536f8db320..2dba8163bc 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -3,6 +3,7 @@ package ssa +import "math" import "cmd/compile/internal/types" import "cmd/internal/obj/s390x" @@ -5281,19 +5282,19 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { v_0 := v.Args[0] // match: (ADD x (MOVDconst [c])) // cond: is32Bit(c) - // result: (ADDconst [c] x) + // result: (ADDconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpS390XMOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { continue } v.reset(OpS390XADDconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -5362,7 +5363,7 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { break } // match: (ADD x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDload [off] {sym} x ptr mem) for { t := v.Type @@ -5372,17 +5373,17 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { if g.Op != OpS390XMOVDload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) 
{ + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XADDload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -5395,19 +5396,19 @@ func rewriteValueS390X_OpS390XADDC(v *Value) bool { v_0 := v.Args[0] // match: (ADDC x (MOVDconst [c])) // cond: is16Bit(c) - // result: (ADDCconst x [c]) + // result: (ADDCconst x [int16(c)]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpS390XMOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is16Bit(c)) { continue } v.reset(OpS390XADDCconst) - v.AuxInt = c + v.AuxInt = int16ToAuxInt(int16(c)) v.AddArg(x) return true } @@ -5482,16 +5483,16 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ADDW x (MOVDconst [c])) - // result: (ADDWconst [int64(int32(c))] x) + // result: (ADDWconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpS390XMOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XADDWconst) - v.AuxInt = int64(int32(c)) + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -5537,7 +5538,7 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { break } // match: (ADDW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -5547,24 +5548,24 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { if g.Op != OpS390XMOVWload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && 
clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XADDWload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } break } // match: (ADDW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -5574,17 +5575,17 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { if g.Op != OpS390XMOVWZload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XADDWload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -5607,28 +5608,28 @@ func rewriteValueS390X_OpS390XADDWconst(v *Value) bool { return true } // match: (ADDWconst [c] (MOVDconst [d])) - // result: (MOVDconst [int64(int32(c+d))]) + // result: (MOVDconst [int64(c)+d]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XMOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpS390XMOVDconst) - v.AuxInt = int64(int32(c + d)) + v.AuxInt = int64ToAuxInt(int64(c) + d) return true } // match: (ADDWconst [c] (ADDWconst [d] x)) - // result: (ADDWconst [int64(int32(c+d))] x) + // result: (ADDWconst [int32(c+d)] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XADDWconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpS390XADDWconst) - v.AuxInt = 
int64(int32(c + d)) + v.AuxInt = int32ToAuxInt(int32(c + d)) v.AddArg(x) return true } @@ -5639,47 +5640,47 @@ func rewriteValueS390X_OpS390XADDWload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) // result: (ADDWload [off1+off2] {sym} x ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XADDconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { break } v.reset(OpS390XADDWload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } // match: (ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) - // result: (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (ADDWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) for { - o1 := v.AuxInt - s1 := v.Aux + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XMOVDaddr { break } - o2 := v_1.AuxInt - s2 := v_1.Aux + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { break } v.reset(OpS390XADDWload) - v.AuxInt = o1 + o2 - v.Aux = mergeSym(s1, s2) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSymTyped(s1, s2)) v.AddArg3(x, ptr, mem) return true } @@ -5688,63 +5689,63 @@ func rewriteValueS390X_OpS390XADDWload(v *Value) bool { func 
rewriteValueS390X_OpS390XADDconst(v *Value) bool { v_0 := v.Args[0] // match: (ADDconst [c] (MOVDaddr [d] {s} x:(SB))) - // cond: ((c+d)&1 == 0) && is32Bit(c+d) + // cond: ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d)) // result: (MOVDaddr [c+d] {s} x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XMOVDaddr { break } - d := v_0.AuxInt - s := v_0.Aux + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) x := v_0.Args[0] - if x.Op != OpSB || !(((c+d)&1 == 0) && is32Bit(c+d)) { + if x.Op != OpSB || !(((c+d)&1 == 0) && is32Bit(int64(c)+int64(d))) { break } v.reset(OpS390XMOVDaddr) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg(x) return true } // match: (ADDconst [c] (MOVDaddr [d] {s} x)) - // cond: x.Op != OpSB && is20Bit(c+d) + // cond: x.Op != OpSB && is20Bit(int64(c)+int64(d)) // result: (MOVDaddr [c+d] {s} x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XMOVDaddr { break } - d := v_0.AuxInt - s := v_0.Aux + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) x := v_0.Args[0] - if !(x.Op != OpSB && is20Bit(c+d)) { + if !(x.Op != OpSB && is20Bit(int64(c)+int64(d))) { break } v.reset(OpS390XMOVDaddr) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg(x) return true } // match: (ADDconst [c] (MOVDaddridx [d] {s} x y)) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVDaddridx [c+d] {s} x y) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XMOVDaddridx { break } - d := v_0.AuxInt - s := v_0.Aux + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) y := v_0.Args[1] x := v_0.Args[0] - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { break } v.reset(OpS390XMOVDaddridx) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -5759,32 +5760,32 @@ func rewriteValueS390X_OpS390XADDconst(v *Value) 
bool { return true } // match: (ADDconst [c] (MOVDconst [d])) - // result: (MOVDconst [c+d]) + // result: (MOVDconst [int64(c)+d]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XMOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpS390XMOVDconst) - v.AuxInt = c + d + v.AuxInt = int64ToAuxInt(int64(c) + d) return true } // match: (ADDconst [c] (ADDconst [d] x)) - // cond: is32Bit(c+d) + // cond: is32Bit(int64(c)+int64(d)) // result: (ADDconst [c+d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XADDconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(is32Bit(c + d)) { + if !(is32Bit(int64(c) + int64(d))) { break } v.reset(OpS390XADDconst) - v.AuxInt = c + d + v.AuxInt = int32ToAuxInt(c + d) v.AddArg(x) return true } @@ -5819,47 +5820,47 @@ func rewriteValueS390X_OpS390XADDload(v *Value) bool { return true } // match: (ADDload [off1] {sym} x (ADDconst [off2] ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) // result: (ADDload [off1+off2] {sym} x ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XADDconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { break } v.reset(OpS390XADDload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } // match: (ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) - // result: (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (ADDload [o1+o2] {mergeSymTyped(s1, s2)} x 
ptr mem) for { - o1 := v.AuxInt - s1 := v.Aux + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XMOVDaddr { break } - o2 := v_1.AuxInt - s2 := v_1.Aux + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { break } v.reset(OpS390XADDload) - v.AuxInt = o1 + o2 - v.Aux = mergeSym(s1, s2) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSymTyped(s1, s2)) v.AddArg3(x, ptr, mem) return true } @@ -5892,20 +5893,20 @@ func rewriteValueS390X_OpS390XAND(v *Value) bool { } // match: (AND x (MOVDconst [c])) // cond: is32Bit(c) && c >= 0 - // result: (MOVWZreg (ANDWconst [int64(int32(c))] x)) + // result: (MOVWZreg (ANDWconst [int32(c)] x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpS390XMOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c) && c >= 0) { continue } v.reset(OpS390XMOVWZreg) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = int64(int32(c)) + v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -6001,7 +6002,7 @@ func rewriteValueS390X_OpS390XAND(v *Value) bool { return true } // match: (AND x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDload [off] {sym} x ptr mem) for { t := v.Type @@ -6011,17 +6012,17 @@ func rewriteValueS390X_OpS390XAND(v *Value) bool { if g.Op != OpS390XMOVDload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && 
is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XANDload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -6033,16 +6034,16 @@ func rewriteValueS390X_OpS390XANDW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ANDW x (MOVDconst [c])) - // result: (ANDWconst [int64(int32(c))] x) + // result: (ANDWconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpS390XMOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XANDWconst) - v.AuxInt = int64(int32(c)) + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -6059,7 +6060,7 @@ func rewriteValueS390X_OpS390XANDW(v *Value) bool { return true } // match: (ANDW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -6069,24 +6070,24 @@ func rewriteValueS390X_OpS390XANDW(v *Value) bool { if g.Op != OpS390XMOVWload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XANDWload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } break } // match: (ANDW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload 
[off] {sym} x ptr mem) for { t := v.Type @@ -6096,17 +6097,17 @@ func rewriteValueS390X_OpS390XANDW(v *Value) bool { if g.Op != OpS390XMOVWZload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XANDWload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -6117,7 +6118,7 @@ func rewriteValueS390X_OpS390XANDW(v *Value) bool { func rewriteValueS390X_OpS390XANDWconst(v *Value) bool { v_0 := v.Args[0] // match: (ANDWconst [c] (ANDWconst [d] x)) - // result: (ANDWconst [c & d] x) + // result: (ANDWconst [c&d] x) for { c := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XANDWconst { @@ -6177,15 +6178,15 @@ func rewriteValueS390X_OpS390XANDWconst(v *Value) bool { return true } // match: (ANDWconst [c] (MOVDconst [d])) - // result: (MOVDconst [c&d]) + // result: (MOVDconst [int64(c)&d]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XMOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpS390XMOVDconst) - v.AuxInt = c & d + v.AuxInt = int64ToAuxInt(int64(c) & d) return true } return false @@ -6195,47 +6196,47 @@ func rewriteValueS390X_OpS390XANDWload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) // result: (ANDWload [off1+off2] {sym} x ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XADDconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if 
!(ptr.Op != OpSB && is20Bit(off1+off2)) { + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { break } v.reset(OpS390XANDWload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } // match: (ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) - // result: (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (ANDWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) for { - o1 := v.AuxInt - s1 := v.Aux + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XMOVDaddr { break } - o2 := v_1.AuxInt - s2 := v_1.Aux + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { break } v.reset(OpS390XANDWload) - v.AuxInt = o1 + o2 - v.Aux = mergeSym(s1, s2) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSymTyped(s1, s2)) v.AddArg3(x, ptr, mem) return true } @@ -6244,7 +6245,7 @@ func rewriteValueS390X_OpS390XANDWload(v *Value) bool { func rewriteValueS390X_OpS390XANDconst(v *Value) bool { v_0 := v.Args[0] // match: (ANDconst [c] (ANDconst [d] x)) - // result: (ANDconst [c & d] x) + // result: (ANDconst [c&d] x) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpS390XANDconst { @@ -6320,47 +6321,47 @@ func rewriteValueS390X_OpS390XANDload(v *Value) bool { return true } // match: (ANDload [off1] {sym} x (ADDconst [off2] ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) // result: (ANDload [off1+off2] {sym} x ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XADDconst { 
break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { break } v.reset(OpS390XANDload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } // match: (ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) - // result: (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (ANDload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) for { - o1 := v.AuxInt - s1 := v.Aux + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XMOVDaddr { break } - o2 := v_1.AuxInt - s2 := v_1.Aux + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { break } v.reset(OpS390XANDload) - v.AuxInt = o1 + o2 - v.Aux = mergeSym(s1, s2) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSymTyped(s1, s2)) v.AddArg3(x, ptr, mem) return true } @@ -6372,36 +6373,36 @@ func rewriteValueS390X_OpS390XCMP(v *Value) bool { b := v.Block // match: (CMP x (MOVDconst [c])) // cond: is32Bit(c) - // result: (CMPconst x [c]) + // result: (CMPconst x [int32(c)]) for { x := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { break } v.reset(OpS390XCMPconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } // match: (CMP (MOVDconst [c]) x) // cond: is32Bit(c) - // result: (InvertFlags (CMPconst x [c])) + // result: (InvertFlags (CMPconst x [int32(c)])) for { if v_0.Op != OpS390XMOVDconst { break } - c := v_0.AuxInt + c := 
auxIntToInt64(v_0.AuxInt) x := v_1 if !(is32Bit(c)) { break } v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPconst, types.TypeFlags) - v0.AuxInt = c + v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -6429,36 +6430,36 @@ func rewriteValueS390X_OpS390XCMPU(v *Value) bool { b := v.Block // match: (CMPU x (MOVDconst [c])) // cond: isU32Bit(c) - // result: (CMPUconst x [int64(int32(c))]) + // result: (CMPUconst x [int32(c)]) for { x := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isU32Bit(c)) { break } v.reset(OpS390XCMPUconst) - v.AuxInt = int64(int32(c)) + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } // match: (CMPU (MOVDconst [c]) x) // cond: isU32Bit(c) - // result: (InvertFlags (CMPUconst x [int64(int32(c))])) + // result: (InvertFlags (CMPUconst x [int32(c)])) for { if v_0.Op != OpS390XMOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 if !(isU32Bit(c)) { break } v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) - v0.AuxInt = int64(int32(c)) + v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -6656,29 +6657,29 @@ func rewriteValueS390X_OpS390XCMPW(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (CMPW x (MOVDconst [c])) - // result: (CMPWconst x [int64(int32(c))]) + // result: (CMPWconst x [int32(c)]) for { x := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XCMPWconst) - v.AuxInt = int64(int32(c)) + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } // match: (CMPW (MOVDconst [c]) x) - // result: (InvertFlags (CMPWconst x [int64(int32(c))])) + // result: (InvertFlags (CMPWconst x [int32(c)])) for { if v_0.Op != OpS390XMOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPWconst, 
types.TypeFlags) - v0.AuxInt = int64(int32(c)) + v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -6753,29 +6754,29 @@ func rewriteValueS390X_OpS390XCMPWU(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (CMPWU x (MOVDconst [c])) - // result: (CMPWUconst x [int64(int32(c))]) + // result: (CMPWUconst x [int32(c)]) for { x := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XCMPWUconst) - v.AuxInt = int64(int32(c)) + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } // match: (CMPWU (MOVDconst [c]) x) - // result: (InvertFlags (CMPWUconst x [int64(int32(c))])) + // result: (InvertFlags (CMPWUconst x [int32(c)])) for { if v_0.Op != OpS390XMOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) - v0.AuxInt = int64(int32(c)) + v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -7120,45 +7121,45 @@ func rewriteValueS390X_OpS390XCMPWconst(v *Value) bool { func rewriteValueS390X_OpS390XCMPconst(v *Value) bool { v_0 := v.Args[0] // match: (CMPconst (MOVDconst [x]) [y]) - // cond: x==y + // cond: x==int64(y) // result: (FlagEQ) for { - y := v.AuxInt + y := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XMOVDconst { break } - x := v_0.AuxInt - if !(x == y) { + x := auxIntToInt64(v_0.AuxInt) + if !(x == int64(y)) { break } v.reset(OpS390XFlagEQ) return true } // match: (CMPconst (MOVDconst [x]) [y]) - // cond: xy + // cond: x>int64(y) // result: (FlagGT) for { - y := v.AuxInt + y := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XMOVDconst { break } - x := v_0.AuxInt - if !(x > y) { + x := auxIntToInt64(v_0.AuxInt) + if !(x > int64(y)) { break } v.reset(OpS390XFlagGT) @@ -7310,15 +7311,15 @@ func rewriteValueS390X_OpS390XCPSDR(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CPSDR y (FMOVDconst [c])) - // cond: c & -1<<63 == 0 + 
// cond: !math.Signbit(c) // result: (LPDFR y) for { y := v_0 if v_1.Op != OpS390XFMOVDconst { break } - c := v_1.AuxInt - if !(c&-1<<63 == 0) { + c := auxIntToFloat64(v_1.AuxInt) + if !(!math.Signbit(c)) { break } v.reset(OpS390XLPDFR) @@ -7326,15 +7327,15 @@ func rewriteValueS390X_OpS390XCPSDR(v *Value) bool { return true } // match: (CPSDR y (FMOVDconst [c])) - // cond: c & -1<<63 != 0 + // cond: math.Signbit(c) // result: (LNDFR y) for { y := v_0 if v_1.Op != OpS390XFMOVDconst { break } - c := v_1.AuxInt - if !(c&-1<<63 != 0) { + c := auxIntToFloat64(v_1.AuxInt) + if !(math.Signbit(c)) { break } v.reset(OpS390XLNDFR) @@ -7347,34 +7348,24 @@ func rewriteValueS390X_OpS390XFCMP(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (FCMP x (FMOVDconst [c])) - // cond: auxTo64F(c) == 0 + // match: (FCMP x (FMOVDconst [0.0])) // result: (LTDBR x) for { x := v_0 - if v_1.Op != OpS390XFMOVDconst { - break - } - c := v_1.AuxInt - if !(auxTo64F(c) == 0) { + if v_1.Op != OpS390XFMOVDconst || auxIntToFloat64(v_1.AuxInt) != 0.0 { break } v.reset(OpS390XLTDBR) v.AddArg(x) return true } - // match: (FCMP (FMOVDconst [c]) x) - // cond: auxTo64F(c) == 0 + // match: (FCMP (FMOVDconst [0.0]) x) // result: (InvertFlags (LTDBR x)) for { - if v_0.Op != OpS390XFMOVDconst { + if v_0.Op != OpS390XFMOVDconst || auxIntToFloat64(v_0.AuxInt) != 0.0 { break } - c := v_0.AuxInt x := v_1 - if !(auxTo64F(c) == 0) { - break - } v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XLTDBR, v.Type) v0.AddArg(x) @@ -7387,34 +7378,24 @@ func rewriteValueS390X_OpS390XFCMPS(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (FCMPS x (FMOVSconst [c])) - // cond: auxTo32F(c) == 0 + // match: (FCMPS x (FMOVSconst [0.0])) // result: (LTEBR x) for { x := v_0 - if v_1.Op != OpS390XFMOVSconst { - break - } - c := v_1.AuxInt - if !(auxTo32F(c) == 0) { + if v_1.Op != OpS390XFMOVSconst || auxIntToFloat32(v_1.AuxInt) != 0.0 { break } v.reset(OpS390XLTEBR) 
v.AddArg(x) return true } - // match: (FCMPS (FMOVSconst [c]) x) - // cond: auxTo32F(c) == 0 + // match: (FCMPS (FMOVSconst [0.0]) x) // result: (InvertFlags (LTEBR x)) for { - if v_0.Op != OpS390XFMOVSconst { + if v_0.Op != OpS390XFMOVSconst || auxIntToFloat32(v_0.AuxInt) != 0.0 { break } - c := v_0.AuxInt x := v_1 - if !(auxTo32F(c) == 0) { - break - } v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XLTEBR, v.Type) v0.AddArg(x) @@ -7464,68 +7445,68 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { return true } // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (FMOVDload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is20Bit(off1 + off2)) { + if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XFMOVDload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (FMOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XFMOVDload) - v.AuxInt = off1 + off2 - 
v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } // match: (FMOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (FMOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (FMOVDloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XFMOVDloadidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, idx, mem) return true } @@ -7563,46 +7544,46 @@ func rewriteValueS390X_OpS390XFMOVDloadidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (FMOVDloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { break } v.reset(OpS390XFMOVDloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } // match: (FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(c+d) + // cond: 
is20Bit(int64(c)+int64(d)) // result: (FMOVDloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpS390XADDconst { break } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { break } v.reset(OpS390XFMOVDloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } @@ -7613,71 +7594,71 @@ func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (FMOVDstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is20Bit(off1 + off2)) { + if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XFMOVDstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (FMOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && 
canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XFMOVDstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } // match: (FMOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (FMOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (FMOVDstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XFMOVDstoreidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg4(ptr, idx, val, mem) return true } @@ -7717,48 +7698,48 @@ func rewriteValueS390X_OpS390XFMOVDstoreidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (FMOVDstoreidx [c+d] {sym} ptr idx val mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 val := v_2 mem := v_3 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { break } v.reset(OpS390XFMOVDstoreidx) - v.AuxInt = c + 
d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (FMOVDstoreidx [c+d] {sym} ptr idx val mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpS390XADDconst { break } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] val := v_2 mem := v_3 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { break } v.reset(OpS390XFMOVDstoreidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg4(ptr, idx, val, mem) return true } @@ -7786,68 +7767,68 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { return true } // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (FMOVSload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is20Bit(off1 + off2)) { + if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XFMOVSload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (FMOVSload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { 
break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XFMOVSload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } // match: (FMOVSload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (FMOVSloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (FMOVSloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XFMOVSloadidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, idx, mem) return true } @@ -7885,46 +7866,46 @@ func rewriteValueS390X_OpS390XFMOVSloadidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (FMOVSloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 mem := v_2 - if 
!(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { break } v.reset(OpS390XFMOVSloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } // match: (FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (FMOVSloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpS390XADDconst { break } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { break } v.reset(OpS390XFMOVSloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } @@ -7935,71 +7916,71 @@ func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (FMOVSstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is20Bit(off1 + off2)) { + if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XFMOVSstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (FMOVSstore [off1+off2] {mergeSymTyped(sym1,sym2)} base 
val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XFMOVSstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } // match: (FMOVSstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (FMOVSstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (FMOVSstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XFMOVSstoreidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg4(ptr, idx, val, mem) return true } @@ -8039,48 +8020,48 @@ func rewriteValueS390X_OpS390XFMOVSstoreidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (FMOVSstoreidx [c+d] {sym} ptr idx val mem) for { - c := v.AuxInt - sym := 
v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 val := v_2 mem := v_3 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { break } v.reset(OpS390XFMOVSstoreidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (FMOVSstoreidx [c+d] {sym} ptr idx val mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpS390XADDconst { break } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] val := v_2 mem := v_3 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { break } v.reset(OpS390XFMOVSstoreidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg4(ptr, idx, val, mem) return true } @@ -8536,68 +8517,68 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { return true } // match: (MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (MOVBZload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is20Bit(off1 + off2)) { + if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XMOVBZload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVBZload 
[off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBZload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XMOVBZload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } // match: (MOVBZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVBZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBZloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XMOVBZloadidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, idx, mem) return true } @@ -8635,50 +8616,50 @@ func rewriteValueS390X_OpS390XMOVBZloadidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(c+d) 
+ // cond: is20Bit(int64(c)+int64(d)) // result: (MOVBZloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVBZloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } break } // match: (MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVBZloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ptr := v_0 if v_1.Op != OpS390XADDconst { continue } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVBZloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } @@ -8909,16 +8890,16 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { return true } // match: (MOVBZreg (ANDWconst [m] x)) - // result: (MOVWZreg (ANDWconst [int64( uint8(m))] x)) + // result: (MOVWZreg (ANDWconst [int32( uint8(m))] x)) for { if v_0.Op != OpS390XANDWconst { break } - m := v_0.AuxInt + m := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpS390XMOVWZreg) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = int64(uint8(m)) + v0.AuxInt = int32ToAuxInt(int32(uint8(m))) v0.AddArg(x) v.AddArg(v0) return true @@ -8948,68 +8929,68 @@ func rewriteValueS390X_OpS390XMOVBload(v *Value) bool { return true } // match: (MOVBload [off1] {sym} (ADDconst 
[off2] ptr) mem) - // cond: is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (MOVBload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is20Bit(off1 + off2)) { + if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XMOVBload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XMOVBload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } // match: (MOVBload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVBloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := 
auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XMOVBloadidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, idx, mem) return true } @@ -9047,50 +9028,50 @@ func rewriteValueS390X_OpS390XMOVBloadidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVBloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVBloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } break } // match: (MOVBloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVBloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ptr := v_0 if v_1.Op != OpS390XADDconst { continue } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVBloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + 
v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } @@ -9297,19 +9278,19 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { } // match: (MOVBreg (ANDWconst [m] x)) // cond: int8(m) >= 0 - // result: (MOVWZreg (ANDWconst [int64( uint8(m))] x)) + // result: (MOVWZreg (ANDWconst [int32( uint8(m))] x)) for { if v_0.Op != OpS390XANDWconst { break } - m := v_0.AuxInt + m := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] if !(int8(m) >= 0) { break } v.reset(OpS390XMOVWZreg) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = int64(uint8(m)) + v0.AuxInt = int32ToAuxInt(int32(uint8(m))) v0.AddArg(x) v.AddArg(v0) return true @@ -9355,92 +9336,92 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { return true } // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (MOVBstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is20Bit(off1 + off2)) { + if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XMOVBstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) - // cond: is20Bit(off) && ptr.Op != OpSB - // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) + // cond: is20Bit(int64(off)) && ptr.Op != OpSB + // result: (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) mem := v_2 - if !(is20Bit(off) && ptr.Op != OpSB) { + if 
!(is20Bit(int64(off)) && ptr.Op != OpSB) { break } v.reset(OpS390XMOVBstoreconst) - v.AuxInt = makeValAndOff(int64(int8(c)), off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XMOVBstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVBstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if 
!(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XMOVBstoreidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg4(ptr, idx, val, mem) return true } @@ -9476,12 +9457,12 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVHstore [i-1] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w := v_1 x := v_2 - if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -9489,12 +9470,12 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { break } x_1 := x.Args[1] - if x_1.Op != OpS390XSRDconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + if x_1.Op != OpS390XSRDconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } v.reset(OpS390XMOVHstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -9502,17 +9483,17 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVHstore [i-1] {s} p w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w0 := v_1 if w0.Op != OpS390XSRDconst { break } - j := w0.AuxInt + j := auxIntToInt8(w0.AuxInt) w := w0.Args[0] x := v_2 - if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -9520,12 +9501,12 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { break } x_1 := x.Args[1] - if x_1.Op != OpS390XSRDconst || x_1.AuxInt != 
j+8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + if x_1.Op != OpS390XSRDconst || auxIntToInt8(x_1.AuxInt) != j+8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } v.reset(OpS390XMOVHstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } @@ -9533,12 +9514,12 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVHstore [i-1] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w := v_1 x := v_2 - if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -9546,12 +9527,12 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { break } x_1 := x.Args[1] - if x_1.Op != OpS390XSRWconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + if x_1.Op != OpS390XSRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } v.reset(OpS390XMOVHstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -9559,17 +9540,17 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVHstore [i-1] {s} p w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w0 := v_1 if w0.Op != OpS390XSRWconst { break } - j := w0.AuxInt + j := auxIntToInt8(w0.AuxInt) w := w0.Args[0] x := v_2 - if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -9577,12 +9558,12 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) 
bool { break } x_1 := x.Args[1] - if x_1.Op != OpS390XSRWconst || x_1.AuxInt != j+8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + if x_1.Op != OpS390XSRWconst || auxIntToInt8(x_1.AuxInt) != j+8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } v.reset(OpS390XMOVHstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } @@ -9590,15 +9571,15 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVHBRstore [i-1] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 - if v_1.Op != OpS390XSRDconst || v_1.AuxInt != 8 { + if v_1.Op != OpS390XSRDconst || auxIntToInt8(v_1.AuxInt) != 8 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -9606,8 +9587,8 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { break } v.reset(OpS390XMOVHBRstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -9615,16 +9596,16 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVHBRstore [i-1] {s} p w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 if v_1.Op != OpS390XSRDconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -9632,12 +9613,12 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { break } w0 := x.Args[1] - if w0.Op != OpS390XSRDconst || 
w0.AuxInt != j-8 || w != w0.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + if w0.Op != OpS390XSRDconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } v.reset(OpS390XMOVHBRstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } @@ -9645,15 +9626,15 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVHBRstore [i-1] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 - if v_1.Op != OpS390XSRWconst || v_1.AuxInt != 8 { + if v_1.Op != OpS390XSRWconst || auxIntToInt8(v_1.AuxInt) != 8 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -9661,8 +9642,8 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { break } v.reset(OpS390XMOVHBRstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -9670,16 +9651,16 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVHBRstore [i-1] {s} p w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 if v_1.Op != OpS390XSRWconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -9687,12 +9668,12 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { break } w0 := x.Args[1] - if w0.Op != OpS390XSRWconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(p.Op != OpSB && x.Uses == 1 && 
clobber(x)) { + if w0.Op != OpS390XSRWconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } v.reset(OpS390XMOVHBRstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } @@ -9702,70 +9683,70 @@ func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) - // cond: is20Bit(ValAndOff(sc).Off()+off) - // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + // cond: is20Bit(sc.Off()+int64(off)) + // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem) for { - sc := v.AuxInt - s := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off := v_0.AuxInt + off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is20Bit(ValAndOff(sc).Off() + off)) { + if !(is20Bit(sc.Off() + int64(off))) { break } v.reset(OpS390XMOVBstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) - // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) + // result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) for { - sc := v.AuxInt - sym1 := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } - off := v_0.AuxInt - sym2 := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + if !(ptr.Op != OpSB && canMergeSym(sym1, 
sym2) && sc.canAdd32(off)) { break } v.reset(OpS390XMOVBstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) - // cond: p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) - // result: (MOVHstoreconst [makeValAndOff(ValAndOff(c).Val()&0xff | ValAndOff(a).Val()<<8, ValAndOff(a).Off())] {s} p mem) + // cond: p.Op != OpSB && x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x) + // result: (MOVHstoreconst [makeValAndOff32(c.Val32()&0xff | a.Val32()<<8, a.Off32())] {s} p mem) for { - c := v.AuxInt - s := v.Aux + c := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 x := v_1 if x.Op != OpS390XMOVBstoreconst { break } - a := x.AuxInt - if x.Aux != s { + a := auxIntToValAndOff(x.AuxInt) + if auxToSym(x.Aux) != s { break } mem := x.Args[1] - if p != x.Args[0] || !(p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { + if p != x.Args[0] || !(p.Op != OpSB && x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) { break } v.reset(OpS390XMOVHstoreconst) - v.AuxInt = makeValAndOff(ValAndOff(c).Val()&0xff|ValAndOff(a).Val()<<8, ValAndOff(a).Off()) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c.Val32()&0xff|a.Val32()<<8, a.Off32())) + v.Aux = symToAux(s) v.AddArg2(p, mem) return true } @@ -9777,52 +9758,52 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { 
continue } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 val := v_2 mem := v_3 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVBstoreidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg4(ptr, idx, val, mem) return true } break } // match: (MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ptr := v_0 if v_1.Op != OpS390XADDconst { continue } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] val := v_2 mem := v_3 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVBstoreidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg4(ptr, idx, val, mem) return true } @@ -9832,14 +9813,14 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVHstoreidx [i-1] {s} p idx w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 w := v_2 x := v_3 - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -9850,12 +9831,12 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { continue } x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + if x_2.Op != OpS390XSRDconst || auxIntToInt8(x_2.AuxInt) != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVHstoreidx) - 
v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg4(p, idx, w, mem) return true } @@ -9866,8 +9847,8 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 @@ -9875,10 +9856,10 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { if w0.Op != OpS390XSRDconst { continue } - j := w0.AuxInt + j := auxIntToInt8(w0.AuxInt) w := w0.Args[0] x := v_3 - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -9889,12 +9870,12 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { continue } x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + if x_2.Op != OpS390XSRDconst || auxIntToInt8(x_2.AuxInt) != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg4(p, idx, w0, mem) return true } @@ -9905,14 +9886,14 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVHstoreidx [i-1] {s} p idx w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 w := v_2 x := v_3 - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -9923,12 +9904,12 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { continue } x_2 := 
x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + if x_2.Op != OpS390XSRWconst || auxIntToInt8(x_2.AuxInt) != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg4(p, idx, w, mem) return true } @@ -9939,8 +9920,8 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 @@ -9948,10 +9929,10 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { if w0.Op != OpS390XSRWconst { continue } - j := w0.AuxInt + j := auxIntToInt8(w0.AuxInt) w := w0.Args[0] x := v_3 - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -9962,12 +9943,12 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { continue } x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + if x_2.Op != OpS390XSRWconst || auxIntToInt8(x_2.AuxInt) != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg4(p, idx, w0, mem) return true } @@ -9978,17 +9959,17 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 - if v_2.Op != OpS390XSRDconst || 
v_2.AuxInt != 8 { + if v_2.Op != OpS390XSRDconst || auxIntToInt8(v_2.AuxInt) != 8 { continue } w := v_2.Args[0] x := v_3 - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -9999,8 +9980,8 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { continue } v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg4(p, idx, w, mem) return true } @@ -10011,18 +9992,18 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 if v_2.Op != OpS390XSRDconst { continue } - j := v_2.AuxInt + j := auxIntToInt8(v_2.AuxInt) w := v_2.Args[0] x := v_3 - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -10033,12 +10014,12 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { continue } w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + if w0.Op != OpS390XSRDconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg4(p, idx, w0, mem) return true } @@ -10049,17 +10030,17 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, 
v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 - if v_2.Op != OpS390XSRWconst || v_2.AuxInt != 8 { + if v_2.Op != OpS390XSRWconst || auxIntToInt8(v_2.AuxInt) != 8 { continue } w := v_2.Args[0] x := v_3 - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -10070,8 +10051,8 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { continue } v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg4(p, idx, w, mem) return true } @@ -10082,18 +10063,18 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 if v_2.Op != OpS390XSRWconst { continue } - j := v_2.AuxInt + j := auxIntToInt8(v_2.AuxInt) w := v_2.Args[0] x := v_3 - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -10104,12 +10085,12 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { continue } w0 := x.Args[2] - if w0.Op != OpS390XSRWconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + if w0.Op != OpS390XSRWconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg4(p, idx, w0, mem) return true } @@ -10122,88 +10103,88 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDaddridx [c] {s} (ADDconst [d] x) y) - // cond: is20Bit(c+d) && x.Op != 
OpSB + // cond: is20Bit(int64(c)+int64(d)) && x.Op != OpSB // result: (MOVDaddridx [c+d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] y := v_1 - if !(is20Bit(c+d) && x.Op != OpSB) { + if !(is20Bit(int64(c)+int64(d)) && x.Op != OpSB) { break } v.reset(OpS390XMOVDaddridx) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (MOVDaddridx [c] {s} x (ADDconst [d] y)) - // cond: is20Bit(c+d) && y.Op != OpSB + // cond: is20Bit(int64(c)+int64(d)) && y.Op != OpSB // result: (MOVDaddridx [c+d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XADDconst { break } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] - if !(is20Bit(c+d) && y.Op != OpSB) { + if !(is20Bit(int64(c)+int64(d)) && y.Op != OpSB) { break } v.reset(OpS390XMOVDaddridx) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB - // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (MOVDaddridx [off1+off2] {mergeSymTyped(sym1,sym2)} x y) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) x := v_0.Args[0] y := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { break } 
v.reset(OpS390XMOVDaddridx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(x, y) return true } // match: (MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB - // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB + // result: (MOVDaddridx [off1+off2] {mergeSymTyped(sym1,sym2)} x y) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XMOVDaddr { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) y := v_1.Args[0] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB) { break } v.reset(OpS390XMOVDaddridx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(x, y) return true } @@ -10250,69 +10231,69 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { return true } // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (MOVDload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is20Bit(off1 + off2)) { + if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XMOVDload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] 
{sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) - // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) + // result: (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } t := v_0.Type - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))) { break } v.reset(OpS390XMOVDload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } // match: (MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVDloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if 
!(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XMOVDloadidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, idx, mem) return true } @@ -10350,50 +10331,50 @@ func rewriteValueS390X_OpS390XMOVDloadidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVDloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVDloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } break } // match: (MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVDloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ptr := v_0 if v_1.Op != OpS390XADDconst { continue } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVDloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } @@ -10406,93 +10387,93 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: 
is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (MOVDstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is20Bit(off1 + off2)) { + if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XMOVDstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) - // cond: is16Bit(c) && isU12Bit(off) && ptr.Op != OpSB - // result: (MOVDstoreconst [makeValAndOff(c,off)] {sym} ptr mem) + // cond: is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB + // result: (MOVDstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) mem := v_2 - if !(is16Bit(c) && isU12Bit(off) && ptr.Op != OpSB) { + if !(is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB) { break } v.reset(OpS390XMOVDstoreconst) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) - // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) + // result: (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { 
- off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } t := v_0.Type - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))) { break } v.reset(OpS390XMOVDstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVDstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XMOVDstoreidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg4(ptr, idx, val, mem) return true } @@ -10525,15 +10506,15 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { break } // match: (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem)) - 
// cond: p.Op != OpSB && x.Uses == 1 && is20Bit(i-8) && clobber(x) + // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x) // result: (STMG2 [i-8] {s} p w0 w1 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w1 := v_1 x := v_2 - if x.Op != OpS390XMOVDstore || x.AuxInt != i-8 || x.Aux != s { + if x.Op != OpS390XMOVDstore || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -10541,25 +10522,25 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { break } w0 := x.Args[1] - if !(p.Op != OpSB && x.Uses == 1 && is20Bit(i-8) && clobber(x)) { + if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) { break } v.reset(OpS390XSTMG2) - v.AuxInt = i - 8 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 8) + v.Aux = symToAux(s) v.AddArg4(p, w0, w1, mem) return true } // match: (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem)) - // cond: x.Uses == 1 && is20Bit(i-16) && clobber(x) + // cond: x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x) // result: (STMG3 [i-16] {s} p w0 w1 w2 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w2 := v_1 x := v_2 - if x.Op != OpS390XSTMG2 || x.AuxInt != i-16 || x.Aux != s { + if x.Op != OpS390XSTMG2 || auxIntToInt32(x.AuxInt) != i-16 || auxToSym(x.Aux) != s { break } mem := x.Args[3] @@ -10568,25 +10549,25 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { } w0 := x.Args[1] w1 := x.Args[2] - if !(x.Uses == 1 && is20Bit(i-16) && clobber(x)) { + if !(x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)) { break } v.reset(OpS390XSTMG3) - v.AuxInt = i - 16 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 16) + v.Aux = symToAux(s) v.AddArg5(p, w0, w1, w2, mem) return true } // match: (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem)) - // cond: x.Uses == 1 && is20Bit(i-24) && clobber(x) + // cond: x.Uses == 1 && is20Bit(int64(i)-24) && clobber(x) // result: 
(STMG4 [i-24] {s} p w0 w1 w2 w3 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w3 := v_1 x := v_2 - if x.Op != OpS390XSTMG3 || x.AuxInt != i-24 || x.Aux != s { + if x.Op != OpS390XSTMG3 || auxIntToInt32(x.AuxInt) != i-24 || auxToSym(x.Aux) != s { break } mem := x.Args[4] @@ -10596,12 +10577,12 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { w0 := x.Args[1] w1 := x.Args[2] w2 := x.Args[3] - if !(x.Uses == 1 && is20Bit(i-24) && clobber(x)) { + if !(x.Uses == 1 && is20Bit(int64(i)-24) && clobber(x)) { break } v.reset(OpS390XSTMG4) - v.AuxInt = i - 24 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 24) + v.Aux = symToAux(s) v.AddArg6(p, w0, w1, w2, w3, mem) return true } @@ -10611,45 +10592,45 @@ func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) - // cond: isU12Bit(ValAndOff(sc).Off()+off) - // result: (MOVDstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + // cond: isU12Bit(sc.Off()+int64(off)) + // result: (MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem) for { - sc := v.AuxInt - s := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off := v_0.AuxInt + off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(isU12Bit(ValAndOff(sc).Off() + off)) { + if !(isU12Bit(sc.Off() + int64(off))) { break } v.reset(OpS390XMOVDstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) v.AddArg2(ptr, mem) return true } // match: (MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) - // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVDstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) + // result: (MOVDstoreconst 
[sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) for { - sc := v.AuxInt - sym1 := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } - off := v_0.AuxInt - sym2 := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) { break } v.reset(OpS390XMOVDstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -10661,52 +10642,52 @@ func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 val := v_2 mem := v_3 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVDstoreidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg4(ptr, idx, val, mem) return true } break } // match: (MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ptr := v_0 if v_1.Op != OpS390XADDconst { continue } - d := v_1.AuxInt + d := 
auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] val := v_2 mem := v_3 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVDstoreidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg4(ptr, idx, val, mem) return true } @@ -10722,15 +10703,15 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWBRstore [i-2] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 - if v_1.Op != OpS390XSRDconst || v_1.AuxInt != 16 { + if v_1.Op != OpS390XSRDconst || auxIntToInt8(v_1.AuxInt) != 16 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpS390XMOVHBRstore || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHBRstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -10738,8 +10719,8 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { break } v.reset(OpS390XMOVWBRstore) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -10747,16 +10728,16 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWBRstore [i-2] {s} p w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 if v_1.Op != OpS390XSRDconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpS390XMOVHBRstore || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHBRstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -10764,12 +10745,12 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { break } w0 := x.Args[1] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + if w0.Op != OpS390XSRDconst || auxIntToInt8(w0.AuxInt) != j-16 || w != 
w0.Args[0] || !(x.Uses == 1 && clobber(x)) { break } v.reset(OpS390XMOVWBRstore) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } @@ -10777,15 +10758,15 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWBRstore [i-2] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 - if v_1.Op != OpS390XSRWconst || v_1.AuxInt != 16 { + if v_1.Op != OpS390XSRWconst || auxIntToInt8(v_1.AuxInt) != 16 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpS390XMOVHBRstore || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHBRstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -10793,8 +10774,8 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { break } v.reset(OpS390XMOVWBRstore) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -10802,16 +10783,16 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWBRstore [i-2] {s} p w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 if v_1.Op != OpS390XSRWconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpS390XMOVHBRstore || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHBRstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -10819,12 +10800,12 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { break } w0 := x.Args[1] - if w0.Op != OpS390XSRWconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + if w0.Op != OpS390XSRWconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { break } v.reset(OpS390XMOVWBRstore) - v.AuxInt = i - 2 - v.Aux 
= s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } @@ -10839,17 +10820,17 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 - if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 16 { + if v_2.Op != OpS390XSRDconst || auxIntToInt8(v_2.AuxInt) != 16 { continue } w := v_2.Args[0] x := v_3 - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHBRstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -10860,8 +10841,8 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { continue } v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg4(p, idx, w, mem) return true } @@ -10872,18 +10853,18 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 if v_2.Op != OpS390XSRDconst { continue } - j := v_2.AuxInt + j := auxIntToInt8(v_2.AuxInt) w := v_2.Args[0] x := v_3 - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHBRstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -10894,12 +10875,12 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { continue } w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + if w0.Op != OpS390XSRDconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || 
!(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg4(p, idx, w0, mem) return true } @@ -10910,17 +10891,17 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 - if v_2.Op != OpS390XSRWconst || v_2.AuxInt != 16 { + if v_2.Op != OpS390XSRWconst || auxIntToInt8(v_2.AuxInt) != 16 { continue } w := v_2.Args[0] x := v_3 - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHBRstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -10931,8 +10912,8 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { continue } v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg4(p, idx, w, mem) return true } @@ -10943,18 +10924,18 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 if v_2.Op != OpS390XSRWconst { continue } - j := v_2.AuxInt + j := auxIntToInt8(v_2.AuxInt) w := v_2.Args[0] x := v_3 - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHBRstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -10965,12 +10946,12 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { continue } w0 := x.Args[2] - if w0.Op != OpS390XSRWconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 
&& clobber(x)) { + if w0.Op != OpS390XSRWconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg4(p, idx, w0, mem) return true } @@ -11002,69 +10983,69 @@ func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { return true } // match: (MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (MOVHZload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is20Bit(off1 + off2)) { + if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XMOVHZload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) - // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) + // result: (MOVHZload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } t := v_0.Type - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 
== 0))) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) { break } v.reset(OpS390XMOVHZload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } // match: (MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVHZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVHZloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XMOVHZloadidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, idx, mem) return true } @@ -11102,50 +11083,50 @@ func rewriteValueS390X_OpS390XMOVHZloadidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 mem := v_2 - if !(is20Bit(c + d)) { + if 
!(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVHZloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } break } // match: (MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ptr := v_0 if v_1.Op != OpS390XADDconst { continue } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVHZloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } @@ -11357,16 +11338,16 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { return true } // match: (MOVHZreg (ANDWconst [m] x)) - // result: (MOVWZreg (ANDWconst [int64(uint16(m))] x)) + // result: (MOVWZreg (ANDWconst [int32(uint16(m))] x)) for { if v_0.Op != OpS390XANDWconst { break } - m := v_0.AuxInt + m := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpS390XMOVWZreg) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = int64(uint16(m)) + v0.AuxInt = int32ToAuxInt(int32(uint16(m))) v0.AddArg(x) v.AddArg(v0) return true @@ -11396,69 +11377,69 @@ func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { return true } // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (MOVHload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is20Bit(off1 + off2)) { + 
if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XMOVHload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) - // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) + // result: (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } t := v_0.Type - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) { break } v.reset(OpS390XMOVHload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } // match: (MOVHload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVHloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVHloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != 
OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XMOVHloadidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, idx, mem) return true } @@ -11496,50 +11477,50 @@ func rewriteValueS390X_OpS390XMOVHloadidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVHloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVHloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVHloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } break } // match: (MOVHloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVHloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ptr := v_0 if v_1.Op != OpS390XADDconst { continue } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVHloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) 
v.AddArg3(ptr, idx, mem) return true } @@ -11774,19 +11755,19 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { } // match: (MOVHreg (ANDWconst [m] x)) // cond: int16(m) >= 0 - // result: (MOVWZreg (ANDWconst [int64(uint16(m))] x)) + // result: (MOVWZreg (ANDWconst [int32(uint16(m))] x)) for { if v_0.Op != OpS390XANDWconst { break } - m := v_0.AuxInt + m := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] if !(int16(m) >= 0) { break } v.reset(OpS390XMOVWZreg) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = int64(uint16(m)) + v0.AuxInt = int32ToAuxInt(int32(uint16(m))) v0.AddArg(x) v.AddArg(v0) return true @@ -11832,93 +11813,93 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { return true } // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (MOVHstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is20Bit(off1 + off2)) { + if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XMOVHstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) - // cond: isU12Bit(off) && ptr.Op != OpSB - // result: (MOVHstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) + // cond: isU12Bit(int64(off)) && ptr.Op != OpSB + // result: (MOVHstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) mem := v_2 - if !(isU12Bit(off) && ptr.Op != OpSB) { + if 
!(isU12Bit(int64(off)) && ptr.Op != OpSB) { break } v.reset(OpS390XMOVHstoreconst) - v.AuxInt = makeValAndOff(int64(int16(c)), off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) - // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) + // result: (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } t := v_0.Type - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) { break } v.reset(OpS390XMOVHstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVHstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVHstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) for { - 
off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XMOVHstoreidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg4(ptr, idx, val, mem) return true } @@ -11954,12 +11935,12 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVWstore [i-2] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w := v_1 x := v_2 - if x.Op != OpS390XMOVHstore || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -11967,12 +11948,12 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { break } x_1 := x.Args[1] - if x_1.Op != OpS390XSRDconst || x_1.AuxInt != 16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + if x_1.Op != OpS390XSRDconst || auxIntToInt8(x_1.AuxInt) != 16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } v.reset(OpS390XMOVWstore) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -11980,17 +11961,17 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVWstore [i-2] {s} p w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w0 := v_1 if w0.Op != OpS390XSRDconst { break } - j := w0.AuxInt + j := 
auxIntToInt8(w0.AuxInt) w := w0.Args[0] x := v_2 - if x.Op != OpS390XMOVHstore || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -11998,12 +11979,12 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { break } x_1 := x.Args[1] - if x_1.Op != OpS390XSRDconst || x_1.AuxInt != j+16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + if x_1.Op != OpS390XSRDconst || auxIntToInt8(x_1.AuxInt) != j+16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } v.reset(OpS390XMOVWstore) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } @@ -12011,12 +11992,12 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVWstore [i-2] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w := v_1 x := v_2 - if x.Op != OpS390XMOVHstore || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -12024,12 +12005,12 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { break } x_1 := x.Args[1] - if x_1.Op != OpS390XSRWconst || x_1.AuxInt != 16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + if x_1.Op != OpS390XSRWconst || auxIntToInt8(x_1.AuxInt) != 16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } v.reset(OpS390XMOVWstore) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -12037,17 +12018,17 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVWstore [i-2] {s} p w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := 
auxToSym(v.Aux) p := v_0 w0 := v_1 if w0.Op != OpS390XSRWconst { break } - j := w0.AuxInt + j := auxIntToInt8(w0.AuxInt) w := w0.Args[0] x := v_2 - if x.Op != OpS390XMOVHstore || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -12055,12 +12036,12 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { break } x_1 := x.Args[1] - if x_1.Op != OpS390XSRWconst || x_1.AuxInt != j+16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + if x_1.Op != OpS390XSRWconst || auxIntToInt8(x_1.AuxInt) != j+16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } v.reset(OpS390XMOVWstore) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } @@ -12072,72 +12053,72 @@ func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) - // cond: isU12Bit(ValAndOff(sc).Off()+off) - // result: (MOVHstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + // cond: isU12Bit(sc.Off()+int64(off)) + // result: (MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem) for { - sc := v.AuxInt - s := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off := v_0.AuxInt + off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(isU12Bit(ValAndOff(sc).Off() + off)) { + if !(isU12Bit(sc.Off() + int64(off))) { break } v.reset(OpS390XMOVHstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) v.AddArg2(ptr, mem) return true } // match: (MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) - // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVHstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 
+ // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) + // result: (MOVHstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) for { - sc := v.AuxInt - sym1 := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } - off := v_0.AuxInt - sym2 := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) { break } v.reset(OpS390XMOVHstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } // match: (MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem)) - // cond: p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) - // result: (MOVWstore [ValAndOff(a).Off()] {s} p (MOVDconst [int64(int32(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16))]) mem) + // cond: p.Op != OpSB && x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x) + // result: (MOVWstore [a.Off32()] {s} p (MOVDconst [int64(c.Val32()&0xffff | a.Val32()<<16)]) mem) for { - c := v.AuxInt - s := v.Aux + c := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 x := v_1 if x.Op != OpS390XMOVHstoreconst { break } - a := x.AuxInt - if x.Aux != s { + a := auxIntToValAndOff(x.AuxInt) + if auxToSym(x.Aux) != s { break } mem := x.Args[1] - if p != x.Args[0] || !(p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { + if p != x.Args[0] || !(p.Op != OpSB && x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) { break } v.reset(OpS390XMOVWstore) - v.AuxInt = ValAndOff(a).Off() - v.Aux = s + v.AuxInt = int32ToAuxInt(a.Off32()) + v.Aux = symToAux(s) v0 := b.NewValue0(x.Pos, OpS390XMOVDconst, typ.UInt64) - v0.AuxInt 
= int64(int32(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16)) + v0.AuxInt = int64ToAuxInt(int64(c.Val32()&0xffff | a.Val32()<<16)) v.AddArg3(p, v0, mem) return true } @@ -12149,52 +12130,52 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 val := v_2 mem := v_3 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVHstoreidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg4(ptr, idx, val, mem) return true } break } // match: (MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ptr := v_0 if v_1.Op != OpS390XADDconst { continue } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] val := v_2 mem := v_3 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVHstoreidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg4(ptr, idx, val, mem) return true } @@ -12204,14 +12185,14 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWstoreidx [i-2] {s} p idx w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 
:= 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 w := v_2 x := v_3 - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -12222,12 +12203,12 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { continue } x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + if x_2.Op != OpS390XSRDconst || auxIntToInt8(x_2.AuxInt) != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg4(p, idx, w, mem) return true } @@ -12238,8 +12219,8 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 @@ -12247,10 +12228,10 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { if w0.Op != OpS390XSRDconst { continue } - j := w0.AuxInt + j := auxIntToInt8(w0.AuxInt) w := w0.Args[0] x := v_3 - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -12261,12 +12242,12 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { continue } x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + if x_2.Op != OpS390XSRDconst || auxIntToInt8(x_2.AuxInt) != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg4(p, idx, w0, 
mem) return true } @@ -12277,14 +12258,14 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWstoreidx [i-2] {s} p idx w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 w := v_2 x := v_3 - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -12295,12 +12276,12 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { continue } x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + if x_2.Op != OpS390XSRWconst || auxIntToInt8(x_2.AuxInt) != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg4(p, idx, w, mem) return true } @@ -12311,8 +12292,8 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 @@ -12320,10 +12301,10 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { if w0.Op != OpS390XSRWconst { continue } - j := w0.AuxInt + j := auxIntToInt8(w0.AuxInt) w := w0.Args[0] x := v_3 - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpS390XMOVHstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -12334,12 +12315,12 @@ func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { continue } x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && 
clobber(x)) { + if x_2.Op != OpS390XSRWconst || auxIntToInt8(x_2.AuxInt) != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg4(p, idx, w0, mem) return true } @@ -12356,15 +12337,15 @@ func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVDBRstore [i-4] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 - if v_1.Op != OpS390XSRDconst || v_1.AuxInt != 32 { + if v_1.Op != OpS390XSRDconst || auxIntToInt8(v_1.AuxInt) != 32 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpS390XMOVWBRstore || x.AuxInt != i-4 || x.Aux != s { + if x.Op != OpS390XMOVWBRstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -12372,8 +12353,8 @@ func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool { break } v.reset(OpS390XMOVDBRstore) - v.AuxInt = i - 4 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 4) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -12381,16 +12362,16 @@ func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVDBRstore [i-4] {s} p w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 if v_1.Op != OpS390XSRDconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpS390XMOVWBRstore || x.AuxInt != i-4 || x.Aux != s { + if x.Op != OpS390XMOVWBRstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -12398,12 +12379,12 @@ func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool { break } w0 := x.Args[1] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + if w0.Op != OpS390XSRDconst || auxIntToInt8(w0.AuxInt) != j-32 || w != 
w0.Args[0] || !(x.Uses == 1 && clobber(x)) { break } v.reset(OpS390XMOVDBRstore) - v.AuxInt = i - 4 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 4) + v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } @@ -12418,17 +12399,17 @@ func rewriteValueS390X_OpS390XMOVWBRstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVDBRstoreidx [i-4] {s} p idx w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 - if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 32 { + if v_2.Op != OpS390XSRDconst || auxIntToInt8(v_2.AuxInt) != 32 { continue } w := v_2.Args[0] x := v_3 - if x.Op != OpS390XMOVWBRstoreidx || x.AuxInt != i-4 || x.Aux != s { + if x.Op != OpS390XMOVWBRstoreidx || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -12439,8 +12420,8 @@ func rewriteValueS390X_OpS390XMOVWBRstoreidx(v *Value) bool { continue } v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 4) + v.Aux = symToAux(s) v.AddArg4(p, idx, w, mem) return true } @@ -12451,18 +12432,18 @@ func rewriteValueS390X_OpS390XMOVWBRstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVDBRstoreidx [i-4] {s} p idx w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 if v_2.Op != OpS390XSRDconst { continue } - j := v_2.AuxInt + j := auxIntToInt8(v_2.AuxInt) w := v_2.Args[0] x := v_3 - if x.Op != OpS390XMOVWBRstoreidx || x.AuxInt != i-4 || x.Aux != s { + if x.Op != OpS390XMOVWBRstoreidx || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -12473,12 +12454,12 @@ func rewriteValueS390X_OpS390XMOVWBRstoreidx(v *Value) bool { continue } w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses 
== 1 && clobber(x)) { + if w0.Op != OpS390XSRDconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 4) + v.Aux = symToAux(s) v.AddArg4(p, idx, w0, mem) return true } @@ -12510,69 +12491,69 @@ func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { return true } // match: (MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (MOVWZload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is20Bit(off1 + off2)) { + if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XMOVWZload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) - // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) + // result: (MOVWZload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } t := v_0.Type - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && 
(off1+off2)%4 == 0))) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) { break } v.reset(OpS390XMOVWZload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } // match: (MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVWZloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XMOVWZloadidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, idx, mem) return true } @@ -12610,50 +12591,50 @@ func rewriteValueS390X_OpS390XMOVWZloadidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 mem := v_2 - if !(is20Bit(c + d)) { + if 
!(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVWZloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } break } // match: (MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ptr := v_0 if v_1.Op != OpS390XADDconst { continue } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVWZloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } @@ -12894,69 +12875,69 @@ func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { return true } // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (MOVWload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is20Bit(off1 + off2)) { + if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XMOVWload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) - // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && 
canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) + // result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } t := v_0.Type - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) { break } v.reset(OpS390XMOVWload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } // match: (MOVWload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVWloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XMOVWloadidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, idx, mem) return true } @@ -12994,50 
+12975,50 @@ func rewriteValueS390X_OpS390XMOVWloadidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVWloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVWloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVWloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } break } // match: (MOVWloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVWloadidx [c+d] {sym} ptr idx mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ptr := v_0 if v_1.Op != OpS390XADDconst { continue } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] mem := v_2 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVWloadidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg3(ptr, idx, mem) return true } @@ -13338,93 +13319,93 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { return true } // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: is20Bit(off1+off2) + // cond: is20Bit(int64(off1)+int64(off2)) // result: (MOVWstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] 
val := v_1 mem := v_2 - if !(is20Bit(off1 + off2)) { + if !(is20Bit(int64(off1) + int64(off2))) { break } v.reset(OpS390XMOVWstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) - // cond: is16Bit(c) && isU12Bit(off) && ptr.Op != OpSB - // result: (MOVWstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) + // cond: is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB + // result: (MOVWstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) mem := v_2 - if !(is16Bit(c) && isU12Bit(off) && ptr.Op != OpSB) { + if !(is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB) { break } v.reset(OpS390XMOVWstoreconst) - v.AuxInt = makeValAndOff(int64(int32(c)), off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) - // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) + // result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } t := v_0.Type - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if 
!(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) { break } v.reset(OpS390XMOVWstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVWstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddridx { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpS390XMOVWstoreidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg4(ptr, idx, val, mem) return true } @@ -13460,15 +13441,15 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVDstore [i-4] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 - if v_1.Op != OpS390XSRDconst || v_1.AuxInt != 32 { + if v_1.Op != OpS390XSRDconst || auxIntToInt8(v_1.AuxInt) != 32 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpS390XMOVWstore || 
x.AuxInt != i-4 || x.Aux != s { + if x.Op != OpS390XMOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -13476,8 +13457,8 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { break } v.reset(OpS390XMOVDstore) - v.AuxInt = i - 4 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 4) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -13485,17 +13466,17 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVDstore [i-4] {s} p w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w0 := v_1 if w0.Op != OpS390XSRDconst { break } - j := w0.AuxInt + j := auxIntToInt8(w0.AuxInt) w := w0.Args[0] x := v_2 - if x.Op != OpS390XMOVWstore || x.AuxInt != i-4 || x.Aux != s { + if x.Op != OpS390XMOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -13503,25 +13484,25 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { break } x_1 := x.Args[1] - if x_1.Op != OpS390XSRDconst || x_1.AuxInt != j+32 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { + if x_1.Op != OpS390XSRDconst || auxIntToInt8(x_1.AuxInt) != j+32 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } v.reset(OpS390XMOVDstore) - v.AuxInt = i - 4 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 4) + v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } // match: (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem)) - // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(i-4) && clobber(x) + // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && clobber(x) // result: (STM2 [i-4] {s} p w0 w1 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w1 := v_1 x := v_2 - if x.Op != OpS390XMOVWstore || x.AuxInt != i-4 || x.Aux != s { + if x.Op != OpS390XMOVWstore || auxIntToInt32(x.AuxInt) != i-4 || 
auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -13529,25 +13510,25 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { break } w0 := x.Args[1] - if !(p.Op != OpSB && x.Uses == 1 && is20Bit(i-4) && clobber(x)) { + if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && clobber(x)) { break } v.reset(OpS390XSTM2) - v.AuxInt = i - 4 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 4) + v.Aux = symToAux(s) v.AddArg4(p, w0, w1, mem) return true } // match: (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem)) - // cond: x.Uses == 1 && is20Bit(i-8) && clobber(x) + // cond: x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x) // result: (STM3 [i-8] {s} p w0 w1 w2 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w2 := v_1 x := v_2 - if x.Op != OpS390XSTM2 || x.AuxInt != i-8 || x.Aux != s { + if x.Op != OpS390XSTM2 || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s { break } mem := x.Args[3] @@ -13556,25 +13537,25 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { } w0 := x.Args[1] w1 := x.Args[2] - if !(x.Uses == 1 && is20Bit(i-8) && clobber(x)) { + if !(x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) { break } v.reset(OpS390XSTM3) - v.AuxInt = i - 8 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 8) + v.Aux = symToAux(s) v.AddArg5(p, w0, w1, w2, mem) return true } // match: (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem)) - // cond: x.Uses == 1 && is20Bit(i-12) && clobber(x) + // cond: x.Uses == 1 && is20Bit(int64(i)-12) && clobber(x) // result: (STM4 [i-12] {s} p w0 w1 w2 w3 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w3 := v_1 x := v_2 - if x.Op != OpS390XSTM3 || x.AuxInt != i-12 || x.Aux != s { + if x.Op != OpS390XSTM3 || auxIntToInt32(x.AuxInt) != i-12 || auxToSym(x.Aux) != s { break } mem := x.Args[4] @@ -13584,12 +13565,12 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { w0 := x.Args[1] w1 := x.Args[2] w2 := 
x.Args[3] - if !(x.Uses == 1 && is20Bit(i-12) && clobber(x)) { + if !(x.Uses == 1 && is20Bit(int64(i)-12) && clobber(x)) { break } v.reset(OpS390XSTM4) - v.AuxInt = i - 12 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 12) + v.Aux = symToAux(s) v.AddArg6(p, w0, w1, w2, w3, mem) return true } @@ -13601,72 +13582,72 @@ func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) - // cond: isU12Bit(ValAndOff(sc).Off()+off) - // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + // cond: isU12Bit(sc.Off()+int64(off)) + // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem) for { - sc := v.AuxInt - s := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpS390XADDconst { break } - off := v_0.AuxInt + off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(isU12Bit(ValAndOff(sc).Off() + off)) { + if !(isU12Bit(sc.Off() + int64(off))) { break } v.reset(OpS390XMOVWstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) - // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) + // result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) for { - sc := v.AuxInt - sym1 := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpS390XMOVDaddr { break } - off := v_0.AuxInt - sym2 := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) 
&& sc.canAdd32(off)) { break } v.reset(OpS390XMOVWstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) - // cond: p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) - // result: (MOVDstore [ValAndOff(a).Off()] {s} p (MOVDconst [ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32]) mem) + // cond: p.Op != OpSB && x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x) + // result: (MOVDstore [a.Off32()] {s} p (MOVDconst [c.Val()&0xffffffff | a.Val()<<32]) mem) for { - c := v.AuxInt - s := v.Aux + c := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 x := v_1 if x.Op != OpS390XMOVWstoreconst { break } - a := x.AuxInt - if x.Aux != s { + a := auxIntToValAndOff(x.AuxInt) + if auxToSym(x.Aux) != s { break } mem := x.Args[1] - if p != x.Args[0] || !(p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { + if p != x.Args[0] || !(p.Op != OpSB && x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) { break } v.reset(OpS390XMOVDstore) - v.AuxInt = ValAndOff(a).Off() - v.Aux = s + v.AuxInt = int32ToAuxInt(a.Off32()) + v.Aux = symToAux(s) v0 := b.NewValue0(x.Pos, OpS390XMOVDconst, typ.UInt64) - v0.AuxInt = ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32 + v0.AuxInt = int64ToAuxInt(c.Val()&0xffffffff | a.Val()<<32) v.AddArg3(p, v0, mem) return true } @@ -13678,52 +13659,52 @@ func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, 
v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] idx := v_1 val := v_2 mem := v_3 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVWstoreidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg4(ptr, idx, val, mem) return true } break } // match: (MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: is20Bit(c+d) + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) for { - c := v.AuxInt - sym := v.Aux + c := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ptr := v_0 if v_1.Op != OpS390XADDconst { continue } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) idx := v_1.Args[0] val := v_2 mem := v_3 - if !(is20Bit(c + d)) { + if !(is20Bit(int64(c) + int64(d))) { continue } v.reset(OpS390XMOVWstoreidx) - v.AuxInt = c + d - v.Aux = sym + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(sym) v.AddArg4(ptr, idx, val, mem) return true } @@ -13733,14 +13714,14 @@ func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVDstoreidx [i-4] {s} p idx w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 w := v_2 x := v_3 - if x.Op != OpS390XMOVWstoreidx || x.AuxInt != i-4 || x.Aux != s { + if x.Op != OpS390XMOVWstoreidx || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -13751,12 +13732,12 @@ func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { continue } x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + if x_2.Op != OpS390XSRDconst || auxIntToInt8(x_2.AuxInt) != 32 || w != x_2.Args[0] || !(x.Uses == 
1 && clobber(x)) { continue } v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 4) + v.Aux = symToAux(s) v.AddArg4(p, idx, w, mem) return true } @@ -13767,8 +13748,8 @@ func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVDstoreidx [i-4] {s} p idx w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { p := v_0 idx := v_1 @@ -13776,10 +13757,10 @@ func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { if w0.Op != OpS390XSRDconst { continue } - j := w0.AuxInt + j := auxIntToInt8(w0.AuxInt) w := w0.Args[0] x := v_3 - if x.Op != OpS390XMOVWstoreidx || x.AuxInt != i-4 || x.Aux != s { + if x.Op != OpS390XMOVWstoreidx || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { continue } mem := x.Args[3] @@ -13790,12 +13771,12 @@ func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { continue } x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + if x_2.Op != OpS390XSRDconst || auxIntToInt8(x_2.AuxInt) != j+32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 4) + v.Aux = symToAux(s) v.AddArg4(p, idx, w0, mem) return true } @@ -13809,26 +13790,26 @@ func rewriteValueS390X_OpS390XMULLD(v *Value) bool { v_0 := v.Args[0] // match: (MULLD x (MOVDconst [c])) // cond: is32Bit(c) - // result: (MULLDconst [c] x) + // result: (MULLDconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpS390XMOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { continue } v.reset(OpS390XMULLDconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } break } // match: (MULLD x g:(MOVDload [off] {sym} ptr 
mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLDload [off] {sym} x ptr mem) for { t := v.Type @@ -13838,17 +13819,17 @@ func rewriteValueS390X_OpS390XMULLD(v *Value) bool { if g.Op != OpS390XMOVDload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XMULLDload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -13920,15 +13901,15 @@ func rewriteValueS390X_OpS390XMULLDconst(v *Value) bool { return true } // match: (MULLDconst [c] (MOVDconst [d])) - // result: (MOVDconst [c*d]) + // result: (MOVDconst [int64(c)*d]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XMOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpS390XMOVDconst) - v.AuxInt = c * d + v.AuxInt = int64ToAuxInt(int64(c) * d) return true } return false @@ -13962,47 +13943,47 @@ func rewriteValueS390X_OpS390XMULLDload(v *Value) bool { return true } // match: (MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) // result: (MULLDload [off1+off2] {sym} x ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XADDconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { break } 
v.reset(OpS390XMULLDload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } // match: (MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) - // result: (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (MULLDload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) for { - o1 := v.AuxInt - s1 := v.Aux + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XMOVDaddr { break } - o2 := v_1.AuxInt - s2 := v_1.Aux + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { break } v.reset(OpS390XMULLDload) - v.AuxInt = o1 + o2 - v.Aux = mergeSym(s1, s2) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSymTyped(s1, s2)) v.AddArg3(x, ptr, mem) return true } @@ -14012,23 +13993,23 @@ func rewriteValueS390X_OpS390XMULLW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MULLW x (MOVDconst [c])) - // result: (MULLWconst [int64(int32(c))] x) + // result: (MULLWconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpS390XMOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XMULLWconst) - v.AuxInt = int64(int32(c)) + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } break } // match: (MULLW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -14038,24 +14019,24 @@ func 
rewriteValueS390X_OpS390XMULLW(v *Value) bool { if g.Op != OpS390XMOVWload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XMULLWload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } break } // match: (MULLW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -14065,17 +14046,17 @@ func rewriteValueS390X_OpS390XMULLW(v *Value) bool { if g.Op != OpS390XMOVWZload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XMULLWload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -14147,15 +14128,15 @@ func rewriteValueS390X_OpS390XMULLWconst(v *Value) bool { return true } // match: (MULLWconst [c] (MOVDconst [d])) - // result: (MOVDconst [int64(int32(c*d))]) + // result: (MOVDconst [int64(c*int32(d))]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XMOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpS390XMOVDconst) - v.AuxInt = int64(int32(c * d)) + v.AuxInt = int64ToAuxInt(int64(c * int32(d))) return true } return false @@ -14165,47 
+14146,47 @@ func rewriteValueS390X_OpS390XMULLWload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) // result: (MULLWload [off1+off2] {sym} x ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XADDconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { break } v.reset(OpS390XMULLWload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } // match: (MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) - // result: (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (MULLWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) for { - o1 := v.AuxInt - s1 := v.Aux + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XMOVDaddr { break } - o2 := v_1.AuxInt - s2 := v_1.Aux + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { break } v.reset(OpS390XMULLWload) - v.AuxInt = o1 + o2 - v.Aux = mergeSym(s1, s2) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSymTyped(s1, s2)) v.AddArg3(x, ptr, mem) return true } @@ -14391,14 +14372,14 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } // match: (OR (SLDconst [63] (SRDconst [63] (LGDR x))) (MOVDconst [c])) // cond: c & -1<<63 == 0 - // result: (LGDR (CPSDR 
(FMOVDconst [c]) x)) + // result: (LGDR (CPSDR (FMOVDconst [math.Float64frombits(uint64(c))]) x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XSLDconst || v_0.AuxInt != 63 { + if v_0.Op != OpS390XSLDconst || auxIntToInt8(v_0.AuxInt) != 63 { continue } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpS390XSRDconst || v_0_0.AuxInt != 63 { + if v_0_0.Op != OpS390XSRDconst || auxIntToInt8(v_0_0.AuxInt) != 63 { continue } v_0_0_0 := v_0_0.Args[0] @@ -14409,14 +14390,14 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if v_1.Op != OpS390XMOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c&-1<<63 == 0) { continue } v.reset(OpS390XLGDR) v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) - v1.AuxInt = c + v1.AuxInt = float64ToAuxInt(math.Float64frombits(uint64(c))) v0.AddArg2(v1, x) v.AddArg(v0) return true @@ -14458,7 +14439,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } // match: (OR (AND (MOVDconst [-1<<63]) (LGDR x)) (MOVDconst [c])) // cond: c & -1<<63 == 0 - // result: (LGDR (CPSDR (FMOVDconst [c]) x)) + // result: (LGDR (CPSDR (FMOVDconst [math.Float64frombits(uint64(c))]) x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XAND { @@ -14468,21 +14449,21 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { - if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != -1<<63 || v_0_1.Op != OpS390XLGDR { + if v_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0.AuxInt) != -1<<63 || v_0_1.Op != OpS390XLGDR { continue } x := v_0_1.Args[0] if v_1.Op != OpS390XMOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c&-1<<63 == 0) { continue } v.reset(OpS390XLGDR) v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) - v1.AuxInt = c + v1.AuxInt = 
float64ToAuxInt(math.Float64frombits(uint64(c))) v0.AddArg2(v1, x) v.AddArg(v0) return true @@ -14519,7 +14500,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { return true } // match: (OR x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORload [off] {sym} x ptr mem) for { t := v.Type @@ -14529,17 +14510,17 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if g.Op != OpS390XMOVDload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XORload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -14554,20 +14535,20 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x1.Op != OpS390XMOVBZload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] sh := v_1 - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { + if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x0 := sh.Args[0] if x0.Op != OpS390XMOVBZload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -14577,8 +14558,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg2(p, mem) return true } @@ -14593,20 +14574,20 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x1.Op != 
OpS390XMOVHZload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] sh := v_1 - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { + if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 16 { continue } x0 := sh.Args[0] if x0.Op != OpS390XMOVHZload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -14616,8 +14597,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg2(p, mem) return true } @@ -14632,20 +14613,20 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x1.Op != OpS390XMOVWZload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] sh := v_1 - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { + if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 32 { continue } x0 := sh.Args[0] if x0.Op != OpS390XMOVWZload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -14655,8 +14636,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpS390XMOVDload, typ.UInt64) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg2(p, mem) return true } @@ -14671,13 +14652,13 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if s0.Op != OpS390XSLDconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpS390XMOVBZload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] or := v_1 @@ -14692,13 +14673,13 @@ func 
rewriteValueS390X_OpS390XOR(v *Value) bool { if s1.Op != OpS390XSLDconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpS390XMOVBZload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -14713,10 +14694,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 + v1.AuxInt = int8ToAuxInt(j1) v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s + v2.AuxInt = int32ToAuxInt(i0) + v2.Aux = symToAux(s) v2.AddArg2(p, mem) v1.AddArg(v2) v0.AddArg2(v1, y) @@ -14734,13 +14715,13 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if s0.Op != OpS390XSLDconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpS390XMOVHZload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] or := v_1 @@ -14755,13 +14736,13 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if s1.Op != OpS390XSLDconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpS390XMOVHZload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -14776,10 +14757,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 + v1.AuxInt = int8ToAuxInt(j1) v2 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s + v2.AuxInt = int32ToAuxInt(i0) + v2.Aux = symToAux(s) v2.AddArg2(p, mem) v1.AddArg(v2) v0.AddArg2(v1, y) @@ -14797,8 +14778,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x1.Op != OpS390XMOVBZloadidx { continue } 
- i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[2] x1_0 := x1.Args[0] x1_1 := x1.Args[1] @@ -14806,15 +14787,15 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { p := x1_0 idx := x1_1 sh := v_1 - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { + if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x0 := sh.Args[0] if x0.Op != OpS390XMOVBZloadidx { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[2] @@ -14827,8 +14808,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg3(p, idx, mem) return true } @@ -14845,8 +14826,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x1.Op != OpS390XMOVHZloadidx { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[2] x1_0 := x1.Args[0] x1_1 := x1.Args[1] @@ -14854,15 +14835,15 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { p := x1_0 idx := x1_1 sh := v_1 - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { + if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 16 { continue } x0 := sh.Args[0] if x0.Op != OpS390XMOVHZloadidx { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[2] @@ -14875,8 +14856,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg3(p, idx, mem) return true } @@ -14893,8 +14874,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x1.Op != OpS390XMOVWZloadidx { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := 
auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[2] x1_0 := x1.Args[0] x1_1 := x1.Args[1] @@ -14902,15 +14883,15 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { p := x1_0 idx := x1_1 sh := v_1 - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { + if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 32 { continue } x0 := sh.Args[0] if x0.Op != OpS390XMOVWZloadidx { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[2] @@ -14923,8 +14904,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg3(p, idx, mem) return true } @@ -14941,13 +14922,13 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if s0.Op != OpS390XSLDconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpS390XMOVBZloadidx { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[2] x0_0 := x0.Args[0] x0_1 := x0.Args[1] @@ -14966,13 +14947,13 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if s1.Op != OpS390XSLDconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpS390XMOVBZloadidx { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[2] @@ -14990,10 +14971,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 + v1.AuxInt = int8ToAuxInt(j1) v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s + v2.AuxInt = int32ToAuxInt(i0) + v2.Aux = symToAux(s) v2.AddArg3(p, idx, mem) v1.AddArg(v2) v0.AddArg2(v1, y) @@ -15013,13 
+14994,13 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if s0.Op != OpS390XSLDconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpS390XMOVHZloadidx { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[2] x0_0 := x0.Args[0] x0_1 := x0.Args[1] @@ -15038,13 +15019,13 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if s1.Op != OpS390XSLDconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpS390XMOVHZloadidx { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[2] @@ -15062,10 +15043,10 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 + v1.AuxInt = int8ToAuxInt(j1) v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s + v2.AuxInt = int32ToAuxInt(i0) + v2.Aux = symToAux(s) v2.AddArg3(p, idx, mem) v1.AddArg(v2) v0.AddArg2(v1, y) @@ -15085,20 +15066,20 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x0.Op != OpS390XMOVBZload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] sh := v_1 - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { + if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x1 := sh.Args[0] if x1.Op != OpS390XMOVBZload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -15109,8 +15090,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i0) + v1.Aux = symToAux(s) 
v1.AddArg2(p, mem) v0.AddArg(v1) return true @@ -15130,12 +15111,12 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x0.Op != OpS390XMOVHBRload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] sh := v_1 - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { + if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 16 { continue } r1 := sh.Args[0] @@ -15146,8 +15127,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x1.Op != OpS390XMOVHBRload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -15158,8 +15139,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(x1.Pos, OpS390XMOVWZreg, typ.UInt64) v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i0) + v1.Aux = symToAux(s) v1.AddArg2(p, mem) v0.AddArg(v1) return true @@ -15179,12 +15160,12 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x0.Op != OpS390XMOVWBRload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] sh := v_1 - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { + if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 32 { continue } r1 := sh.Args[0] @@ -15195,8 +15176,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x1.Op != OpS390XMOVWBRload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -15206,8 +15187,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpS390XMOVDBRload, typ.UInt64) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg2(p, mem) return true } @@ -15222,13 +15203,13 @@ func rewriteValueS390X_OpS390XOR(v 
*Value) bool { if s1.Op != OpS390XSLDconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpS390XMOVBZload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] or := v_1 @@ -15243,13 +15224,13 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if s0.Op != OpS390XSLDconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpS390XMOVBZload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -15264,11 +15245,11 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 + v1.AuxInt = int8ToAuxInt(j0) v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s + v3.AuxInt = int32ToAuxInt(i0) + v3.Aux = symToAux(s) v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) @@ -15287,7 +15268,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if s1.Op != OpS390XSLDconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) r1 := s1.Args[0] if r1.Op != OpS390XMOVHZreg { continue @@ -15296,8 +15277,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x1.Op != OpS390XMOVHBRload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] or := v_1 @@ -15312,7 +15293,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if s0.Op != OpS390XSLDconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) r0 := s0.Args[0] if r0.Op != OpS390XMOVHZreg { continue @@ -15321,8 +15302,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x0.Op != OpS390XMOVHBRload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := 
auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -15337,11 +15318,11 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 + v1.AuxInt = int8ToAuxInt(j0) v2 := b.NewValue0(x0.Pos, OpS390XMOVWZreg, typ.UInt64) v3 := b.NewValue0(x0.Pos, OpS390XMOVWBRload, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s + v3.AuxInt = int32ToAuxInt(i0) + v3.Aux = symToAux(s) v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) @@ -15360,8 +15341,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x0.Op != OpS390XMOVBZloadidx { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[2] x0_0 := x0.Args[0] x0_1 := x0.Args[1] @@ -15369,15 +15350,15 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { p := x0_0 idx := x0_1 sh := v_1 - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { + if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x1 := sh.Args[0] if x1.Op != OpS390XMOVBZloadidx { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[2] @@ -15391,8 +15372,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i0) + v1.Aux = symToAux(s) v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true @@ -15414,8 +15395,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x0.Op != OpS390XMOVHBRloadidx { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[2] x0_0 := x0.Args[0] x0_1 := x0.Args[1] @@ -15423,7 +15404,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { p := x0_0 idx := x0_1 sh := v_1 - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { + 
if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 16 { continue } r1 := sh.Args[0] @@ -15434,8 +15415,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x1.Op != OpS390XMOVHBRloadidx { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[2] @@ -15449,8 +15430,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v1.AuxInt = i0 - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i0) + v1.Aux = symToAux(s) v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true @@ -15472,8 +15453,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x0.Op != OpS390XMOVWBRloadidx { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[2] x0_0 := x0.Args[0] x0_1 := x0.Args[1] @@ -15481,7 +15462,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { p := x0_0 idx := x0_1 sh := v_1 - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { + if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 32 { continue } r1 := sh.Args[0] @@ -15492,8 +15473,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x1.Op != OpS390XMOVWBRloadidx { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[2] @@ -15506,8 +15487,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg3(p, idx, mem) return true } @@ -15524,13 +15505,13 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if s1.Op != OpS390XSLDconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpS390XMOVBZloadidx { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := 
auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[2] x1_0 := x1.Args[0] x1_1 := x1.Args[1] @@ -15549,13 +15530,13 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if s0.Op != OpS390XSLDconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpS390XMOVBZloadidx { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[2] @@ -15573,11 +15554,11 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 + v1.AuxInt = int8ToAuxInt(j0) v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s + v3.AuxInt = int32ToAuxInt(i0) + v3.Aux = symToAux(s) v3.AddArg3(p, idx, mem) v2.AddArg(v3) v1.AddArg(v2) @@ -15598,7 +15579,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if s1.Op != OpS390XSLDconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) r1 := s1.Args[0] if r1.Op != OpS390XMOVHZreg { continue @@ -15607,8 +15588,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x1.Op != OpS390XMOVHBRloadidx { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[2] x1_0 := x1.Args[0] x1_1 := x1.Args[1] @@ -15627,7 +15608,7 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if s0.Op != OpS390XSLDconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) r0 := s0.Args[0] if r0.Op != OpS390XMOVHZreg { continue @@ -15636,8 +15617,8 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { if x0.Op != OpS390XMOVHBRloadidx { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[2] @@ -15655,11 +15636,11 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v0 := b.NewValue0(v.Pos, 
OpS390XOR, v.Type) v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 + v1.AuxInt = int8ToAuxInt(j0) v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s + v3.AuxInt = int32ToAuxInt(i0) + v3.Aux = symToAux(s) v3.AddArg3(p, idx, mem) v2.AddArg(v3) v1.AddArg(v2) @@ -15679,16 +15660,16 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (ORW x (MOVDconst [c])) - // result: (ORWconst [int64(int32(c))] x) + // result: (ORWconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpS390XMOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XORWconst) - v.AuxInt = int64(int32(c)) + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -15729,7 +15710,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { return true } // match: (ORW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -15739,24 +15720,24 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if g.Op != OpS390XMOVWload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XORWload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } break } // match: (ORW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && 
canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -15766,17 +15747,17 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if g.Op != OpS390XMOVWZload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XORWload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -15791,20 +15772,20 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if x1.Op != OpS390XMOVBZload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] sh := v_1 - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { + if sh.Op != OpS390XSLWconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x0 := sh.Args[0] if x0.Op != OpS390XMOVBZload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -15814,8 +15795,8 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg2(p, mem) return true } @@ -15830,20 +15811,20 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if x1.Op != OpS390XMOVHZload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] sh := v_1 - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { + if sh.Op != OpS390XSLWconst || 
auxIntToInt8(sh.AuxInt) != 16 { continue } x0 := sh.Args[0] if x0.Op != OpS390XMOVHZload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -15853,8 +15834,8 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg2(p, mem) return true } @@ -15869,13 +15850,13 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if s0.Op != OpS390XSLWconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpS390XMOVBZload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] or := v_1 @@ -15890,13 +15871,13 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if s1.Op != OpS390XSLWconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpS390XMOVBZload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -15911,10 +15892,10 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v0 := b.NewValue0(x1.Pos, OpS390XORW, v.Type) v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 + v1.AuxInt = int8ToAuxInt(j1) v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s + v2.AuxInt = int32ToAuxInt(i0) + v2.Aux = symToAux(s) v2.AddArg2(p, mem) v1.AddArg(v2) v0.AddArg2(v1, y) @@ -15932,8 +15913,8 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if x1.Op != OpS390XMOVBZloadidx { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[2] x1_0 := x1.Args[0] x1_1 := x1.Args[1] @@ -15941,15 +15922,15 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { p 
:= x1_0 idx := x1_1 sh := v_1 - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { + if sh.Op != OpS390XSLWconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x0 := sh.Args[0] if x0.Op != OpS390XMOVBZloadidx { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[2] @@ -15962,8 +15943,8 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg3(p, idx, mem) return true } @@ -15980,8 +15961,8 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if x1.Op != OpS390XMOVHZloadidx { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[2] x1_0 := x1.Args[0] x1_1 := x1.Args[1] @@ -15989,15 +15970,15 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { p := x1_0 idx := x1_1 sh := v_1 - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { + if sh.Op != OpS390XSLWconst || auxIntToInt8(sh.AuxInt) != 16 { continue } x0 := sh.Args[0] if x0.Op != OpS390XMOVHZloadidx { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[2] @@ -16010,8 +15991,8 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg3(p, idx, mem) return true } @@ -16028,13 +16009,13 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if s0.Op != OpS390XSLWconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpS390XMOVBZloadidx { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[2] x0_0 := x0.Args[0] x0_1 := x0.Args[1] @@ 
-16053,13 +16034,13 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if s1.Op != OpS390XSLWconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpS390XMOVBZloadidx { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[2] @@ -16077,10 +16058,10 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 + v1.AuxInt = int8ToAuxInt(j1) v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s + v2.AuxInt = int32ToAuxInt(i0) + v2.Aux = symToAux(s) v2.AddArg3(p, idx, mem) v1.AddArg(v2) v0.AddArg2(v1, y) @@ -16100,20 +16081,20 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if x0.Op != OpS390XMOVBZload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] sh := v_1 - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { + if sh.Op != OpS390XSLWconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x1 := sh.Args[0] if x1.Op != OpS390XMOVBZload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -16124,8 +16105,8 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i0) + v1.Aux = symToAux(s) v1.AddArg2(p, mem) v0.AddArg(v1) return true @@ -16145,12 +16126,12 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if x0.Op != OpS390XMOVHBRload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] sh := v_1 - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { + if sh.Op != 
OpS390XSLWconst || auxIntToInt8(sh.AuxInt) != 16 { continue } r1 := sh.Args[0] @@ -16161,8 +16142,8 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if x1.Op != OpS390XMOVHBRload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -16172,8 +16153,8 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg2(p, mem) return true } @@ -16188,13 +16169,13 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if s1.Op != OpS390XSLWconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpS390XMOVBZload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] or := v_1 @@ -16209,13 +16190,13 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if s0.Op != OpS390XSLWconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpS390XMOVBZload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -16230,11 +16211,11 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v0 := b.NewValue0(x0.Pos, OpS390XORW, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 + v1.AuxInt = int8ToAuxInt(j0) v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s + v3.AuxInt = int32ToAuxInt(i0) + v3.Aux = symToAux(s) v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) @@ -16253,8 +16234,8 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if x0.Op != OpS390XMOVBZloadidx { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := 
auxToSym(x0.Aux) mem := x0.Args[2] x0_0 := x0.Args[0] x0_1 := x0.Args[1] @@ -16262,15 +16243,15 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { p := x0_0 idx := x0_1 sh := v_1 - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { + if sh.Op != OpS390XSLWconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x1 := sh.Args[0] if x1.Op != OpS390XMOVBZloadidx { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[2] @@ -16284,8 +16265,8 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i0) + v1.Aux = symToAux(s) v1.AddArg3(p, idx, mem) v0.AddArg(v1) return true @@ -16307,8 +16288,8 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if x0.Op != OpS390XMOVHBRloadidx { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[2] x0_0 := x0.Args[0] x0_1 := x0.Args[1] @@ -16316,7 +16297,7 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { p := x0_0 idx := x0_1 sh := v_1 - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { + if sh.Op != OpS390XSLWconst || auxIntToInt8(sh.AuxInt) != 16 { continue } r1 := sh.Args[0] @@ -16327,8 +16308,8 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if x1.Op != OpS390XMOVHBRloadidx { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[2] @@ -16341,8 +16322,8 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg3(p, idx, mem) return true } @@ -16359,13 +16340,13 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if s1.Op != OpS390XSLWconst { 
continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpS390XMOVBZloadidx { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[2] x1_0 := x1.Args[0] x1_1 := x1.Args[1] @@ -16384,13 +16365,13 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if s0.Op != OpS390XSLWconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpS390XMOVBZloadidx { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[2] @@ -16408,11 +16389,11 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) v.copyOf(v0) v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 + v1.AuxInt = int8ToAuxInt(j0) v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s + v3.AuxInt = int32ToAuxInt(i0) + v3.Aux = symToAux(s) v3.AddArg3(p, idx, mem) v2.AddArg(v3) v1.AddArg(v2) @@ -16453,15 +16434,15 @@ func rewriteValueS390X_OpS390XORWconst(v *Value) bool { return true } // match: (ORWconst [c] (MOVDconst [d])) - // result: (MOVDconst [c|d]) + // result: (MOVDconst [int64(c)|d]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XMOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpS390XMOVDconst) - v.AuxInt = c | d + v.AuxInt = int64ToAuxInt(int64(c) | d) return true } return false @@ -16471,47 +16452,47 @@ func rewriteValueS390X_OpS390XORWload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ORWload [off1] {sym} x (ADDconst [off2] ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) // result: (ORWload [off1+off2] {sym} x ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := 
auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XADDconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { break } v.reset(OpS390XORWload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } // match: (ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) - // result: (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (ORWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) for { - o1 := v.AuxInt - s1 := v.Aux + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XMOVDaddr { break } - o2 := v_1.AuxInt - s2 := v_1.Aux + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { break } v.reset(OpS390XORWload) - v.AuxInt = o1 + o2 - v.Aux = mergeSym(s1, s2) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSymTyped(s1, s2)) v.AddArg3(x, ptr, mem) return true } @@ -16582,47 +16563,47 @@ func rewriteValueS390X_OpS390XORload(v *Value) bool { return true } // match: (ORload [off1] {sym} x (ADDconst [off2] ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) // result: (ORload [off1+off2] {sym} x ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XADDconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + if !(ptr.Op != OpSB && 
is20Bit(int64(off1)+int64(off2))) { break } v.reset(OpS390XORload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } // match: (ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) - // result: (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (ORload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) for { - o1 := v.AuxInt - s1 := v.Aux + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XMOVDaddr { break } - o2 := v_1.AuxInt - s2 := v_1.Aux + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { break } v.reset(OpS390XORload) - v.AuxInt = o1 + o2 - v.Aux = mergeSym(s1, s2) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSymTyped(s1, s2)) v.AddArg3(x, ptr, mem) return true } @@ -16632,15 +16613,15 @@ func rewriteValueS390X_OpS390XRLL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (RLL x (MOVDconst [c])) - // result: (RLLconst x [c&31]) + // result: (RLLconst x [int8(c&31)]) for { x := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XRLLconst) - v.AuxInt = c & 31 + v.AuxInt = int8ToAuxInt(int8(c & 31)) v.AddArg(x) return true } @@ -16650,15 +16631,15 @@ func rewriteValueS390X_OpS390XRLLG(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (RLLG x (MOVDconst [c])) - // result: (RLLGconst x [c&63]) + // result: (RLLGconst x [int8(c&63)]) for { x := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XRLLGconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) 
v.AddArg(x) return true } @@ -16670,20 +16651,20 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (SLD x (MOVDconst [c])) - // result: (SLDconst x [c&63]) + // result: (SLDconst x [int8(c&63)]) for { x := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XSLDconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) v.AddArg(x) return true } // match: (SLD x (AND (MOVDconst [c]) y)) - // result: (SLD x (ANDWconst [c&63] y)) + // result: (SLD x (ANDWconst [int32(c&63)] y)) for { x := v_0 if v_1.Op != OpS390XAND { @@ -16696,11 +16677,11 @@ func rewriteValueS390X_OpS390XSLD(v *Value) bool { if v_1_0.Op != OpS390XMOVDconst { continue } - c := v_1_0.AuxInt + c := auxIntToInt64(v_1_0.AuxInt) y := v_1_1 v.reset(OpS390XSLD) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 + v0.AuxInt = int32ToAuxInt(int32(c & 63)) v0.AddArg(y) v.AddArg2(x, v0) return true @@ -16818,20 +16799,20 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (SLW x (MOVDconst [c])) - // result: (SLWconst x [c&63]) + // result: (SLWconst x [int8(c&63)]) for { x := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XSLWconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) v.AddArg(x) return true } // match: (SLW x (AND (MOVDconst [c]) y)) - // result: (SLW x (ANDWconst [c&63] y)) + // result: (SLW x (ANDWconst [int32(c&63)] y)) for { x := v_0 if v_1.Op != OpS390XAND { @@ -16844,11 +16825,11 @@ func rewriteValueS390X_OpS390XSLW(v *Value) bool { if v_1_0.Op != OpS390XMOVDconst { continue } - c := v_1_0.AuxInt + c := auxIntToInt64(v_1_0.AuxInt) y := v_1_1 v.reset(OpS390XSLW) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 + v0.AuxInt = int32ToAuxInt(int32(c & 63)) v0.AddArg(y) v.AddArg2(x, v0) return true @@ 
-16966,20 +16947,20 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (SRAD x (MOVDconst [c])) - // result: (SRADconst x [c&63]) + // result: (SRADconst x [int8(c&63)]) for { x := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XSRADconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) v.AddArg(x) return true } // match: (SRAD x (AND (MOVDconst [c]) y)) - // result: (SRAD x (ANDWconst [c&63] y)) + // result: (SRAD x (ANDWconst [int32(c&63)] y)) for { x := v_0 if v_1.Op != OpS390XAND { @@ -16992,11 +16973,11 @@ func rewriteValueS390X_OpS390XSRAD(v *Value) bool { if v_1_0.Op != OpS390XMOVDconst { continue } - c := v_1_0.AuxInt + c := auxIntToInt64(v_1_0.AuxInt) y := v_1_1 v.reset(OpS390XSRAD) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 + v0.AuxInt = int32ToAuxInt(int32(c & 63)) v0.AddArg(y) v.AddArg2(x, v0) return true @@ -17126,20 +17107,20 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (SRAW x (MOVDconst [c])) - // result: (SRAWconst x [c&63]) + // result: (SRAWconst x [int8(c&63)]) for { x := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XSRAWconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) v.AddArg(x) return true } // match: (SRAW x (AND (MOVDconst [c]) y)) - // result: (SRAW x (ANDWconst [c&63] y)) + // result: (SRAW x (ANDWconst [int32(c&63)] y)) for { x := v_0 if v_1.Op != OpS390XAND { @@ -17152,11 +17133,11 @@ func rewriteValueS390X_OpS390XSRAW(v *Value) bool { if v_1_0.Op != OpS390XMOVDconst { continue } - c := v_1_0.AuxInt + c := auxIntToInt64(v_1_0.AuxInt) y := v_1_1 v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 + v0.AuxInt = int32ToAuxInt(int32(c & 63)) v0.AddArg(y) v.AddArg2(x, v0) return true @@ -17286,20 
+17267,20 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (SRD x (MOVDconst [c])) - // result: (SRDconst x [c&63]) + // result: (SRDconst x [int8(c&63)]) for { x := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XSRDconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) v.AddArg(x) return true } // match: (SRD x (AND (MOVDconst [c]) y)) - // result: (SRD x (ANDWconst [c&63] y)) + // result: (SRD x (ANDWconst [int32(c&63)] y)) for { x := v_0 if v_1.Op != OpS390XAND { @@ -17312,11 +17293,11 @@ func rewriteValueS390X_OpS390XSRD(v *Value) bool { if v_1_0.Op != OpS390XMOVDconst { continue } - c := v_1_0.AuxInt + c := auxIntToInt64(v_1_0.AuxInt) y := v_1_1 v.reset(OpS390XSRD) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 + v0.AuxInt = int32ToAuxInt(int32(c & 63)) v0.AddArg(y) v.AddArg2(x, v0) return true @@ -17454,20 +17435,20 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (SRW x (MOVDconst [c])) - // result: (SRWconst x [c&63]) + // result: (SRWconst x [int8(c&63)]) for { x := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XSRWconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) v.AddArg(x) return true } // match: (SRW x (AND (MOVDconst [c]) y)) - // result: (SRW x (ANDWconst [c&63] y)) + // result: (SRW x (ANDWconst [int32(c&63)] y)) for { x := v_0 if v_1.Op != OpS390XAND { @@ -17480,11 +17461,11 @@ func rewriteValueS390X_OpS390XSRW(v *Value) bool { if v_1_0.Op != OpS390XMOVDconst { continue } - c := v_1_0.AuxInt + c := auxIntToInt64(v_1_0.AuxInt) y := v_1_1 v.reset(OpS390XSRW) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 + v0.AuxInt = int32ToAuxInt(int32(c & 63)) v0.AddArg(y) v.AddArg2(x, v0) return true @@ -17602,16 +17583,16 @@ func 
rewriteValueS390X_OpS390XSTM2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem)) - // cond: x.Uses == 1 && is20Bit(i-8) && clobber(x) + // cond: x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x) // result: (STM4 [i-8] {s} p w0 w1 w2 w3 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w2 := v_1 w3 := v_2 x := v_3 - if x.Op != OpS390XSTM2 || x.AuxInt != i-8 || x.Aux != s { + if x.Op != OpS390XSTM2 || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s { break } mem := x.Args[3] @@ -17620,22 +17601,22 @@ func rewriteValueS390X_OpS390XSTM2(v *Value) bool { } w0 := x.Args[1] w1 := x.Args[2] - if !(x.Uses == 1 && is20Bit(i-8) && clobber(x)) { + if !(x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) { break } v.reset(OpS390XSTM4) - v.AuxInt = i - 8 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 8) + v.Aux = symToAux(s) v.AddArg6(p, w0, w1, w2, w3, mem) return true } // match: (STM2 [i] {s} p (SRDconst [32] x) x mem) // result: (MOVDstore [i] {s} p x mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 - if v_1.Op != OpS390XSRDconst || v_1.AuxInt != 32 { + if v_1.Op != OpS390XSRDconst || auxIntToInt8(v_1.AuxInt) != 32 { break } x := v_1.Args[0] @@ -17644,8 +17625,8 @@ func rewriteValueS390X_OpS390XSTM2(v *Value) bool { } mem := v_3 v.reset(OpS390XMOVDstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p, x, mem) return true } @@ -17657,16 +17638,16 @@ func rewriteValueS390X_OpS390XSTMG2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem)) - // cond: x.Uses == 1 && is20Bit(i-16) && clobber(x) + // cond: x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x) // result: (STMG4 [i-16] {s} p w0 w1 w2 w3 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w2 := 
v_1 w3 := v_2 x := v_3 - if x.Op != OpS390XSTMG2 || x.AuxInt != i-16 || x.Aux != s { + if x.Op != OpS390XSTMG2 || auxIntToInt32(x.AuxInt) != i-16 || auxToSym(x.Aux) != s { break } mem := x.Args[3] @@ -17675,12 +17656,12 @@ func rewriteValueS390X_OpS390XSTMG2(v *Value) bool { } w0 := x.Args[1] w1 := x.Args[2] - if !(x.Uses == 1 && is20Bit(i-16) && clobber(x)) { + if !(x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)) { break } v.reset(OpS390XSTMG4) - v.AuxInt = i - 16 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 16) + v.Aux = symToAux(s) v.AddArg6(p, w0, w1, w2, w3, mem) return true } @@ -17692,36 +17673,36 @@ func rewriteValueS390X_OpS390XSUB(v *Value) bool { b := v.Block // match: (SUB x (MOVDconst [c])) // cond: is32Bit(c) - // result: (SUBconst x [c]) + // result: (SUBconst x [int32(c)]) for { x := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { break } v.reset(OpS390XSUBconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } // match: (SUB (MOVDconst [c]) x) // cond: is32Bit(c) - // result: (NEG (SUBconst x [c])) + // result: (NEG (SUBconst x [int32(c)])) for { if v_0.Op != OpS390XMOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 if !(is32Bit(c)) { break } v.reset(OpS390XNEG) v0 := b.NewValue0(v.Pos, OpS390XSUBconst, v.Type) - v0.AuxInt = c + v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -17738,7 +17719,7 @@ func rewriteValueS390X_OpS390XSUB(v *Value) bool { return true } // match: (SUB x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (SUBload [off] {sym} x ptr mem) for { t := v.Type @@ -17747,17 +17728,17 @@ func rewriteValueS390X_OpS390XSUB(v *Value) bool { if g.Op != OpS390XMOVDload { break } - off := g.AuxInt - sym := g.Aux + 
off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XSUBload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -17840,29 +17821,29 @@ func rewriteValueS390X_OpS390XSUBW(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SUBW x (MOVDconst [c])) - // result: (SUBWconst x [int64(int32(c))]) + // result: (SUBWconst x [int32(c)]) for { x := v_0 if v_1.Op != OpS390XMOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XSUBWconst) - v.AuxInt = int64(int32(c)) + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } // match: (SUBW (MOVDconst [c]) x) - // result: (NEGW (SUBWconst x [int64(int32(c))])) + // result: (NEGW (SUBWconst x [int32(c)])) for { if v_0.Op != OpS390XMOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpS390XNEGW) v0 := b.NewValue0(v.Pos, OpS390XSUBWconst, v.Type) - v0.AuxInt = int64(int32(c)) + v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -17879,7 +17860,7 @@ func rewriteValueS390X_OpS390XSUBW(v *Value) bool { return true } // match: (SUBW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (SUBWload [off] {sym} x ptr mem) for { t := v.Type @@ -17888,22 +17869,22 @@ func rewriteValueS390X_OpS390XSUBW(v *Value) bool { if g.Op != OpS390XMOVWload { break } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, 
x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XSUBWload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } // match: (SUBW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (SUBWload [off] {sym} x ptr mem) for { t := v.Type @@ -17912,17 +17893,17 @@ func rewriteValueS390X_OpS390XSUBW(v *Value) bool { if g.Op != OpS390XMOVWZload { break } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XSUBWload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -17943,12 +17924,12 @@ func rewriteValueS390X_OpS390XSUBWconst(v *Value) bool { return true } // match: (SUBWconst [c] x) - // result: (ADDWconst [int64(int32(-c))] x) + // result: (ADDWconst [-int32(c)] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 v.reset(OpS390XADDWconst) - v.AuxInt = int64(int32(-c)) + v.AuxInt = int32ToAuxInt(-int32(c)) v.AddArg(x) return true } @@ -17958,47 +17939,47 @@ func rewriteValueS390X_OpS390XSUBWload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) // result: (SUBWload [off1+off2] {sym} x ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) 
x := v_0 if v_1.Op != OpS390XADDconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { break } v.reset(OpS390XSUBWload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } // match: (SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) - // result: (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (SUBWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) for { - o1 := v.AuxInt - s1 := v.Aux + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XMOVDaddr { break } - o2 := v_1.AuxInt - s2 := v_1.Aux + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { break } v.reset(OpS390XSUBWload) - v.AuxInt = o1 + o2 - v.Aux = mergeSym(s1, s2) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSymTyped(s1, s2)) v.AddArg3(x, ptr, mem) return true } @@ -18031,32 +18012,32 @@ func rewriteValueS390X_OpS390XSUBconst(v *Value) bool { return true } // match: (SUBconst (MOVDconst [d]) [c]) - // result: (MOVDconst [d-c]) + // result: (MOVDconst [d-int64(c)]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XMOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpS390XMOVDconst) - v.AuxInt = d - c + v.AuxInt = int64ToAuxInt(d - int64(c)) return true } // match: (SUBconst (SUBconst x [d]) [c]) - // cond: is32Bit(-c-d) + // cond: is32Bit(-int64(c)-int64(d)) // result: (ADDconst [-c-d] x) for { - c := v.AuxInt + c := 
auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XSUBconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(is32Bit(-c - d)) { + if !(is32Bit(-int64(c) - int64(d))) { break } v.reset(OpS390XADDconst) - v.AuxInt = -c - d + v.AuxInt = int32ToAuxInt(-c - d) v.AddArg(x) return true } @@ -18091,47 +18072,47 @@ func rewriteValueS390X_OpS390XSUBload(v *Value) bool { return true } // match: (SUBload [off1] {sym} x (ADDconst [off2] ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) // result: (SUBload [off1+off2] {sym} x ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XADDconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { break } v.reset(OpS390XSUBload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } // match: (SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) - // result: (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (SUBload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) for { - o1 := v.AuxInt - s1 := v.Aux + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XMOVDaddr { break } - o2 := v_1.AuxInt - s2 := v_1.Aux + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { break } v.reset(OpS390XSUBload) - v.AuxInt = o1 + o2 - v.Aux = mergeSym(s1, s2) + v.AuxInt = int32ToAuxInt(o1 + 
o2) + v.Aux = symToAux(mergeSymTyped(s1, s2)) v.AddArg3(x, ptr, mem) return true } @@ -18266,7 +18247,7 @@ func rewriteValueS390X_OpS390XXOR(v *Value) bool { return true } // match: (XOR x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORload [off] {sym} x ptr mem) for { t := v.Type @@ -18276,17 +18257,17 @@ func rewriteValueS390X_OpS390XXOR(v *Value) bool { if g.Op != OpS390XMOVDload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XXORload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -18298,16 +18279,16 @@ func rewriteValueS390X_OpS390XXORW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (XORW x (MOVDconst [c])) - // result: (XORWconst [int64(int32(c))] x) + // result: (XORWconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpS390XMOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpS390XXORWconst) - v.AuxInt = int64(int32(c)) + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -18349,7 +18330,7 @@ func rewriteValueS390X_OpS390XXORW(v *Value) bool { return true } // match: (XORW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -18359,24 +18340,24 @@ func 
rewriteValueS390X_OpS390XXORW(v *Value) bool { if g.Op != OpS390XMOVWload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XXORWload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } break } // match: (XORW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -18386,17 +18367,17 @@ func rewriteValueS390X_OpS390XXORW(v *Value) bool { if g.Op != OpS390XMOVWZload { continue } - off := g.AuxInt - sym := g.Aux + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) mem := g.Args[1] ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { continue } v.reset(OpS390XXORWload) v.Type = t - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -18419,15 +18400,15 @@ func rewriteValueS390X_OpS390XXORWconst(v *Value) bool { return true } // match: (XORWconst [c] (MOVDconst [d])) - // result: (MOVDconst [c^d]) + // result: (MOVDconst [int64(c)^d]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpS390XMOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpS390XMOVDconst) - v.AuxInt = c ^ d + v.AuxInt = int64ToAuxInt(int64(c) ^ d) return true } return false @@ -18437,47 +18418,47 @@ func rewriteValueS390X_OpS390XXORWload(v 
*Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (XORWload [off1] {sym} x (ADDconst [off2] ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) // result: (XORWload [off1+off2] {sym} x ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XADDconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { break } v.reset(OpS390XXORWload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } // match: (XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) - // result: (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (XORWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) for { - o1 := v.AuxInt - s1 := v.Aux + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XMOVDaddr { break } - o2 := v_1.AuxInt - s2 := v_1.Aux + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { break } v.reset(OpS390XXORWload) - v.AuxInt = o1 + o2 - v.Aux = mergeSym(s1, s2) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSymTyped(s1, s2)) v.AddArg3(x, ptr, mem) return true } @@ -18538,47 +18519,47 @@ func rewriteValueS390X_OpS390XXORload(v *Value) bool { return true } // match: (XORload [off1] {sym} x (ADDconst [off2] ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) // 
result: (XORload [off1+off2] {sym} x ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XADDconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { break } v.reset(OpS390XXORload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } // match: (XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) - // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) - // result: (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (XORload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem) for { - o1 := v.AuxInt - s1 := v.Aux + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpS390XMOVDaddr { break } - o2 := v_1.AuxInt - s2 := v_1.Aux + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) ptr := v_1.Args[0] mem := v_2 - if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { break } v.reset(OpS390XXORload) - v.AuxInt = o1 + o2 - v.Aux = mergeSym(s1, s2) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSymTyped(s1, s2)) v.AddArg3(x, ptr, mem) return true } @@ -18662,19 +18643,19 @@ func rewriteValueS390X_OpSelect0(v *Value) bool { return true } // match: (Select0 (ADDCconst (MOVDconst [c]) [d])) - // result: (MOVDconst [c+d]) + // result: (MOVDconst [c+int64(d)]) for { if v_0.Op != OpS390XADDCconst { break } - d := v_0.AuxInt + d := auxIntToInt16(v_0.AuxInt) v_0_0 := v_0.Args[0] if v_0_0.Op != OpS390XMOVDconst { break } - c := v_0_0.AuxInt + c := auxIntToInt64(v_0_0.AuxInt) v.reset(OpS390XMOVDconst) - v.AuxInt = c + d + v.AuxInt = 
int64ToAuxInt(c + int64(d)) return true } // match: (Select0 (SUBC (MOVDconst [c]) (MOVDconst [d]))) @@ -18859,38 +18840,38 @@ func rewriteValueS390X_OpSelect1(v *Value) bool { return true } // match: (Select1 (ADDCconst (MOVDconst [c]) [d])) - // cond: uint64(c+d) >= uint64(c) && c+d == 0 + // cond: uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0 // result: (FlagEQ) for { if v_0.Op != OpS390XADDCconst { break } - d := v_0.AuxInt + d := auxIntToInt16(v_0.AuxInt) v_0_0 := v_0.Args[0] if v_0_0.Op != OpS390XMOVDconst { break } - c := v_0_0.AuxInt - if !(uint64(c+d) >= uint64(c) && c+d == 0) { + c := auxIntToInt64(v_0_0.AuxInt) + if !(uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0) { break } v.reset(OpS390XFlagEQ) return true } // match: (Select1 (ADDCconst (MOVDconst [c]) [d])) - // cond: uint64(c+d) >= uint64(c) && c+d != 0 + // cond: uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0 // result: (FlagLT) for { if v_0.Op != OpS390XADDCconst { break } - d := v_0.AuxInt + d := auxIntToInt16(v_0.AuxInt) v_0_0 := v_0.Args[0] if v_0_0.Op != OpS390XMOVDconst { break } - c := v_0_0.AuxInt - if !(uint64(c+d) >= uint64(c) && c+d != 0) { + c := auxIntToInt64(v_0_0.AuxInt) + if !(uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0) { break } v.reset(OpS390XFlagLT) -- cgit v1.2.3-54-g00ecf From 3ffa1381ec6e44ce1858da8c26267f2888480a4d Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Sat, 22 Aug 2020 14:31:49 -0400 Subject: cmd/internal/objabi: delete doc.go cmd/internal/objabi/doc.go has comments decribing the (old) object file format. But cmd/internal/objabi has nothing to do with object files, and never did. Delete. Move some comment to cmd/internal/goobj, where the (new) object file format is actually defined, and update to reflect the current status. 
Change-Id: Ied96089df4be35e5d259a572ed60ee00f2cd0d1d Reviewed-on: https://go-review.googlesource.com/c/go/+/249958 Reviewed-by: Than McIntosh --- src/cmd/internal/goobj/objfile.go | 14 ++++- src/cmd/internal/objabi/doc.go | 122 -------------------------------------- 2 files changed, 13 insertions(+), 123 deletions(-) delete mode 100644 src/cmd/internal/objabi/doc.go diff --git a/src/cmd/internal/goobj/objfile.go b/src/cmd/internal/goobj/objfile.go index 5d4a253024..8ec7c481d6 100644 --- a/src/cmd/internal/goobj/objfile.go +++ b/src/cmd/internal/goobj/objfile.go @@ -2,7 +2,19 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Go new object file format, reading and writing. +// This package defines the Go object file format, and provide "low-level" functions +// for reading and writing object files. + +// The object file is understood by the compiler, assembler, linker, and tools. They +// have "high level" code that operates on object files, handling application-specific +// logics, and use this package for the actual reading and writing. Specifically, the +// code below: +// +// - cmd/internal/obj/objfile.go (used by cmd/asm and cmd/compile) +// - cmd/internal/objfile/goobj.go (used cmd/nm, cmd/objdump) +// - cmd/link/internal/loader package (used by cmd/link) +// +// If the object file format changes, they may (or may not) need to change. package goobj diff --git a/src/cmd/internal/objabi/doc.go b/src/cmd/internal/objabi/doc.go deleted file mode 100644 index 08e922b11f..0000000000 --- a/src/cmd/internal/objabi/doc.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// NOTE: There are *three* independent implementations of this object -// file format in the Go source tree: -// -// - cmd/internal/goobj/read.go (used by cmd/addr2line, cmd/nm, cmd/objdump, cmd/pprof) -// - cmd/internal/obj/objfile.go (used by cmd/asm and cmd/compile) -// - cmd/link/internal/objfile.go (used by cmd/link) -// -// When changing the object file format, remember to change all three. - -// Originally, Go object files were Plan 9 object files, but no longer. -// Now they are more like standard object files, in that each symbol is defined -// by an associated memory image (bytes) and a list of relocations to apply -// during linking. We do not (yet?) use a standard file format, however. -// For now, the format is chosen to be as simple as possible to read and write. -// It may change for reasons of efficiency, or we may even switch to a -// standard file format if there are compelling benefits to doing so. -// See golang.org/s/go13linker for more background. -// -// The file format is: -// -// - magic header: "\x00go114ld" -// - byte 1 - version number -// - sequence of strings giving dependencies (imported packages) -// - empty string (marks end of sequence) -// - number of entries in the following sequence -// - sequence of filename strings to generate debug information -// - sequence of symbol references used by the defined symbols -// - byte 0xff (marks end of sequence) -// - sequence of integer lengths: -// - total data length -// - total number of relocations -// - total number of pcdata -// - total number of automatics -// - total number of funcdata -// - total number of files -// - data, the content of the defined symbols -// - sequence of defined symbols -// - byte 0xff (marks end of sequence) -// - magic footer: "\xffgo114ld" -// -// All integers are stored in a zigzag varint format. -// See golang.org/s/go12symtab for a definition. -// -// Data blocks and strings are both stored as an integer -// followed by that many bytes. 
-// -// A symbol reference is a string name followed by an ABI or -1 for static. -// -// A symbol points to other symbols using an index into the symbol -// reference sequence. Index 0 corresponds to a nil symbol pointer. -// In the symbol layout described below "symref index" stands for this -// index. -// -// Each symbol is laid out as the following fields: -// -// - byte 0xfe (sanity check for synchronization) -// - type [byte] -// - name & ABI [symref index] -// - flags [int] -// 1<<0 dupok -// 1<<1 local -// 1<<2 add to typelink table -// - size [int] -// - gotype [symref index] -// - p [data block] -// - nr [int] -// - r [nr relocations, sorted by off] -// -// If type == STEXT, there are a few more fields: -// -// - args [int] -// - locals [int] -// - nosplit [int] -// - flags [int] -// 1<<0 leaf -// 1<<1 C function -// 1<<2 function may call reflect.Type.Method -// 1<<3 function compiled with -shared -// - nlocal [int] -// - local [nlocal automatics] -// - pcln [pcln table] -// -// Each relocation has the encoding: -// -// - off [int] -// - siz [int] -// - type [int] -// - add [int] -// - sym [symref index] -// -// Each local has the encoding: -// -// - asym [symref index] -// - offset [int] -// - type [int] -// - gotype [symref index] -// -// The pcln table has the encoding: -// -// - pcsp [data block] -// - pcfile [data block] -// - pcline [data block] -// - pcinline [data block] -// - npcdata [int] -// - pcdata [npcdata data blocks] -// - nfuncdata [int] -// - funcdata [nfuncdata symref index] -// - funcdatasym [nfuncdata ints] -// - nfile [int] -// - file [nfile symref index] -// - ninlinedcall [int] -// - inlinedcall [ninlinedcall int symref int symref] -// -// The file layout and meaning of type integers are architecture-independent. -// -// TODO(rsc): The file format is good for a first pass but needs work. -// - There are SymID in the object file that should really just be strings. 
-package objabi -- cgit v1.2.3-54-g00ecf From 63828096f61593cd766d6e25fce74133efeb17f8 Mon Sep 17 00:00:00 2001 From: zhouzhongyuan Date: Fri, 8 May 2020 18:13:58 +0800 Subject: math/big: add function example While reading the source code of the math/big package, I found the SetString function example of float type missing. Change-Id: Id8c16a58e2e24f9463e8ff38adbc98f8c418ab26 Reviewed-on: https://go-review.googlesource.com/c/go/+/232804 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Robert Griesemer --- src/math/big/example_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/math/big/example_test.go b/src/math/big/example_test.go index cfc77351d4..31ca784154 100644 --- a/src/math/big/example_test.go +++ b/src/math/big/example_test.go @@ -25,6 +25,13 @@ func ExampleInt_SetString() { // Output: 420 } +func ExampleFloat_SetString() { + f := new(big.Float) + f.SetString("3.14159") + fmt.Println(f) + // Output: 3.14159 +} + func ExampleRat_Scan() { // The Scan function is rarely used directly; // the fmt package recognizes it as an implementation of fmt.Scanner. -- cgit v1.2.3-54-g00ecf From 6f561e65b16645fea771375d3af6d7896ab025e6 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 26 Aug 2020 23:18:41 +0700 Subject: cmd/fix: remove un-used code Change-Id: Ibbd9da6fd1f3219c9c6103d8f858347ab3a21210 Reviewed-on: https://go-review.googlesource.com/c/go/+/250857 Run-TryBot: Cuong Manh Le TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. 
Mills --- src/cmd/fix/fix.go | 292 ----------------------------------------------------- 1 file changed, 292 deletions(-) diff --git a/src/cmd/fix/fix.go b/src/cmd/fix/fix.go index 2c64e9b414..b49db37571 100644 --- a/src/cmd/fix/fix.go +++ b/src/cmd/fix/fix.go @@ -7,13 +7,9 @@ package main import ( "fmt" "go/ast" - "go/parser" "go/token" - "os" "path" - "reflect" "strconv" - "strings" ) type fix struct { @@ -323,160 +319,12 @@ func declImports(gen *ast.GenDecl, path string) bool { return false } -// isPkgDot reports whether t is the expression "pkg.name" -// where pkg is an imported identifier. -func isPkgDot(t ast.Expr, pkg, name string) bool { - sel, ok := t.(*ast.SelectorExpr) - return ok && isTopName(sel.X, pkg) && sel.Sel.String() == name -} - -// isPtrPkgDot reports whether f is the expression "*pkg.name" -// where pkg is an imported identifier. -func isPtrPkgDot(t ast.Expr, pkg, name string) bool { - ptr, ok := t.(*ast.StarExpr) - return ok && isPkgDot(ptr.X, pkg, name) -} - // isTopName reports whether n is a top-level unresolved identifier with the given name. func isTopName(n ast.Expr, name string) bool { id, ok := n.(*ast.Ident) return ok && id.Name == name && id.Obj == nil } -// isName reports whether n is an identifier with the given name. -func isName(n ast.Expr, name string) bool { - id, ok := n.(*ast.Ident) - return ok && id.String() == name -} - -// isCall reports whether t is a call to pkg.name. -func isCall(t ast.Expr, pkg, name string) bool { - call, ok := t.(*ast.CallExpr) - return ok && isPkgDot(call.Fun, pkg, name) -} - -// If n is an *ast.Ident, isIdent returns it; otherwise isIdent returns nil. -func isIdent(n interface{}) *ast.Ident { - id, _ := n.(*ast.Ident) - return id -} - -// refersTo reports whether n is a reference to the same object as x. -func refersTo(n ast.Node, x *ast.Ident) bool { - id, ok := n.(*ast.Ident) - // The test of id.Name == x.Name handles top-level unresolved - // identifiers, which all have Obj == nil. 
- return ok && id.Obj == x.Obj && id.Name == x.Name -} - -// isBlank reports whether n is the blank identifier. -func isBlank(n ast.Expr) bool { - return isName(n, "_") -} - -// isEmptyString reports whether n is an empty string literal. -func isEmptyString(n ast.Expr) bool { - lit, ok := n.(*ast.BasicLit) - return ok && lit.Kind == token.STRING && len(lit.Value) == 2 -} - -func warn(pos token.Pos, msg string, args ...interface{}) { - if pos.IsValid() { - msg = "%s: " + msg - arg1 := []interface{}{fset.Position(pos).String()} - args = append(arg1, args...) - } - fmt.Fprintf(os.Stderr, msg+"\n", args...) -} - -// countUses returns the number of uses of the identifier x in scope. -func countUses(x *ast.Ident, scope []ast.Stmt) int { - count := 0 - ff := func(n interface{}) { - if n, ok := n.(ast.Node); ok && refersTo(n, x) { - count++ - } - } - for _, n := range scope { - walk(n, ff) - } - return count -} - -// rewriteUses replaces all uses of the identifier x and !x in scope -// with f(x.Pos()) and fnot(x.Pos()). -func rewriteUses(x *ast.Ident, f, fnot func(token.Pos) ast.Expr, scope []ast.Stmt) { - var lastF ast.Expr - ff := func(n interface{}) { - ptr, ok := n.(*ast.Expr) - if !ok { - return - } - nn := *ptr - - // The child node was just walked and possibly replaced. - // If it was replaced and this is a negation, replace with fnot(p). - not, ok := nn.(*ast.UnaryExpr) - if ok && not.Op == token.NOT && not.X == lastF { - *ptr = fnot(nn.Pos()) - return - } - if refersTo(nn, x) { - lastF = f(nn.Pos()) - *ptr = lastF - } - } - for _, n := range scope { - walk(n, ff) - } -} - -// assignsTo reports whether any of the code in scope assigns to or takes the address of x. 
-func assignsTo(x *ast.Ident, scope []ast.Stmt) bool { - assigned := false - ff := func(n interface{}) { - if assigned { - return - } - switch n := n.(type) { - case *ast.UnaryExpr: - // use of &x - if n.Op == token.AND && refersTo(n.X, x) { - assigned = true - return - } - case *ast.AssignStmt: - for _, l := range n.Lhs { - if refersTo(l, x) { - assigned = true - return - } - } - } - } - for _, n := range scope { - if assigned { - break - } - walk(n, ff) - } - return assigned -} - -// newPkgDot returns an ast.Expr referring to "pkg.name" at position pos. -func newPkgDot(pos token.Pos, pkg, name string) ast.Expr { - return &ast.SelectorExpr{ - X: &ast.Ident{ - NamePos: pos, - Name: pkg, - }, - Sel: &ast.Ident{ - NamePos: pos, - Name: name, - }, - } -} - // renameTop renames all references to the top-level name old. // It reports whether it makes any changes. func renameTop(f *ast.File, old, new string) bool { @@ -707,143 +555,3 @@ func rewriteImport(f *ast.File, oldPath, newPath string) (rewrote bool) { } return } - -func usesImport(f *ast.File, path string) (used bool) { - spec := importSpec(f, path) - if spec == nil { - return - } - - name := spec.Name.String() - switch name { - case "": - // If the package name is not explicitly specified, - // make an educated guess. This is not guaranteed to be correct. - lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 { - name = path - } else { - name = path[lastSlash+1:] - } - case "_", ".": - // Not sure if this import is used - err on the side of caution. - return true - } - - walk(f, func(n interface{}) { - sel, ok := n.(*ast.SelectorExpr) - if ok && isTopName(sel.X, name) { - used = true - } - }) - - return -} - -func expr(s string) ast.Expr { - x, err := parser.ParseExpr(s) - if err != nil { - panic("parsing " + s + ": " + err.Error()) - } - // Remove position information to avoid spurious newlines. 
- killPos(reflect.ValueOf(x)) - return x -} - -var posType = reflect.TypeOf(token.Pos(0)) - -func killPos(v reflect.Value) { - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - if !v.IsNil() { - killPos(v.Elem()) - } - case reflect.Slice: - n := v.Len() - for i := 0; i < n; i++ { - killPos(v.Index(i)) - } - case reflect.Struct: - n := v.NumField() - for i := 0; i < n; i++ { - f := v.Field(i) - if f.Type() == posType { - f.SetInt(0) - continue - } - killPos(f) - } - } -} - -// A Rename describes a single renaming. -type rename struct { - OldImport string // only apply rename if this import is present - NewImport string // add this import during rewrite - Old string // old name: p.T or *p.T - New string // new name: p.T or *p.T -} - -func renameFix(tab []rename) func(*ast.File) bool { - return func(f *ast.File) bool { - return renameFixTab(f, tab) - } -} - -func parseName(s string) (ptr bool, pkg, nam string) { - i := strings.Index(s, ".") - if i < 0 { - panic("parseName: invalid name " + s) - } - if strings.HasPrefix(s, "*") { - ptr = true - s = s[1:] - i-- - } - pkg = s[:i] - nam = s[i+1:] - return -} - -func renameFixTab(f *ast.File, tab []rename) bool { - fixed := false - added := map[string]bool{} - check := map[string]bool{} - for _, t := range tab { - if !imports(f, t.OldImport) { - continue - } - optr, opkg, onam := parseName(t.Old) - walk(f, func(n interface{}) { - np, ok := n.(*ast.Expr) - if !ok { - return - } - x := *np - if optr { - p, ok := x.(*ast.StarExpr) - if !ok { - return - } - x = p.X - } - if !isPkgDot(x, opkg, onam) { - return - } - if t.NewImport != "" && !added[t.NewImport] { - addImport(f, t.NewImport) - added[t.NewImport] = true - } - *np = expr(t.New) - check[t.OldImport] = true - fixed = true - }) - } - - for ipath := range check { - if !usesImport(f, ipath) { - deleteImport(f, ipath) - } - } - return fixed -} -- cgit v1.2.3-54-g00ecf From d4986e0e1d6e03e1b92e905ca5e01b4c223fbeb3 Mon Sep 17 00:00:00 2001 From: Michael Matloob 
Date: Mon, 16 Dec 2019 17:18:06 -0500 Subject: cmd/go/internal/modload: reject empty go.mod files Don't add a module declaration to a go.mod file when loading a module. Require a user to call go mod init or to add the module declaration themselves. Fixes #35070 Change-Id: If5543580d3c1cfee1fc528eb853b872c4173ca82 Reviewed-on: https://go-review.googlesource.com/c/go/+/234107 Run-TryBot: Michael Matloob TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. Mills --- src/cmd/go/internal/modload/init.go | 10 +++------- src/cmd/go/testdata/script/mod_find.txt | 5 +++++ src/cmd/go/testdata/script/mod_invalid_path.txt | 12 ++++++++++++ 3 files changed, 20 insertions(+), 7 deletions(-) create mode 100644 src/cmd/go/testdata/script/mod_invalid_path.txt diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 7f493104b1..af23647cd4 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -368,13 +368,9 @@ func InitMod(ctx context.Context) { modFile = f index = indexModFile(data, f, fixed) - if len(f.Syntax.Stmt) == 0 || f.Module == nil { - // Empty mod file. Must add module path. - path, err := findModulePath(modRoot) - if err != nil { - base.Fatalf("go: %v", err) - } - f.AddModuleStmt(path) + if f.Module == nil { + // No module declaration. Must add module path. + base.Fatalf("go: no module declaration in go.mod.\n\tRun 'go mod edit -module=example.com/mod' to specify the module path.") } if len(f.Syntax.Stmt) == 1 && f.Module != nil { diff --git a/src/cmd/go/testdata/script/mod_find.txt b/src/cmd/go/testdata/script/mod_find.txt index 7fbe9fb7fe..9468acfd33 100644 --- a/src/cmd/go/testdata/script/mod_find.txt +++ b/src/cmd/go/testdata/script/mod_find.txt @@ -19,6 +19,11 @@ go mod init stderr 'module example.com/x/y$' rm go.mod +# go mod init rejects a zero-length go.mod file +cp $devnull go.mod # can't use touch to create it because Windows +! 
go mod init +stderr 'go.mod already exists' + # Module path from Godeps/Godeps.json overrides GOPATH. cd $GOPATH/src/example.com/x/y/z go mod init diff --git a/src/cmd/go/testdata/script/mod_invalid_path.txt b/src/cmd/go/testdata/script/mod_invalid_path.txt new file mode 100644 index 0000000000..1ab418a075 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_invalid_path.txt @@ -0,0 +1,12 @@ +# Test that mod files with missing paths produce an error. + +# Test that go list fails on a go.mod with no module declaration. +cd $WORK/gopath/src/mod +! go list . +stderr '^go: no module declaration in go.mod.\n\tRun ''go mod edit -module=example.com/mod'' to specify the module path.$' + +-- mod/go.mod -- + +-- mod/foo.go -- +package foo + -- cgit v1.2.3-54-g00ecf From bf869c65d1a96da5db78a891430ea3acd7ddf1ab Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Mon, 13 Apr 2020 16:33:28 -0400 Subject: cmd/go: update vendored golang.org/x/mod This CL vendors go.mod parser changes for the retract directive. For #24031 Change-Id: Ief19b0eca4c7956eceadc893bb209da7e9ecf22c Reviewed-on: https://go-review.googlesource.com/c/go/+/228377 Run-TryBot: Jay Conrod TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. 
Mills Reviewed-by: Michael Matloob --- src/cmd/go.mod | 2 +- src/cmd/go.sum | 5 +- src/cmd/vendor/golang.org/x/mod/modfile/read.go | 8 + src/cmd/vendor/golang.org/x/mod/modfile/rule.go | 243 ++++++++++++++++++++++-- src/cmd/vendor/modules.txt | 2 +- 5 files changed, 235 insertions(+), 25 deletions(-) diff --git a/src/cmd/go.mod b/src/cmd/go.mod index 21670b9996..5c5c99e3cd 100644 --- a/src/cmd/go.mod +++ b/src/cmd/go.mod @@ -7,7 +7,7 @@ require ( github.com/ianlancetaylor/demangle v0.0.0-20200414190113-039b1ae3a340 // indirect golang.org/x/arch v0.0.0-20200511175325-f7c78586839d golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 - golang.org/x/mod v0.3.1-0.20200625141748-0b26df4a2231 + golang.org/x/mod v0.3.1-0.20200824162228-c0d644d00ab8 golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3 // indirect golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 golang.org/x/xerrors v0.0.0-20200806184451-1a77d5e9f316 // indirect diff --git a/src/cmd/go.sum b/src/cmd/go.sum index 1b5ef515c2..69cebe1b23 100644 --- a/src/cmd/go.sum +++ b/src/cmd/go.sum @@ -14,8 +14,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.1-0.20200625141748-0b26df4a2231 h1:R11LxkoUvECaAHdM5/ZOevSR7n+016EgTw8nbE1l+XM= -golang.org/x/mod v0.3.1-0.20200625141748-0b26df4a2231/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200824162228-c0d644d00ab8 h1:Qbq3laTJZip3mEOreFwHF81RGkkhIvmraRMINHNyWHE= +golang.org/x/mod v0.3.1-0.20200824162228-c0d644d00ab8/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -32,7 +32,6 @@ golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGg golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200806184451-1a77d5e9f316 h1:Jhw4VC65LaKnpq9FvcK+a8ZzrFm3D+UygvMMrhkOw70= golang.org/x/xerrors v0.0.0-20200806184451-1a77d5e9f316/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/src/cmd/vendor/golang.org/x/mod/modfile/read.go b/src/cmd/vendor/golang.org/x/mod/modfile/read.go index c1f2008ee4..2a961ca81c 100644 --- a/src/cmd/vendor/golang.org/x/mod/modfile/read.go +++ b/src/cmd/vendor/golang.org/x/mod/modfile/read.go @@ -477,9 +477,17 @@ func (in *input) startToken() { // endToken marks the end of an input token. // It records the actual token string in tok.text. +// A single trailing newline (LF or CRLF) will be removed from comment tokens. 
func (in *input) endToken(kind tokenKind) { in.token.kind = kind text := string(in.tokenStart[:len(in.tokenStart)-len(in.remaining)]) + if kind.isComment() { + if strings.HasSuffix(text, "\r\n") { + text = text[:len(text)-2] + } else { + text = strings.TrimSuffix(text, "\n") + } + } in.token.text = text in.token.endPos = in.pos } diff --git a/src/cmd/vendor/golang.org/x/mod/modfile/rule.go b/src/cmd/vendor/golang.org/x/mod/modfile/rule.go index 91ca6828df..83398dda5d 100644 --- a/src/cmd/vendor/golang.org/x/mod/modfile/rule.go +++ b/src/cmd/vendor/golang.org/x/mod/modfile/rule.go @@ -30,6 +30,7 @@ import ( "golang.org/x/mod/internal/lazyregexp" "golang.org/x/mod/module" + "golang.org/x/mod/semver" ) // A File is the parsed, interpreted form of a go.mod file. @@ -39,6 +40,7 @@ type File struct { Require []*Require Exclude []*Exclude Replace []*Replace + Retract []*Retract Syntax *FileSyntax } @@ -75,6 +77,21 @@ type Replace struct { Syntax *Line } +// A Retract is a single retract statement. +type Retract struct { + VersionInterval + Rationale string + Syntax *Line +} + +// A VersionInterval represents a range of versions with upper and lower bounds. +// Intervals are closed: both bounds are included. When Low is equal to High, +// the interval may refer to a single version ('v1.2.3') or an interval +// ('[v1.2.3, v1.2.3]'); both have the same representation. 
+type VersionInterval struct { + Low, High string +} + func (f *File) AddModuleStmt(path string) error { if f.Syntax == nil { f.Syntax = new(FileSyntax) @@ -138,7 +155,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File for _, x := range fs.Stmt { switch x := x.(type) { case *Line: - f.add(&errs, x, x.Token[0], x.Token[1:], fix, strict) + f.add(&errs, nil, x, x.Token[0], x.Token[1:], fix, strict) case *LineBlock: if len(x.Token) > 1 { @@ -161,9 +178,9 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File }) } continue - case "module", "require", "exclude", "replace": + case "module", "require", "exclude", "replace", "retract": for _, l := range x.Line { - f.add(&errs, l, x.Token[0], l.Token, fix, strict) + f.add(&errs, x, l, x.Token[0], l.Token, fix, strict) } } } @@ -177,7 +194,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)$`) -func (f *File) add(errs *ErrorList, line *Line, verb string, args []string, fix VersionFixer, strict bool) { +func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args []string, fix VersionFixer, strict bool) { // If strict is false, this module is a dependency. // We ignore all unknown directives as well as main-module-only // directives like replace and exclude. It will work better for @@ -186,7 +203,7 @@ func (f *File) add(errs *ErrorList, line *Line, verb string, args []string, fix // and simply ignore those statements. 
if !strict { switch verb { - case "module", "require", "go": + case "go", "module", "retract", "require": // want these even for dependency go.mods default: return @@ -232,6 +249,7 @@ func (f *File) add(errs *ErrorList, line *Line, verb string, args []string, fix f.Go = &Go{Syntax: line} f.Go.Version = args[0] + case "module": if f.Module != nil { errorf("repeated module statement") @@ -248,6 +266,7 @@ func (f *File) add(errs *ErrorList, line *Line, verb string, args []string, fix return } f.Module.Mod = module.Version{Path: s} + case "require", "exclude": if len(args) != 2 { errorf("usage: %s module/path v1.2.3", verb) @@ -284,6 +303,7 @@ func (f *File) add(errs *ErrorList, line *Line, verb string, args []string, fix Syntax: line, }) } + case "replace": arrow := 2 if len(args) >= 2 && args[1] == "=>" { @@ -347,6 +367,33 @@ func (f *File) add(errs *ErrorList, line *Line, verb string, args []string, fix New: module.Version{Path: ns, Version: nv}, Syntax: line, }) + + case "retract": + rationale := parseRetractRationale(block, line) + vi, err := parseVersionInterval(verb, &args, fix) + if err != nil { + if strict { + wrapError(err) + return + } else { + // Only report errors parsing intervals in the main module. We may + // support additional syntax in the future, such as open and half-open + // intervals. Those can't be supported now, because they break the + // go.mod parser, even in lax mode. + return + } + } + if len(args) > 0 && strict { + // In the future, there may be additional information after the version. 
+ errorf("unexpected token after version: %q", args[0]) + return + } + retract := &Retract{ + VersionInterval: vi, + Rationale: rationale, + Syntax: line, + } + f.Retract = append(f.Retract, retract) } } @@ -444,6 +491,53 @@ func AutoQuote(s string) string { return s } +func parseVersionInterval(verb string, args *[]string, fix VersionFixer) (VersionInterval, error) { + toks := *args + if len(toks) == 0 || toks[0] == "(" { + return VersionInterval{}, fmt.Errorf("expected '[' or version") + } + if toks[0] != "[" { + v, err := parseVersion(verb, "", &toks[0], fix) + if err != nil { + return VersionInterval{}, err + } + *args = toks[1:] + return VersionInterval{Low: v, High: v}, nil + } + toks = toks[1:] + + if len(toks) == 0 { + return VersionInterval{}, fmt.Errorf("expected version after '['") + } + low, err := parseVersion(verb, "", &toks[0], fix) + if err != nil { + return VersionInterval{}, err + } + toks = toks[1:] + + if len(toks) == 0 || toks[0] != "," { + return VersionInterval{}, fmt.Errorf("expected ',' after version") + } + toks = toks[1:] + + if len(toks) == 0 { + return VersionInterval{}, fmt.Errorf("expected version after ','") + } + high, err := parseVersion(verb, "", &toks[0], fix) + if err != nil { + return VersionInterval{}, err + } + toks = toks[1:] + + if len(toks) == 0 || toks[0] != "]" { + return VersionInterval{}, fmt.Errorf("expected ']' after version") + } + toks = toks[1:] + + *args = toks + return VersionInterval{Low: low, High: high}, nil +} + func parseString(s *string) (string, error) { t := *s if strings.HasPrefix(t, `"`) { @@ -461,6 +555,27 @@ func parseString(s *string) (string, error) { return t, nil } +// parseRetractRationale extracts the rationale for a retract directive from the +// surrounding comments. If the line does not have comments and is part of a +// block that does have comments, the block's comments are used. 
+func parseRetractRationale(block *LineBlock, line *Line) string { + comments := line.Comment() + if block != nil && len(comments.Before) == 0 && len(comments.Suffix) == 0 { + comments = block.Comment() + } + groups := [][]Comment{comments.Before, comments.Suffix} + var lines []string + for _, g := range groups { + for _, c := range g { + if !strings.HasPrefix(c.Token, "//") { + continue // blank line + } + lines = append(lines, strings.TrimSpace(strings.TrimPrefix(c.Token, "//"))) + } + } + return strings.Join(lines, "\n") +} + type ErrorList []Error func (e ErrorList) Error() string { @@ -494,6 +609,8 @@ func (e *Error) Error() string { var directive string if e.ModPath != "" { directive = fmt.Sprintf("%s %s: ", e.Verb, e.ModPath) + } else if e.Verb != "" { + directive = fmt.Sprintf("%s: ", e.Verb) } return pos + directive + e.Err.Error() @@ -585,6 +702,15 @@ func (f *File) Cleanup() { } f.Replace = f.Replace[:w] + w = 0 + for _, r := range f.Retract { + if r.Low != "" || r.High != "" { + f.Retract[w] = r + w++ + } + } + f.Retract = f.Retract[:w] + f.Syntax.Cleanup() } @@ -778,6 +904,34 @@ func (f *File) DropReplace(oldPath, oldVers string) error { return nil } +func (f *File) AddRetract(vi VersionInterval, rationale string) error { + r := &Retract{ + VersionInterval: vi, + } + if vi.Low == vi.High { + r.Syntax = f.Syntax.addLine(nil, "retract", AutoQuote(vi.Low)) + } else { + r.Syntax = f.Syntax.addLine(nil, "retract", "[", AutoQuote(vi.Low), ",", AutoQuote(vi.High), "]") + } + if rationale != "" { + for _, line := range strings.Split(rationale, "\n") { + com := Comment{Token: "// " + line} + r.Syntax.Comment().Before = append(r.Syntax.Comment().Before, com) + } + } + return nil +} + +func (f *File) DropRetract(vi VersionInterval) error { + for _, r := range f.Retract { + if r.VersionInterval == vi { + f.Syntax.removeLine(r.Syntax) + *r = Retract{} + } + } + return nil +} + func (f *File) SortBlocks() { f.removeDups() // otherwise sorting is unsafe @@ -786,28 
+940,38 @@ func (f *File) SortBlocks() { if !ok { continue } - sort.Slice(block.Line, func(i, j int) bool { - li := block.Line[i] - lj := block.Line[j] - for k := 0; k < len(li.Token) && k < len(lj.Token); k++ { - if li.Token[k] != lj.Token[k] { - return li.Token[k] < lj.Token[k] - } - } - return len(li.Token) < len(lj.Token) + less := lineLess + if block.Token[0] == "retract" { + less = lineRetractLess + } + sort.SliceStable(block.Line, func(i, j int) bool { + return less(block.Line[i], block.Line[j]) }) } } +// removeDups removes duplicate exclude and replace directives. +// +// Earlier exclude directives take priority. +// +// Later replace directives take priority. +// +// require directives are not de-duplicated. That's left up to higher-level +// logic (MVS). +// +// retract directives are not de-duplicated since comments are +// meaningful, and versions may be retracted multiple times. func (f *File) removeDups() { - have := make(map[module.Version]bool) kill := make(map[*Line]bool) + + // Remove duplicate excludes. + haveExclude := make(map[module.Version]bool) for _, x := range f.Exclude { - if have[x.Mod] { + if haveExclude[x.Mod] { kill[x.Syntax] = true continue } - have[x.Mod] = true + haveExclude[x.Mod] = true } var excl []*Exclude for _, x := range f.Exclude { @@ -817,15 +981,16 @@ func (f *File) removeDups() { } f.Exclude = excl - have = make(map[module.Version]bool) + // Remove duplicate replacements. // Later replacements take priority over earlier ones. + haveReplace := make(map[module.Version]bool) for i := len(f.Replace) - 1; i >= 0; i-- { x := f.Replace[i] - if have[x.Old] { + if haveReplace[x.Old] { kill[x.Syntax] = true continue } - have[x.Old] = true + haveReplace[x.Old] = true } var repl []*Replace for _, x := range f.Replace { @@ -835,6 +1000,9 @@ func (f *File) removeDups() { } f.Replace = repl + // Duplicate require and retract directives are not removed. + + // Drop killed statements from the syntax tree. 
var stmts []Expr for _, stmt := range f.Syntax.Stmt { switch stmt := stmt.(type) { @@ -858,3 +1026,38 @@ func (f *File) removeDups() { } f.Syntax.Stmt = stmts } + +// lineLess returns whether li should be sorted before lj. It sorts +// lexicographically without assigning any special meaning to tokens. +func lineLess(li, lj *Line) bool { + for k := 0; k < len(li.Token) && k < len(lj.Token); k++ { + if li.Token[k] != lj.Token[k] { + return li.Token[k] < lj.Token[k] + } + } + return len(li.Token) < len(lj.Token) +} + +// lineRetractLess returns whether li should be sorted before lj for lines in +// a "retract" block. It treats each line as a version interval. Single versions +// are compared as if they were intervals with the same low and high version. +// Intervals are sorted in descending order, first by low version, then by +// high version, using semver.Compare. +func lineRetractLess(li, lj *Line) bool { + interval := func(l *Line) VersionInterval { + if len(l.Token) == 1 { + return VersionInterval{Low: l.Token[0], High: l.Token[0]} + } else if len(l.Token) == 5 && l.Token[0] == "[" && l.Token[2] == "," && l.Token[4] == "]" { + return VersionInterval{Low: l.Token[1], High: l.Token[3]} + } else { + // Line in unknown format. Treat as an invalid version. 
+ return VersionInterval{} + } + } + vii := interval(li) + vij := interval(lj) + if cmp := semver.Compare(vii.Low, vij.Low); cmp != 0 { + return cmp > 0 + } + return semver.Compare(vii.High, vij.High) > 0 +} diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt index 7272f04ff3..ab2f81a66b 100644 --- a/src/cmd/vendor/modules.txt +++ b/src/cmd/vendor/modules.txt @@ -29,7 +29,7 @@ golang.org/x/arch/x86/x86asm golang.org/x/crypto/ed25519 golang.org/x/crypto/ed25519/internal/edwards25519 golang.org/x/crypto/ssh/terminal -# golang.org/x/mod v0.3.1-0.20200625141748-0b26df4a2231 +# golang.org/x/mod v0.3.1-0.20200824162228-c0d644d00ab8 ## explicit golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile -- cgit v1.2.3-54-g00ecf From db821b54d1a8dffa85a9a3cf599f83a19184f020 Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Wed, 15 Apr 2020 12:08:24 -0400 Subject: cmd/go/internal/modload: refactor version filtering for exclude Query and other functions now accept an "allowed" function that returns an error (previously, the function returned a bool). If the error is equivalent to ErrDisallowed, it indicates the version is excluded (or, in a future CL, retracted). This provides predicates a chance to explain why a version is not allowed. When a query refers to a specific revision (by version, branch, tag, or commit name), most callers will not use the Allowed predicate. This allows commands like 'go list -m' and 'go mod download' to handle disallowed versions when explicitly requested. 'go get' will reject excluded versions though. When a query does not refer to a specific revision (for example, "latest"), disallowed versions will not be considered. When an "allowed" predicate returns an error not equivalent to ErrDisallowed, it may be ignored or returned, depending on the case. This never happens for excluded versions, but it may happen for retractions (in a future CL). This indicates a list of retractions could not be loaded. 
This frequently happens when offline, and it shouldn't cause a fatal or warning in most cases. For #24031 Change-Id: I4df6fb6bd60e3e0259e5b3b4bf71a307b4b32298 Reviewed-on: https://go-review.googlesource.com/c/go/+/228379 Run-TryBot: Jay Conrod TryBot-Result: Gobot Gobot Reviewed-by: Michael Matloob Reviewed-by: Bryan C. Mills --- src/cmd/go/internal/modget/get.go | 6 +- src/cmd/go/internal/modload/build.go | 6 +- src/cmd/go/internal/modload/import.go | 2 +- src/cmd/go/internal/modload/list.go | 9 +- src/cmd/go/internal/modload/modfile.go | 38 ++++++-- src/cmd/go/internal/modload/mvs.go | 28 ++++-- src/cmd/go/internal/modload/query.go | 111 +++++++++++++++-------- src/cmd/go/internal/modload/query_test.go | 8 +- src/cmd/go/testdata/script/mod_query_exclude.txt | 40 ++++++-- 9 files changed, 177 insertions(+), 71 deletions(-) diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index ee9757912b..06d59d9e0d 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -812,7 +812,7 @@ func getQuery(ctx context.Context, path, vers string, prevM module.Version, forc } } - info, err := modload.Query(ctx, path, vers, prevM.Version, modload.Allowed) + info, err := modload.Query(ctx, path, vers, prevM.Version, modload.CheckAllowed) if err == nil { if info.Version != vers && info.Version != prevM.Version { logOncef("go: %s %s => %s", path, vers, info.Version) @@ -838,7 +838,7 @@ func getQuery(ctx context.Context, path, vers string, prevM module.Version, forc // If it turns out to only exist as a module, we can detect the resulting // PackageNotInModuleError and avoid a second round-trip through (potentially) // all of the configured proxies. - results, err := modload.QueryPattern(ctx, path, vers, modload.Allowed) + results, err := modload.QueryPattern(ctx, path, vers, modload.CheckAllowed) if err != nil { // If the path doesn't contain a wildcard, check whether it was actually a // module path instead. 
If so, return that. @@ -994,7 +994,7 @@ func (u *upgrader) Upgrade(m module.Version) (module.Version, error) { // If we're querying "upgrade" or "patch", Query will compare the current // version against the chosen version and will return the current version // if it is newer. - info, err := modload.Query(context.TODO(), m.Path, string(getU), m.Version, modload.Allowed) + info, err := modload.Query(context.TODO(), m.Path, string(getU), m.Version, modload.CheckAllowed) if err != nil { // Report error but return m, to let version selection continue. // (Reporting the error will fail the command at the next base.ExitIfErrors.) diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index 7e182b4a4d..a29e085875 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -90,7 +90,7 @@ func addUpdate(ctx context.Context, m *modinfo.ModulePublic) { return } - if info, err := Query(ctx, m.Path, "upgrade", m.Version, Allowed); err == nil && semver.Compare(info.Version, m.Version) > 0 { + if info, err := Query(ctx, m.Path, "upgrade", m.Version, CheckAllowed); err == nil && semver.Compare(info.Version, m.Version) > 0 { m.Update = &modinfo.ModulePublic{ Path: m.Path, Version: info.Version, @@ -100,8 +100,8 @@ func addUpdate(ctx context.Context, m *modinfo.ModulePublic) { } // addVersions fills in m.Versions with the list of known versions. 
-func addVersions(m *modinfo.ModulePublic) { - m.Versions, _ = versions(m.Path) +func addVersions(ctx context.Context, m *modinfo.ModulePublic) { + m.Versions, _ = versions(ctx, m.Path, CheckAllowed) } func moduleInfo(ctx context.Context, m module.Version, fromBuildList bool) *modinfo.ModulePublic { diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go index 5c51a79124..6459e716b7 100644 --- a/src/cmd/go/internal/modload/import.go +++ b/src/cmd/go/internal/modload/import.go @@ -286,7 +286,7 @@ func Import(ctx context.Context, path string) (m module.Version, dir string, err fmt.Fprintf(os.Stderr, "go: finding module for package %s\n", path) - candidates, err := QueryPackage(ctx, path, "latest", Allowed) + candidates, err := QueryPackage(ctx, path, "latest", CheckAllowed) if err != nil { if errors.Is(err, os.ErrNotExist) { // Return "cannot find module providing package […]" instead of whatever diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go index 7bf4e86c8d..2f549540a6 100644 --- a/src/cmd/go/internal/modload/list.go +++ b/src/cmd/go/internal/modload/list.go @@ -34,7 +34,7 @@ func ListModules(ctx context.Context, args []string, listU, listVersions bool) [ addUpdate(ctx, m) } if listVersions { - addVersions(m) + addVersions(ctx, m) } <-sem }() @@ -83,7 +83,12 @@ func listModules(ctx context.Context, args []string, listVersions bool) []*modin } } - info, err := Query(ctx, path, vers, current, nil) + allowed := CheckAllowed + if IsRevisionQuery(vers) { + // Allow excluded versions if the user asked for a specific revision. 
+ allowed = nil + } + info, err := Query(ctx, path, vers, current, allowed) if err != nil { mods = append(mods, &modinfo.ModulePublic{ Path: path, diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go index c04e2add13..aed1f0a36b 100644 --- a/src/cmd/go/internal/modload/modfile.go +++ b/src/cmd/go/internal/modload/modfile.go @@ -5,15 +5,17 @@ package modload import ( + "context" + "errors" + "fmt" + "path/filepath" + "sync" + "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/lockedfile" "cmd/go/internal/modfetch" "cmd/go/internal/par" - "errors" - "fmt" - "path/filepath" - "sync" "golang.org/x/mod/modfile" "golang.org/x/mod/module" @@ -41,11 +43,33 @@ type requireMeta struct { indirect bool } -// Allowed reports whether module m is allowed (not excluded) by the main module's go.mod. -func Allowed(m module.Version) bool { - return index == nil || !index.exclude[m] +// CheckAllowed returns an error equivalent to ErrDisallowed if m is excluded by +// the main module's go.mod. Most version queries use this to filter out +// versions that should not be used. +func CheckAllowed(ctx context.Context, m module.Version) error { + return CheckExclusions(ctx, m) +} + +// ErrDisallowed is returned by version predicates passed to Query and similar +// functions to indicate that a version should not be considered. +var ErrDisallowed = errors.New("disallowed module version") + +// CheckExclusions returns an error equivalent to ErrDisallowed if module m is +// excluded by the main module's go.mod file. 
+func CheckExclusions(ctx context.Context, m module.Version) error { + if index != nil && index.exclude[m] { + return module.VersionError(m, errExcluded) + } + return nil } +var errExcluded = &excludedError{} + +type excludedError struct{} + +func (e *excludedError) Error() string { return "excluded by go.mod" } +func (e *excludedError) Is(err error) bool { return err == ErrDisallowed } + // Replacement returns the replacement for mod, if any, from go.mod. // If there is no replacement for mod, Replacement returns // a module.Version with Path == "". diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go index 6b6ad945e4..d023ab5094 100644 --- a/src/cmd/go/internal/modload/mvs.go +++ b/src/cmd/go/internal/modload/mvs.go @@ -6,6 +6,7 @@ package modload import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -73,16 +74,29 @@ func (*mvsReqs) Upgrade(m module.Version) (module.Version, error) { return m, nil } -func versions(path string) ([]string, error) { +func versions(ctx context.Context, path string, allowed AllowedFunc) ([]string, error) { // Note: modfetch.Lookup and repo.Versions are cached, // so there's no need for us to add extra caching here. 
var versions []string err := modfetch.TryProxies(func(proxy string) error { repo, err := modfetch.Lookup(proxy, path) - if err == nil { - versions, err = repo.Versions("") + if err != nil { + return err } - return err + allVersions, err := repo.Versions("") + if err != nil { + return err + } + allowedVersions := make([]string, 0, len(allVersions)) + for _, v := range allVersions { + if err := allowed(ctx, module.Version{Path: path, Version: v}); err == nil { + allowedVersions = append(allowedVersions, v) + } else if !errors.Is(err, ErrDisallowed) { + return err + } + } + versions = allowedVersions + return nil }) return versions, err } @@ -90,7 +104,8 @@ func versions(path string) ([]string, error) { // Previous returns the tagged version of m.Path immediately prior to // m.Version, or version "none" if no prior version is tagged. func (*mvsReqs) Previous(m module.Version) (module.Version, error) { - list, err := versions(m.Path) + // TODO(golang.org/issue/38714): thread tracing context through MVS. + list, err := versions(context.TODO(), m.Path, CheckAllowed) if err != nil { return module.Version{}, err } @@ -105,7 +120,8 @@ func (*mvsReqs) Previous(m module.Version) (module.Version, error) { // It is only used by the exclusion processing in the Required method, // not called directly by MVS. func (*mvsReqs) next(m module.Version) (module.Version, error) { - list, err := versions(m.Path) + // TODO(golang.org/issue/38714): thread tracing context through MVS. + list, err := versions(context.TODO(), m.Path, CheckAllowed) if err != nil { return module.Version{}, err } diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go index e82eb1506f..f67a738677 100644 --- a/src/cmd/go/internal/modload/query.go +++ b/src/cmd/go/internal/modload/query.go @@ -52,12 +52,16 @@ import ( // version that would otherwise be chosen. This prevents accidental downgrades // from newer pre-release or development versions. 
// -// If the allowed function is non-nil, Query excludes any versions for which -// allowed returns false. +// The allowed function (which may be nil) is used to filter out unsuitable +// versions (see AllowedFunc documentation for details). If the query refers to +// a specific revision (for example, "master"; see IsRevisionQuery), and the +// revision is disallowed by allowed, Query returns the error. If the query +// does not refer to a specific revision (for example, "latest"), Query +// acts as if versions disallowed by allowed do not exist. // // If path is the path of the main module and the query is "latest", // Query returns Target.Version as the version. -func Query(ctx context.Context, path, query, current string, allowed func(module.Version) bool) (*modfetch.RevInfo, error) { +func Query(ctx context.Context, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) { var info *modfetch.RevInfo err := modfetch.TryProxies(func(proxy string) (err error) { info, err = queryProxy(ctx, proxy, path, query, current, allowed) @@ -66,6 +70,17 @@ func Query(ctx context.Context, path, query, current string, allowed func(module return info, err } +// AllowedFunc is used by Query and other functions to filter out unsuitable +// versions, for example, those listed in exclude directives in the main +// module's go.mod file. +// +// An AllowedFunc returns an error equivalent to ErrDisallowed for an unsuitable +// version. Any other error indicates the function was unable to determine +// whether the version should be allowed, for example, the function was unable +// to fetch or parse a go.mod file containing retractions. Typically, errors +// other than ErrDisallowed may be ignored. 
+type AllowedFunc func(context.Context, module.Version) error + var errQueryDisabled error = queryDisabledError{} type queryDisabledError struct{} @@ -77,7 +92,7 @@ func (queryDisabledError) Error() string { return fmt.Sprintf("cannot query module due to -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason) } -func queryProxy(ctx context.Context, proxy, path, query, current string, allowed func(module.Version) bool) (*modfetch.RevInfo, error) { +func queryProxy(ctx context.Context, proxy, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) { ctx, span := trace.StartSpan(ctx, "modload.queryProxy "+path+" "+query) defer span.Done() @@ -88,7 +103,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed return nil, errQueryDisabled } if allowed == nil { - allowed = func(module.Version) bool { return true } + allowed = func(context.Context, module.Version) error { return nil } } // Parse query to detect parse errors (and possibly handle query) @@ -104,7 +119,8 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed return module.CheckPathMajor(v, pathMajor) == nil } var ( - ok func(module.Version) bool + match = func(m module.Version) bool { return true } + prefix string preferOlder bool mayUseLatest bool @@ -112,21 +128,18 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed ) switch { case query == "latest": - ok = allowed mayUseLatest = true case query == "upgrade": - ok = allowed mayUseLatest = true case query == "patch": if current == "" { - ok = allowed mayUseLatest = true } else { prefix = semver.MajorMinor(current) - ok = func(m module.Version) bool { - return matchSemverPrefix(prefix, m.Version) && allowed(m) + match = func(m module.Version) bool { + return matchSemverPrefix(prefix, m.Version) } } @@ -139,8 +152,8 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed // Refuse to say whether <=v1.2 allows v1.2.3 (remember, 
@v1.2 might mean v1.2.3). return nil, fmt.Errorf("ambiguous semantic version %q in range %q", v, query) } - ok = func(m module.Version) bool { - return semver.Compare(m.Version, v) <= 0 && allowed(m) + match = func(m module.Version) bool { + return semver.Compare(m.Version, v) <= 0 } if !matchesMajor(v) { preferIncompatible = true @@ -151,8 +164,8 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed if !semver.IsValid(v) { return badVersion(v) } - ok = func(m module.Version) bool { - return semver.Compare(m.Version, v) < 0 && allowed(m) + match = func(m module.Version) bool { + return semver.Compare(m.Version, v) < 0 } if !matchesMajor(v) { preferIncompatible = true @@ -163,8 +176,8 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed if !semver.IsValid(v) { return badVersion(v) } - ok = func(m module.Version) bool { - return semver.Compare(m.Version, v) >= 0 && allowed(m) + match = func(m module.Version) bool { + return semver.Compare(m.Version, v) >= 0 } preferOlder = true if !matchesMajor(v) { @@ -180,8 +193,8 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed // Refuse to say whether >v1.2 allows v1.2.3 (remember, @v1.2 might mean v1.2.3). return nil, fmt.Errorf("ambiguous semantic version %q in range %q", v, query) } - ok = func(m module.Version) bool { - return semver.Compare(m.Version, v) > 0 && allowed(m) + match = func(m module.Version) bool { + return semver.Compare(m.Version, v) > 0 } preferOlder = true if !matchesMajor(v) { @@ -189,8 +202,8 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed } case semver.IsValid(query) && isSemverPrefix(query): - ok = func(m module.Version) bool { - return matchSemverPrefix(query, m.Version) && allowed(m) + match = func(m module.Version) bool { + return matchSemverPrefix(query, m.Version) } prefix = query + "." 
if !matchesMajor(query) { @@ -219,8 +232,8 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed return nil, queryErr } } - if !allowed(module.Version{Path: path, Version: info.Version}) { - return nil, fmt.Errorf("%s@%s excluded", path, info.Version) + if err := allowed(ctx, module.Version{Path: path, Version: info.Version}); errors.Is(err, ErrDisallowed) { + return nil, err } return info, nil } @@ -229,8 +242,8 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed if query != "latest" { return nil, fmt.Errorf("can't query specific version (%q) for the main module (%s)", query, path) } - if !allowed(Target) { - return nil, fmt.Errorf("internal error: main module version is not allowed") + if err := allowed(ctx, Target); err != nil { + return nil, fmt.Errorf("internal error: main module version is not allowed: %w", err) } return &modfetch.RevInfo{Version: Target.Version}, nil } @@ -248,7 +261,13 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed if err != nil { return nil, err } - releases, prereleases, err := filterVersions(ctx, path, versions, ok, preferIncompatible) + matchAndAllowed := func(ctx context.Context, m module.Version) error { + if !match(m) { + return ErrDisallowed + } + return allowed(ctx, m) + } + releases, prereleases, err := filterVersions(ctx, path, versions, matchAndAllowed, preferIncompatible) if err != nil { return nil, err } @@ -288,11 +307,12 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed } if mayUseLatest { - // Special case for "latest": if no tags match, use latest commit in repo, - // provided it is not excluded. + // Special case for "latest": if no tags match, use latest commit in repo + // if it is allowed. 
latest, err := repo.Latest() if err == nil { - if allowed(module.Version{Path: path, Version: latest.Version}) { + m := module.Version{Path: path, Version: latest.Version} + if err := allowed(ctx, m); !errors.Is(err, ErrDisallowed) { return lookup(latest.Version) } } else if !errors.Is(err, os.ErrNotExist) { @@ -303,6 +323,22 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed return nil, &NoMatchingVersionError{query: query, current: current} } +// IsRevisionQuery returns true if vers is a version query that may refer to +// a particular version or revision in a repository like "v1.0.0", "master", +// or "0123abcd". IsRevisionQuery returns false if vers is a query that +// chooses from among available versions like "latest" or ">v1.0.0". +func IsRevisionQuery(vers string) bool { + if vers == "latest" || + vers == "upgrade" || + vers == "patch" || + strings.HasPrefix(vers, "<") || + strings.HasPrefix(vers, ">") || + (semver.IsValid(vers) && isSemverPrefix(vers)) { + return false + } + return true +} + // isSemverPrefix reports whether v is a semantic version prefix: v1 or v1.2 (not v1.2.3). // The caller is assumed to have checked that semver.IsValid(v) is true. func isSemverPrefix(v string) bool { @@ -329,13 +365,16 @@ func matchSemverPrefix(p, v string) bool { // filterVersions classifies versions into releases and pre-releases, filtering // out: -// 1. versions that do not satisfy the 'ok' predicate, and +// 1. versions that do not satisfy the 'allowed' predicate, and // 2. "+incompatible" versions, if a compatible one satisfies the predicate // and the incompatible version is not preferred. -func filterVersions(ctx context.Context, path string, versions []string, ok func(module.Version) bool, preferIncompatible bool) (releases, prereleases []string, err error) { +// +// If the allowed predicate returns an error not equivalent to ErrDisallowed, +// filterVersions returns that error. 
+func filterVersions(ctx context.Context, path string, versions []string, allowed AllowedFunc, preferIncompatible bool) (releases, prereleases []string, err error) { var lastCompatible string for _, v := range versions { - if !ok(module.Version{Path: path, Version: v}) { + if err := allowed(ctx, module.Version{Path: path, Version: v}); errors.Is(err, ErrDisallowed) { continue } @@ -385,7 +424,7 @@ type QueryResult struct { // If the package is in the main module, QueryPackage considers only the main // module and only the version "latest", without checking for other possible // modules. -func QueryPackage(ctx context.Context, path, query string, allowed func(module.Version) bool) ([]QueryResult, error) { +func QueryPackage(ctx context.Context, path, query string, allowed AllowedFunc) ([]QueryResult, error) { m := search.NewMatch(path) if m.IsLocal() || !m.IsLiteral() { return nil, fmt.Errorf("pattern %s is not an importable package", path) @@ -406,7 +445,7 @@ func QueryPackage(ctx context.Context, path, query string, allowed func(module.V // If any matching package is in the main module, QueryPattern considers only // the main module and only the version "latest", without checking for other // possible modules. 
-func QueryPattern(ctx context.Context, pattern, query string, allowed func(module.Version) bool) ([]QueryResult, error) { +func QueryPattern(ctx context.Context, pattern, query string, allowed AllowedFunc) ([]QueryResult, error) { ctx, span := trace.StartSpan(ctx, "modload.QueryPattern "+pattern+" "+query) defer span.Done() @@ -450,8 +489,8 @@ func QueryPattern(ctx context.Context, pattern, query string, allowed func(modul if query != "latest" { return nil, fmt.Errorf("can't query specific version for package %s in the main module (%s)", pattern, Target.Path) } - if !allowed(Target) { - return nil, fmt.Errorf("internal error: package %s is in the main module (%s), but version is not allowed", pattern, Target.Path) + if err := allowed(ctx, Target); err != nil { + return nil, fmt.Errorf("internal error: package %s is in the main module (%s), but version is not allowed: %w", pattern, Target.Path, err) } return []QueryResult{{ Mod: Target, diff --git a/src/cmd/go/internal/modload/query_test.go b/src/cmd/go/internal/modload/query_test.go index 77080e9b5b..351826f2ab 100644 --- a/src/cmd/go/internal/modload/query_test.go +++ b/src/cmd/go/internal/modload/query_test.go @@ -187,9 +187,11 @@ func TestQuery(t *testing.T) { if allow == "" { allow = "*" } - allowed := func(m module.Version) bool { - ok, _ := path.Match(allow, m.Version) - return ok + allowed := func(ctx context.Context, m module.Version) error { + if ok, _ := path.Match(allow, m.Version); !ok { + return ErrDisallowed + } + return nil } tt := tt t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.query+"/"+tt.current+"/"+allow, func(t *testing.T) { diff --git a/src/cmd/go/testdata/script/mod_query_exclude.txt b/src/cmd/go/testdata/script/mod_query_exclude.txt index a64a8e1086..742c6f17e3 100644 --- a/src/cmd/go/testdata/script/mod_query_exclude.txt +++ b/src/cmd/go/testdata/script/mod_query_exclude.txt @@ -1,23 +1,43 @@ env GO111MODULE=on +# list excluded version +go list -modfile=go.exclude.mod -m 
rsc.io/quote@v1.5.0 +stdout '^rsc.io/quote v1.5.0$' + +# list versions should not print excluded versions +go list -m -versions rsc.io/quote +stdout '\bv1.5.0\b' +go list -modfile=go.exclude.mod -m -versions rsc.io/quote +! stdout '\bv1.5.0\b' + +# list query with excluded version +go list -m rsc.io/quote@>=v1.5 +stdout '^rsc.io/quote v1.5.0$' +go list -modfile=go.exclude.mod -m rsc.io/quote@>=v1.5 +stdout '^rsc.io/quote v1.5.1$' + # get excluded version -cp go.mod1 go.mod -! go get rsc.io/quote@v1.5.0 -stderr 'rsc.io/quote@v1.5.0 excluded' +cp go.exclude.mod go.exclude.mod.orig +! go get -modfile=go.exclude.mod -d rsc.io/quote@v1.5.0 +stderr '^go get rsc.io/quote@v1.5.0: rsc.io/quote@v1.5.0: excluded by go.mod$' # get non-excluded version -cp go.mod1 go.mod -go get rsc.io/quote@v1.5.1 +cp go.exclude.mod.orig go.exclude.mod +go get -modfile=go.exclude.mod -d rsc.io/quote@v1.5.1 stderr 'rsc.io/quote v1.5.1' -# get range with excluded version -cp go.mod1 go.mod -go get rsc.io/quote@>=v1.5 -go list -m ...quote +# get query with excluded version +cp go.exclude.mod.orig go.exclude.mod +go get -modfile=go.exclude.mod -d rsc.io/quote@>=v1.5 +go list -modfile=go.exclude.mod -m ...quote stdout 'rsc.io/quote v1.5.[1-9]' --- go.mod1 -- +-- go.mod -- module x + +-- go.exclude.mod -- +module x + exclude rsc.io/quote v1.5.0 -- x.go -- -- cgit v1.2.3-54-g00ecf From c769f034d796769ad10fc03fe6866b36039d1a09 Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Wed, 15 Apr 2020 13:56:09 -0400 Subject: cmd/go/internal/modload: support go.mod retract directive The go command now recognizes 'retract' directives in go.mod. A retract directive may be used by a module author to indicate a version should not be used. The go command will not automatically upgrade to a retracted version. Retracted versions will not be considered when resolving version queries like "latest" that don't refer to a specific version. 
Internally, when the go command resolves a version query, it will find the highest release version (or pre-release if no release is available), then it will load retractions from the go.mod file for that version. Comments on retractions are treated as a rationale and may appear in error messages. Retractions are only loaded when a query is resolved, so this should have no impact on performance for most builds, except when go.mod is incomplete. For #24031 Change-Id: I17d643b9e03a3445676dbf1a5a351090c6ff6914 Reviewed-on: https://go-review.googlesource.com/c/go/+/228380 Run-TryBot: Jay Conrod TryBot-Result: Gobot Gobot Reviewed-by: Michael Matloob Reviewed-by: Bryan C. Mills --- src/cmd/go/alldocs.go | 8 +- src/cmd/go/internal/modinfo/info.go | 17 ++- src/cmd/go/internal/modload/help.go | 8 +- src/cmd/go/internal/modload/list.go | 3 +- src/cmd/go/internal/modload/modfile.go | 146 ++++++++++++++++++++- .../example.com_retract_self_prev_v1.0.0-bad.txt | 14 ++ .../mod/example.com_retract_self_prev_v1.1.0.txt | 14 ++ .../mod/example.com_retract_self_prev_v1.9.0.txt | 18 +++ .../mod/example.com_retract_v1.0.0-bad.txt | 10 ++ .../mod/example.com_retract_v1.0.0-good.txt | 10 ++ .../mod/example.com_retract_v1.0.0-unused.txt | 10 ++ .../go/testdata/mod/example.com_retract_v1.1.0.txt | 13 ++ src/cmd/go/testdata/script/mod_download.txt | 64 +++++---- src/cmd/go/testdata/script/mod_retract.txt | 40 ++++++ src/cmd/go/testdata/script/mod_sumdb_golang.txt | 12 +- 15 files changed, 340 insertions(+), 47 deletions(-) create mode 100644 src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.0.0-bad.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.1.0.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.9.0.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_v1.0.0-bad.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_v1.0.0-good.txt create mode 100644 
src/cmd/go/testdata/mod/example.com_retract_v1.0.0-unused.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_v1.1.0.txt create mode 100644 src/cmd/go/testdata/script/mod_retract.txt diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index 68bad3cff1..f50529c4f2 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -1894,15 +1894,17 @@ // require new/thing/v2 v2.3.4 // exclude old/thing v1.2.3 // replace bad/thing v1.4.5 => good/thing v1.4.5 +// retract v1.5.6 // // The verbs are // module, to define the module path; // go, to set the expected language version; // require, to require a particular module at a given version or later; -// exclude, to exclude a particular module version from use; and -// replace, to replace a module version with a different module version. +// exclude, to exclude a particular module version from use; +// replace, to replace a module version with a different module version; and +// retract, to indicate a previously released version should not be used. // Exclude and replace apply only in the main module's go.mod and are ignored -// in dependencies. See https://research.swtch.com/vgo-mvs for details. +// in dependencies. See https://golang.org/ref/mod for details. 
// // The leading verb can be factored out of adjacent lines to create a block, // like in Go imports: diff --git a/src/cmd/go/internal/modinfo/info.go b/src/cmd/go/internal/modinfo/info.go index 07248d1a61..897be56397 100644 --- a/src/cmd/go/internal/modinfo/info.go +++ b/src/cmd/go/internal/modinfo/info.go @@ -21,6 +21,7 @@ type ModulePublic struct { Dir string `json:",omitempty"` // directory holding local copy of files, if any GoMod string `json:",omitempty"` // path to go.mod file describing module, if any GoVersion string `json:",omitempty"` // go version used in module + Retracted []string `json:",omitempty"` // retraction information, if any (with -retracted or -u) Error *ModuleError `json:",omitempty"` // error loading module } @@ -30,18 +31,26 @@ type ModuleError struct { func (m *ModulePublic) String() string { s := m.Path + versionString := func(mm *ModulePublic) string { + v := mm.Version + if len(mm.Retracted) == 0 { + return v + } + return v + " (retracted)" + } + if m.Version != "" { - s += " " + m.Version + s += " " + versionString(m) if m.Update != nil { - s += " [" + m.Update.Version + "]" + s += " [" + versionString(m.Update) + "]" } } if m.Replace != nil { s += " => " + m.Replace.Path if m.Replace.Version != "" { - s += " " + m.Replace.Version + s += " " + versionString(m.Replace) if m.Replace.Update != nil { - s += " [" + m.Replace.Update.Version + "]" + s += " [" + versionString(m.Replace.Update) + "]" } } } diff --git a/src/cmd/go/internal/modload/help.go b/src/cmd/go/internal/modload/help.go index d80206b194..37f23d967f 100644 --- a/src/cmd/go/internal/modload/help.go +++ b/src/cmd/go/internal/modload/help.go @@ -432,15 +432,17 @@ verb followed by arguments. 
For example: require new/thing/v2 v2.3.4 exclude old/thing v1.2.3 replace bad/thing v1.4.5 => good/thing v1.4.5 + retract v1.5.6 The verbs are module, to define the module path; go, to set the expected language version; require, to require a particular module at a given version or later; - exclude, to exclude a particular module version from use; and - replace, to replace a module version with a different module version. + exclude, to exclude a particular module version from use; + replace, to replace a module version with a different module version; and + retract, to indicate a previously released version should not be used. Exclude and replace apply only in the main module's go.mod and are ignored -in dependencies. See https://research.swtch.com/vgo-mvs for details. +in dependencies. See https://golang.org/ref/mod for details. The leading verb can be factored out of adjacent lines to create a block, like in Go imports: diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go index 2f549540a6..a3461eea26 100644 --- a/src/cmd/go/internal/modload/list.go +++ b/src/cmd/go/internal/modload/list.go @@ -85,7 +85,8 @@ func listModules(ctx context.Context, args []string, listVersions bool) []*modin allowed := CheckAllowed if IsRevisionQuery(vers) { - // Allow excluded versions if the user asked for a specific revision. + // Allow excluded and retracted versions if the user asked for a + // specific revision. 
allowed = nil } info, err := Query(ctx, path, vers, current, allowed) diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go index aed1f0a36b..0b135c5fb5 100644 --- a/src/cmd/go/internal/modload/modfile.go +++ b/src/cmd/go/internal/modload/modfile.go @@ -9,13 +9,16 @@ import ( "errors" "fmt" "path/filepath" + "strings" "sync" + "unicode" "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/lockedfile" "cmd/go/internal/modfetch" "cmd/go/internal/par" + "cmd/go/internal/trace" "golang.org/x/mod/modfile" "golang.org/x/mod/module" @@ -44,10 +47,16 @@ type requireMeta struct { } // CheckAllowed returns an error equivalent to ErrDisallowed if m is excluded by -// the main module's go.mod. Most version queries use this to filter out -// versions that should not be used. +// the main module's go.mod or retracted by its author. Most version queries use +// this to filter out versions that should not be used. func CheckAllowed(ctx context.Context, m module.Version) error { - return CheckExclusions(ctx, m) + if err := CheckExclusions(ctx, m); err != nil { + return err + } + if err := checkRetractions(ctx, m); err != nil { + return err + } + return nil } // ErrDisallowed is returned by version predicates passed to Query and similar @@ -70,6 +79,120 @@ type excludedError struct{} func (e *excludedError) Error() string { return "excluded by go.mod" } func (e *excludedError) Is(err error) bool { return err == ErrDisallowed } +// checkRetractions returns an error if module m has been retracted by +// its author. +func checkRetractions(ctx context.Context, m module.Version) error { + if m.Version == "" { + // Main module, standard library, or file replacement module. + // Cannot be retracted. + return nil + } + + // Look up retraction information from the latest available version of + // the module. Cache retraction information so we don't parse the go.mod + // file repeatedly. 
+ type entry struct { + retract []retraction + err error + } + path := m.Path + e := retractCache.Do(path, func() (v interface{}) { + ctx, span := trace.StartSpan(ctx, "checkRetractions "+path) + defer span.Done() + + if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" { + // All versions of the module were replaced with a local directory. + // Don't load retractions. + return &entry{nil, nil} + } + + // Find the latest version of the module. + // Ignore exclusions from the main module's go.mod. + // We may need to account for the current version: for example, + // v2.0.0+incompatible is not "latest" if v1.0.0 is current. + rev, err := Query(ctx, path, "latest", findCurrentVersion(path), nil) + if err != nil { + return &entry{err: err} + } + + // Load go.mod for that version. + // If the version is replaced, we'll load retractions from the replacement. + // If there's an error loading the go.mod, we'll return it here. + // These errors should generally be ignored by callers of checkRetractions, + // since they happen frequently when we're offline. These errors are not + // equivalent to ErrDisallowed, so they may be distinguished from + // retraction errors. 
+ summary, err := goModSummary(module.Version{Path: path, Version: rev.Version}) + if err != nil { + return &entry{err: err} + } + return &entry{retract: summary.retract} + }).(*entry) + + if e.err != nil { + return fmt.Errorf("loading module retractions: %v", e.err) + } + + var rationale []string + isRetracted := false + for _, r := range e.retract { + if semver.Compare(r.Low, m.Version) <= 0 && semver.Compare(m.Version, r.High) <= 0 { + isRetracted = true + if r.Rationale != "" { + rationale = append(rationale, r.Rationale) + } + } + } + if isRetracted { + return &retractedError{rationale: rationale} + } + return nil +} + +var retractCache par.Cache + +type retractedError struct { + rationale []string +} + +func (e *retractedError) Error() string { + msg := "retracted by module author" + if len(e.rationale) > 0 { + // This is meant to be a short error printed on a terminal, so just + // print the first rationale. + msg += ": " + ShortRetractionRationale(e.rationale[0]) + } + return msg +} + +func (e *retractedError) Is(err error) bool { + return err == ErrDisallowed +} + +// ShortRetractionRationale returns a retraction rationale string that is safe +// to print in a terminal. It returns hard-coded strings if the rationale +// is empty, too long, or contains non-printable characters. +func ShortRetractionRationale(rationale string) string { + const maxRationaleBytes = 500 + if i := strings.Index(rationale, "\n"); i >= 0 { + rationale = rationale[:i] + } + rationale = strings.TrimSpace(rationale) + if rationale == "" { + return "retracted by module author" + } + if len(rationale) > maxRationaleBytes { + return "(rationale omitted: too long)" + } + for _, r := range rationale { + if !unicode.IsGraphic(r) && !unicode.IsSpace(r) { + return "(rationale omitted: contains non-printable characters)" + } + } + // NOTE: the go.mod parser rejects invalid UTF-8, so we don't check that here. 
+	return rationale +} + // Replacement returns the replacement for mod, if any, from go.mod. // If there is no replacement for mod, Replacement returns // a module.Version with Path == "". @@ -210,6 +333,14 @@ type modFileSummary struct { module module.Version goVersionV string // GoVersion with "v" prefix require []module.Version + retract []retraction +} + +// A retraction consists of a retracted version interval and rationale. +// retraction is like modfile.Retract, but it doesn't point to the syntax tree. +type retraction struct { + modfile.VersionInterval + Rationale string } // goModSummary returns a summary of the go.mod file for module m, @@ -363,6 +494,15 @@ func rawGoModSummary(m module.Version) (*modFileSummary, error) { summary.require = append(summary.require, req.Mod) } } + if len(f.Retract) > 0 { + summary.retract = make([]retraction, 0, len(f.Retract)) + for _, ret := range f.Retract { + summary.retract = append(summary.retract, retraction{ + VersionInterval: ret.VersionInterval, + Rationale: ret.Rationale, + }) + } + } return summary, nil } diff --git a/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.0.0-bad.txt b/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.0.0-bad.txt new file mode 100644 index 0000000000..095063d69b --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.0.0-bad.txt @@ -0,0 +1,14 @@ +See example.com_retract_self_prev_v1.9.0.txt. + +This version is retracted. + +-- .mod -- +module example.com/retract/self/prev + +go 1.15 + +-- .info -- +{"Version":"v1.0.0-bad"} + +-- p.go -- +package p diff --git a/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.1.0.txt b/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.1.0.txt new file mode 100644 index 0000000000..27c3a39065 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.1.0.txt @@ -0,0 +1,14 @@ +See example.com_retract_self_prev_v1.9.0.txt. + +This version is the latest (only) non-retracted version. 
+ +-- .mod -- +module example.com/retract/self/prev + +go 1.15 + +-- .info -- +{"Version":"v1.1.0"} + +-- p.go -- +package p diff --git a/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.9.0.txt b/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.9.0.txt new file mode 100644 index 0000000000..03d6168f0d --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.9.0.txt @@ -0,0 +1,18 @@ +Module example.com/retract/self/prev is a module that retracts its own +latest version, as well as an earlier version. + +A previous unretracted release version, v1.1.0, is still available. + +-- .mod -- +module example.com/retract/self/prev + +go 1.15 + +retract v1.0.0-bad // bad +retract v1.9.0 // self + +-- .info -- +{"Version":"v1.9.0"} + +-- p.go -- +package p diff --git a/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-bad.txt b/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-bad.txt new file mode 100644 index 0000000000..2f996cfc36 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-bad.txt @@ -0,0 +1,10 @@ +-- .mod -- +module example.com/retract + +go 1.15 + +-- .info -- +{"Version":"v1.0.0-bad"} + +-- retract.go -- +package retract diff --git a/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-good.txt b/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-good.txt new file mode 100644 index 0000000000..78152bba4f --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-good.txt @@ -0,0 +1,10 @@ +-- .mod -- +module example.com/retract + +go 1.15 + +-- .info -- +{"Version":"v1.0.0-good"} + +-- retract.go -- +package retract diff --git a/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-unused.txt b/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-unused.txt new file mode 100644 index 0000000000..3bc9e35b7c --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-unused.txt @@ -0,0 +1,10 @@ +-- .mod -- +module example.com/retract + +go 1.15 + +-- .info -- +{"Version":"v1.0.0-unused"} + +-- 
retract.go -- +package retract diff --git a/src/cmd/go/testdata/mod/example.com_retract_v1.1.0.txt b/src/cmd/go/testdata/mod/example.com_retract_v1.1.0.txt new file mode 100644 index 0000000000..18d6d832e2 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_v1.1.0.txt @@ -0,0 +1,13 @@ +-- .mod -- +module example.com/retract + +go 1.15 + +retract v1.0.0-bad // bad +retract v1.0.0-unused // bad + +-- .info -- +{"Version":"v1.1.0"} + +-- retract.go -- +package retract diff --git a/src/cmd/go/testdata/script/mod_download.txt b/src/cmd/go/testdata/script/mod_download.txt index bb5c4627db..5acb83266b 100644 --- a/src/cmd/go/testdata/script/mod_download.txt +++ b/src/cmd/go/testdata/script/mod_download.txt @@ -1,13 +1,15 @@ env GO111MODULE=on -# download with version should print nothing +# download with version should print nothing. +# It should not load retractions from the .mod file from the latest version. go mod download rsc.io/quote@v1.5.0 ! stdout . ! stderr . - exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.zip +! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info +! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod # download of an invalid path should report the error [short] skip @@ -31,53 +33,59 @@ stdout '^\t"GoModSum": "h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe\+TKr0="' go list -m all ! stdout rsc.io -# add to go.mod so we can test non-query downloads -go mod edit -require rsc.io/quote@v1.5.2 -! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info -! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod +# download query should have downloaded go.mod for the highest release version +# in order to find retractions when resolving the query '@<=v1.5.0'. 
+exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod ! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip +# add to go.mod so we can test non-query downloads +go mod edit -require rsc.io/quote@v1.5.3-pre1 +! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.info +! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.mod +! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.zip + # module loading will page in the info and mod files go list -m all -exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info -exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod -! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.info +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.mod +! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.zip # download will fetch and unpack the zip file go mod download -exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info -exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod -exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip -exists $GOPATH/pkg/mod/rsc.io/quote@v1.5.2 +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.info +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.mod +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.zip +exists $GOPATH/pkg/mod/rsc.io/quote@v1.5.3-pre1 # download repopulates deleted files and directories independently. 
-rm $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info +rm $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.info go mod download -exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info -rm $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.info +rm $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.mod go mod download -exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod -rm $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.mod +rm $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.zip go mod download -exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip -rm -r $GOPATH/pkg/mod/rsc.io/quote@v1.5.2 +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.zip +rm -r $GOPATH/pkg/mod/rsc.io/quote@v1.5.3-pre1 go mod download -exists $GOPATH/pkg/mod/rsc.io/quote@v1.5.2 +exists $GOPATH/pkg/mod/rsc.io/quote@v1.5.3-pre1 # download reports the locations of downloaded files go mod download -json stdout '^\t"Path": "rsc.io/quote"' -stdout '^\t"Version": "v1.5.2"' -stdout '^\t"Info": ".*(\\\\|/)pkg(\\\\|/)mod(\\\\|/)cache(\\\\|/)download(\\\\|/)rsc.io(\\\\|/)quote(\\\\|/)@v(\\\\|/)v1.5.2.info"' -stdout '^\t"GoMod": ".*(\\\\|/)pkg(\\\\|/)mod(\\\\|/)cache(\\\\|/)download(\\\\|/)rsc.io(\\\\|/)quote(\\\\|/)@v(\\\\|/)v1.5.2.mod"' -stdout '^\t"Zip": ".*(\\\\|/)pkg(\\\\|/)mod(\\\\|/)cache(\\\\|/)download(\\\\|/)rsc.io(\\\\|/)quote(\\\\|/)@v(\\\\|/)v1.5.2.zip"' -stdout '^\t"Dir": ".*(\\\\|/)pkg(\\\\|/)mod(\\\\|/)rsc.io(\\\\|/)quote@v1.5.2"' +stdout '^\t"Version": "v1.5.3-pre1"' +stdout '^\t"Info": ".*(\\\\|/)pkg(\\\\|/)mod(\\\\|/)cache(\\\\|/)download(\\\\|/)rsc.io(\\\\|/)quote(\\\\|/)@v(\\\\|/)v1.5.3-pre1.info"' +stdout '^\t"GoMod": ".*(\\\\|/)pkg(\\\\|/)mod(\\\\|/)cache(\\\\|/)download(\\\\|/)rsc.io(\\\\|/)quote(\\\\|/)@v(\\\\|/)v1.5.3-pre1.mod"' +stdout 
'^\t"Zip": ".*(\\\\|/)pkg(\\\\|/)mod(\\\\|/)cache(\\\\|/)download(\\\\|/)rsc.io(\\\\|/)quote(\\\\|/)@v(\\\\|/)v1.5.3-pre1.zip"' +stdout '^\t"Dir": ".*(\\\\|/)pkg(\\\\|/)mod(\\\\|/)rsc.io(\\\\|/)quote@v1.5.3-pre1"' # download will follow replacements -go mod edit -require rsc.io/quote@v1.5.1 -replace rsc.io/quote@v1.5.1=rsc.io/quote@v1.5.3-pre1 +go mod edit -require rsc.io/quote@v1.5.1 -replace rsc.io/quote@v1.5.1=rsc.io/quote@v1.5.2 go mod download ! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.1.zip -exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.zip +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip # download will not follow replacements for explicit module queries go mod download -json rsc.io/quote@v1.5.1 diff --git a/src/cmd/go/testdata/script/mod_retract.txt b/src/cmd/go/testdata/script/mod_retract.txt new file mode 100644 index 0000000000..5d21902043 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_retract.txt @@ -0,0 +1,40 @@ +cp go.mod go.mod.orig + +# 'go list pkg' does not report an error when a retracted version is used. +go list -e -f '{{if .Error}}{{.Error}}{{end}}' ./use +! stdout . +cmp go.mod go.mod.orig + +# Nor does 'go build'. +[!short] go build ./use +[!short] ! stderr . +[!short] cmp go.mod go.mod.orig + +# Neither 'go list' nor 'go build' should download go.mod from the version +# that would list retractions. +exists $GOPATH/pkg/mod/cache/download/example.com/retract/@v/v1.0.0-bad.mod +! exists $GOPATH/pkg/mod/cache/download/example.com/retract/@v/v1.1.0.mod + +# Importing a package from a module with a retracted latest version will +# select the latest non-retracted version. 
+go list ./use_self_prev +go list -m example.com/retract/self/prev +stdout '^example.com/retract/self/prev v1.1.0$' +exists $GOPATH/pkg/mod/cache/download/example.com/retract/self/prev/@v/v1.9.0.mod + +-- go.mod -- +module example.com/use + +go 1.15 + +require example.com/retract v1.0.0-bad + +-- use/use.go -- +package use + +import _ "example.com/retract" + +-- use_self_prev/use.go -- +package use_self_prev + +import _ "example.com/retract/self/prev" diff --git a/src/cmd/go/testdata/script/mod_sumdb_golang.txt b/src/cmd/go/testdata/script/mod_sumdb_golang.txt index 40a07fc7e9..d9fb63acb0 100644 --- a/src/cmd/go/testdata/script/mod_sumdb_golang.txt +++ b/src/cmd/go/testdata/script/mod_sumdb_golang.txt @@ -9,7 +9,7 @@ env GOPROXY=https://proxy.golang.org go env GOSUMDB stdout '^sum.golang.org$' -# download direct from github +# Download direct from github. [!net] skip [!exec:git] skip env GOSUMDB=sum.golang.org @@ -17,11 +17,13 @@ env GOPROXY=direct go get -d rsc.io/quote@v1.5.2 cp go.sum saved.sum -# download from proxy.golang.org with go.sum entry already +# Download from proxy.golang.org with go.sum entry already. +# Use 'go list' instead of 'go get' since the latter may download extra go.mod +# files not listed in go.sum. go clean -modcache env GOSUMDB= env GOPROXY= -go get -x -d rsc.io/quote@v1.5.2 +go list -x -deps rsc.io/quote ! stderr github stderr proxy.golang.org/rsc.io/quote ! stderr sum.golang.org/tile @@ -32,7 +34,7 @@ cmp go.sum saved.sum # Should use the checksum database to validate new go.sum lines, # but not need to fetch any new data from the proxy. rm go.sum -go get -x -d rsc.io/quote@v1.5.2 +go list -x rsc.io/quote ! stderr github ! 
stderr proxy.golang.org/rsc.io/quote stderr sum.golang.org/tile @@ -43,7 +45,7 @@ cmp go.sum saved.sum env TESTGOPROXY404=1 go clean -modcache rm go.sum -go get -x -d rsc.io/quote@v1.5.2 +go list -x rsc.io/quote stderr 'proxy.golang.org.*404 testing' stderr github.com/rsc cmp go.sum saved.sum -- cgit v1.2.3-54-g00ecf From 0bbd386e8bbdf419077d708d3671245fc0f50f0c Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Wed, 15 Apr 2020 14:17:08 -0400 Subject: cmd/go: add -retract and -dropretract flags to 'go mod edit' 'go mod edit' can now add and remove 'retract' directives from go.mod files. Also, retractions are now included in the 'go mod edit -json' output. For #24031 Change-Id: Ife7915e259fa508626d6ec5f786b5c860b489599 Reviewed-on: https://go-review.googlesource.com/c/go/+/228381 Run-TryBot: Jay Conrod TryBot-Result: Gobot Gobot Reviewed-by: Michael Matloob Reviewed-by: Bryan C. Mills --- src/cmd/go/alldocs.go | 18 ++++- src/cmd/go/internal/modcmd/edit.go | 101 +++++++++++++++++++++++++--- src/cmd/go/testdata/script/mod_edit.txt | 114 ++++++++++++++++++++++++++++---- 3 files changed, 208 insertions(+), 25 deletions(-) diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index f50529c4f2..609ede49cd 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -1100,9 +1100,14 @@ // module path and version pair. If the @v is omitted, a replacement without // a version on the left side is dropped. // +// The -retract=version and -dropretract=version flags add and drop a +// retraction on the given version. The version may be a single version +// like "v1.2.3" or a closed interval like "[v1.1.0-v1.1.9]". Note that +// -retract=version is a no-op if that retraction already exists. +// // The -require, -droprequire, -exclude, -dropexclude, -replace, -// and -dropreplace editing flags may be repeated, and the changes -// are applied in the order given. 
+// -dropreplace, -retract, and -dropretract editing flags may be repeated, +// and the changes are applied in the order given. // // The -go=version flag sets the expected Go language version. // @@ -1136,6 +1141,15 @@ // New Module // } // +// type Retract struct { +// Low string +// High string +// Rationale string +// } +// +// Retract entries representing a single version (not an interval) will have +// the "Low" and "High" fields set to the same value. +// // Note that this only describes the go.mod file itself, not other modules // referred to indirectly. For the full set of modules available to a build, // use 'go list -m -json all'. diff --git a/src/cmd/go/internal/modcmd/edit.go b/src/cmd/go/internal/modcmd/edit.go index a81c25270f..18bdd34cd0 100644 --- a/src/cmd/go/internal/modcmd/edit.go +++ b/src/cmd/go/internal/modcmd/edit.go @@ -68,9 +68,14 @@ The -dropreplace=old[@v] flag drops a replacement of the given module path and version pair. If the @v is omitted, a replacement without a version on the left side is dropped. +The -retract=version and -dropretract=version flags add and drop a +retraction on the given version. The version may be a single version +like "v1.2.3" or a closed interval like "[v1.1.0-v1.1.9]". Note that +-retract=version is a no-op if that retraction already exists. + The -require, -droprequire, -exclude, -dropexclude, -replace, -and -dropreplace editing flags may be repeated, and the changes -are applied in the order given. +-dropreplace, -retract, and -dropretract editing flags may be repeated, +and the changes are applied in the order given. The -go=version flag sets the expected Go language version. @@ -104,6 +109,15 @@ writing it back to go.mod. The JSON output corresponds to these Go types: New Module } + type Retract struct { + Low string + High string + Rationale string + } + +Retract entries representing a single version (not an interval) will have +the "Low" and "High" fields set to the same value. 
+ Note that this only describes the go.mod file itself, not other modules referred to indirectly. For the full set of modules available to a build, use 'go list -m -json all'. @@ -137,6 +151,8 @@ func init() { cmdEdit.Flag.Var(flagFunc(flagDropReplace), "dropreplace", "") cmdEdit.Flag.Var(flagFunc(flagReplace), "replace", "") cmdEdit.Flag.Var(flagFunc(flagDropExclude), "dropexclude", "") + cmdEdit.Flag.Var(flagFunc(flagRetract), "retract", "") + cmdEdit.Flag.Var(flagFunc(flagDropRetract), "dropretract", "") work.AddModCommonFlags(cmdEdit) base.AddBuildFlagsNX(&cmdEdit.Flag) @@ -252,12 +268,7 @@ func parsePathVersion(flag, arg string) (path, version string) { base.Fatalf("go mod: -%s=%s: invalid path: %v", flag, arg, err) } - // We don't call modfile.CheckPathVersion, because that insists - // on versions being in semver form, but here we want to allow - // versions like "master" or "1234abcdef", which the go command will resolve - // the next time it runs (or during -fix). - // Even so, we need to make sure the version is a valid token. - if modfile.MustQuote(version) { + if !allowedVersionArg(version) { base.Fatalf("go mod: -%s=%s: invalid version %q", flag, arg, version) } @@ -289,12 +300,48 @@ func parsePathVersionOptional(adj, arg string, allowDirPath bool) (path, version return path, version, fmt.Errorf("invalid %s path: %v", adj, err) } } - if path != arg && modfile.MustQuote(version) { + if path != arg && !allowedVersionArg(version) { return path, version, fmt.Errorf("invalid %s version: %q", adj, version) } return path, version, nil } +// parseVersionInterval parses a single version like "v1.2.3" or a closed +// interval like "[v1.2.3,v1.4.5]". Note that a single version has the same +// representation as an interval with equal upper and lower bounds: both +// Low and High are set. 
+func parseVersionInterval(arg string) (modfile.VersionInterval, error) { + if !strings.HasPrefix(arg, "[") { + if !allowedVersionArg(arg) { + return modfile.VersionInterval{}, fmt.Errorf("invalid version: %q", arg) + } + return modfile.VersionInterval{Low: arg, High: arg}, nil + } + if !strings.HasSuffix(arg, "]") { + return modfile.VersionInterval{}, fmt.Errorf("invalid version interval: %q", arg) + } + s := arg[1 : len(arg)-1] + i := strings.Index(s, ",") + if i < 0 { + return modfile.VersionInterval{}, fmt.Errorf("invalid version interval: %q", arg) + } + low := strings.TrimSpace(s[:i]) + high := strings.TrimSpace(s[i+1:]) + if !allowedVersionArg(low) || !allowedVersionArg(high) { + return modfile.VersionInterval{}, fmt.Errorf("invalid version interval: %q", arg) + } + return modfile.VersionInterval{Low: low, High: high}, nil +} + +// allowedVersionArg returns whether a token may be used as a version in go.mod. +// We don't call modfile.CheckPathVersion, because that insists on versions +// being in semver form, but here we want to allow versions like "master" or +// "1234abcdef", which the go command will resolve the next time it runs (or +// during -fix). Even so, we need to make sure the version is a valid token. +func allowedVersionArg(arg string) bool { + return !modfile.MustQuote(arg) +} + // flagRequire implements the -require flag. func flagRequire(arg string) { path, version := parsePathVersion("require", arg) @@ -377,6 +424,32 @@ func flagDropReplace(arg string) { }) } +// flagRetract implements the -retract flag. +func flagRetract(arg string) { + vi, err := parseVersionInterval(arg) + if err != nil { + base.Fatalf("go mod: -retract=%s: %v", arg, err) + } + edits = append(edits, func(f *modfile.File) { + if err := f.AddRetract(vi, ""); err != nil { + base.Fatalf("go mod: -retract=%s: %v", arg, err) + } + }) +} + +// flagDropRetract implements the -dropretract flag. 
+func flagDropRetract(arg string) { + vi, err := parseVersionInterval(arg) + if err != nil { + base.Fatalf("go mod: -dropretract=%s: %v", arg, err) + } + edits = append(edits, func(f *modfile.File) { + if err := f.DropRetract(vi); err != nil { + base.Fatalf("go mod: -dropretract=%s: %v", arg, err) + } + }) +} + // fileJSON is the -json output data structure. type fileJSON struct { Module module.Version @@ -384,6 +457,7 @@ type fileJSON struct { Require []requireJSON Exclude []module.Version Replace []replaceJSON + Retract []retractJSON } type requireJSON struct { @@ -397,6 +471,12 @@ type replaceJSON struct { New module.Version } +type retractJSON struct { + Low string `json:",omitempty"` + High string `json:",omitempty"` + Rationale string `json:",omitempty"` +} + // editPrintJSON prints the -json output. func editPrintJSON(modFile *modfile.File) { var f fileJSON @@ -415,6 +495,9 @@ func editPrintJSON(modFile *modfile.File) { for _, r := range modFile.Replace { f.Replace = append(f.Replace, replaceJSON{r.Old, r.New}) } + for _, r := range modFile.Retract { + f.Retract = append(f.Retract, retractJSON{r.Low, r.High, r.Rationale}) + } data, err := json.MarshalIndent(&f, "", "\t") if err != nil { base.Fatalf("go: internal error: %v", err) diff --git a/src/cmd/go/testdata/script/mod_edit.txt b/src/cmd/go/testdata/script/mod_edit.txt index 898d8524ac..78485eb86a 100644 --- a/src/cmd/go/testdata/script/mod_edit.txt +++ b/src/cmd/go/testdata/script/mod_edit.txt @@ -16,15 +16,19 @@ cmpenv go.mod $WORK/go.mod.init cmpenv go.mod $WORK/go.mod.init # go mod edits -go mod edit -droprequire=x.1 -require=x.1@v1.0.0 -require=x.2@v1.1.0 -droprequire=x.2 -exclude='x.1 @ v1.2.0' -exclude=x.1@v1.2.1 -replace=x.1@v1.3.0=y.1@v1.4.0 -replace='x.1@v1.4.0 = ../z' +go mod edit -droprequire=x.1 -require=x.1@v1.0.0 -require=x.2@v1.1.0 -droprequire=x.2 -exclude='x.1 @ v1.2.0' -exclude=x.1@v1.2.1 -replace=x.1@v1.3.0=y.1@v1.4.0 -replace='x.1@v1.4.0 = ../z' -retract=v1.6.0 
-retract=[v1.1.0,v1.2.0] -retract=[v1.3.0,v1.4.0] -retract=v1.0.0 cmpenv go.mod $WORK/go.mod.edit1 -go mod edit -droprequire=x.1 -dropexclude=x.1@v1.2.1 -dropreplace=x.1@v1.3.0 -require=x.3@v1.99.0 +go mod edit -droprequire=x.1 -dropexclude=x.1@v1.2.1 -dropreplace=x.1@v1.3.0 -require=x.3@v1.99.0 -dropretract=v1.0.0 -dropretract=[v1.1.0,v1.2.0] cmpenv go.mod $WORK/go.mod.edit2 # go mod edit -json go mod edit -json cmpenv stdout $WORK/go.mod.json +# go mod edit -json (retractions with rationales) +go mod edit -json $WORK/go.mod.retractrationale +cmp stdout $WORK/go.mod.retractrationale.json + # go mod edit -json (empty mod file) go mod edit -json $WORK/go.mod.empty cmp stdout $WORK/go.mod.empty.json @@ -40,11 +44,11 @@ cmpenv go.mod $WORK/go.mod.edit5 # go mod edit -fmt cp $WORK/go.mod.badfmt go.mod go mod edit -fmt -print # -print should avoid writing file -cmpenv stdout $WORK/go.mod.edit6 +cmpenv stdout $WORK/go.mod.goodfmt cmp go.mod $WORK/go.mod.badfmt go mod edit -fmt # without -print, should write file (and nothing to stdout) ! stdout . 
-cmpenv go.mod $WORK/go.mod.edit6 +cmpenv go.mod $WORK/go.mod.goodfmt # go mod edit -module cd $WORK/m @@ -84,6 +88,13 @@ replace ( x.1 v1.3.0 => y.1 v1.4.0 x.1 v1.4.0 => ../z ) + +retract ( + v1.6.0 + [v1.3.0, v1.4.0] + [v1.1.0, v1.2.0] + v1.0.0 +) -- $WORK/go.mod.edit2 -- module x.x/y/z @@ -93,6 +104,11 @@ exclude x.1 v1.2.0 replace x.1 v1.4.0 => ../z +retract ( + v1.6.0 + [v1.3.0, v1.4.0] +) + require x.3 v1.99.0 -- $WORK/go.mod.json -- { @@ -122,6 +138,16 @@ require x.3 v1.99.0 "Path": "../z" } } + ], + "Retract": [ + { + "Low": "v1.6.0", + "High": "v1.6.0" + }, + { + "Low": "v1.3.0", + "High": "v1.4.0" + } ] } -- $WORK/go.mod.edit3 -- @@ -136,6 +162,11 @@ replace ( x.1 v1.4.0 => y.1/v2 v2.3.5 ) +retract ( + v1.6.0 + [v1.3.0, v1.4.0] +) + require x.3 v1.99.0 -- $WORK/go.mod.edit4 -- module x.x/y/z @@ -146,6 +177,11 @@ exclude x.1 v1.2.0 replace x.1 => y.1/v2 v2.3.6 +retract ( + v1.6.0 + [v1.3.0, v1.4.0] +) + require x.3 v1.99.0 -- $WORK/go.mod.edit5 -- module x.x/y/z @@ -154,15 +190,10 @@ go $goversion exclude x.1 v1.2.0 -require x.3 v1.99.0 --- $WORK/go.mod.edit6 -- -module x.x/y/z - -go 1.10 - -exclude x.1 v1.2.0 - -replace x.1 => y.1/v2 v2.3.6 +retract ( + v1.6.0 + [v1.3.0, v1.4.0] +) require x.3 v1.99.0 -- $WORK/local/go.mod.edit -- @@ -183,10 +214,64 @@ exclude x.1 v1.2.0 replace x.1 => y.1/v2 v2.3.6 require x.3 v1.99.0 + +retract [ "v1.8.1" , "v1.8.2" ] +-- $WORK/go.mod.goodfmt -- +module x.x/y/z + +go 1.10 + +exclude x.1 v1.2.0 + +replace x.1 => y.1/v2 v2.3.6 + +require x.3 v1.99.0 + +retract [v1.8.1, v1.8.2] -- $WORK/m/go.mod.edit -- module x.x/y/z go $goversion +-- $WORK/go.mod.retractrationale -- +module x.x/y/z + +go 1.15 + +// a +retract v1.0.0 + +// b +retract ( + v1.0.1 + v1.0.2 // c +) +-- $WORK/go.mod.retractrationale.json -- +{ + "Module": { + "Path": "x.x/y/z" + }, + "Go": "1.15", + "Require": null, + "Exclude": null, + "Replace": null, + "Retract": [ + { + "Low": "v1.0.0", + "High": "v1.0.0", + "Rationale": "a" + }, + { + "Low": "v1.0.1", + 
"High": "v1.0.1", + "Rationale": "b" + }, + { + "Low": "v1.0.2", + "High": "v1.0.2", + "Rationale": "c" + } + ] +} -- $WORK/go.mod.empty -- -- $WORK/go.mod.empty.json -- { @@ -195,5 +280,6 @@ go $goversion }, "Require": null, "Exclude": null, - "Replace": null + "Replace": null, + "Retract": null } -- cgit v1.2.3-54-g00ecf From eb3e27ac1a9346c7c2669ba2b863811607eddeae Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Wed, 15 Apr 2020 14:42:15 -0400 Subject: cmd/go: add -retracted flag to 'go list' The -retracted flag causes 'go list' to load information about retracted module module versions. When -retracted is used with -f or -json, the Retracted field is set to a string containing the reason for the retraction on retracted module versions. The string is based on comments on the retract directive. This field is also populated when the -u flag is used. When -retracted is used with -versions, retracted versions are shown. Normally, they are omitted. For #24031 Change-Id: Ic13d516eddffb1b8404e21034f78cecc9896d1b8 Reviewed-on: https://go-review.googlesource.com/c/go/+/228382 Reviewed-by: Michael Matloob Reviewed-by: Bryan C. 
Mills --- src/cmd/go/alldocs.go | 15 ++- src/cmd/go/internal/list/list.go | 100 ++++++++++++++++--- src/cmd/go/internal/modcmd/download.go | 5 +- src/cmd/go/internal/modcmd/why.go | 3 +- src/cmd/go/internal/modload/build.go | 50 ++++++++-- src/cmd/go/internal/modload/list.go | 27 +++--- .../mod/example.com_retract_missingmod_v1.0.0.txt | 8 ++ .../mod/example.com_retract_missingmod_v1.9.0.txt | 4 + .../mod/example.com_retract_self_all_v1.9.0.txt | 14 +++ .../example.com_retract_self_prerelease_v1.0.0.txt | 16 +++ .../example.com_retract_self_prerelease_v1.9.0.txt | 19 ++++ ...mple.com_retract_self_prerelease_v1.9.1-pre.txt | 16 +++ ..._self_pseudo_v0.0.0-20200325131415-0123456789ab | 20 ++++ .../example.com_retract_self_pseudo_v1.0.0-bad.txt | 14 +++ .../mod/example.com_retract_self_pseudo_v1.9.0.txt | 16 +++ src/cmd/go/testdata/script/mod_list_pseudo.txt | 21 +--- src/cmd/go/testdata/script/mod_list_retract.txt | 108 +++++++++++++++++++++ src/cmd/go/testdata/script/mod_retract_replace.txt | 51 ++++++++++ 18 files changed, 455 insertions(+), 52 deletions(-) create mode 100644 src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.0.0.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.9.0.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_self_all_v1.9.0.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.0.0.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.9.0.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.9.1-pre.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v0.0.0-20200325131415-0123456789ab create mode 100644 src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v1.0.0-bad.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v1.9.0.txt create mode 100644 src/cmd/go/testdata/script/mod_list_retract.txt create mode 100644 
src/cmd/go/testdata/script/mod_retract_replace.txt diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index 609ede49cd..98861c8a0d 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -916,6 +916,7 @@ // Dir string // directory holding files for this module, if any // GoMod string // path to go.mod file used when loading this module, if any // GoVersion string // go version used in module +// Retracted string // retraction information, if any (with -retracted or -u) // Error *ModuleError // error loading module // } // @@ -947,14 +948,16 @@ // The -u flag adds information about available upgrades. // When the latest version of a given module is newer than // the current one, list -u sets the Module's Update field -// to information about the newer module. +// to information about the newer module. list -u will also set +// the module's Retracted field if the current version is retracted. // The Module's String method indicates an available upgrade by // formatting the newer version in brackets after the current version. +// If a version is retracted, the string "(retracted)" will follow it. // For example, 'go list -m -u all' might print: // // my/main/module // golang.org/x/text v0.3.0 [v0.4.0] => /tmp/text -// rsc.io/pdf v0.1.1 [v0.1.2] +// rsc.io/pdf v0.1.1 (retracted) [v0.1.2] // // (For tools, 'go list -m -u -json all' may be more convenient to parse.) // @@ -964,6 +967,14 @@ // the default output format to display the module path followed by the // space-separated version list. // +// The -retracted flag causes list to report information about retracted +// module versions. When -retracted is used with -f or -json, the Retracted +// field will be set to a string explaining why the version was retracted. +// The string is taken from comments on the retract directive in the +// module's go.mod file. When -retracted is used with -versions, retracted +// versions are listed together with unretracted versions. 
The -retracted +// flag may be used with or without -m. +// // The arguments to list -m are interpreted as a list of modules, not packages. // The main module is the module containing the current directory. // The active modules are the main module and its dependencies. diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go index e68c39f392..6d81c1cad1 100644 --- a/src/cmd/go/internal/list/list.go +++ b/src/cmd/go/internal/list/list.go @@ -10,6 +10,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "os" "sort" @@ -215,6 +216,7 @@ applied to a Go struct, but now a Module struct: Dir string // directory holding files for this module, if any GoMod string // path to go.mod file used when loading this module, if any GoVersion string // go version used in module + Retracted string // retraction information, if any (with -retracted or -u) Error *ModuleError // error loading module } @@ -246,14 +248,16 @@ the replaced source code.) The -u flag adds information about available upgrades. When the latest version of a given module is newer than the current one, list -u sets the Module's Update field -to information about the newer module. +to information about the newer module. list -u will also set +the module's Retracted field if the current version is retracted. The Module's String method indicates an available upgrade by formatting the newer version in brackets after the current version. +If a version is retracted, the string "(retracted)" will follow it. For example, 'go list -m -u all' might print: my/main/module golang.org/x/text v0.3.0 [v0.4.0] => /tmp/text - rsc.io/pdf v0.1.1 [v0.1.2] + rsc.io/pdf v0.1.1 (retracted) [v0.1.2] (For tools, 'go list -m -u -json all' may be more convenient to parse.) @@ -263,6 +267,14 @@ to semantic versioning, earliest to latest. The flag also changes the default output format to display the module path followed by the space-separated version list. 
+The -retracted flag causes list to report information about retracted +module versions. When -retracted is used with -f or -json, the Retracted +field will be set to a string explaining why the version was retracted. +The string is taken from comments on the retract directive in the +module's go.mod file. When -retracted is used with -versions, retracted +versions are listed together with unretracted versions. The -retracted +flag may be used with or without -m. + The arguments to list -m are interpreted as a list of modules, not packages. The main module is the module containing the current directory. The active modules are the main module and its dependencies. @@ -296,17 +308,18 @@ func init() { } var ( - listCompiled = CmdList.Flag.Bool("compiled", false, "") - listDeps = CmdList.Flag.Bool("deps", false, "") - listE = CmdList.Flag.Bool("e", false, "") - listExport = CmdList.Flag.Bool("export", false, "") - listFmt = CmdList.Flag.String("f", "", "") - listFind = CmdList.Flag.Bool("find", false, "") - listJson = CmdList.Flag.Bool("json", false, "") - listM = CmdList.Flag.Bool("m", false, "") - listU = CmdList.Flag.Bool("u", false, "") - listTest = CmdList.Flag.Bool("test", false, "") - listVersions = CmdList.Flag.Bool("versions", false, "") + listCompiled = CmdList.Flag.Bool("compiled", false, "") + listDeps = CmdList.Flag.Bool("deps", false, "") + listE = CmdList.Flag.Bool("e", false, "") + listExport = CmdList.Flag.Bool("export", false, "") + listFmt = CmdList.Flag.String("f", "", "") + listFind = CmdList.Flag.Bool("find", false, "") + listJson = CmdList.Flag.Bool("json", false, "") + listM = CmdList.Flag.Bool("m", false, "") + listRetracted = CmdList.Flag.Bool("retracted", false, "") + listTest = CmdList.Flag.Bool("test", false, "") + listU = CmdList.Flag.Bool("u", false, "") + listVersions = CmdList.Flag.Bool("versions", false, "") ) var nl = []byte{'\n'} @@ -367,6 +380,16 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { } } + 
modload.Init() + if *listRetracted { + if cfg.BuildMod == "vendor" { + base.Fatalf("go list -retracted cannot be used when vendoring is enabled") + } + if !modload.Enabled() { + base.Fatalf("go list -retracted can only be used in module-aware mode") + } + } + if *listM { // Module mode. if *listCompiled { @@ -416,7 +439,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { modload.LoadBuildList(ctx) - mods := modload.ListModules(ctx, args, *listU, *listVersions) + mods := modload.ListModules(ctx, args, *listU, *listVersions, *listRetracted) if !*listE { for _, m := range mods { if m.Error != nil { @@ -607,6 +630,55 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { } } + // TODO(golang.org/issue/40676): This mechanism could be extended to support + // -u without -m. + if *listRetracted { + // Load retractions for modules that provide packages that will be printed. + // TODO(golang.org/issue/40775): Packages from the same module refer to + // distinct ModulePublic instance. It would be nice if they could all point + // to the same instance. This would require additional global state in + // modload.loaded, so that should be refactored first. For now, we update + // all instances. 
+ modToArg := make(map[*modinfo.ModulePublic]string) + argToMods := make(map[string][]*modinfo.ModulePublic) + var args []string + addModule := func(mod *modinfo.ModulePublic) { + if mod.Version == "" { + return + } + arg := fmt.Sprintf("%s@%s", mod.Path, mod.Version) + if argToMods[arg] == nil { + args = append(args, arg) + } + argToMods[arg] = append(argToMods[arg], mod) + modToArg[mod] = arg + } + for _, p := range pkgs { + if p.Module == nil { + continue + } + addModule(p.Module) + if p.Module.Replace != nil { + addModule(p.Module.Replace) + } + } + + if len(args) > 0 { + listU := false + listVersions := false + rmods := modload.ListModules(ctx, args, listU, listVersions, *listRetracted) + for i, arg := range args { + rmod := rmods[i] + for _, mod := range argToMods[arg] { + mod.Retracted = rmod.Retracted + if rmod.Error != nil && mod.Error == nil { + mod.Error = rmod.Error + } + } + } + } + } + // Record non-identity import mappings in p.ImportMap. for _, p := range pkgs { for i, srcPath := range p.Internal.RawImports { diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go index d4c161fca1..41f294d475 100644 --- a/src/cmd/go/internal/modcmd/download.go +++ b/src/cmd/go/internal/modcmd/download.go @@ -12,8 +12,8 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cfg" - "cmd/go/internal/modload" "cmd/go/internal/modfetch" + "cmd/go/internal/modload" "cmd/go/internal/work" "golang.org/x/mod/module" @@ -136,9 +136,10 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { var mods []*moduleJSON listU := false listVersions := false + listRetractions := false type token struct{} sem := make(chan token, runtime.GOMAXPROCS(0)) - for _, info := range modload.ListModules(ctx, args, listU, listVersions) { + for _, info := range modload.ListModules(ctx, args, listU, listVersions, listRetractions) { if info.Replace != nil { info = info.Replace } diff --git a/src/cmd/go/internal/modcmd/why.go 
b/src/cmd/go/internal/modcmd/why.go index da33fff89e..b16887d318 100644 --- a/src/cmd/go/internal/modcmd/why.go +++ b/src/cmd/go/internal/modcmd/why.go @@ -69,12 +69,13 @@ func runWhy(ctx context.Context, cmd *base.Command, args []string) { if *whyM { listU := false listVersions := false + listRetractions := false for _, arg := range args { if strings.Contains(arg, "@") { base.Fatalf("go mod why: module query not allowed") } } - mods := modload.ListModules(ctx, args, listU, listVersions) + mods := modload.ListModules(ctx, args, listU, listVersions, listRetractions) byModule := make(map[module.Version][]string) for _, path := range loadALL(ctx) { m := modload.PackageModule(path) diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index a29e085875..e9f9a82fab 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -8,6 +8,7 @@ import ( "bytes" "context" "encoding/hex" + "errors" "fmt" "internal/goroot" "os" @@ -58,7 +59,9 @@ func PackageModuleInfo(pkgpath string) *modinfo.ModulePublic { if !ok { return nil } - return moduleInfo(context.TODO(), m, true) + fromBuildList := true + listRetracted := false + return moduleInfo(context.TODO(), m, fromBuildList, listRetracted) } func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic { @@ -66,13 +69,17 @@ func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic { return nil } + listRetracted := false if i := strings.Index(path, "@"); i >= 0 { - return moduleInfo(ctx, module.Version{Path: path[:i], Version: path[i+1:]}, false) + m := module.Version{Path: path[:i], Version: path[i+1:]} + fromBuildList := false + return moduleInfo(ctx, m, fromBuildList, listRetracted) } for _, m := range BuildList() { if m.Path == path { - return moduleInfo(ctx, m, true) + fromBuildList := true + return moduleInfo(ctx, m, fromBuildList, listRetracted) } } @@ -100,11 +107,37 @@ func addUpdate(ctx context.Context, m *modinfo.ModulePublic) { 
} // addVersions fills in m.Versions with the list of known versions. -func addVersions(ctx context.Context, m *modinfo.ModulePublic) { - m.Versions, _ = versions(ctx, m.Path, CheckAllowed) +// Excluded versions will be omitted. If listRetracted is false, retracted +// versions will also be omitted. +func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted bool) { + allowed := CheckAllowed + if listRetracted { + allowed = CheckExclusions + } + m.Versions, _ = versions(ctx, m.Path, allowed) +} + +// addRetraction fills in m.Retracted if the module was retracted by its author. +// m.Error is set if there's an error loading retraction information. +func addRetraction(ctx context.Context, m *modinfo.ModulePublic) { + if m.Version == "" { + return + } + + err := checkRetractions(ctx, module.Version{Path: m.Path, Version: m.Version}) + var rerr *retractedError + if errors.As(err, &rerr) { + if len(rerr.rationale) == 0 { + m.Retracted = []string{"retracted by module author"} + } else { + m.Retracted = rerr.rationale + } + } else if err != nil && m.Error == nil { + m.Error = &modinfo.ModuleError{Err: err.Error()} + } } -func moduleInfo(ctx context.Context, m module.Version, fromBuildList bool) *modinfo.ModulePublic { +func moduleInfo(ctx context.Context, m module.Version, fromBuildList, listRetracted bool) *modinfo.ModulePublic { if m == Target { info := &modinfo.ModulePublic{ Path: m.Path, @@ -152,6 +185,10 @@ func moduleInfo(ctx context.Context, m module.Version, fromBuildList bool) *modi if err == nil { m.Dir = dir } + + if listRetracted { + addRetraction(ctx, m) + } } if m.GoVersion == "" { @@ -205,6 +242,7 @@ func moduleInfo(ctx context.Context, m module.Version, fromBuildList bool) *modi completeFromModCache(info.Replace) info.Dir = info.Replace.Dir info.GoMod = info.Replace.GoMod + info.Retracted = info.Replace.Retracted } info.GoVersion = info.Replace.GoVersion return info diff --git a/src/cmd/go/internal/modload/list.go 
b/src/cmd/go/internal/modload/list.go index a3461eea26..8c7b9a3950 100644 --- a/src/cmd/go/internal/modload/list.go +++ b/src/cmd/go/internal/modload/list.go @@ -20,12 +20,12 @@ import ( "golang.org/x/mod/module" ) -func ListModules(ctx context.Context, args []string, listU, listVersions bool) []*modinfo.ModulePublic { - mods := listModules(ctx, args, listVersions) +func ListModules(ctx context.Context, args []string, listU, listVersions, listRetracted bool) []*modinfo.ModulePublic { + mods := listModules(ctx, args, listVersions, listRetracted) type token struct{} sem := make(chan token, runtime.GOMAXPROCS(0)) - if listU || listVersions { + if listU || listVersions || listRetracted { for _, m := range mods { add := func(m *modinfo.ModulePublic) { sem <- token{} @@ -34,7 +34,10 @@ func ListModules(ctx context.Context, args []string, listU, listVersions bool) [ addUpdate(ctx, m) } if listVersions { - addVersions(ctx, m) + addVersions(ctx, m, listRetracted) + } + if listRetracted || listU { + addRetraction(ctx, m) } <-sem }() @@ -54,10 +57,10 @@ func ListModules(ctx context.Context, args []string, listU, listVersions bool) [ return mods } -func listModules(ctx context.Context, args []string, listVersions bool) []*modinfo.ModulePublic { +func listModules(ctx context.Context, args []string, listVersions, listRetracted bool) []*modinfo.ModulePublic { LoadBuildList(ctx) if len(args) == 0 { - return []*modinfo.ModulePublic{moduleInfo(ctx, buildList[0], true)} + return []*modinfo.ModulePublic{moduleInfo(ctx, buildList[0], true, listRetracted)} } var mods []*modinfo.ModulePublic @@ -84,9 +87,9 @@ func listModules(ctx context.Context, args []string, listVersions bool) []*modin } allowed := CheckAllowed - if IsRevisionQuery(vers) { + if IsRevisionQuery(vers) || listRetracted { // Allow excluded and retracted versions if the user asked for a - // specific revision. + // specific revision or used 'go list -retracted'. 
allowed = nil } info, err := Query(ctx, path, vers, current, allowed) @@ -98,7 +101,8 @@ func listModules(ctx context.Context, args []string, listVersions bool) []*modin }) continue } - mods = append(mods, moduleInfo(ctx, module.Version{Path: path, Version: info.Version}, false)) + mod := moduleInfo(ctx, module.Version{Path: path, Version: info.Version}, false, listRetracted) + mods = append(mods, mod) continue } @@ -123,7 +127,7 @@ func listModules(ctx context.Context, args []string, listVersions bool) []*modin matched = true if !matchedBuildList[i] { matchedBuildList[i] = true - mods = append(mods, moduleInfo(ctx, m, true)) + mods = append(mods, moduleInfo(ctx, m, true, listRetracted)) } } } @@ -135,7 +139,8 @@ func listModules(ctx context.Context, args []string, listVersions bool) []*modin // Instead, resolve the module, even if it isn't an existing dependency. info, err := Query(ctx, arg, "latest", "", nil) if err == nil { - mods = append(mods, moduleInfo(ctx, module.Version{Path: arg, Version: info.Version}, false)) + mod := moduleInfo(ctx, module.Version{Path: arg, Version: info.Version}, false, listRetracted) + mods = append(mods, mod) } else { mods = append(mods, &modinfo.ModulePublic{ Path: arg, diff --git a/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.0.0.txt b/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.0.0.txt new file mode 100644 index 0000000000..2023c7b096 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.0.0.txt @@ -0,0 +1,8 @@ +This version should be retracted, but the go.mod file for the version that would +contain the retraction is not available. 
+-- .mod -- +module example.com/retract/missingmod + +go 1.14 +-- .info -- +{"Version":"v1.0.0"} diff --git a/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.9.0.txt b/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.9.0.txt new file mode 100644 index 0000000000..bba919ec21 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.9.0.txt @@ -0,0 +1,4 @@ +The go.mod file at this version will be loaded to check for retractions +of earlier versions. However, the .mod file is not available. +-- .info -- +{"Version":"v1.9.0"} diff --git a/src/cmd/go/testdata/mod/example.com_retract_self_all_v1.9.0.txt b/src/cmd/go/testdata/mod/example.com_retract_self_all_v1.9.0.txt new file mode 100644 index 0000000000..4dc486b599 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_self_all_v1.9.0.txt @@ -0,0 +1,14 @@ +Module example.com/retract/self/prev is a module that retracts its own +latest version. + +No unretracted versions are available. + +-- .mod -- +module example.com/retract/self/all + +go 1.15 + +retract v1.9.0 // bad + +-- .info -- +{"Version":"v1.9.0"} diff --git a/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.0.0.txt b/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.0.0.txt new file mode 100644 index 0000000000..04c28455d7 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.0.0.txt @@ -0,0 +1,16 @@ +Module example.com/retract/self/prerelease is a module that retracts its own +latest version and all other release version. + +A pre-release version higher than the highest release version is still +available, and that should be matched by @latest. 
+ +-- .mod -- +module example.com/retract/self/prerelease + +go 1.15 + +-- .info -- +{"Version":"v1.0.0"} + +-- p.go -- +package p diff --git a/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.9.0.txt b/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.9.0.txt new file mode 100644 index 0000000000..7c1c047e69 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.9.0.txt @@ -0,0 +1,19 @@ +Module example.com/retract/self/prerelease is a module that retracts its own +latest version and all other release version. + +A pre-release version higher than the highest release version is still +available, and that should be matched by @latest. + +-- .mod -- +module example.com/retract/self/prerelease + +go 1.15 + +retract v1.0.0 // bad +retract v1.9.0 // self + +-- .info -- +{"Version":"v1.9.0"} + +-- p.go -- +package p diff --git a/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.9.1-pre.txt b/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.9.1-pre.txt new file mode 100644 index 0000000000..abf44fdae1 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.9.1-pre.txt @@ -0,0 +1,16 @@ +Module example.com/retract/self/prerelease is a module that retracts its own +latest version and all other release version. + +A pre-release version higher than the highest release version is still +available, and that should be matched by @latest. 
+ +-- .mod -- +module example.com/retract/self/prerelease + +go 1.15 + +-- .info -- +{"Version":"v1.9.1-pre"} + +-- p.go -- +package p diff --git a/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v0.0.0-20200325131415-0123456789ab b/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v0.0.0-20200325131415-0123456789ab new file mode 100644 index 0000000000..f9ab41e88f --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v0.0.0-20200325131415-0123456789ab @@ -0,0 +1,20 @@ +See example.com_retract_self_pseudo_v1.9.0.txt. + +This version is not retracted. It should be returned by the proxy's +@latest endpoint. It should match the @latest version query. + +TODO(golang.org/issue/24031): the proxy and proxy.golang.org both return +the highest release version from the @latest endpoint, even if that +version is retracted, so there is no way for the go command to +discover an unretracted pseudo-version. + +-- .mod -- +module example.com/retract/self/pseudo + +go 1.15 + +-- .info -- +{"Version":"v0.0.0-20200325131415-01234567890ab"} + +-- p.go -- +package p diff --git a/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v1.0.0-bad.txt b/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v1.0.0-bad.txt new file mode 100644 index 0000000000..d47eda0597 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v1.0.0-bad.txt @@ -0,0 +1,14 @@ +See example.com_retract_self_pseudo_v1.9.0.txt. + +This version is retracted. 
+ +-- .mod -- +module example.com/retract/self/pseudo + +go 1.15 + +-- .info -- +{"Version":"v1.0.0-bad"} + +-- p.go -- +package p diff --git a/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v1.9.0.txt b/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v1.9.0.txt new file mode 100644 index 0000000000..db09cc6a5f --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v1.9.0.txt @@ -0,0 +1,16 @@ +Module example.com/retract/self/pseudo is a module that retracts its own +latest version, as well as an earlier version. + +An unretracted pseudo-version is available. + +-- .mod -- +module example.com/retract/self/pseudo + +go 1.15 + +retract v1.0.0-bad // bad +retract v1.9.0 // self + +-- .info -- +{"Version":"v1.9.0"} + diff --git a/src/cmd/go/testdata/script/mod_list_pseudo.txt b/src/cmd/go/testdata/script/mod_list_pseudo.txt index 3a10b3a040..056c093128 100644 --- a/src/cmd/go/testdata/script/mod_list_pseudo.txt +++ b/src/cmd/go/testdata/script/mod_list_pseudo.txt @@ -10,30 +10,25 @@ go mod download github.com/dmitshur-test/modtest5@v0.5.0-alpha go mod download github.com/dmitshur-test/modtest5@v0.5.0-alpha.0.20190619023908-3da23a9deb9e cmp $GOPATH/pkg/mod/cache/download/github.com/dmitshur-test/modtest5/@v/list $WORK/modtest5.list +env GOSUMDB=off # don't verify go.mod files when loading retractions env GOPROXY=file:///$GOPATH/pkg/mod/cache/download env GOPATH=$WORK/gopath2 mkdir $GOPATH -go list -m -json github.com/dmitshur-test/modtest5@latest -cmp stdout $WORK/modtest5.json +go list -m -f '{{.Path}} {{.Version}} {{.Time.Format "2006-01-02"}}' github.com/dmitshur-test/modtest5@latest +stdout '^github.com/dmitshur-test/modtest5 v0.5.0-alpha 2019-06-18$' # If the module proxy contains only pseudo-versions, 'latest' should stat # the version with the most recent timestamp — not the highest semantic # version — and return its metadata. 
env GOPROXY=file:///$WORK/tinyproxy -go list -m -json dmitri.shuralyov.com/test/modtest3@latest -cmp stdout $WORK/modtest3.json +go list -m -f '{{.Path}} {{.Version}} {{.Time.Format "2006-01-02"}}' dmitri.shuralyov.com/test/modtest3@latest +stdout '^dmitri.shuralyov.com/test/modtest3 v0.0.0-20181023043359-a85b471d5412 2018-10-22$' -- $WORK/modtest5.list -- v0.0.0-20190619020302-197a620e0c9a v0.5.0-alpha v0.5.0-alpha.0.20190619023908-3da23a9deb9e --- $WORK/modtest5.json -- -{ - "Path": "github.com/dmitshur-test/modtest5", - "Version": "v0.5.0-alpha", - "Time": "2019-06-18T19:04:46-07:00" -} -- $WORK/tinyproxy/dmitri.shuralyov.com/test/modtest3/@v/list -- v0.1.0-0.20161023043300-000000000000 v0.0.0-20181023043359-a85b471d5412 @@ -42,9 +37,3 @@ v0.0.0-20181023043359-a85b471d5412 "Version": "v0.0.0-20181023043359-a85b471d5412", "Time": "2018-10-22T21:33:59-07:00" } --- $WORK/modtest3.json -- -{ - "Path": "dmitri.shuralyov.com/test/modtest3", - "Version": "v0.0.0-20181023043359-a85b471d5412", - "Time": "2018-10-22T21:33:59-07:00" -} diff --git a/src/cmd/go/testdata/script/mod_list_retract.txt b/src/cmd/go/testdata/script/mod_list_retract.txt new file mode 100644 index 0000000000..4e177b3f54 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_list_retract.txt @@ -0,0 +1,108 @@ +# 'go list -mod=vendor -retracted' reports an error. +go mod vendor +! go list -m -retracted -mod=vendor +stderr '^go list -retracted cannot be used when vendoring is enabled$' +rm vendor + +# 'go list -retracted' reports an error in GOPATH mode. +env GO111MODULE=off +! go list -retracted +stderr '^go list -retracted can only be used in module-aware mode$' +env GO111MODULE= + +# 'go list pkg' does not show retraction. +go list -f '{{with .Module}}{{with .Retracted}}retracted{{end}}{{end}}' example.com/retract +! stdout . + +# 'go list -retracted pkg' shows retraction. 
+go list -retracted -f '{{with .Module}}{{with .Retracted}}retracted{{end}}{{end}}' example.com/retract +stdout retracted + +# 'go list -m' does not show retraction. +go list -m -f '{{with .Retracted}}retracted{{end}}' example.com/retract +! stdout . + +# 'go list -m -retracted' shows retraction. +go list -m -retracted -f '{{with .Retracted}}retracted{{end}}' example.com/retract + +# 'go list -m mod@version' does not show retraction. +go list -m -f '{{with .Retracted}}retracted{{end}}' example.com/retract@v1.0.0-unused +! stdout . + +# 'go list -m -retracted mod@version' shows an error if the go.mod that should +# contain the retractions is not available. +! go list -m -retracted example.com/retract/missingmod@v1.0.0 +stderr '^go list -m: loading module retractions: example.com/retract/missingmod@v1.9.0:.*404 Not Found$' +go list -e -m -retracted -f '{{.Error.Err}}' example.com/retract/missingmod@v1.0.0 +stdout '^loading module retractions: example.com/retract/missingmod@v1.9.0:.*404 Not Found$' + +# 'go list -m -retracted mod@version' shows retractions. +go list -m -retracted example.com/retract@v1.0.0-unused +stdout '^example.com/retract v1.0.0-unused \(retracted\)$' +go list -m -retracted -f '{{with .Retracted}}retracted{{end}}' example.com/retract@v1.0.0-unused +stdout retracted + +# 'go list -m mod@latest' selects a previous release version, not self-retracted latest. +go list -m -f '{{.Version}}{{with .Retracted}} retracted{{end}}' example.com/retract/self/prev@latest +stdout '^v1.1.0$' + +# 'go list -m -retracted mod@latest' selects the self-retracted latest version. +go list -m -retracted -f '{{.Version}}{{with .Retracted}} retracted{{end}}' example.com/retract/self/prev@latest +stdout '^v1.9.0 retracted$' + +# 'go list -m mod@latest' selects a pre-release version if all release versions are retracted. 
+go list -m -f '{{.Version}}{{with .Retracted}} retracted{{end}}' example.com/retract/self/prerelease@latest +stdout '^v1.9.1-pre$' + +# 'go list -m -retracted mod@latest' selects the self-retracted latest version. +go list -m -retracted -f '{{.Version}}{{with .Retracted}} retracted{{end}}' example.com/retract/self/prerelease@latest +stdout '^v1.9.0 retracted$' + +# 'go list -m mod@latest' selects a pseudo-version if all versions are retracted. +# TODO(golang.org/issue/24031): the proxy does not expose the pseudo-version, +# even if all release versions are retracted. +go list -m -e -f '{{.Error.Err}}' example.com/retract/self/pseudo@latest +stdout '^module example.com/retract/self/pseudo: no matching versions for query "latest"$' + +# 'go list -m mod@latest' reports an error if all versions are retracted. +go list -m -e -f '{{.Error.Err}}' example.com/retract/self/all@latest +stdout '^module example.com/retract/self/all: no matching versions for query "latest"$' + +# 'go list -m mod@ Date: Wed, 15 Apr 2020 14:52:38 -0400 Subject: cmd/go: improve 'go get' handling of retracted versions 'go get' will now warn about retracted versions in the build list, after updating go.mod. The warning instructs users to run 'go get module@latest' to upgrade or downgrade away from the retracted version. 'go get' now allows users to explicitly request a specific retracted version. For #24031 Change-Id: I15fda918dc84258fb35b615dcd33b0f499481bd7 Reviewed-on: https://go-review.googlesource.com/c/go/+/228383 Reviewed-by: Michael Matloob Reviewed-by: Bryan C. 
Mills --- src/cmd/go/internal/modget/get.go | 58 +++++++++++++++- .../example.com_retract_rationale_v1.0.0-block.txt | 6 ++ ...m_retract_rationale_v1.0.0-blockwithcomment.txt | 6 ++ .../example.com_retract_rationale_v1.0.0-empty.txt | 8 +++ .../example.com_retract_rationale_v1.0.0-long.txt | 8 +++ ...ple.com_retract_rationale_v1.0.0-multiline1.txt | 8 +++ ...ple.com_retract_rationale_v1.0.0-multiline2.txt | 8 +++ .../example.com_retract_rationale_v1.0.0-order.txt | 6 ++ ...le.com_retract_rationale_v1.0.0-unprintable.txt | 8 +++ .../example.com_retract_rationale_v1.0.1-order.txt | 6 ++ .../mod/example.com_retract_rationale_v1.9.0.txt | 48 +++++++++++++ src/cmd/go/testdata/script/mod_get_retract.txt | 49 ++++++++++++++ .../go/testdata/script/mod_retract_rationale.txt | 79 ++++++++++++++++++++++ 13 files changed, 296 insertions(+), 2 deletions(-) create mode 100644 src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-block.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-blockwithcomment.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-empty.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-long.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-multiline1.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-multiline2.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-order.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-unprintable.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.1-order.txt create mode 100644 src/cmd/go/testdata/mod/example.com_retract_rationale_v1.9.0.txt create mode 100644 src/cmd/go/testdata/script/mod_get_retract.txt create mode 100644 src/cmd/go/testdata/script/mod_retract_rationale.txt diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go 
index 06d59d9e0d..4ca7f5b529 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -702,6 +702,15 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { // Everything succeeded. Update go.mod. modload.AllowWriteGoMod() modload.WriteGoMod() + modload.DisallowWriteGoMod() + + // Report warnings if any retracted versions are in the build list. + // This must be done after writing go.mod to avoid spurious '// indirect' + // comments. These functions read and write global state. + // TODO(golang.org/issue/40775): ListModules resets modload.loader, which + // contains information about direct dependencies that WriteGoMod uses. + // Refactor to avoid these kinds of global side effects. + reportRetractions(ctx) // If -d was specified, we're done after the module work. // We've already downloaded modules by loading packages above. @@ -804,6 +813,14 @@ func getQuery(ctx context.Context, path, vers string, prevM module.Version, forc base.Fatalf("go get: internal error: prevM may be set if and only if forceModulePath is set") } + // If vers is a query like "latest", we should ignore retracted and excluded + // versions. If vers refers to a specific version or commit like "v1.0.0" + // or "master", we should only ignore excluded versions. + allowed := modload.CheckAllowed + if modload.IsRevisionQuery(vers) { + allowed = modload.CheckExclusions + } + // If the query must be a module path, try only that module path. 
if forceModulePath { if path == modload.Target.Path { @@ -812,7 +829,7 @@ func getQuery(ctx context.Context, path, vers string, prevM module.Version, forc } } - info, err := modload.Query(ctx, path, vers, prevM.Version, modload.CheckAllowed) + info, err := modload.Query(ctx, path, vers, prevM.Version, allowed) if err == nil { if info.Version != vers && info.Version != prevM.Version { logOncef("go: %s %s => %s", path, vers, info.Version) @@ -838,7 +855,7 @@ func getQuery(ctx context.Context, path, vers string, prevM module.Version, forc // If it turns out to only exist as a module, we can detect the resulting // PackageNotInModuleError and avoid a second round-trip through (potentially) // all of the configured proxies. - results, err := modload.QueryPattern(ctx, path, vers, modload.CheckAllowed) + results, err := modload.QueryPattern(ctx, path, vers, allowed) if err != nil { // If the path doesn't contain a wildcard, check whether it was actually a // module path instead. If so, return that. @@ -1050,6 +1067,43 @@ func (r *lostUpgradeReqs) Required(mod module.Version) ([]module.Version, error) return r.Reqs.Required(mod) } +// reportRetractions prints warnings if any modules in the build list are +// retracted. +func reportRetractions(ctx context.Context) { + // Query for retractions of modules in the build list. + // Use modload.ListModules, since that provides information in the same format + // as 'go list -m'. Don't query for "all", since that's not allowed outside a + // module. 
+ buildList := modload.BuildList() + args := make([]string, 0, len(buildList)) + for _, m := range buildList { + if m.Version == "" { + // main module or dummy target module + continue + } + args = append(args, m.Path+"@"+m.Version) + } + listU := false + listVersions := false + listRetractions := true + mods := modload.ListModules(ctx, args, listU, listVersions, listRetractions) + retractPath := "" + for _, mod := range mods { + if len(mod.Retracted) > 0 { + if retractPath == "" { + retractPath = mod.Path + } else { + retractPath = "" + } + rationale := modload.ShortRetractionRationale(mod.Retracted[0]) + logOncef("go: warning: %s@%s is retracted: %s", mod.Path, mod.Version, rationale) + } + } + if modload.HasModRoot() && retractPath != "" { + logOncef("go: run 'go get %s@latest' to switch to the latest unretracted version", retractPath) + } +} + var loggedLines sync.Map func logOncef(format string, args ...interface{}) { diff --git a/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-block.txt b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-block.txt new file mode 100644 index 0000000000..c4a53e1d80 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-block.txt @@ -0,0 +1,6 @@ +-- .mod -- +module example.com/retract/rationale + +go 1.14 +-- .info -- +{"Version":"v1.0.0-block"} diff --git a/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-blockwithcomment.txt b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-blockwithcomment.txt new file mode 100644 index 0000000000..92573b62e3 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-blockwithcomment.txt @@ -0,0 +1,6 @@ +-- .mod -- +module example.com/retract/rationale + +go 1.14 +-- .info -- +{"Version":"v1.0.0-blockwithcomment"} diff --git a/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-empty.txt b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-empty.txt new file mode 100644 index 
0000000000..1f0894aa8b --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-empty.txt @@ -0,0 +1,8 @@ +-- .mod -- +module example.com/retract/rationale + +go 1.14 +-- .info -- +{"Version":"v1.0.0-empty"} +-- empty.go -- +package empty diff --git a/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-long.txt b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-long.txt new file mode 100644 index 0000000000..1b5e753428 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-long.txt @@ -0,0 +1,8 @@ +-- .mod -- +module example.com/retract/rationale + +go 1.14 +-- .info -- +{"Version":"v1.0.0-long"} +-- empty.go -- +package empty diff --git a/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-multiline1.txt b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-multiline1.txt new file mode 100644 index 0000000000..b1ffe27225 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-multiline1.txt @@ -0,0 +1,8 @@ +-- .mod -- +module example.com/retract/rationale + +go 1.14 +-- .info -- +{"Version":"v1.0.0-multiline1"} +-- empty.go -- +package empty diff --git a/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-multiline2.txt b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-multiline2.txt new file mode 100644 index 0000000000..72f80b3254 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-multiline2.txt @@ -0,0 +1,8 @@ +-- .mod -- +module example.com/retract/rationale + +go 1.14 +-- .info -- +{"Version":"v1.0.0-multiline2"} +-- empty.go -- +package empty diff --git a/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-order.txt b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-order.txt new file mode 100644 index 0000000000..1b0450462b --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-order.txt @@ -0,0 +1,6 @@ +-- .mod -- +module 
example.com/retract/rationale + +go 1.14 +-- .info -- +{"Version":"v1.0.0-order"} diff --git a/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-unprintable.txt b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-unprintable.txt new file mode 100644 index 0000000000..949612431e --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-unprintable.txt @@ -0,0 +1,8 @@ +-- .mod -- +module example.com/retract/rationale + +go 1.14 +-- .info -- +{"Version":"v1.0.0-unprintable"} +-- empty.go -- +package empty diff --git a/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.1-order.txt b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.1-order.txt new file mode 100644 index 0000000000..3be7d5b56e --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.1-order.txt @@ -0,0 +1,6 @@ +-- .mod -- +module example.com/retract/rationale + +go 1.14 +-- .info -- +{"Version":"v1.0.1-order"} diff --git a/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.9.0.txt b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.9.0.txt new file mode 100644 index 0000000000..6975d4ebd4 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.9.0.txt @@ -0,0 +1,48 @@ +Module example.com/retract/description retracts all versions of itself. +The rationale comments have various problems. 
+ +-- .mod -- +module example.com/retract/rationale + +go 1.14 + +retract ( + v1.0.0-empty + + // short description + // more + // + // detail + v1.0.0-multiline1 // suffix + // after not included +) + +// short description +// more +// +// detail +retract v1.0.0-multiline2 // suffix + +// loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong +retract v1.0.0-long + +// Ends with a BEL character. Beep! +retract v1.0.0-unprintable + +// block comment +retract ( + v1.0.0-block + + // inner comment + v1.0.0-blockwithcomment +) + +retract ( + [v1.0.0-order, v1.0.0-order] // degenerate range + v1.0.0-order // single version + + v1.0.1-order // single version + [v1.0.1-order, v1.0.1-order] // degenerate range +) +-- .info -- +{"Version":"v1.9.0"} diff --git a/src/cmd/go/testdata/script/mod_get_retract.txt b/src/cmd/go/testdata/script/mod_get_retract.txt new file mode 100644 index 0000000000..da6c25523f --- /dev/null +++ b/src/cmd/go/testdata/script/mod_get_retract.txt @@ -0,0 +1,49 @@ +# 'go get pkg' should not upgrade to a retracted version. +cp go.mod.orig go.mod +go mod edit -require example.com/retract/self/prev@v1.1.0 +go get -d example.com/retract/self/prev +go list -m example.com/retract/self/prev +stdout '^example.com/retract/self/prev v1.1.0$' + +# 'go get pkg' should not downgrade from a retracted version when no higher +# version is available. 
+cp go.mod.orig go.mod +go mod edit -require example.com/retract/self/prev@v1.9.0 +go get -d example.com/retract/self/prev +stderr '^go: warning: example.com/retract/self/prev@v1.9.0 is retracted: self$' +go list -m example.com/retract/self/prev +stdout '^example.com/retract/self/prev v1.9.0$' + +# 'go get pkg@latest' should downgrade from a retracted version. +cp go.mod.orig go.mod +go mod edit -require example.com/retract/self/prev@v1.9.0 +go get -d example.com/retract/self/prev@latest +go list -m example.com/retract/self/prev +stdout '^example.com/retract/self/prev v1.1.0$' + +# 'go get pkg@version' should update to a specific version, even if that +# version is retracted. +cp go.mod.orig go.mod +go get -d example.com/retract@v1.0.0-bad +stderr '^go: warning: example.com/retract@v1.0.0-bad is retracted: bad$' +go list -m example.com/retract +stdout '^example.com/retract v1.0.0-bad$' + +# 'go get -u' should not downgrade from a retracted version when no higher +# version is available. +cp go.mod.orig go.mod +go mod edit -require example.com/retract/self/prev@v1.9.0 +go get -d -u . +stderr '^go: warning: example.com/retract/self/prev@v1.9.0 is retracted: self$' +go list -m example.com/retract/self/prev +stdout '^example.com/retract/self/prev v1.9.0$' + +-- go.mod.orig -- +module example.com/use + +go 1.15 + +-- use.go -- +package use + +import _ "example.com/retract/self/prev" diff --git a/src/cmd/go/testdata/script/mod_retract_rationale.txt b/src/cmd/go/testdata/script/mod_retract_rationale.txt new file mode 100644 index 0000000000..584c3a3849 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_retract_rationale.txt @@ -0,0 +1,79 @@ +# When there is no rationale, 'go get' should print a hard-coded message. +go get -d example.com/retract/rationale@v1.0.0-empty +stderr '^go: warning: example.com/retract/rationale@v1.0.0-empty is retracted: retracted by module author$' + +# 'go list' should print the same hard-coded message. 
+go list -m -retracted -f '{{.Retracted}}' example.com/retract/rationale +stdout '^\[retracted by module author\]$' + + +# When there is a multi-line message, 'go get' should print the first line. +go get -d example.com/retract/rationale@v1.0.0-multiline1 +stderr '^go: warning: example.com/retract/rationale@v1.0.0-multiline1 is retracted: short description$' +! stderr 'detail' + +# 'go list' should show the full message. +go list -m -retracted -f '{{.Retracted}}' example.com/retract/rationale +cmp stdout multiline + +# 'go get' output should be the same whether the retraction appears at top-level +# or in a block. +go get -d example.com/retract/rationale@v1.0.0-multiline2 +stderr '^go: warning: example.com/retract/rationale@v1.0.0-multiline2 is retracted: short description$' +! stderr 'detail' + +# Same for 'go list'. +go list -m -retracted -f '{{.Retracted}}' example.com/retract/rationale +cmp stdout multiline + + +# 'go get' should omit long messages. +go get -d example.com/retract/rationale@v1.0.0-long +stderr '^go: warning: example.com/retract/rationale@v1.0.0-long is retracted: \(rationale omitted: too long\)' + +# 'go list' should show the full message. +go list -m -retracted -f '{{.Retracted}}' example.com/retract/rationale +stdout '^\[lo{500}ng\]$' + + +# 'go get' should omit messages with unprintable characters. +go get -d example.com/retract/rationale@v1.0.0-unprintable +stderr '^go: warning: example.com/retract/rationale@v1.0.0-unprintable is retracted: \(rationale omitted: contains non-printable characters\)' + +# 'go list' should show the full message. +go list -m -retracted -f '{{.Retracted}}' example.com/retract/rationale +stdout '^\[Ends with a BEL character. Beep!\x07\]$' + + +# When there is a comment on a block, but not on individual retractions within +# the block, the rationale should come from the block comment. 
+go list -m -retracted -f '{{.Retracted}}' example.com/retract/rationale@v1.0.0-block +stdout '^\[block comment\]$' +go list -m -retracted -f '{{.Retracted}}' example.com/retract/rationale@v1.0.0-blockwithcomment +stdout '^\[inner comment\]$' + + +# When a version is covered by multiple retractions, all retractions should +# be reported in the order they appear in the file. +go list -m -retracted -f '{{range .Retracted}}{{.}},{{end}}' example.com/retract/rationale@v1.0.0-order +stdout '^degenerate range,single version,$' +go list -m -retracted -f '{{range .Retracted}}{{.}},{{end}}' example.com/retract/rationale@v1.0.1-order +stdout '^single version,degenerate range,$' + +# 'go get' will only report the first retraction to avoid being too verbose. +go get -d example.com/retract/rationale@v1.0.0-order +stderr '^go: warning: example.com/retract/rationale@v1.0.0-order is retracted: degenerate range$' +go get -d example.com/retract/rationale@v1.0.1-order +stderr '^go: warning: example.com/retract/rationale@v1.0.1-order is retracted: single version$' + +-- go.mod -- +module m + +go 1.14 + +-- multiline -- +[short description +more + +detail +suffix] -- cgit v1.2.3-54-g00ecf From 008048c5f4613c6b864c2e69ce795df9fa227e63 Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Wed, 15 Apr 2020 15:11:51 -0400 Subject: doc: add module retraction to release notes For #24031 Change-Id: I9bd0905e9aacee4bec3463b7d91f6f0929744752 Reviewed-on: https://go-review.googlesource.com/c/go/+/228384 Reviewed-by: Michael Matloob Reviewed-by: Bryan C. Mills --- doc/go1.16.html | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/go1.16.html b/doc/go1.16.html index 09e974d07c..b11af7fd99 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -43,6 +43,15 @@ Do not send CLs removing the interior tags from such phrases.

Go command

+

+ retract directives may now be used in a go.mod file + to indicate that certain published versions of the module should not be used + by other modules. A module author may retract a version after a severe problem + is discovered or if the version was published unintentionally.
+ TODO: write and link to section in golang.org/ref/mod
+ TODO: write and link to tutorial or blog post +

+

TODO

-- cgit v1.2.3-54-g00ecf From 694fc8e76bec99b67bbd0302852f6a1c1dafe7ca Mon Sep 17 00:00:00 2001 From: Michael Matloob Date: Mon, 16 Dec 2019 17:18:06 -0500 Subject: cmd/go/internal/modload: reject some bad module paths This change rejects module paths that don't conform to the new checkModulePathLax function, when loading a go.mod file. The change uses the checkModulePathLax function instead of CheckPath because there are still many users who are using unpublished modules with unpublishable paths, and we don't want to break them all. Next, before this change, when go mod init is run in GOPATH, it would try to use the location of the directory within GOPATH to infer the module path. After this change, it will only use that inferred module path if it conforms to module.CheckPath. Change-Id: Idb36d1655cc76aae82671e87ba634609503ad1a2 Reviewed-on: https://go-review.googlesource.com/c/go/+/211597 Run-TryBot: Michael Matloob TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. Mills --- src/cmd/go/internal/modload/init.go | 72 +++++++++++++++++++++++-- src/cmd/go/testdata/script/mod_invalid_path.txt | 30 ++++++++++- 2 files changed, 98 insertions(+), 4 deletions(-) diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index af23647cd4..6f93b88eab 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -379,6 +379,10 @@ func InitMod(ctx context.Context) { legacyModInit() } + if err := checkModulePathLax(f.Module.Mod.Path); err != nil { + base.Fatalf("go: %v", err) + } + setDefaultBuildMod() modFileToBuildList() if cfg.BuildMod == "vendor" { @@ -387,6 +391,49 @@ func InitMod(ctx context.Context) { } } +// checkModulePathLax checks that the path meets some minimum requirements +// to avoid confusing users or the module cache. 
The requirements are weaker +// than those of module.CheckPath to allow room for weakening module path +// requirements in the future, but strong enough to help users avoid significant +// problems. +func checkModulePathLax(p string) error { + // TODO(matloob): Replace calls of this function in this CL with calls + // to module.CheckImportPath once it's been laxened, if it becomes laxened. + // See golang.org/issue/29101 for a discussion about whether to make CheckImportPath + // more lax or more strict. + + errorf := func(format string, args ...interface{}) error { + return fmt.Errorf("invalid module path %q: %s", p, fmt.Sprintf(format, args...)) + } + + // Disallow shell characters " ' * < > ? ` | to avoid triggering bugs + // with file systems and subcommands. Disallow file path separators : and \ + // because path separators other than / will confuse the module cache. + // See fileNameOK in golang.org/x/mod/module/module.go. + shellChars := "`" + `\"'*<>?|` + fsChars := `\:` + if i := strings.IndexAny(p, shellChars); i >= 0 { + return errorf("contains disallowed shell character %q", p[i]) + } + if i := strings.IndexAny(p, fsChars); i >= 0 { + return errorf("contains disallowed path separator character %q", p[i]) + } + + // Ensure path.IsAbs and build.IsLocalImport are false, and that the path is + // invariant under path.Clean, also to avoid confusing the module cache. + if path.IsAbs(p) { + return errorf("is an absolute path") + } + if build.IsLocalImport(p) { + return errorf("is a local import path") + } + if path.Clean(p) != p { + return errorf("is not clean") + } + + return nil +} + // fixVersion returns a modfile.VersionFixer implemented using the Query function. // // It resolves commit hashes and branch names to versions, @@ -678,16 +725,35 @@ func findModulePath(dir string) (string, error) { } // Look for path in GOPATH. 
+ var badPathErr error for _, gpdir := range filepath.SplitList(cfg.BuildContext.GOPATH) { if gpdir == "" { continue } if rel := search.InDir(dir, filepath.Join(gpdir, "src")); rel != "" && rel != "." { - return filepath.ToSlash(rel), nil + path := filepath.ToSlash(rel) + // TODO(matloob): replace this with module.CheckImportPath + // once it's been laxened. + // Only checkModulePathLax here. There are some unpublishable + // module names that are compatible with checkModulePathLax + // but they already work in GOPATH so don't break users + // trying to do a build with modules. gorelease will alert users + // publishing their modules to fix their paths. + if err := checkModulePathLax(path); err != nil { + badPathErr = err + break + } + return path, nil } } - msg := `cannot determine module path for source directory %s (outside GOPATH, module path must be specified) + reason := "outside GOPATH, module path must be specified" + if badPathErr != nil { + // return a different error message if the module was in GOPATH, but + // the module path determined above would be an invalid path. + reason = fmt.Sprintf("bad module path inferred from directory in GOPATH: %v", badPathErr) + } + msg := `cannot determine module path for source directory %s (%s) Example usage: 'go mod init example.com/m' to initialize a v0 or v1 module @@ -695,7 +761,7 @@ Example usage: Run 'go help mod init' for more information. ` - return "", fmt.Errorf(msg, dir) + return "", fmt.Errorf(msg, dir, reason) } var ( diff --git a/src/cmd/go/testdata/script/mod_invalid_path.txt b/src/cmd/go/testdata/script/mod_invalid_path.txt index 1ab418a075..05a5133571 100644 --- a/src/cmd/go/testdata/script/mod_invalid_path.txt +++ b/src/cmd/go/testdata/script/mod_invalid_path.txt @@ -1,12 +1,40 @@ -# Test that mod files with missing paths produce an error. +# Test that mod files with invalid or missing paths produce an error. # Test that go list fails on a go.mod with no module declaration. cd $WORK/gopath/src/mod ! 
go list . stderr '^go: no module declaration in go.mod.\n\tRun ''go mod edit -module=example.com/mod'' to specify the module path.$' +# Test that go mod init in GOPATH doesn't add a module declaration +# with a path that can't possibly be a module path, because +# it isn't even a valid import path. +# The single quote and backtick are the only characters we don't allow +# in checkModulePathLax, but is allowed in a Windows file name. +# TODO(matloob): choose a different character once +# module.CheckImportPath is laxened and replaces +# checkModulePathLax. +cd $WORK/'gopath/src/m''d' +! go mod init +stderr 'cannot determine module path' + +# Test that a go.mod file is rejected when its module declaration has a path that can't +# possibly be a module path, because it isn't even a valid import path +cd $WORK/gopath/src/badname +! go list . +stderr 'invalid module path' + -- mod/go.mod -- -- mod/foo.go -- package foo +-- m'd/foo.go -- +package mad + +-- badname/go.mod -- + +module .\. + +-- badname/foo.go -- +package badname + -- cgit v1.2.3-54-g00ecf From e9ad52e46dee4b4f9c73ff44f44e1e234815800f Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Mon, 24 Aug 2020 21:42:20 -0700 Subject: net: export ErrClosed This permits programs to reliably detect whether they are using a closed network connection. Fixes #4373 Change-Id: Ib4ce8cc82bbb134c4689f0ebc8b9b11bb8b32a22 Reviewed-on: https://go-review.googlesource.com/c/go/+/250357 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Tobias Klauser Reviewed-by: Russ Cox --- doc/go1.16.html | 12 ++++++++++++ src/net/error_test.go | 17 +++++++++++------ src/net/net.go | 12 ++++++++++++ 3 files changed, 35 insertions(+), 6 deletions(-) diff --git a/doc/go1.16.html b/doc/go1.16.html index b11af7fd99..c82b3b9276 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -99,6 +99,18 @@ Do not send CLs removing the interior tags from such phrases. TODO

+

net

+ +

+ The case of I/O on a closed network connection, or I/O on a network + connection that is closed before any of the I/O completes, can now + be detected using the new ErrClosed error. + A typical use would be errors.Is(err, net.ErrClosed). + In earlier releases the only way to reliably detect this case was to + match the string returned by the Error method + with "use of closed network connection". +

+

unicode

diff --git a/src/net/error_test.go b/src/net/error_test.go index 8d4a7ffb3d..62dfb9c15d 100644 --- a/src/net/error_test.go +++ b/src/net/error_test.go @@ -8,6 +8,7 @@ package net import ( "context" + "errors" "fmt" "internal/poll" "io" @@ -101,7 +102,7 @@ second: goto third } switch nestedErr { - case errCanceled, poll.ErrNetClosing, errMissingAddress, errNoSuitableAddress, + case errCanceled, ErrClosed, errMissingAddress, errNoSuitableAddress, context.DeadlineExceeded, context.Canceled: return nil } @@ -436,7 +437,7 @@ second: goto third } switch nestedErr { - case poll.ErrNetClosing, errTimeout, poll.ErrNotPollable, os.ErrDeadlineExceeded: + case ErrClosed, errTimeout, poll.ErrNotPollable, os.ErrDeadlineExceeded: return nil } return fmt.Errorf("unexpected type on 2nd nested level: %T", nestedErr) @@ -478,7 +479,7 @@ second: goto third } switch nestedErr { - case errCanceled, poll.ErrNetClosing, errMissingAddress, errTimeout, os.ErrDeadlineExceeded, ErrWriteToConnected, io.ErrUnexpectedEOF: + case errCanceled, ErrClosed, errMissingAddress, errTimeout, os.ErrDeadlineExceeded, ErrWriteToConnected, io.ErrUnexpectedEOF: return nil } return fmt.Errorf("unexpected type on 2nd nested level: %T", nestedErr) @@ -508,6 +509,10 @@ func parseCloseError(nestedErr error, isShutdown bool) error { return fmt.Errorf("error string %q does not contain expected string %q", nestedErr, want) } + if !isShutdown && !errors.Is(nestedErr, ErrClosed) { + return fmt.Errorf("errors.Is(%v, errClosed) returns false, want true", nestedErr) + } + switch err := nestedErr.(type) { case *OpError: if err := err.isValid(); err != nil { @@ -531,7 +536,7 @@ second: goto third } switch nestedErr { - case poll.ErrNetClosing: + case ErrClosed: return nil } return fmt.Errorf("unexpected type on 2nd nested level: %T", nestedErr) @@ -627,7 +632,7 @@ second: goto third } switch nestedErr { - case poll.ErrNetClosing, errTimeout, poll.ErrNotPollable, os.ErrDeadlineExceeded: + case ErrClosed, errTimeout, 
poll.ErrNotPollable, os.ErrDeadlineExceeded: return nil } return fmt.Errorf("unexpected type on 2nd nested level: %T", nestedErr) @@ -706,7 +711,7 @@ second: goto third } switch nestedErr { - case poll.ErrNetClosing: + case ErrClosed: return nil } return fmt.Errorf("unexpected type on 2nd nested level: %T", nestedErr) diff --git a/src/net/net.go b/src/net/net.go index 2e61a7c02e..4b4ed129cc 100644 --- a/src/net/net.go +++ b/src/net/net.go @@ -81,6 +81,7 @@ package net import ( "context" "errors" + "internal/poll" "io" "os" "sync" @@ -632,6 +633,17 @@ func (e *DNSError) Timeout() bool { return e.IsTimeout } // error and return a DNSError for which Temporary returns false. func (e *DNSError) Temporary() bool { return e.IsTimeout || e.IsTemporary } +// errClosed exists just so that the docs for ErrClosed don't mention +// the internal package poll. +var errClosed = poll.ErrNetClosing + +// ErrClosed is the error returned by an I/O call on a network +// connection that has already been closed, or that is closed by +// another goroutine before the I/O is completed. This may be wrapped +// in another error, and should normally be tested using +// errors.Is(err, net.ErrClosed). +var ErrClosed = errClosed + type writerOnly struct { io.Writer } -- cgit v1.2.3-54-g00ecf From 47b450997778163dfed6f58cae379d928fc37687 Mon Sep 17 00:00:00 2001 From: ShihCheng Tu Date: Mon, 15 Jun 2020 00:07:10 +0800 Subject: doc/go1.14: document json.Umarshal map key support of TextUnmarshaler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Document that json.Unmarshal supports map keys whose underlying types implement encoding.TextUnmarshaler. 
Fixes #38801 Change-Id: Icb9414e9067517531ba0da910bd4a2bb3daace65 Reviewed-on: https://go-review.googlesource.com/c/go/+/237857 Reviewed-by: Daniel Martí Run-TryBot: Daniel Martí TryBot-Result: Gobot Gobot --- doc/go1.14.html | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/go1.14.html b/doc/go1.14.html index 35a9f3c2f3..410e0cbf7c 100644 --- a/doc/go1.14.html +++ b/doc/go1.14.html @@ -609,6 +609,12 @@ Do not send CLs removing the interior tags from such phrases. If a program needs to accept invalid numbers like the empty string, consider wrapping the type with Unmarshaler.

+ +

+ Unmarshal + can now support map keys with string underlying type which implement + encoding.TextUnmarshaler. +

-- cgit v1.2.3-54-g00ecf From 2aba467933b3252c758b65146d2ea5a5cff196da Mon Sep 17 00:00:00 2001 From: "Paul E. Murphy" Date: Thu, 20 Aug 2020 15:06:06 -0500 Subject: cmd/compile: remove unused carry related ssa ops in ppc64 The intermediate SSA opcodes* are no longer generated during the lowering pass. The shifting rules have been improved using ISEL. Therefore, we can remove them and the rules which expand them. * The removed opcodes are: LoweredAdd64Carry ADDconstForCarry MaskIfNotCarry FlagCarryClear FlagCarrySet Change-Id: I1ebe2726ed988f29ed4800c8f57b428f7a214cd0 Reviewed-on: https://go-review.googlesource.com/c/go/+/249462 Run-TryBot: Lynn Boger TryBot-Result: Gobot Gobot Reviewed-by: Lynn Boger --- src/cmd/compile/internal/ppc64/ssa.go | 19 +------ src/cmd/compile/internal/ssa/gen/PPC64.rules | 9 +-- src/cmd/compile/internal/ssa/gen/PPC64Ops.go | 12 ++-- src/cmd/compile/internal/ssa/opGen.go | 36 ------------ src/cmd/compile/internal/ssa/rewritePPC64.go | 82 ---------------------------- 5 files changed, 6 insertions(+), 152 deletions(-) diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index 4d2ad48135..9c4c01e935 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -629,23 +629,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = r - case ssa.OpPPC64MaskIfNotCarry: - r := v.Reg() - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_REG - p.From.Reg = ppc64.REGZERO - p.To.Type = obj.TYPE_REG - p.To.Reg = r - - case ssa.OpPPC64ADDconstForCarry: - r1 := v.Args[0].Reg() - p := s.Prog(v.Op.Asm()) - p.Reg = r1 - p.From.Type = obj.TYPE_CONST - p.From.Offset = v.AuxInt - p.To.Type = obj.TYPE_REG - p.To.Reg = ppc64.REGTMP // Ignored; this is for the carry effect. 
- case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FFLOOR, ssa.OpPPC64FTRUNC, ssa.OpPPC64FCEIL, ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FCFIDS, ssa.OpPPC64FRSP, ssa.OpPPC64CNTLZD, ssa.OpPPC64CNTLZW, ssa.OpPPC64POPCNTD, ssa.OpPPC64POPCNTW, ssa.OpPPC64POPCNTB, ssa.OpPPC64MFVSRD, ssa.OpPPC64MTVSRD, ssa.OpPPC64FABS, ssa.OpPPC64FNABS, @@ -1802,7 +1785,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { v.Fatalf("Pseudo-op should not make it to codegen: %s ###\n", v.LongString()) case ssa.OpPPC64InvertFlags: v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) - case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT, ssa.OpPPC64FlagCarrySet, ssa.OpPPC64FlagCarryClear: + case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT: v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString()) case ssa.OpClobber: // TODO: implement for clobberdead experiment. Nop is ok for now. diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index 14942d50f9..509cfe1c4f 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -276,18 +276,11 @@ (Rsh8Ux8 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8])))) (Lsh8x8 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8])))) -// Cleaning up shift ops when input is masked -(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && int64(c) + d < 0 => (MOVDconst [-1]) +// Cleaning up shift ops (ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPU (ANDconst [d] y) (MOVDconst [c]))) && c >= d => (ANDconst [d] y) (ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPUconst [c] (ANDconst [d] y))) && c >= d => (ANDconst [d] y) (ORN x (MOVDconst [-1])) => x -(ADDconstForCarry [c] (MOVDconst [d])) && c < 0 && (c < 0 || int64(c) + d >= 0) => 
(FlagCarryClear) -(ADDconstForCarry [c] (MOVDconst [d])) && c < 0 && c >= 0 && int64(c) + d < 0 => (FlagCarrySet) - -(MaskIfNotCarry (FlagCarrySet)) => (MOVDconst [0]) -(MaskIfNotCarry (FlagCarryClear)) => (MOVDconst [-1]) - (S(RAD|RD|LD) x (MOVDconst [c])) => (S(RAD|RD|LD)const [c&63 | (c>>6&1*63)] x) (S(RAW|RW|LW) x (MOVDconst [c])) => (S(RAW|RW|LW)const [c&31 | (c>>5&1*31)] x) diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go index 825d0faf34..f91222446c 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go @@ -206,9 +206,7 @@ func init() { {name: "ROTL", argLength: 2, reg: gp21, asm: "ROTL"}, // arg0 rotate left by arg1 mod 64 {name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32 - {name: "LoweredAdd64Carry", argLength: 3, reg: gp32, resultNotInArgs: true}, // arg0 + arg1 + carry, returns (sum, carry) - {name: "ADDconstForCarry", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, aux: "Int16", asm: "ADDC", typ: "Flags"}, // _, carry := arg0 + auxint - {name: "MaskIfNotCarry", argLength: 1, reg: crgp, asm: "ADDME", typ: "Int64"}, // carry - 1 (if carry then 0 else -1) + {name: "LoweredAdd64Carry", argLength: 3, reg: gp32, resultNotInArgs: true}, // arg0 + arg1 + carry, returns (sum, carry) {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width {name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width @@ -674,11 +672,9 @@ func init() { // These ops are for temporary use by rewrite rules. They // cannot appear in the generated assembly. 
- {name: "FlagEQ"}, // equal - {name: "FlagLT"}, // signed < or unsigned < - {name: "FlagGT"}, // signed > or unsigned > - {name: "FlagCarrySet"}, // carry flag set - {name: "FlagCarryClear"}, // carry flag clear + {name: "FlagEQ"}, // equal + {name: "FlagLT"}, // signed < or unsigned < + {name: "FlagGT"}, // signed > or unsigned > } blocks := []blockData{ diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 4cd72799e8..e181174d11 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1853,8 +1853,6 @@ const ( OpPPC64ROTL OpPPC64ROTLW OpPPC64LoweredAdd64Carry - OpPPC64ADDconstForCarry - OpPPC64MaskIfNotCarry OpPPC64SRADconst OpPPC64SRAWconst OpPPC64SRDconst @@ -2027,8 +2025,6 @@ const ( OpPPC64FlagEQ OpPPC64FlagLT OpPPC64FlagGT - OpPPC64FlagCarrySet - OpPPC64FlagCarryClear OpRISCV64ADD OpRISCV64ADDI @@ -24683,28 +24679,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ADDconstForCarry", - auxType: auxInt16, - argLen: 1, - asm: ppc64.AADDC, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 2147483648, // R31 - }, - }, - { - name: "MaskIfNotCarry", - argLen: 1, - asm: ppc64.AADDME, - reg: regInfo{ - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - }, - }, { name: "SRADconst", auxType: auxInt64, @@ -26964,16 +26938,6 @@ var opcodeTable = [...]opInfo{ argLen: 0, reg: regInfo{}, }, - { - name: "FlagCarrySet", - argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagCarryClear", - argLen: 0, - reg: regInfo{}, - }, { name: "ADD", diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 7704b80dc6..1a0b03e81c 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ 
b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -428,8 +428,6 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64ADD(v) case OpPPC64ADDconst: return rewriteValuePPC64_OpPPC64ADDconst(v) - case OpPPC64ADDconstForCarry: - return rewriteValuePPC64_OpPPC64ADDconstForCarry(v) case OpPPC64AND: return rewriteValuePPC64_OpPPC64AND(v) case OpPPC64ANDN: @@ -570,8 +568,6 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64MOVWstorezero(v) case OpPPC64MTVSRD: return rewriteValuePPC64_OpPPC64MTVSRD(v) - case OpPPC64MaskIfNotCarry: - return rewriteValuePPC64_OpPPC64MaskIfNotCarry(v) case OpPPC64NOR: return rewriteValuePPC64_OpPPC64NOR(v) case OpPPC64NotEqual: @@ -4075,40 +4071,6 @@ func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool { } return false } -func rewriteValuePPC64_OpPPC64ADDconstForCarry(v *Value) bool { - v_0 := v.Args[0] - // match: (ADDconstForCarry [c] (MOVDconst [d])) - // cond: c < 0 && (c < 0 || int64(c) + d >= 0) - // result: (FlagCarryClear) - for { - c := auxIntToInt16(v.AuxInt) - if v_0.Op != OpPPC64MOVDconst { - break - } - d := auxIntToInt64(v_0.AuxInt) - if !(c < 0 && (c < 0 || int64(c)+d >= 0)) { - break - } - v.reset(OpPPC64FlagCarryClear) - return true - } - // match: (ADDconstForCarry [c] (MOVDconst [d])) - // cond: c < 0 && c >= 0 && int64(c) + d < 0 - // result: (FlagCarrySet) - for { - c := auxIntToInt16(v.AuxInt) - if v_0.Op != OpPPC64MOVDconst { - break - } - d := auxIntToInt64(v_0.AuxInt) - if !(c < 0 && c >= 0 && int64(c)+d < 0) { - break - } - v.reset(OpPPC64FlagCarrySet) - return true - } - return false -} func rewriteValuePPC64_OpPPC64AND(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -10374,50 +10336,6 @@ func rewriteValuePPC64_OpPPC64MTVSRD(v *Value) bool { } return false } -func rewriteValuePPC64_OpPPC64MaskIfNotCarry(v *Value) bool { - v_0 := v.Args[0] - // match: (MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) - // cond: c < 0 && d > 0 && int64(c) + d < 0 - // result: 
(MOVDconst [-1]) - for { - if v_0.Op != OpPPC64ADDconstForCarry { - break - } - c := auxIntToInt16(v_0.AuxInt) - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { - break - } - d := auxIntToInt64(v_0_0.AuxInt) - if !(c < 0 && d > 0 && int64(c)+d < 0) { - break - } - v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(-1) - return true - } - // match: (MaskIfNotCarry (FlagCarrySet)) - // result: (MOVDconst [0]) - for { - if v_0.Op != OpPPC64FlagCarrySet { - break - } - v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(0) - return true - } - // match: (MaskIfNotCarry (FlagCarryClear)) - // result: (MOVDconst [-1]) - for { - if v_0.Op != OpPPC64FlagCarryClear { - break - } - v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(-1) - return true - } - return false -} func rewriteValuePPC64_OpPPC64NOR(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] -- cgit v1.2.3-54-g00ecf From 3e636ab9ad31040aff2d484237808907a776cec6 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Fri, 14 Aug 2020 13:29:07 -0700 Subject: net/mail: return error on empty address list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This restores the handling accidentally changed in CL 217377. 
Fixes #40803 For #36959 Change-Id: If77fbc0c2a1dde4799f760affdfb8dde9bcaf458 Reviewed-on: https://go-review.googlesource.com/c/go/+/248598 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Daniel Martí Reviewed-by: Jeremy Fishman --- src/net/mail/message.go | 13 +++++++++---- src/net/mail/message_test.go | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 4 deletions(-) diff --git a/src/net/mail/message.go b/src/net/mail/message.go index 6833cfaec1..09fb794005 100644 --- a/src/net/mail/message.go +++ b/src/net/mail/message.go @@ -279,9 +279,6 @@ func (p *addrParser) parseAddressList() ([]*Address, error) { if p.consume(',') { continue } - if p.empty() { - break - } addrs, err := p.parseAddress(true) if err != nil { @@ -295,9 +292,17 @@ func (p *addrParser) parseAddressList() ([]*Address, error) { if p.empty() { break } - if !p.consume(',') { + if p.peek() != ',' { return nil, errors.New("mail: expected comma") } + + // Skip empty entries for obs-addr-list. 
+ for p.consume(',') { + p.skipSpace() + } + if p.empty() { + break + } } return list, nil } diff --git a/src/net/mail/message_test.go b/src/net/mail/message_test.go index 75db767547..67e3643aeb 100644 --- a/src/net/mail/message_test.go +++ b/src/net/mail/message_test.go @@ -445,6 +445,19 @@ func TestAddressParsing(t *testing.T) { }, }, }, + { + ` , joe@where.test,,John ,,`, + []*Address{ + { + Name: "", + Address: "joe@where.test", + }, + { + Name: "John", + Address: "jdoe@one.test", + }, + }, + }, { `Group1: ;, Group 2: addr2@example.com;, John `, []*Address{ @@ -1067,3 +1080,22 @@ func TestAddressFormattingAndParsing(t *testing.T) { } } } + +func TestEmptyAddress(t *testing.T) { + parsed, err := ParseAddress("") + if parsed != nil || err == nil { + t.Errorf(`ParseAddress("") = %v, %v, want nil, error`, parsed, err) + } + list, err := ParseAddressList("") + if len(list) > 0 || err == nil { + t.Errorf(`ParseAddressList("") = %v, %v, want nil, error`, list, err) + } + list, err = ParseAddressList(",") + if len(list) > 0 || err == nil { + t.Errorf(`ParseAddressList("") = %v, %v, want nil, error`, list, err) + } + list, err = ParseAddressList("a@b c@d") + if len(list) > 0 || err == nil { + t.Errorf(`ParseAddressList("") = %v, %v, want nil, error`, list, err) + } +} -- cgit v1.2.3-54-g00ecf From 346efc28508dc358cba7e246adeb40bde99cfb2a Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Sun, 12 Jul 2020 16:57:01 -0400 Subject: go/types: better error when converting untyped values in assignments The error returned by convertUntyped is 'cannot convert _ to _', which can be misleading in contexts where an explicit conversion would be allowed. Arguably the error message from convertUntyped should just be 'cannot use _ as _', as 'convert' has an explicit meaning within the spec. Making that change caused a large number of test failures, so for now we just fix this for assignments by interpreting the error. 
For #22070 Change-Id: I4eed6f39d1a991e8df7e035ec301d28a05150eb5 Reviewed-on: https://go-review.googlesource.com/c/go/+/242083 Run-TryBot: Robert Findley TryBot-Result: Gobot Gobot Reviewed-by: Robert Griesemer --- src/go/types/assignments.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/go/types/assignments.go b/src/go/types/assignments.go index 34a9d7843d..9697e504cd 100644 --- a/src/go/types/assignments.go +++ b/src/go/types/assignments.go @@ -7,6 +7,7 @@ package types import ( + "errors" "go/ast" "go/token" ) @@ -43,8 +44,16 @@ func (check *Checker) assignment(x *operand, T Type, context string) { } target = Default(x.typ) } - check.convertUntyped(x, target) - if x.mode == invalid { + if err := check.canConvertUntyped(x, target); err != nil { + var internalErr Error + var msg string + if errors.As(err, &internalErr) { + msg = internalErr.Msg + } else { + msg = err.Error() + } + check.errorf(x.pos(), "cannot use %s as %s value in %s: %v", x, target, context, msg) + x.mode = invalid return } } -- cgit v1.2.3-54-g00ecf From 73a5c372410adb272b6a63484c9a9de5e93e986c Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 24 Aug 2020 10:43:43 -0400 Subject: go/types: add untyped test cases for AssignableTo API The AssignableTo API is specifically for non-constant values, but is currently called by gopls for constant completions. Add a test to ensure that we handle this edge case correctly. 
Change-Id: I83115cbca2443a783df1c3090b5741260dffb78e Reviewed-on: https://go-review.googlesource.com/c/go/+/250258 Run-TryBot: Robert Findley TryBot-Result: Gobot Gobot Reviewed-by: Robert Griesemer --- src/go/types/api_test.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go index 798c09bbff..6c129cd01b 100644 --- a/src/go/types/api_test.go +++ b/src/go/types/api_test.go @@ -1243,6 +1243,11 @@ func TestConvertibleTo(t *testing.T) { {newDefined(new(Struct)), new(Struct), true}, {newDefined(Typ[Int]), new(Struct), false}, {Typ[UntypedInt], Typ[Int], true}, + // TODO (rFindley): the below behavior is undefined as non-constant untyped + // string values are not permitted by the spec. But we should consider + // changing this case to return 'true', to have more reasonable behavior in + // cases where the API is used for constant expressions. + {Typ[UntypedString], Typ[String], false}, } { if got := ConvertibleTo(test.v, test.t); got != test.want { t.Errorf("ConvertibleTo(%v, %v) = %t, want %t", test.v, test.t, got, test.want) @@ -1260,6 +1265,14 @@ func TestAssignableTo(t *testing.T) { {newDefined(Typ[Int]), Typ[Int], false}, {newDefined(new(Struct)), new(Struct), true}, {Typ[UntypedBool], Typ[Bool], true}, + {Typ[UntypedString], Typ[Bool], false}, + // TODO (rFindley): the below behavior is undefined as AssignableTo is + // intended for non-constant values (and neither UntypedString or + // UntypedInt assignments arise during normal type checking). But as + // described in TestConvertibleTo above, we should consider changing this + // behavior. 
+ {Typ[UntypedString], Typ[String], false}, + {Typ[UntypedInt], Typ[Int], false}, } { if got := AssignableTo(test.v, test.t); got != test.want { t.Errorf("AssignableTo(%v, %v) = %t, want %t", test.v, test.t, got, test.want) -- cgit v1.2.3-54-g00ecf From 17ae5873833fd9a20aae123faf96f3eca9a149ea Mon Sep 17 00:00:00 2001 From: Michael Munday Date: Thu, 14 May 2020 07:58:27 -0700 Subject: cmd/compile: use addressing modes pass on s390x MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add s390x support to the addressing modes pass. This significantly reduces the number of rules we need to have to handle indexed addressing modes on s390x. There are some changes introduced with the new approach. Notably pointer calculations of the form '(ADD x (ADDconst y [c]))' won't get fully merged into address fields right now, the constant offset will remain separate. That is a relatively minor issue though. file before after Δ % addr2line 4120904 4120960 +56 +0.001% api 4944005 4948765 +4760 +0.096% asm 4977431 4984335 +6904 +0.139% buildid 2683760 2683504 -256 -0.010% cgo 4557976 4558408 +432 +0.009% compile 19103577 18916634 -186943 -0.979% cover 4883694 4885054 +1360 +0.028% dist 3545177 3553689 +8512 +0.240% doc 3921766 3921518 -248 -0.006% fix 3295254 3302182 +6928 +0.210% link 6539222 6540286 +1064 +0.016% nm 4105085 4107757 +2672 +0.065% objdump 4546015 4545439 -576 -0.013% pack 2416661 2415485 -1176 -0.049% pprof 13267433 13265489 -1944 -0.015% test2json 2762180 2761996 -184 -0.007% trace 10145090 10135626 -9464 -0.093% vet 6772946 6771738 -1208 -0.018% total 106588176 106418865 -169311 -0.159% Fixes #37891. 
Change-Id: If60d51f31eb2806b011432a6519951b8668cb42f Reviewed-on: https://go-review.googlesource.com/c/go/+/250958 Run-TryBot: Michael Munday TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/addressingmodes.go | 65 +- src/cmd/compile/internal/ssa/gen/S390X.rules | 412 +-- src/cmd/compile/internal/ssa/rewriteS390X.go | 3795 +---------------------- 3 files changed, 145 insertions(+), 4127 deletions(-) diff --git a/src/cmd/compile/internal/ssa/addressingmodes.go b/src/cmd/compile/internal/ssa/addressingmodes.go index 97a5ab4f03..aae0def27f 100644 --- a/src/cmd/compile/internal/ssa/addressingmodes.go +++ b/src/cmd/compile/internal/ssa/addressingmodes.go @@ -7,12 +7,14 @@ package ssa // addressingModes combines address calculations into memory operations // that can perform complicated addressing modes. func addressingModes(f *Func) { + isInImmediateRange := is32Bit switch f.Config.arch { default: // Most architectures can't do this. return case "amd64", "386": - // TODO: s390x? 
+ case "s390x": + isInImmediateRange = is20Bit } var tmp []*Value @@ -40,7 +42,7 @@ func addressingModes(f *Func) { switch [2]auxType{opcodeTable[v.Op].auxType, opcodeTable[p.Op].auxType} { case [2]auxType{auxSymOff, auxInt32}: // TODO: introduce auxSymOff32 - if !is32Bit(v.AuxInt + p.AuxInt) { + if !isInImmediateRange(v.AuxInt + p.AuxInt) { continue } v.AuxInt += p.AuxInt @@ -48,7 +50,7 @@ func addressingModes(f *Func) { if v.Aux != nil && p.Aux != nil { continue } - if !is32Bit(v.AuxInt + p.AuxInt) { + if !isInImmediateRange(v.AuxInt + p.AuxInt) { continue } if p.Aux != nil { @@ -398,4 +400,61 @@ var combine = map[[2]Op]Op{ [2]Op{Op386ANDLconstmodify, Op386LEAL4}: Op386ANDLconstmodifyidx4, [2]Op{Op386ORLconstmodify, Op386LEAL4}: Op386ORLconstmodifyidx4, [2]Op{Op386XORLconstmodify, Op386LEAL4}: Op386XORLconstmodifyidx4, + + // s390x + [2]Op{OpS390XMOVDload, OpS390XADD}: OpS390XMOVDloadidx, + [2]Op{OpS390XMOVWload, OpS390XADD}: OpS390XMOVWloadidx, + [2]Op{OpS390XMOVHload, OpS390XADD}: OpS390XMOVHloadidx, + [2]Op{OpS390XMOVBload, OpS390XADD}: OpS390XMOVBloadidx, + + [2]Op{OpS390XMOVWZload, OpS390XADD}: OpS390XMOVWZloadidx, + [2]Op{OpS390XMOVHZload, OpS390XADD}: OpS390XMOVHZloadidx, + [2]Op{OpS390XMOVBZload, OpS390XADD}: OpS390XMOVBZloadidx, + + [2]Op{OpS390XMOVDBRload, OpS390XADD}: OpS390XMOVDBRloadidx, + [2]Op{OpS390XMOVWBRload, OpS390XADD}: OpS390XMOVWBRloadidx, + [2]Op{OpS390XMOVHBRload, OpS390XADD}: OpS390XMOVHBRloadidx, + + [2]Op{OpS390XFMOVDload, OpS390XADD}: OpS390XFMOVDloadidx, + [2]Op{OpS390XFMOVSload, OpS390XADD}: OpS390XFMOVSloadidx, + + [2]Op{OpS390XMOVDstore, OpS390XADD}: OpS390XMOVDstoreidx, + [2]Op{OpS390XMOVWstore, OpS390XADD}: OpS390XMOVWstoreidx, + [2]Op{OpS390XMOVHstore, OpS390XADD}: OpS390XMOVHstoreidx, + [2]Op{OpS390XMOVBstore, OpS390XADD}: OpS390XMOVBstoreidx, + + [2]Op{OpS390XMOVDBRstore, OpS390XADD}: OpS390XMOVDBRstoreidx, + [2]Op{OpS390XMOVWBRstore, OpS390XADD}: OpS390XMOVWBRstoreidx, + [2]Op{OpS390XMOVHBRstore, OpS390XADD}: 
OpS390XMOVHBRstoreidx, + + [2]Op{OpS390XFMOVDstore, OpS390XADD}: OpS390XFMOVDstoreidx, + [2]Op{OpS390XFMOVSstore, OpS390XADD}: OpS390XFMOVSstoreidx, + + [2]Op{OpS390XMOVDload, OpS390XMOVDaddridx}: OpS390XMOVDloadidx, + [2]Op{OpS390XMOVWload, OpS390XMOVDaddridx}: OpS390XMOVWloadidx, + [2]Op{OpS390XMOVHload, OpS390XMOVDaddridx}: OpS390XMOVHloadidx, + [2]Op{OpS390XMOVBload, OpS390XMOVDaddridx}: OpS390XMOVBloadidx, + + [2]Op{OpS390XMOVWZload, OpS390XMOVDaddridx}: OpS390XMOVWZloadidx, + [2]Op{OpS390XMOVHZload, OpS390XMOVDaddridx}: OpS390XMOVHZloadidx, + [2]Op{OpS390XMOVBZload, OpS390XMOVDaddridx}: OpS390XMOVBZloadidx, + + [2]Op{OpS390XMOVDBRload, OpS390XMOVDaddridx}: OpS390XMOVDBRloadidx, + [2]Op{OpS390XMOVWBRload, OpS390XMOVDaddridx}: OpS390XMOVWBRloadidx, + [2]Op{OpS390XMOVHBRload, OpS390XMOVDaddridx}: OpS390XMOVHBRloadidx, + + [2]Op{OpS390XFMOVDload, OpS390XMOVDaddridx}: OpS390XFMOVDloadidx, + [2]Op{OpS390XFMOVSload, OpS390XMOVDaddridx}: OpS390XFMOVSloadidx, + + [2]Op{OpS390XMOVDstore, OpS390XMOVDaddridx}: OpS390XMOVDstoreidx, + [2]Op{OpS390XMOVWstore, OpS390XMOVDaddridx}: OpS390XMOVWstoreidx, + [2]Op{OpS390XMOVHstore, OpS390XMOVDaddridx}: OpS390XMOVHstoreidx, + [2]Op{OpS390XMOVBstore, OpS390XMOVDaddridx}: OpS390XMOVBstoreidx, + + [2]Op{OpS390XMOVDBRstore, OpS390XMOVDaddridx}: OpS390XMOVDBRstoreidx, + [2]Op{OpS390XMOVWBRstore, OpS390XMOVDaddridx}: OpS390XMOVWBRstoreidx, + [2]Op{OpS390XMOVHBRstore, OpS390XMOVDaddridx}: OpS390XMOVHBRstoreidx, + + [2]Op{OpS390XFMOVDstore, OpS390XMOVDaddridx}: OpS390XFMOVDstoreidx, + [2]Op{OpS390XFMOVSstore, OpS390XMOVDaddridx}: OpS390XFMOVSstoreidx, } diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules index f7d391cf3a..e564f638d3 100644 --- a/src/cmd/compile/internal/ssa/gen/S390X.rules +++ b/src/cmd/compile/internal/ssa/gen/S390X.rules @@ -498,27 +498,19 @@ // Remove zero extensions after zero extending load. // Note: take care that if x is spilled it is restored correctly. 
(MOV(B|H|W)Zreg x:(MOVBZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x -(MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x (MOV(H|W)Zreg x:(MOVHZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x -(MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x (MOVWZreg x:(MOVWZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 4) => x -(MOVWZreg x:(MOVWZloadidx _ _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 4) => x // Remove sign extensions after sign extending load. // Note: take care that if x is spilled it is restored correctly. (MOV(B|H|W)reg x:(MOVBload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x -(MOV(B|H|W)reg x:(MOVBloadidx _ _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x (MOV(H|W)reg x:(MOVHload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x -(MOV(H|W)reg x:(MOVHloadidx _ _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x (MOVWreg x:(MOVWload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x -(MOVWreg x:(MOVWloadidx _ _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x // Remove sign extensions after zero extending load. // These type checks are probably unnecessary but do them anyway just in case. (MOV(H|W)reg x:(MOVBZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x -(MOV(H|W)reg x:(MOVBZloadidx _ _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x (MOVWreg x:(MOVHZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x -(MOVWreg x:(MOVHZloadidx _ _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x // Fold sign and zero extensions into loads. 
// @@ -538,14 +530,6 @@ && x.Uses == 1 && clobber(x) => @x.Block (MOV(B|H|W)load [o] {s} p mem) -(MOV(B|H|W)Zreg x:(MOV(B|H|W)loadidx [o] {s} p i mem)) - && x.Uses == 1 - && clobber(x) - => @x.Block (MOV(B|H|W)Zloadidx [o] {s} p i mem) -(MOV(B|H|W)reg x:(MOV(B|H|W)Zloadidx [o] {s} p i mem)) - && x.Uses == 1 - && clobber(x) - => @x.Block (MOV(B|H|W)loadidx [o] {s} p i mem) // Remove zero extensions after argument load. (MOVBZreg x:(Arg )) && !t.IsSigned() && t.Size() == 1 => x @@ -753,12 +737,12 @@ // Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them). (ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x) (ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is20Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x) -(ADD idx (MOVDaddr [c] {s} ptr)) && ptr.Op != OpSB && idx.Op != OpSB => (MOVDaddridx [c] {s} ptr idx) +(ADD idx (MOVDaddr [c] {s} ptr)) && ptr.Op != OpSB => (MOVDaddridx [c] {s} ptr idx) // fold ADDconst into MOVDaddrx (ADDconst [c] (MOVDaddridx [d] {s} x y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y) -(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is20Bit(int64(c)+int64(d)) && x.Op != OpSB => (MOVDaddridx [c+d] {s} x y) -(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is20Bit(int64(c)+int64(d)) && y.Op != OpSB => (MOVDaddridx [c+d] {s} x y) +(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y) +(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y) // reverse ordering of compare instruction (LOCGR {c} x y (InvertFlags cmp)) => (LOCGR {c.ReverseComparison()} x y cmp) @@ -943,91 +927,6 @@ (MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) => (MOVBstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) -// generating indexed loads and stores -(MOVBZload [off1] {sym1} 
(MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (MOVBZloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) -(MOVBload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (MOVBloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) -(MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (MOVHZloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) -(MOVHload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (MOVHloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) -(MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (MOVWZloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) -(MOVWload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (MOVWloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) -(MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (MOVDloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) -(FMOVSload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (FMOVSloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) -(FMOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (FMOVDloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) - -(MOVBstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (MOVBstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) -(MOVHstore 
[off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (MOVHstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) -(MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (MOVWstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) -(MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (MOVDstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) -(FMOVSstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (FMOVSstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) -(FMOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - (FMOVDstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) - -(MOVBZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB => (MOVBZloadidx [off] {sym} ptr idx mem) -(MOVBload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB => (MOVBloadidx [off] {sym} ptr idx mem) -(MOVHZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB => (MOVHZloadidx [off] {sym} ptr idx mem) -(MOVHload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB => (MOVHloadidx [off] {sym} ptr idx mem) -(MOVWZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB => (MOVWZloadidx [off] {sym} ptr idx mem) -(MOVWload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB => (MOVWloadidx [off] {sym} ptr idx mem) -(MOVDload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB => (MOVDloadidx [off] {sym} ptr idx mem) -(FMOVSload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB => (FMOVSloadidx [off] {sym} ptr idx mem) -(FMOVDload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB => (FMOVDloadidx [off] {sym} ptr idx mem) - -(MOVBstore [off] {sym} (ADD ptr idx) 
val mem) && ptr.Op != OpSB => (MOVBstoreidx [off] {sym} ptr idx val mem) -(MOVHstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB => (MOVHstoreidx [off] {sym} ptr idx val mem) -(MOVWstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB => (MOVWstoreidx [off] {sym} ptr idx val mem) -(MOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB => (MOVDstoreidx [off] {sym} ptr idx val mem) -(FMOVSstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB => (FMOVSstoreidx [off] {sym} ptr idx val mem) -(FMOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB => (FMOVDstoreidx [off] {sym} ptr idx val mem) - -// combine ADD into indexed loads and stores -(MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (MOVBZloadidx [c+d] {sym} ptr idx mem) -(MOVBloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (MOVBloadidx [c+d] {sym} ptr idx mem) -(MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (MOVHZloadidx [c+d] {sym} ptr idx mem) -(MOVHloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (MOVHloadidx [c+d] {sym} ptr idx mem) -(MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (MOVWZloadidx [c+d] {sym} ptr idx mem) -(MOVWloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (MOVWloadidx [c+d] {sym} ptr idx mem) -(MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (MOVDloadidx [c+d] {sym} ptr idx mem) -(FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (FMOVSloadidx [c+d] {sym} ptr idx mem) -(FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(int64(c)+int64(d)) => (FMOVDloadidx [c+d] {sym} ptr idx mem) - -(MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(int64(c)+int64(d)) => (MOVBstoreidx [c+d] {sym} ptr idx val mem) -(MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && 
is20Bit(int64(c)+int64(d)) => (MOVHstoreidx [c+d] {sym} ptr idx val mem) -(MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(int64(c)+int64(d)) => (MOVWstoreidx [c+d] {sym} ptr idx val mem) -(MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(int64(c)+int64(d)) => (MOVDstoreidx [c+d] {sym} ptr idx val mem) -(FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(int64(c)+int64(d)) => (FMOVSstoreidx [c+d] {sym} ptr idx val mem) -(FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(int64(c)+int64(d)) => (FMOVDstoreidx [c+d] {sym} ptr idx val mem) - -(MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (MOVBZloadidx [c+d] {sym} ptr idx mem) -(MOVBloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (MOVBloadidx [c+d] {sym} ptr idx mem) -(MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (MOVHZloadidx [c+d] {sym} ptr idx mem) -(MOVHloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (MOVHloadidx [c+d] {sym} ptr idx mem) -(MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (MOVWZloadidx [c+d] {sym} ptr idx mem) -(MOVWloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (MOVWloadidx [c+d] {sym} ptr idx mem) -(MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (MOVDloadidx [c+d] {sym} ptr idx mem) -(FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (FMOVSloadidx [c+d] {sym} ptr idx mem) -(FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(int64(c)+int64(d)) => (FMOVDloadidx [c+d] {sym} ptr idx mem) - -(MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(int64(c)+int64(d)) => (MOVBstoreidx [c+d] {sym} ptr idx val mem) -(MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(int64(c)+int64(d)) => (MOVHstoreidx [c+d] {sym} ptr idx val mem) 
-(MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(int64(c)+int64(d)) => (MOVWstoreidx [c+d] {sym} ptr idx val mem) -(MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(int64(c)+int64(d)) => (MOVDstoreidx [c+d] {sym} ptr idx val mem) -(FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(int64(c)+int64(d)) => (FMOVSstoreidx [c+d] {sym} ptr idx val mem) -(FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(int64(c)+int64(d)) => (FMOVDstoreidx [c+d] {sym} ptr idx val mem) - // MOVDaddr into MOVDaddridx (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => (MOVDaddridx [off1+off2] {mergeSymTyped(sym1,sym2)} x y) @@ -1372,47 +1271,6 @@ && clobber(x) => (MOVDstore [i-4] {s} p w0 mem) -(MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [8] w) mem)) - && x.Uses == 1 - && clobber(x) - => (MOVHstoreidx [i-1] {s} p idx w mem) -(MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem)) - && x.Uses == 1 - && clobber(x) - => (MOVHstoreidx [i-1] {s} p idx w0 mem) -(MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [8] w) mem)) - && x.Uses == 1 - && clobber(x) - => (MOVHstoreidx [i-1] {s} p idx w mem) -(MOVBstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [j+8] w) mem)) - && x.Uses == 1 - && clobber(x) - => (MOVHstoreidx [i-1] {s} p idx w0 mem) -(MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] w) mem)) - && x.Uses == 1 - && clobber(x) - => (MOVWstoreidx [i-2] {s} p idx w mem) -(MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem)) - && x.Uses == 1 - && clobber(x) - => (MOVWstoreidx [i-2] {s} p idx w0 mem) -(MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [16] w) mem)) - && x.Uses == 1 - && clobber(x) - => 
(MOVWstoreidx [i-2] {s} p idx w mem) -(MOVHstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [j+16] w) mem)) - && x.Uses == 1 - && clobber(x) - => (MOVWstoreidx [i-2] {s} p idx w0 mem) -(MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem)) - && x.Uses == 1 - && clobber(x) - => (MOVDstoreidx [i-4] {s} p idx w mem) -(MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) mem)) - && x.Uses == 1 - && clobber(x) - => (MOVDstoreidx [i-4] {s} p idx w0 mem) - // Combine stores into larger (unaligned) stores with the bytes reversed (little endian). // Store-with-bytes-reversed instructions do not support relative memory addresses, // so these stores can't operate on global data (SB). @@ -1461,47 +1319,6 @@ && clobber(x) => (MOVDBRstore [i-4] {s} p w0 mem) -(MOVBstoreidx [i] {s} p idx (SRDconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) - && x.Uses == 1 - && clobber(x) - => (MOVHBRstoreidx [i-1] {s} p idx w mem) -(MOVBstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRDconst [j-8] w) mem)) - && x.Uses == 1 - && clobber(x) - => (MOVHBRstoreidx [i-1] {s} p idx w0 mem) -(MOVBstoreidx [i] {s} p idx (SRWconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) - && x.Uses == 1 - && clobber(x) - => (MOVHBRstoreidx [i-1] {s} p idx w mem) -(MOVBstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRWconst [j-8] w) mem)) - && x.Uses == 1 - && clobber(x) - => (MOVHBRstoreidx [i-1] {s} p idx w0 mem) -(MOVHBRstoreidx [i] {s} p idx (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem)) - && x.Uses == 1 - && clobber(x) - => (MOVWBRstoreidx [i-2] {s} p idx w mem) -(MOVHBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRDconst [j-16] w) mem)) - && x.Uses == 1 - && clobber(x) - => (MOVWBRstoreidx [i-2] {s} p idx w0 mem) -(MOVHBRstoreidx [i] {s} p idx (SRWconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p 
idx w mem)) - && x.Uses == 1 - && clobber(x) - => (MOVWBRstoreidx [i-2] {s} p idx w mem) -(MOVHBRstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRWconst [j-16] w) mem)) - && x.Uses == 1 - && clobber(x) - => (MOVWBRstoreidx [i-2] {s} p idx w0 mem) -(MOVWBRstoreidx [i] {s} p idx (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] {s} p idx w mem)) - && x.Uses == 1 - && clobber(x) - => (MOVDBRstoreidx [i-4] {s} p idx w mem) -(MOVWBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVWBRstoreidx [i-4] {s} p idx w0:(SRDconst [j-32] w) mem)) - && x.Uses == 1 - && clobber(x) - => (MOVDBRstoreidx [i-4] {s} p idx w0 mem) - // Combining byte loads into larger (unaligned) loads. // Big-endian loads @@ -1612,114 +1429,6 @@ && clobber(x0, x1, s0, s1, or) => @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZload [i0] {s} p mem)) y) -// Big-endian indexed loads - -(ORW x1:(MOVBZloadidx [i1] {s} p idx mem) - sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem))) - && i1 == i0+1 - && p.Op != OpSB - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0, x1, sh) - => @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - -(OR x1:(MOVBZloadidx [i1] {s} p idx mem) - sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem))) - && i1 == i0+1 - && p.Op != OpSB - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0, x1, sh) - => @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - -(ORW x1:(MOVHZloadidx [i1] {s} p idx mem) - sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) - && i1 == i0+2 - && p.Op != OpSB - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0, x1, sh) - => @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - -(OR x1:(MOVHZloadidx [i1] {s} p idx mem) - sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) - && i1 == i0+2 - && p.Op != OpSB - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && 
mergePoint(b,x0,x1) != nil - && clobber(x0, x1, sh) - => @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - -(OR x1:(MOVWZloadidx [i1] {s} p idx mem) - sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} p idx mem))) - && i1 == i0+4 - && p.Op != OpSB - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0, x1, sh) - => @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - -(ORW - s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) - or:(ORW - s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) - y)) - && i1 == i0+1 - && j1 == j0-8 - && j1 % 16 == 0 - && x0.Uses == 1 - && x1.Uses == 1 - && s0.Uses == 1 - && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1,y) != nil - && clobber(x0, x1, s0, s1, or) - => @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - -(OR - s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) - or:(OR - s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) - y)) - && i1 == i0+1 - && j1 == j0-8 - && j1 % 16 == 0 - && x0.Uses == 1 - && x1.Uses == 1 - && s0.Uses == 1 - && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1,y) != nil - && clobber(x0, x1, s0, s1, or) - => @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - -(OR - s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem)) - or:(OR - s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)) - y)) - && i1 == i0+2 - && j1 == j0-16 - && j1 % 32 == 0 - && x0.Uses == 1 - && x1.Uses == 1 - && s0.Uses == 1 - && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1,y) != nil - && clobber(x0, x1, s0, s1, or) - => @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - // Little-endian loads (ORW x0:(MOVBZload [i0] {s} p mem) @@ -1835,121 +1544,6 @@ && clobber(x0, x1, r0, r1, s0, s1, or) => @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y) -// Little-endian indexed loads - -(ORW x0:(MOVBZloadidx [i0] {s} p idx 
mem) - sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) - && p.Op != OpSB - && i1 == i0+1 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0, x1, sh) - => @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - -(OR x0:(MOVBZloadidx [i0] {s} p idx mem) - sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) - && p.Op != OpSB - && i1 == i0+1 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0, x1, sh) - => @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - -(ORW r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) - sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - && i1 == i0+2 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0, x1, r0, r1, sh) - => @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - -(OR r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) - sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - && i1 == i0+2 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0, x1, r0, r1, sh) - => @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - -(OR r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} p idx mem)) - sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} p idx mem)))) - && i1 == i0+4 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0, x1, r0, r1, sh) - => @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - -(ORW - s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) - or:(ORW - s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) - y)) - && p.Op != OpSB - && i1 == i0+1 - && j1 == j0+8 - && j0 % 16 == 0 - && x0.Uses == 1 - && x1.Uses == 1 - && s0.Uses == 1 - && s1.Uses == 1 - && or.Uses 
== 1 - && mergePoint(b,x0,x1,y) != nil - && clobber(x0, x1, s0, s1, or) - => @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - -(OR - s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) - or:(OR - s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) - y)) - && p.Op != OpSB - && i1 == i0+1 - && j1 == j0+8 - && j0 % 16 == 0 - && x0.Uses == 1 - && x1.Uses == 1 - && s0.Uses == 1 - && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1,y) != nil - && clobber(x0, x1, s0, s1, or) - => @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - -(OR - s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) - or:(OR - s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) - y)) - && i1 == i0+2 - && j1 == j0+16 - && j0 % 32 == 0 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && s0.Uses == 1 - && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1,y) != nil - && clobber(x0, x1, r0, r1, s0, s1, or) - => @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - // Combine stores into store multiples. 
// 32-bit (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem)) diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index 2dba8163bc..78a57c2388 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -587,20 +587,12 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpS390XFCMPS(v) case OpS390XFMOVDload: return rewriteValueS390X_OpS390XFMOVDload(v) - case OpS390XFMOVDloadidx: - return rewriteValueS390X_OpS390XFMOVDloadidx(v) case OpS390XFMOVDstore: return rewriteValueS390X_OpS390XFMOVDstore(v) - case OpS390XFMOVDstoreidx: - return rewriteValueS390X_OpS390XFMOVDstoreidx(v) case OpS390XFMOVSload: return rewriteValueS390X_OpS390XFMOVSload(v) - case OpS390XFMOVSloadidx: - return rewriteValueS390X_OpS390XFMOVSloadidx(v) case OpS390XFMOVSstore: return rewriteValueS390X_OpS390XFMOVSstore(v) - case OpS390XFMOVSstoreidx: - return rewriteValueS390X_OpS390XFMOVSstoreidx(v) case OpS390XFNEG: return rewriteValueS390X_OpS390XFNEG(v) case OpS390XFNEGS: @@ -623,78 +615,52 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpS390XLoweredRound64F(v) case OpS390XMOVBZload: return rewriteValueS390X_OpS390XMOVBZload(v) - case OpS390XMOVBZloadidx: - return rewriteValueS390X_OpS390XMOVBZloadidx(v) case OpS390XMOVBZreg: return rewriteValueS390X_OpS390XMOVBZreg(v) case OpS390XMOVBload: return rewriteValueS390X_OpS390XMOVBload(v) - case OpS390XMOVBloadidx: - return rewriteValueS390X_OpS390XMOVBloadidx(v) case OpS390XMOVBreg: return rewriteValueS390X_OpS390XMOVBreg(v) case OpS390XMOVBstore: return rewriteValueS390X_OpS390XMOVBstore(v) case OpS390XMOVBstoreconst: return rewriteValueS390X_OpS390XMOVBstoreconst(v) - case OpS390XMOVBstoreidx: - return rewriteValueS390X_OpS390XMOVBstoreidx(v) case OpS390XMOVDaddridx: return rewriteValueS390X_OpS390XMOVDaddridx(v) case OpS390XMOVDload: return rewriteValueS390X_OpS390XMOVDload(v) - case 
OpS390XMOVDloadidx: - return rewriteValueS390X_OpS390XMOVDloadidx(v) case OpS390XMOVDstore: return rewriteValueS390X_OpS390XMOVDstore(v) case OpS390XMOVDstoreconst: return rewriteValueS390X_OpS390XMOVDstoreconst(v) - case OpS390XMOVDstoreidx: - return rewriteValueS390X_OpS390XMOVDstoreidx(v) case OpS390XMOVHBRstore: return rewriteValueS390X_OpS390XMOVHBRstore(v) - case OpS390XMOVHBRstoreidx: - return rewriteValueS390X_OpS390XMOVHBRstoreidx(v) case OpS390XMOVHZload: return rewriteValueS390X_OpS390XMOVHZload(v) - case OpS390XMOVHZloadidx: - return rewriteValueS390X_OpS390XMOVHZloadidx(v) case OpS390XMOVHZreg: return rewriteValueS390X_OpS390XMOVHZreg(v) case OpS390XMOVHload: return rewriteValueS390X_OpS390XMOVHload(v) - case OpS390XMOVHloadidx: - return rewriteValueS390X_OpS390XMOVHloadidx(v) case OpS390XMOVHreg: return rewriteValueS390X_OpS390XMOVHreg(v) case OpS390XMOVHstore: return rewriteValueS390X_OpS390XMOVHstore(v) case OpS390XMOVHstoreconst: return rewriteValueS390X_OpS390XMOVHstoreconst(v) - case OpS390XMOVHstoreidx: - return rewriteValueS390X_OpS390XMOVHstoreidx(v) case OpS390XMOVWBRstore: return rewriteValueS390X_OpS390XMOVWBRstore(v) - case OpS390XMOVWBRstoreidx: - return rewriteValueS390X_OpS390XMOVWBRstoreidx(v) case OpS390XMOVWZload: return rewriteValueS390X_OpS390XMOVWZload(v) - case OpS390XMOVWZloadidx: - return rewriteValueS390X_OpS390XMOVWZloadidx(v) case OpS390XMOVWZreg: return rewriteValueS390X_OpS390XMOVWZreg(v) case OpS390XMOVWload: return rewriteValueS390X_OpS390XMOVWload(v) - case OpS390XMOVWloadidx: - return rewriteValueS390X_OpS390XMOVWloadidx(v) case OpS390XMOVWreg: return rewriteValueS390X_OpS390XMOVWreg(v) case OpS390XMOVWstore: return rewriteValueS390X_OpS390XMOVWstore(v) case OpS390XMOVWstoreconst: return rewriteValueS390X_OpS390XMOVWstoreconst(v) - case OpS390XMOVWstoreidx: - return rewriteValueS390X_OpS390XMOVWstoreidx(v) case OpS390XMULLD: return rewriteValueS390X_OpS390XMULLD(v) case OpS390XMULLDconst: @@ -5325,7 +5291,7 @@ func 
rewriteValueS390X_OpS390XADD(v *Value) bool { break } // match: (ADD idx (MOVDaddr [c] {s} ptr)) - // cond: ptr.Op != OpSB && idx.Op != OpSB + // cond: ptr.Op != OpSB // result: (MOVDaddridx [c] {s} ptr idx) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -5336,7 +5302,7 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { c := auxIntToInt32(v_1.AuxInt) s := auxToSym(v_1.Aux) ptr := v_1.Args[0] - if !(ptr.Op != OpSB && idx.Op != OpSB) { + if !(ptr.Op != OpSB) { continue } v.reset(OpS390XMOVDaddridx) @@ -7487,106 +7453,6 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { v.AddArg2(base, mem) return true } - // match: (FMOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (FMOVDloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] - ptr := v_0.Args[0] - mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XFMOVDloadidx) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg3(ptr, idx, mem) - return true - } - // match: (FMOVDload [off] {sym} (ADD ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (FMOVDloadidx [off] {sym} ptr idx mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADD { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - mem := v_1 - if !(ptr.Op != OpSB) { - continue - } - v.reset(OpS390XFMOVDloadidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - return false -} -func 
rewriteValueS390X_OpS390XFMOVDloadidx(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (FMOVDloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADDconst { - break - } - d := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - break - } - v.reset(OpS390XFMOVDloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - // match: (FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (FMOVDloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - if v_1.Op != OpS390XADDconst { - break - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - break - } - v.reset(OpS390XFMOVDloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } return false } func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool { @@ -7638,111 +7504,6 @@ func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool { v.AddArg3(base, val, mem) return true } - // match: (FMOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (FMOVDstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XFMOVDstoreidx) - v.AuxInt = int32ToAuxInt(off1 + 
off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg4(ptr, idx, val, mem) - return true - } - // match: (FMOVDstore [off] {sym} (ADD ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (FMOVDstoreidx [off] {sym} ptr idx val mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADD { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - val := v_1 - mem := v_2 - if !(ptr.Op != OpSB) { - continue - } - v.reset(OpS390XFMOVDstoreidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - break - } - return false -} -func rewriteValueS390X_OpS390XFMOVDstoreidx(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (FMOVDstoreidx [c+d] {sym} ptr idx val mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADDconst { - break - } - d := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - val := v_2 - mem := v_3 - if !(is20Bit(int64(c) + int64(d))) { - break - } - v.reset(OpS390XFMOVDstoreidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - // match: (FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (FMOVDstoreidx [c+d] {sym} ptr idx val mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - if v_1.Op != OpS390XADDconst { - break - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - val := v_2 - mem := v_3 - if !(is20Bit(int64(c) + int64(d))) { - break - } - v.reset(OpS390XFMOVDstoreidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } 
return false } func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { @@ -7809,106 +7570,6 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { v.AddArg2(base, mem) return true } - // match: (FMOVSload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (FMOVSloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] - ptr := v_0.Args[0] - mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XFMOVSloadidx) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg3(ptr, idx, mem) - return true - } - // match: (FMOVSload [off] {sym} (ADD ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (FMOVSloadidx [off] {sym} ptr idx mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADD { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - mem := v_1 - if !(ptr.Op != OpSB) { - continue - } - v.reset(OpS390XFMOVSloadidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - return false -} -func rewriteValueS390X_OpS390XFMOVSloadidx(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (FMOVSloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADDconst { - break - } - d := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - mem := v_2 - if !(is20Bit(int64(c) + 
int64(d))) { - break - } - v.reset(OpS390XFMOVSloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - // match: (FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (FMOVSloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - if v_1.Op != OpS390XADDconst { - break - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - break - } - v.reset(OpS390XFMOVSloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } return false } func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool { @@ -7960,124 +7621,19 @@ func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool { v.AddArg3(base, val, mem) return true } - // match: (FMOVSstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (FMOVSstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) + return false +} +func rewriteValueS390X_OpS390XFNEG(v *Value) bool { + v_0 := v.Args[0] + // match: (FNEG (LPDFR x)) + // result: (LNDFR x) for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + if v_0.Op != OpS390XLPDFR { break } - v.reset(OpS390XFMOVSstoreidx) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg4(ptr, idx, val, mem) - return true - } - // match: (FMOVSstore [off] {sym} (ADD ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (FMOVSstoreidx [off] {sym} ptr idx val mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if 
v_0.Op != OpS390XADD { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - val := v_1 - mem := v_2 - if !(ptr.Op != OpSB) { - continue - } - v.reset(OpS390XFMOVSstoreidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - break - } - return false -} -func rewriteValueS390X_OpS390XFMOVSstoreidx(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (FMOVSstoreidx [c+d] {sym} ptr idx val mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADDconst { - break - } - d := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - val := v_2 - mem := v_3 - if !(is20Bit(int64(c) + int64(d))) { - break - } - v.reset(OpS390XFMOVSstoreidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - // match: (FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (FMOVSstoreidx [c+d] {sym} ptr idx val mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - if v_1.Op != OpS390XADDconst { - break - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - val := v_2 - mem := v_3 - if !(is20Bit(int64(c) + int64(d))) { - break - } - v.reset(OpS390XFMOVSstoreidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XFNEG(v *Value) bool { - v_0 := v.Args[0] - // match: (FNEG (LPDFR x)) - // result: (LNDFR x) - for { - if v_0.Op != OpS390XLPDFR { - break - } - x := v_0.Args[0] - v.reset(OpS390XLNDFR) - v.AddArg(x) + x := v_0.Args[0] + v.reset(OpS390XLNDFR) + v.AddArg(x) return 
true } // match: (FNEG (LNDFR x)) @@ -8559,112 +8115,6 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { v.AddArg2(base, mem) return true } - // match: (MOVBZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (MOVBZloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] - ptr := v_0.Args[0] - mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVBZloadidx) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg3(ptr, idx, mem) - return true - } - // match: (MOVBZload [off] {sym} (ADD ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVBZloadidx [off] {sym} ptr idx mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADD { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - mem := v_1 - if !(ptr.Op != OpSB) { - continue - } - v.reset(OpS390XMOVBZloadidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - return false -} -func rewriteValueS390X_OpS390XMOVBZloadidx(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVBZloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - mem := v_2 - 
if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVBZloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - // match: (MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVBZloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - ptr := v_0 - if v_1.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVBZloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } return false } func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { @@ -8778,17 +8228,6 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { v.copyOf(x) return true } - // match: (MOVBZreg x:(MOVBZloadidx _ _ _)) - // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { - break - } - v.copyOf(x) - return true - } // match: (MOVBZreg x:(MOVBload [o] {s} p mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBZload [o] {s} p mem) @@ -8813,31 +8252,6 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { v0.AddArg2(p, mem) return true } - // match: (MOVBZreg x:(MOVBloadidx [o] {s} p i mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVBZloadidx [o] {s} p i mem) - for { - t := v.Type - x := v_0 - if x.Op != OpS390XMOVBloadidx { - break - } - o := auxIntToInt32(x.AuxInt) - s := auxToSym(x.Aux) - mem := x.Args[2] - p := x.Args[0] - i := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBZloadidx, t) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(o) - v0.Aux = symToAux(s) - 
v0.AddArg3(p, i, mem) - return true - } // match: (MOVBZreg x:(Arg )) // cond: !t.IsSigned() && t.Size() == 1 // result: x @@ -8971,112 +8385,6 @@ func rewriteValueS390X_OpS390XMOVBload(v *Value) bool { v.AddArg2(base, mem) return true } - // match: (MOVBload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (MOVBloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] - ptr := v_0.Args[0] - mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVBloadidx) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg3(ptr, idx, mem) - return true - } - // match: (MOVBload [off] {sym} (ADD ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVBloadidx [off] {sym} ptr idx mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADD { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - mem := v_1 - if !(ptr.Op != OpSB) { - continue - } - v.reset(OpS390XMOVBloadidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - return false -} -func rewriteValueS390X_OpS390XMOVBloadidx(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MOVBloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVBloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XADDconst { - continue - } - d 
:= auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVBloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - // match: (MOVBloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVBloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - ptr := v_0 - if v_1.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVBloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } return false } func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { @@ -9190,17 +8498,6 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { v.copyOf(x) return true } - // match: (MOVBreg x:(MOVBloadidx _ _ _)) - // cond: (x.Type.IsSigned() || x.Type.Size() == 8) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVBloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.copyOf(x) - return true - } // match: (MOVBreg x:(MOVBZload [o] {s} p mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBload [o] {s} p mem) @@ -9225,34 +8522,9 @@ func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { v0.AddArg2(p, mem) return true } - // match: (MOVBreg x:(MOVBZloadidx [o] {s} p i mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVBloadidx [o] {s} p i mem) - for { - t := v.Type - x := v_0 - if x.Op != OpS390XMOVBZloadidx { - break - } - o := auxIntToInt32(x.AuxInt) - s := auxToSym(x.Aux) - mem := x.Args[2] - p := x.Args[0] - i := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBloadidx, t) - 
v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(o) - v0.Aux = symToAux(s) - v0.AddArg3(p, i, mem) - return true - } - // match: (MOVBreg x:(Arg )) - // cond: t.IsSigned() && t.Size() == 1 - // result: x + // match: (MOVBreg x:(Arg )) + // cond: t.IsSigned() && t.Size() == 1 + // result: x for { x := v_0 if x.Op != OpArg { @@ -9401,58 +8673,6 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.AddArg3(base, val, mem) return true } - // match: (MOVBstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (MOVBstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVBstoreidx) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg4(ptr, idx, val, mem) - return true - } - // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVBstoreidx [off] {sym} ptr idx val mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADD { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - val := v_1 - mem := v_2 - if !(ptr.Op != OpSB) { - continue - } - v.reset(OpS390XMOVBstoreidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - break - } // match: (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRDconst [8] w) mem)) // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVHstore [i-1] {s} p w mem) @@ -9752,358 +8972,11 @@ func 
rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool { } return false } -func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - val := v_2 - mem := v_3 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVBstoreidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - break - } - // match: (MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - ptr := v_0 - if v_1.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - val := v_2 - mem := v_3 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVBstoreidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - break - } - // match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - w := v_2 - x := v_3 - if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for 
_i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 { - continue - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || auxIntToInt8(x_2.AuxInt) != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = int32ToAuxInt(i - 1) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w, mem) - return true - } - } - break - } - // match: (MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - w0 := v_2 - if w0.Op != OpS390XSRDconst { - continue - } - j := auxIntToInt8(w0.AuxInt) - w := w0.Args[0] - x := v_3 - if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 { - continue - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || auxIntToInt8(x_2.AuxInt) != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = int32ToAuxInt(i - 1) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w0, mem) - return true - } - } - break - } - // match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - w := v_2 - x := v_3 - if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, 
x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 { - continue - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || auxIntToInt8(x_2.AuxInt) != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = int32ToAuxInt(i - 1) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w, mem) - return true - } - } - break - } - // match: (MOVBstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [j+8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - w0 := v_2 - if w0.Op != OpS390XSRWconst { - continue - } - j := auxIntToInt8(w0.AuxInt) - w := w0.Args[0] - x := v_3 - if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 { - continue - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || auxIntToInt8(x_2.AuxInt) != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = int32ToAuxInt(i - 1) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w0, mem) - return true - } - } - break - } - // match: (MOVBstoreidx [i] {s} p idx (SRDconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - if v_2.Op != OpS390XSRDconst || auxIntToInt8(v_2.AuxInt) != 8 { - continue - } - w := v_2.Args[0] - x := v_3 - if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := 
x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = int32ToAuxInt(i - 1) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w, mem) - return true - } - } - break - } - // match: (MOVBstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRDconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - if v_2.Op != OpS390XSRDconst { - continue - } - j := auxIntToInt8(v_2.AuxInt) - w := v_2.Args[0] - x := v_3 - if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 { - continue - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = int32ToAuxInt(i - 1) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w0, mem) - return true - } - } - break - } - // match: (MOVBstoreidx [i] {s} p idx (SRWconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - if v_2.Op != OpS390XSRWconst || auxIntToInt8(v_2.AuxInt) != 8 { - continue - } - w := v_2.Args[0] - x := v_3 - if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 
<= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = int32ToAuxInt(i - 1) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w, mem) - return true - } - } - break - } - // match: (MOVBstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRWconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - if v_2.Op != OpS390XSRWconst { - continue - } - j := auxIntToInt8(v_2.AuxInt) - w := v_2.Args[0] - x := v_3 - if x.Op != OpS390XMOVBstoreidx || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 { - continue - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = int32ToAuxInt(i - 1) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w0, mem) - return true - } - } - break - } - return false -} func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDaddridx [c] {s} (ADDconst [d] x) y) - // cond: is20Bit(int64(c)+int64(d)) && x.Op != OpSB + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVDaddridx [c+d] {s} x y) for { c := auxIntToInt32(v.AuxInt) @@ -10114,7 +8987,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] y := v_1 - if !(is20Bit(int64(c)+int64(d)) && x.Op != OpSB) { + if !(is20Bit(int64(c) + int64(d))) { break } v.reset(OpS390XMOVDaddridx) @@ -10124,7 +8997,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { 
return true } // match: (MOVDaddridx [c] {s} x (ADDconst [d] y)) - // cond: is20Bit(int64(c)+int64(d)) && y.Op != OpSB + // cond: is20Bit(int64(c)+int64(d)) // result: (MOVDaddridx [c+d] {s} x y) for { c := auxIntToInt32(v.AuxInt) @@ -10135,7 +9008,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { } d := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] - if !(is20Bit(int64(c)+int64(d)) && y.Op != OpSB) { + if !(is20Bit(int64(c) + int64(d))) { break } v.reset(OpS390XMOVDaddridx) @@ -10274,138 +9147,32 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { v.AddArg2(base, mem) return true } - // match: (MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (MOVDloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) + return false +} +func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (MOVDstore [off1+off2] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { break } off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] ptr := v_0.Args[0] - mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + val := v_1 + mem := v_2 + if !(is20Bit(int64(off1) + int64(off2))) { break } - v.reset(OpS390XMOVDloadidx) + v.reset(OpS390XMOVDstore) v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg3(ptr, idx, mem) - return true - } - // match: (MOVDload [off] {sym} (ADD ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVDloadidx [off] {sym} ptr idx mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADD { - break - } - _ = 
v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - mem := v_1 - if !(ptr.Op != OpSB) { - continue - } - v.reset(OpS390XMOVDloadidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - return false -} -func rewriteValueS390X_OpS390XMOVDloadidx(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVDloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVDloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - // match: (MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVDloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - ptr := v_0 - if v_1.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVDloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - return false -} -func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: is20Bit(int64(off1)+int64(off2)) - // result: (MOVDstore [off1+off2] {sym} ptr val mem) - for { 
- off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADDconst { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is20Bit(int64(off1) + int64(off2))) { - break - } - v.reset(OpS390XMOVDstore) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(ptr, val, mem) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) @@ -10453,58 +9220,6 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { v.AddArg3(base, val, mem) return true } - // match: (MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (MOVDstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg4(ptr, idx, val, mem) - return true - } - // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVDstoreidx [off] {sym} ptr idx val mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADD { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - val := v_1 - mem := v_2 - if !(ptr.Op != OpSB) { - continue - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - break - } // match: (MOVDstore [i] {s} p w1 
x:(MOVDstore [i-8] {s} p w0 mem)) // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x) // result: (STMG2 [i-8] {s} p w0 w1 mem) @@ -10636,65 +9351,6 @@ func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool { } return false } -func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - val := v_2 - mem := v_3 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - break - } - // match: (MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - ptr := v_0 - if v_1.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - val := v_2 - mem := v_3 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - break - } - return false -} func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -10811,155 +9467,6 @@ func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { } return false } -func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := 
v.Args[0] - // match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - if v_2.Op != OpS390XSRDconst || auxIntToInt8(v_2.AuxInt) != 16 { - continue - } - w := v_2.Args[0] - x := v_3 - if x.Op != OpS390XMOVHBRstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = int32ToAuxInt(i - 2) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w, mem) - return true - } - } - break - } - // match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRDconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - if v_2.Op != OpS390XSRDconst { - continue - } - j := auxIntToInt8(v_2.AuxInt) - w := v_2.Args[0] - x := v_3 - if x.Op != OpS390XMOVHBRstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 { - continue - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = int32ToAuxInt(i - 2) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w0, mem) - return true - } - } - break - } - // match: 
(MOVHBRstoreidx [i] {s} p idx (SRWconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - if v_2.Op != OpS390XSRWconst || auxIntToInt8(v_2.AuxInt) != 16 { - continue - } - w := v_2.Args[0] - x := v_3 - if x.Op != OpS390XMOVHBRstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = int32ToAuxInt(i - 2) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w, mem) - return true - } - } - break - } - // match: (MOVHBRstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRWconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - if v_2.Op != OpS390XSRWconst { - continue - } - j := auxIntToInt8(v_2.AuxInt) - w := v_2.Args[0] - x := v_3 - if x.Op != OpS390XMOVHBRstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 { - continue - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = int32ToAuxInt(i - 2) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w0, mem) - return true - } - } - break - } - return false -} func 
rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -11026,137 +9533,31 @@ func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { v.AddArg2(base, mem) return true } - // match: (MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (MOVHZloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) + return false +} +func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVHZreg e:(MOVBZreg x)) + // cond: clobberIfDead(e) + // result: (MOVBZreg x) for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { + e := v_0 + if e.Op != OpS390XMOVBZreg { break } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] - ptr := v_0.Args[0] - mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + x := e.Args[0] + if !(clobberIfDead(e)) { break } - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg3(ptr, idx, mem) + v.reset(OpS390XMOVBZreg) + v.AddArg(x) return true } - // match: (MOVHZload [off] {sym} (ADD ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVHZloadidx [off] {sym} ptr idx mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADD { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - mem := v_1 - if !(ptr.Op != OpSB) { - continue - } - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - return false -} -func rewriteValueS390X_OpS390XMOVHZloadidx(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: 
(MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - // match: (MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - ptr := v_0 - if v_1.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - return false -} -func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MOVHZreg e:(MOVBZreg x)) - // cond: clobberIfDead(e) - // result: (MOVBZreg x) - for { - e := v_0 - if e.Op != OpS390XMOVBZreg { - break - } - x := e.Args[0] - if !(clobberIfDead(e)) { - break - } - v.reset(OpS390XMOVBZreg) - v.AddArg(x) - return true - } - // match: (MOVHZreg e:(MOVHreg x)) - // cond: clobberIfDead(e) - // result: (MOVHZreg x) + // match: (MOVHZreg e:(MOVHreg x)) + // cond: clobberIfDead(e) + // result: (MOVHZreg x) for { e := v_0 if e.Op != OpS390XMOVHreg { @@ -11229,17 +9630,6 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { v.copyOf(x) return true } - // match: (MOVHZreg x:(MOVBZloadidx _ _ _)) - // cond: 
(!x.Type.IsSigned() || x.Type.Size() > 1) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { - break - } - v.copyOf(x) - return true - } // match: (MOVHZreg x:(MOVHZload _ _)) // cond: (!x.Type.IsSigned() || x.Type.Size() > 2) // result: x @@ -11251,17 +9641,6 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { v.copyOf(x) return true } - // match: (MOVHZreg x:(MOVHZloadidx _ _ _)) - // cond: (!x.Type.IsSigned() || x.Type.Size() > 2) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVHZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 2) { - break - } - v.copyOf(x) - return true - } // match: (MOVHZreg x:(MOVHload [o] {s} p mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVHZload [o] {s} p mem) @@ -11286,31 +9665,6 @@ func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { v0.AddArg2(p, mem) return true } - // match: (MOVHZreg x:(MOVHloadidx [o] {s} p i mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVHZloadidx [o] {s} p i mem) - for { - t := v.Type - x := v_0 - if x.Op != OpS390XMOVHloadidx { - break - } - o := auxIntToInt32(x.AuxInt) - s := auxToSym(x.Aux) - mem := x.Args[2] - p := x.Args[0] - i := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, t) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(o) - v0.Aux = symToAux(s) - v0.AddArg3(p, i, mem) - return true - } // match: (MOVHZreg x:(Arg )) // cond: !t.IsSigned() && t.Size() <= 2 // result: x @@ -11420,112 +9774,6 @@ func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { v.AddArg2(base, mem) return true } - // match: (MOVHload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (MOVHloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { - 
break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] - ptr := v_0.Args[0] - mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVHloadidx) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg3(ptr, idx, mem) - return true - } - // match: (MOVHload [off] {sym} (ADD ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVHloadidx [off] {sym} ptr idx mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADD { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - mem := v_1 - if !(ptr.Op != OpSB) { - continue - } - v.reset(OpS390XMOVHloadidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - return false -} -func rewriteValueS390X_OpS390XMOVHloadidx(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MOVHloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVHloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVHloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - // match: (MOVHloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVHloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - ptr := v_0 - if 
v_1.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVHloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } return false } func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { @@ -11623,17 +9871,6 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { v.copyOf(x) return true } - // match: (MOVHreg x:(MOVBloadidx _ _ _)) - // cond: (x.Type.IsSigned() || x.Type.Size() == 8) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVBloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.copyOf(x) - return true - } // match: (MOVHreg x:(MOVHload _ _)) // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x @@ -11645,17 +9882,6 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { v.copyOf(x) return true } - // match: (MOVHreg x:(MOVHloadidx _ _ _)) - // cond: (x.Type.IsSigned() || x.Type.Size() == 8) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVHloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.copyOf(x) - return true - } // match: (MOVHreg x:(MOVBZload _ _)) // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) // result: x @@ -11667,17 +9893,6 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { v.copyOf(x) return true } - // match: (MOVHreg x:(MOVBZloadidx _ _ _)) - // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { - break - } - v.copyOf(x) - return true - } // match: (MOVHreg x:(MOVHZload [o] {s} p mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVHload [o] {s} p mem) @@ -11702,31 +9917,6 @@ func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { v0.AddArg2(p, mem) return true } - // match: (MOVHreg x:(MOVHZloadidx [o] {s} p i mem)) - // cond: x.Uses == 1 && clobber(x) - 
// result: @x.Block (MOVHloadidx [o] {s} p i mem) - for { - t := v.Type - x := v_0 - if x.Op != OpS390XMOVHZloadidx { - break - } - o := auxIntToInt32(x.AuxInt) - s := auxToSym(x.Aux) - mem := x.Args[2] - p := x.Args[0] - i := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHloadidx, t) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(o) - v0.Aux = symToAux(s) - v0.AddArg3(p, i, mem) - return true - } // match: (MOVHreg x:(Arg )) // cond: t.IsSigned() && t.Size() <= 2 // result: x @@ -11879,72 +10069,20 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { v.AddArg3(base, val, mem) return true } - // match: (MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (MOVHstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) + // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVWstore [i-2] {s} p w mem) for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + w := v_1 + x := v_2 + if x.Op != OpS390XMOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { break } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg4(ptr, idx, val, mem) - return true - } - // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVHstoreidx [off] {sym} ptr idx val mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADD { - break - } - _ = 
v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - val := v_1 - mem := v_2 - if !(ptr.Op != OpSB) { - continue - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - break - } - // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem)) - // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) - // result: (MOVWstore [i-2] {s} p w mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - p := v_0 - w := v_1 - x := v_2 - if x.Op != OpS390XMOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { - break - } - mem := x.Args[2] - if p != x.Args[0] { + mem := x.Args[2] + if p != x.Args[0] { break } x_1 := x.Args[1] @@ -12124,211 +10262,6 @@ func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { } return false } -func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - val := v_2 - mem := v_3 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - break - } - // match: (MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = 
_i0+1, v_1, v_0 { - ptr := v_0 - if v_1.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - val := v_2 - mem := v_3 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - break - } - // match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - w := v_2 - x := v_3 - if x.Op != OpS390XMOVHstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 { - continue - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || auxIntToInt8(x_2.AuxInt) != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = int32ToAuxInt(i - 2) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w, mem) - return true - } - } - break - } - // match: (MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - w0 := v_2 - if w0.Op != OpS390XSRDconst { - continue - } - j := auxIntToInt8(w0.AuxInt) - w := w0.Args[0] - x := v_3 - if x.Op != OpS390XMOVHstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 
|| idx != x_1 { - continue - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || auxIntToInt8(x_2.AuxInt) != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = int32ToAuxInt(i - 2) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w0, mem) - return true - } - } - break - } - // match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - w := v_2 - x := v_3 - if x.Op != OpS390XMOVHstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 { - continue - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || auxIntToInt8(x_2.AuxInt) != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = int32ToAuxInt(i - 2) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w, mem) - return true - } - } - break - } - // match: (MOVHstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - w0 := v_2 - if w0.Op != OpS390XSRWconst { - continue - } - j := auxIntToInt8(w0.AuxInt) - w := w0.Args[0] - x := v_3 - if x.Op != OpS390XMOVHstoreidx || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 { - continue - 
} - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || auxIntToInt8(x_2.AuxInt) != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = int32ToAuxInt(i - 2) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w0, mem) - return true - } - } - break - } - return false -} func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -12390,84 +10323,6 @@ func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool { } return false } -func rewriteValueS390X_OpS390XMOVWBRstoreidx(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - if v_2.Op != OpS390XSRDconst || auxIntToInt8(v_2.AuxInt) != 32 { - continue - } - w := v_2.Args[0] - x := v_3 - if x.Op != OpS390XMOVWBRstoreidx || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = int32ToAuxInt(i - 4) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w, mem) - return true - } - } - break - } - // match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVWBRstoreidx [i-4] {s} p idx w0:(SRDconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w0 mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - if v_2.Op != OpS390XSRDconst { - continue - } - j := 
auxIntToInt8(v_2.AuxInt) - w := v_2.Args[0] - x := v_3 - if x.Op != OpS390XMOVWBRstoreidx || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 { - continue - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = int32ToAuxInt(i - 4) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w0, mem) - return true - } - } - break - } - return false -} func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -12534,112 +10389,6 @@ func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { v.AddArg2(base, mem) return true } - // match: (MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (MOVWZloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] - ptr := v_0.Args[0] - mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg3(ptr, idx, mem) - return true - } - // match: (MOVWZload [off] {sym} (ADD ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVWZloadidx [off] {sym} ptr idx mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADD { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - mem := v_1 - if !(ptr.Op != OpSB) 
{ - continue - } - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - return false -} -func rewriteValueS390X_OpS390XMOVWZloadidx(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - // match: (MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - ptr := v_0 - if v_1.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } return false } func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { @@ -12720,17 +10469,6 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { v.copyOf(x) return true } - // match: (MOVWZreg x:(MOVBZloadidx _ _ _)) - // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { - break - } - v.copyOf(x) - return true - } // match: (MOVWZreg x:(MOVHZload _ _)) // cond: 
(!x.Type.IsSigned() || x.Type.Size() > 2) // result: x @@ -12742,17 +10480,6 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { v.copyOf(x) return true } - // match: (MOVWZreg x:(MOVHZloadidx _ _ _)) - // cond: (!x.Type.IsSigned() || x.Type.Size() > 2) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVHZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 2) { - break - } - v.copyOf(x) - return true - } // match: (MOVWZreg x:(MOVWZload _ _)) // cond: (!x.Type.IsSigned() || x.Type.Size() > 4) // result: x @@ -12764,17 +10491,6 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { v.copyOf(x) return true } - // match: (MOVWZreg x:(MOVWZloadidx _ _ _)) - // cond: (!x.Type.IsSigned() || x.Type.Size() > 4) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVWZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 4) { - break - } - v.copyOf(x) - return true - } // match: (MOVWZreg x:(MOVWload [o] {s} p mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWZload [o] {s} p mem) @@ -12799,31 +10515,6 @@ func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { v0.AddArg2(p, mem) return true } - // match: (MOVWZreg x:(MOVWloadidx [o] {s} p i mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWZloadidx [o] {s} p i mem) - for { - t := v.Type - x := v_0 - if x.Op != OpS390XMOVWloadidx { - break - } - o := auxIntToInt32(x.AuxInt) - s := auxToSym(x.Aux) - mem := x.Args[2] - p := x.Args[0] - i := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, t) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(o) - v0.Aux = symToAux(s) - v0.AddArg3(p, i, mem) - return true - } // match: (MOVWZreg x:(Arg )) // cond: !t.IsSigned() && t.Size() <= 4 // result: x @@ -12918,112 +10609,6 @@ func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { v.AddArg2(base, mem) return true } - // match: (MOVWload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: 
is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (MOVWloadidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] - ptr := v_0.Args[0] - mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVWloadidx) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg3(ptr, idx, mem) - return true - } - // match: (MOVWload [off] {sym} (ADD ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVWloadidx [off] {sym} ptr idx mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADD { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - mem := v_1 - if !(ptr.Op != OpSB) { - continue - } - v.reset(OpS390XMOVWloadidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - return false -} -func rewriteValueS390X_OpS390XMOVWloadidx(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MOVWloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVWloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVWloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } - // match: (MOVWloadidx [c] {sym} ptr (ADDconst 
[d] idx) mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVWloadidx [c+d] {sym} ptr idx mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - ptr := v_0 - if v_1.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - mem := v_2 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVWloadidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg3(ptr, idx, mem) - return true - } - break - } return false } func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { @@ -13098,84 +10683,40 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { // result: x for { x := v_0 - if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.copyOf(x) - return true - } - // match: (MOVWreg x:(MOVBloadidx _ _ _)) - // cond: (x.Type.IsSigned() || x.Type.Size() == 8) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVBloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.copyOf(x) - return true - } - // match: (MOVWreg x:(MOVHload _ _)) - // cond: (x.Type.IsSigned() || x.Type.Size() == 8) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVHload || !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.copyOf(x) - return true - } - // match: (MOVWreg x:(MOVHloadidx _ _ _)) - // cond: (x.Type.IsSigned() || x.Type.Size() == 8) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVHloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { - break - } - v.copyOf(x) - return true - } - // match: (MOVWreg x:(MOVWload _ _)) - // cond: (x.Type.IsSigned() || x.Type.Size() == 8) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVWload || !(x.Type.IsSigned() || x.Type.Size() == 8) { + if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) return true } - // match: (MOVWreg x:(MOVWloadidx _ _ _)) + // match: (MOVWreg 
x:(MOVHload _ _)) // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x for { x := v_0 - if x.Op != OpS390XMOVWloadidx || !(x.Type.IsSigned() || x.Type.Size() == 8) { + if x.Op != OpS390XMOVHload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) return true } - // match: (MOVWreg x:(MOVBZload _ _)) - // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) + // match: (MOVWreg x:(MOVWload _ _)) + // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x for { x := v_0 - if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { + if x.Op != OpS390XMOVWload || !(x.Type.IsSigned() || x.Type.Size() == 8) { break } v.copyOf(x) return true } - // match: (MOVWreg x:(MOVBZloadidx _ _ _)) + // match: (MOVWreg x:(MOVBZload _ _)) // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) // result: x for { x := v_0 - if x.Op != OpS390XMOVBZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 1) { + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { break } v.copyOf(x) @@ -13192,17 +10733,6 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { v.copyOf(x) return true } - // match: (MOVWreg x:(MOVHZloadidx _ _ _)) - // cond: (!x.Type.IsSigned() || x.Type.Size() > 2) - // result: x - for { - x := v_0 - if x.Op != OpS390XMOVHZloadidx || !(!x.Type.IsSigned() || x.Type.Size() > 2) { - break - } - v.copyOf(x) - return true - } // match: (MOVWreg x:(MOVWZload [o] {s} p mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWload [o] {s} p mem) @@ -13227,31 +10757,6 @@ func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { v0.AddArg2(p, mem) return true } - // match: (MOVWreg x:(MOVWZloadidx [o] {s} p i mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWloadidx [o] {s} p i mem) - for { - t := v.Type - x := v_0 - if x.Op != OpS390XMOVWZloadidx { - break - } - o := auxIntToInt32(x.AuxInt) - s := auxToSym(x.Aux) - mem := x.Args[2] - p := x.Args[0] - i := x.Args[1] - if !(x.Uses == 1 && 
clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWloadidx, t) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(o) - v0.Aux = symToAux(s) - v0.AddArg3(p, i, mem) - return true - } // match: (MOVWreg x:(Arg )) // cond: t.IsSigned() && t.Size() <= 4 // result: x @@ -13385,58 +10890,6 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { v.AddArg3(base, val, mem) return true } - // match: (MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (MOVWstoreidx [off1+off2] {mergeSymTyped(sym1,sym2)} ptr idx val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSymTyped(sym1, sym2)) - v.AddArg4(ptr, idx, val, mem) - return true - } - // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVWstoreidx [off] {sym} ptr idx val mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpS390XADD { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - ptr := v_0_0 - idx := v_0_1 - val := v_1 - mem := v_2 - if !(ptr.Op != OpSB) { - continue - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - break - } // match: (MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem)) // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVDstore [i-4] {s} p w mem) @@ -13653,138 +11106,6 @@ func 
rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool { } return false } -func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_0.AuxInt) - ptr := v_0.Args[0] - idx := v_1 - val := v_2 - mem := v_3 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - break - } - // match: (MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: is20Bit(int64(c)+int64(d)) - // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) - for { - c := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - ptr := v_0 - if v_1.Op != OpS390XADDconst { - continue - } - d := auxIntToInt32(v_1.AuxInt) - idx := v_1.Args[0] - val := v_2 - mem := v_3 - if !(is20Bit(int64(c) + int64(d))) { - continue - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = int32ToAuxInt(c + d) - v.Aux = symToAux(sym) - v.AddArg4(ptr, idx, val, mem) - return true - } - break - } - // match: (MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - w := v_2 - x := v_3 - if x.Op != OpS390XMOVWstoreidx || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for 
_i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 { - continue - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || auxIntToInt8(x_2.AuxInt) != 32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = int32ToAuxInt(i - 4) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w, mem) - return true - } - } - break - } - // match: (MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w0 mem) - for { - i := auxIntToInt32(v.AuxInt) - s := auxToSym(v.Aux) - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - p := v_0 - idx := v_1 - w0 := v_2 - if w0.Op != OpS390XSRDconst { - continue - } - j := auxIntToInt8(w0.AuxInt) - w := w0.Args[0] - x := v_3 - if x.Op != OpS390XMOVWstoreidx || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { - continue - } - mem := x.Args[3] - x_0 := x.Args[0] - x_1 := x.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { - if p != x_0 || idx != x_1 { - continue - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || auxIntToInt8(x_2.AuxInt) != j+32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - continue - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = int32ToAuxInt(i - 4) - v.Aux = symToAux(s) - v.AddArg4(p, idx, w0, mem) - return true - } - } - break - } - return false -} func rewriteValueS390X_OpS390XMULLD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -14758,301 +12079,13 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = int8ToAuxInt(j1) - v2 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) - v2.AuxInt = int32ToAuxInt(i0) - v2.Aux = symToAux(s) - v2.AddArg2(p, mem) - v1.AddArg(v2) - v0.AddArg2(v1, y) - return true - } - } - break - } - // match: (OR x1:(MOVBZloadidx [i1] {s} p idx mem) sh:(SLDconst [8] 
x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x1 := v_0 - if x1.Op != OpS390XMOVBZloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - s := auxToSym(x1.Aux) - mem := x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { - p := x1_0 - idx := x1_1 - sh := v_1 - if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 8 { - continue - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - if auxToSym(x0.Aux) != s { - continue - } - _ = x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { - if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { - continue - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(i0) - v0.Aux = symToAux(s) - v0.AddArg3(p, idx, mem) - return true - } - } - } - break - } - // match: (OR x1:(MOVHZloadidx [i1] {s} p idx mem) sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x1 := v_0 - if x1.Op != OpS390XMOVHZloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - s := auxToSym(x1.Aux) - mem := x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, 
x1_0 { - p := x1_0 - idx := x1_1 - sh := v_1 - if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 16 { - continue - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - if auxToSym(x0.Aux) != s { - continue - } - _ = x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { - if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { - continue - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(i0) - v0.Aux = symToAux(s) - v0.AddArg3(p, idx, mem) - return true - } - } - } - break - } - // match: (OR x1:(MOVWZloadidx [i1] {s} p idx mem) sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x1 := v_0 - if x1.Op != OpS390XMOVWZloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - s := auxToSym(x1.Aux) - mem := x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { - p := x1_0 - idx := x1_1 - sh := v_1 - if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 32 { - continue - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - if auxToSym(x0.Aux) != s { - continue - } - _ = x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { - if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != 
nil && clobber(x0, x1, sh)) { - continue - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(i0) - v0.Aux = symToAux(s) - v0.AddArg3(p, idx, mem) - return true - } - } - } - break - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - s0 := v_0 - if s0.Op != OpS390XSLDconst { - continue - } - j0 := auxIntToInt8(s0.AuxInt) - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - s := auxToSym(x0.Aux) - mem := x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { - p := x0_0 - idx := x0_1 - or := v_1 - if or.Op != OpS390XOR { - continue - } - _ = or.Args[1] - or_0 := or.Args[0] - or_1 := or.Args[1] - for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { - s1 := or_0 - if s1.Op != OpS390XSLDconst { - continue - } - j1 := auxIntToInt8(s1.AuxInt) - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - if auxToSym(x1.Aux) != s { - continue - } - _ = x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i3 := 0; _i3 <= 1; _i3, x1_0, x1_1 = _i3+1, x1_1, x1_0 { - if p != x1_0 || idx != x1_1 || mem != x1.Args[2] { - continue - } - y := or_1 - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) { - continue - } - b = mergePoint(b, x0, x1, y) - v0 := 
b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.copyOf(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = int8ToAuxInt(j1) - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = int32ToAuxInt(i0) - v2.Aux = symToAux(s) - v2.AddArg3(p, idx, mem) - v1.AddArg(v2) - v0.AddArg2(v1, y) - return true - } - } - } - } - break - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem)) or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - s0 := v_0 - if s0.Op != OpS390XSLDconst { - continue - } - j0 := auxIntToInt8(s0.AuxInt) - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - s := auxToSym(x0.Aux) - mem := x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { - p := x0_0 - idx := x0_1 - or := v_1 - if or.Op != OpS390XOR { - continue - } - _ = or.Args[1] - or_0 := or.Args[0] - or_1 := or.Args[1] - for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { - s1 := or_0 - if s1.Op != OpS390XSLDconst { - continue - } - j1 := auxIntToInt8(s1.AuxInt) - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - if auxToSym(x1.Aux) != s { - continue - } - _ = x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i3 := 0; _i3 <= 1; _i3, x1_0, x1_1 = _i3+1, x1_1, x1_0 { - if p != x1_0 || idx != x1_1 || mem != x1.Args[2] { - continue - } - y := or_1 - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != 
nil && clobber(x0, x1, s0, s1, or)) { - continue - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.copyOf(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = int8ToAuxInt(j1) - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = int32ToAuxInt(i0) - v2.Aux = symToAux(s) - v2.AddArg3(p, idx, mem) - v1.AddArg(v2) - v0.AddArg2(v1, y) - return true - } - } + v2 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) + v2.AuxInt = int32ToAuxInt(i0) + v2.Aux = symToAux(s) + v2.AddArg2(p, mem) + v1.AddArg(v2) + v0.AddArg2(v1, y) + return true } } break @@ -15332,326 +12365,6 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } break } - // match: (OR x0:(MOVBZloadidx [i0] {s} p idx mem) sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x0 := v_0 - if x0.Op != OpS390XMOVBZloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - s := auxToSym(x0.Aux) - mem := x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { - p := x0_0 - idx := x0_1 - sh := v_1 - if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 8 { - continue - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - if auxToSym(x1.Aux) != s { - continue - } - _ = x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { - if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { - continue - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, 
OpS390XMOVHZreg, typ.UInt64) - v.copyOf(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = int32ToAuxInt(i0) - v1.Aux = symToAux(s) - v1.AddArg3(p, idx, mem) - v0.AddArg(v1) - return true - } - } - } - break - } - // match: (OR r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - r0 := v_0 - if r0.Op != OpS390XMOVHZreg { - continue - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - s := auxToSym(x0.Aux) - mem := x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { - p := x0_0 - idx := x0_1 - sh := v_1 - if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 16 { - continue - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - continue - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - if auxToSym(x1.Aux) != s { - continue - } - _ = x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { - if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) { - continue - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v.copyOf(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v1.AuxInt = int32ToAuxInt(i0) - v1.Aux = symToAux(s) - v1.AddArg3(p, idx, mem) - v0.AddArg(v1) - return true - } - } - } - break - } - // match: (OR 
r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} p idx mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - r0 := v_0 - if r0.Op != OpS390XMOVWZreg { - continue - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - s := auxToSym(x0.Aux) - mem := x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { - p := x0_0 - idx := x0_1 - sh := v_1 - if sh.Op != OpS390XSLDconst || auxIntToInt8(sh.AuxInt) != 32 { - continue - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - continue - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - if auxToSym(x1.Aux) != s { - continue - } - _ = x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { - if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) { - continue - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(i0) - v0.Aux = symToAux(s) - v0.AddArg3(p, idx, mem) - return true - } - } - } - break - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y)) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - // 
result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - s1 := v_0 - if s1.Op != OpS390XSLDconst { - continue - } - j1 := auxIntToInt8(s1.AuxInt) - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - s := auxToSym(x1.Aux) - mem := x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { - p := x1_0 - idx := x1_1 - or := v_1 - if or.Op != OpS390XOR { - continue - } - _ = or.Args[1] - or_0 := or.Args[0] - or_1 := or.Args[1] - for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { - s0 := or_0 - if s0.Op != OpS390XSLDconst { - continue - } - j0 := auxIntToInt8(s0.AuxInt) - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - if auxToSym(x0.Aux) != s { - continue - } - _ = x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i3 := 0; _i3 <= 1; _i3, x0_0, x0_1 = _i3+1, x0_1, x0_0 { - if p != x0_0 || idx != x0_1 || mem != x0.Args[2] { - continue - } - y := or_1 - if !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) { - continue - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.copyOf(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = int8ToAuxInt(j0) - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = int32ToAuxInt(i0) - v3.Aux = symToAux(s) - v3.AddArg3(p, idx, mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(v1, y) - return true - } - } - } - } - break - } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} 
p idx mem))) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - s1 := v_0 - if s1.Op != OpS390XSLDconst { - continue - } - j1 := auxIntToInt8(s1.AuxInt) - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - continue - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - s := auxToSym(x1.Aux) - mem := x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { - p := x1_0 - idx := x1_1 - or := v_1 - if or.Op != OpS390XOR { - continue - } - _ = or.Args[1] - or_0 := or.Args[0] - or_1 := or.Args[1] - for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { - s0 := or_0 - if s0.Op != OpS390XSLDconst { - continue - } - j0 := auxIntToInt8(s0.AuxInt) - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - continue - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - if auxToSym(x0.Aux) != s { - continue - } - _ = x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i3 := 0; _i3 <= 1; _i3, x0_0, x0_1 = _i3+1, x0_1, x0_0 { - if p != x0_0 || idx != x0_1 || mem != x0.Args[2] { - continue - } - y := or_1 - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) { - continue - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.copyOf(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = int8ToAuxInt(j0) - v2 := 
b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = int32ToAuxInt(i0) - v3.Aux = symToAux(s) - v3.AddArg3(p, idx, mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(v1, y) - return true - } - } - } - } - break - } return false } func rewriteValueS390X_OpS390XORW(v *Value) bool { @@ -15904,174 +12617,6 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } break } - // match: (ORW x1:(MOVBZloadidx [i1] {s} p idx mem) sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x1 := v_0 - if x1.Op != OpS390XMOVBZloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - s := auxToSym(x1.Aux) - mem := x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { - p := x1_0 - idx := x1_1 - sh := v_1 - if sh.Op != OpS390XSLWconst || auxIntToInt8(sh.AuxInt) != 8 { - continue - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - if auxToSym(x0.Aux) != s { - continue - } - _ = x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { - if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { - continue - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(i0) - v0.Aux = symToAux(s) - v0.AddArg3(p, idx, mem) - return true - } - } - } - break - } - // match: (ORW x1:(MOVHZloadidx [i1] {s} p idx mem) sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // 
cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x1 := v_0 - if x1.Op != OpS390XMOVHZloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - s := auxToSym(x1.Aux) - mem := x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { - p := x1_0 - idx := x1_1 - sh := v_1 - if sh.Op != OpS390XSLWconst || auxIntToInt8(sh.AuxInt) != 16 { - continue - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - if auxToSym(x0.Aux) != s { - continue - } - _ = x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { - if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { - continue - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(i0) - v0.Aux = symToAux(s) - v0.AddArg3(p, idx, mem) - return true - } - } - } - break - } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - s0 := v_0 - if s0.Op != OpS390XSLWconst { - continue - } - j0 := auxIntToInt8(s0.AuxInt) - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - continue - } - i0 := 
auxIntToInt32(x0.AuxInt) - s := auxToSym(x0.Aux) - mem := x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { - p := x0_0 - idx := x0_1 - or := v_1 - if or.Op != OpS390XORW { - continue - } - _ = or.Args[1] - or_0 := or.Args[0] - or_1 := or.Args[1] - for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { - s1 := or_0 - if s1.Op != OpS390XSLWconst { - continue - } - j1 := auxIntToInt8(s1.AuxInt) - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - if auxToSym(x1.Aux) != s { - continue - } - _ = x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i3 := 0; _i3 <= 1; _i3, x1_0, x1_1 = _i3+1, x1_1, x1_0 { - if p != x1_0 || idx != x1_1 || mem != x1.Args[2] { - continue - } - y := or_1 - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) { - continue - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.copyOf(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = int8ToAuxInt(j1) - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = int32ToAuxInt(i0) - v2.Aux = symToAux(s) - v2.AddArg3(p, idx, mem) - v1.AddArg(v2) - v0.AddArg2(v1, y) - return true - } - } - } - } - break - } // match: (ORW x0:(MOVBZload [i0] {s} p mem) sh:(SLWconst [8] x1:(MOVBZload [i1] {s} p mem))) // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) @@ -16225,186 +12770,6 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } break } - // match: (ORW x0:(MOVBZloadidx [i0] {s} p idx mem) sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && 
sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x0 := v_0 - if x0.Op != OpS390XMOVBZloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - s := auxToSym(x0.Aux) - mem := x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { - p := x0_0 - idx := x0_1 - sh := v_1 - if sh.Op != OpS390XSLWconst || auxIntToInt8(sh.AuxInt) != 8 { - continue - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - if auxToSym(x1.Aux) != s { - continue - } - _ = x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { - if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { - continue - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.copyOf(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = int32ToAuxInt(i0) - v1.Aux = symToAux(s) - v1.AddArg3(p, idx, mem) - v0.AddArg(v1) - return true - } - } - } - break - } - // match: (ORW r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - r0 := v_0 - if r0.Op != OpS390XMOVHZreg { - continue - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - s := auxToSym(x0.Aux) - mem := x0.Args[2] - x0_0 
:= x0.Args[0] - x0_1 := x0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { - p := x0_0 - idx := x0_1 - sh := v_1 - if sh.Op != OpS390XSLWconst || auxIntToInt8(sh.AuxInt) != 16 { - continue - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - continue - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - if auxToSym(x1.Aux) != s { - continue - } - _ = x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { - if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) { - continue - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(i0) - v0.Aux = symToAux(s) - v0.AddArg3(p, idx, mem) - return true - } - } - } - break - } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y)) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - s1 := v_0 - if s1.Op != OpS390XSLWconst { - continue - } - j1 := auxIntToInt8(s1.AuxInt) - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - s := auxToSym(x1.Aux) - mem := x1.Args[2] - x1_0 := x1.Args[0] - x1_1 := x1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { - p := x1_0 - idx := x1_1 - or := v_1 - if or.Op != OpS390XORW { - continue - } - _ = or.Args[1] - or_0 := or.Args[0] - or_1 
:= or.Args[1] - for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { - s0 := or_0 - if s0.Op != OpS390XSLWconst { - continue - } - j0 := auxIntToInt8(s0.AuxInt) - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - if auxToSym(x0.Aux) != s { - continue - } - _ = x0.Args[2] - x0_0 := x0.Args[0] - x0_1 := x0.Args[1] - for _i3 := 0; _i3 <= 1; _i3, x0_0, x0_1 = _i3+1, x0_1, x0_0 { - if p != x0_0 || idx != x0_1 || mem != x0.Args[2] { - continue - } - y := or_1 - if !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) { - continue - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.copyOf(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = int8ToAuxInt(j0) - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = int32ToAuxInt(i0) - v3.Aux = symToAux(s) - v3.AddArg3(p, idx, mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(v1, y) - return true - } - } - } - } - break - } return false } func rewriteValueS390X_OpS390XORWconst(v *Value) bool { -- cgit v1.2.3-54-g00ecf From 3b20d484fbb7a4f64881002f8a63eca42f0e7c29 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 18 Aug 2020 22:29:22 -0400 Subject: go/types: fix missing Importer for TestBenchmark TestBenchmark is broken due to lack of a Config.Importer, but unfortunately fails silently due to an unchecked error. Fix the importer and check the error. Also improve the output to include allocation stats. Finally, don't run TestBenchmark on go/types by default. If the benchmark is being used during a refactoring of go/types itself, results for go/types will not be comparable. 
Change-Id: Ib6bdb6807403b3ec99762f535e2496c94bd9b6e0 Reviewed-on: https://go-review.googlesource.com/c/go/+/249517 Run-TryBot: Robert Findley TryBot-Result: Gobot Gobot Reviewed-by: Alan Donovan --- src/go/types/self_test.go | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/src/go/types/self_test.go b/src/go/types/self_test.go index 10ad06fbca..04c9cd3458 100644 --- a/src/go/types/self_test.go +++ b/src/go/types/self_test.go @@ -47,8 +47,13 @@ func TestBenchmark(t *testing.T) { // We're not using testing's benchmarking mechanism directly // because we want custom output. - for _, p := range []string{"types", "constant", filepath.Join("internal", "gcimporter")} { - path := filepath.Join("..", p) + for _, p := range []string{ + "net/http", + "go/parser", + "go/constant", + filepath.Join("go", "internal", "gcimporter"), + } { + path := filepath.Join("..", "..", p) runbench(t, path, false) runbench(t, path, true) fmt.Println() @@ -64,8 +69,13 @@ func runbench(t *testing.T, path string, ignoreFuncBodies bool) { b := testing.Benchmark(func(b *testing.B) { for i := 0; i < b.N; i++ { - conf := Config{IgnoreFuncBodies: ignoreFuncBodies} - conf.Check(path, fset, files, nil) + conf := Config{ + IgnoreFuncBodies: ignoreFuncBodies, + Importer: importer.Default(), + } + if _, err := conf.Check(path, fset, files, nil); err != nil { + t.Fatal(err) + } } }) @@ -77,10 +87,9 @@ func runbench(t *testing.T, path string, ignoreFuncBodies bool) { }) d := time.Duration(b.NsPerOp()) - fmt.Printf( - "%s: %s for %d lines (%d lines/s), ignoreFuncBodies = %v\n", - filepath.Base(path), d, lines, int64(float64(lines)/d.Seconds()), ignoreFuncBodies, - ) + fmt.Printf("%s (ignoreFuncBodies = %v):\n", filepath.Base(path), ignoreFuncBodies) + fmt.Printf("\t%s for %d lines (%.0f lines/s)\n", d, lines, float64(lines)/d.Seconds()) + fmt.Printf("\t%s\n", b.MemString()) } func pkgFiles(fset *token.FileSet, path string) ([]*ast.File, error) { -- cgit 
v1.2.3-54-g00ecf From c00b7081699d3b3ced2970fdae2de6febf638c05 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Wed, 26 Aug 2020 23:17:53 -0400 Subject: cmd/go/internal/modfetch: do not use mangled version strings to construct module.VersionErrors Better still would be to avoid passing around module.Version instances with invalid Version strings in the first place, so that any time we see a module.Version we know that it is actually a version of a module (and not a structurally-similar datum with something else tacked on to one of the fields). But that's a bigger cleanup for which I don't currently have enough bandwidth. Fixes #41060 Change-Id: I32fba5619105cbf67dd03691064c82b8ebb3ce18 Reviewed-on: https://go-review.googlesource.com/c/go/+/250951 Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod Reviewed-by: Michael Matloob --- src/cmd/go/internal/modfetch/fetch.go | 14 ++++++++++++-- src/cmd/go/testdata/script/mod_download_json.txt | 2 +- src/cmd/go/testdata/script/mod_sumdb.txt | 6 ++++++ 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/cmd/go/internal/modfetch/fetch.go b/src/cmd/go/internal/modfetch/fetch.go index e29eb0a942..01d8f007ac 100644 --- a/src/cmd/go/internal/modfetch/fetch.go +++ b/src/cmd/go/internal/modfetch/fetch.go @@ -503,6 +503,9 @@ func checkGoMod(path, version string, data []byte) error { } // checkModSum checks that the recorded checksum for mod is h. +// +// mod.Version may have the additional suffix "/go.mod" to request the checksum +// for the module's go.mod file only. func checkModSum(mod module.Version, h string) error { // We lock goSum when manipulating it, // but we arrange to release the lock when calling checkSumDB, @@ -579,9 +582,16 @@ func addModSumLocked(mod module.Version, h string) { // checkSumDB checks the mod, h pair against the Go checksum database. // It calls base.Fatalf if the hash is to be rejected. 
func checkSumDB(mod module.Version, h string) error { + modWithoutSuffix := mod + noun := "module" + if strings.HasSuffix(mod.Version, "/go.mod") { + noun = "go.mod" + modWithoutSuffix.Version = strings.TrimSuffix(mod.Version, "/go.mod") + } + db, lines, err := lookupSumDB(mod) if err != nil { - return module.VersionError(mod, fmt.Errorf("verifying module: %v", err)) + return module.VersionError(modWithoutSuffix, fmt.Errorf("verifying %s: %v", noun, err)) } have := mod.Path + " " + mod.Version + " " + h @@ -591,7 +601,7 @@ func checkSumDB(mod module.Version, h string) error { return nil } if strings.HasPrefix(line, prefix) { - return module.VersionError(mod, fmt.Errorf("verifying module: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+sumdbMismatch, h, db, line[len(prefix)-len("h1:"):])) + return module.VersionError(modWithoutSuffix, fmt.Errorf("verifying %s: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+sumdbMismatch, noun, h, db, line[len(prefix)-len("h1:"):])) } } return nil diff --git a/src/cmd/go/testdata/script/mod_download_json.txt b/src/cmd/go/testdata/script/mod_download_json.txt index 26291681ce..9555adf8c4 100644 --- a/src/cmd/go/testdata/script/mod_download_json.txt +++ b/src/cmd/go/testdata/script/mod_download_json.txt @@ -3,7 +3,7 @@ env GOSUMDB=$sumdb' '$proxy/sumdb-wrong # download -json with version should print JSON on sumdb failure ! go mod download -json 'rsc.io/quote@<=v1.5.0' -stdout '"Error": ".*verifying module' +stdout '"Error": ".*verifying (module|go.mod)' -- go.mod -- module m diff --git a/src/cmd/go/testdata/script/mod_sumdb.txt b/src/cmd/go/testdata/script/mod_sumdb.txt index caf97e9699..68bbd9c274 100644 --- a/src/cmd/go/testdata/script/mod_sumdb.txt +++ b/src/cmd/go/testdata/script/mod_sumdb.txt @@ -15,6 +15,12 @@ stderr 'localhost.localdev/sumdb: h1:wrong' stderr 'SECURITY ERROR\nThis download does NOT match the one reported by the checksum server.' ! go get -d rsc.io/sampler ! 
go get -d golang.org/x/text + +go mod edit -require rsc.io/quote@v1.5.2 +! go list all +stderr 'go: rsc.io/quote@v1.5.2: verifying go.mod: checksum mismatch' +stderr 'SECURITY ERROR\n' + rm go.sum # switching to truthful sumdb detects timeline inconsistency -- cgit v1.2.3-54-g00ecf From ac2a1f8796101459a1700d02093745ffe1c821f4 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 11 Aug 2020 12:49:40 -0400 Subject: go/types: clean up test support code and remove global variables This is a straightforward port of CL 244627. Change-Id: Ide980957430b35e22a6e22818b0ce9de410988af Reviewed-on: https://go-review.googlesource.com/c/go/+/247902 Run-TryBot: Robert Findley TryBot-Result: Gobot Gobot Reviewed-by: Robert Griesemer --- src/go/types/stdlib_test.go | 51 ++++++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go index 51ee0b1c36..f5a3273fa1 100644 --- a/src/go/types/stdlib_test.go +++ b/src/go/types/stdlib_test.go @@ -27,22 +27,21 @@ import ( . "go/types" ) -var ( - pkgCount int // number of packages processed - start time.Time - - // Use the same importer for all std lib tests to - // avoid repeated importing of the same packages. - stdLibImporter = importer.Default() -) +// Use the same importer for all std lib tests to +// avoid repeated importing of the same packages. 
+var stdLibImporter = importer.Default() func TestStdlib(t *testing.T) { testenv.MustHaveGoBuild(t) - start = time.Now() - walkDirs(t, filepath.Join(runtime.GOROOT(), "src")) + pkgCount := 0 + duration := walkPkgDirs(filepath.Join(runtime.GOROOT(), "src"), func(dir string, filenames []string) { + typecheck(t, dir, filenames) + pkgCount++ + }, t.Error) + if testing.Verbose() { - fmt.Println(pkgCount, "packages typechecked in", time.Since(start)) + fmt.Println(pkgCount, "packages typechecked in", duration) } } @@ -235,7 +234,6 @@ func typecheck(t *testing.T, path string, filenames []string) { } info := Info{Uses: make(map[*ast.Ident]Object)} conf.Check(path, fset, files, &info) - pkgCount++ // Perform checks of API invariants. @@ -278,39 +276,48 @@ func pkgFilenames(dir string) ([]string, error) { return filenames, nil } -// Note: Could use filepath.Walk instead of walkDirs but that wouldn't -// necessarily be shorter or clearer after adding the code to -// terminate early for -short tests. +func walkPkgDirs(dir string, pkgh func(dir string, filenames []string), errh func(args ...interface{})) time.Duration { + w := walker{time.Now(), 10 * time.Millisecond, pkgh, errh} + w.walk(dir) + return time.Since(w.start) +} -func walkDirs(t *testing.T, dir string) { +type walker struct { + start time.Time + dmax time.Duration + pkgh func(dir string, filenames []string) + errh func(args ...interface{}) +} + +func (w *walker) walk(dir string) { // limit run time for short tests - if testing.Short() && time.Since(start) >= 10*time.Millisecond { + if testing.Short() && time.Since(w.start) >= w.dmax { return } fis, err := ioutil.ReadDir(dir) if err != nil { - t.Error(err) + w.errh(err) return } - // typecheck package in directory + // apply pkgh to the files in directory dir // but ignore files directly under $GOROOT/src (might be temporary test files). 
if dir != filepath.Join(runtime.GOROOT(), "src") { files, err := pkgFilenames(dir) if err != nil { - t.Error(err) + w.errh(err) return } if files != nil { - typecheck(t, dir, files) + w.pkgh(dir, files) } } // traverse subdirectories, but don't walk into testdata for _, fi := range fis { if fi.IsDir() && fi.Name() != "testdata" { - walkDirs(t, filepath.Join(dir, fi.Name())) + w.walk(filepath.Join(dir, fi.Name())) } } } -- cgit v1.2.3-54-g00ecf From 234e37bcda2b5efdd685ad11d1bb3d336415648e Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 11 Aug 2020 13:21:14 -0400 Subject: go/types: remove need to enumerate fixedbugs test files This is a port of CL 244628, updated to move some existing regression tests into the fixedbugs directory, and to use subtests. Also, 'TestFixed' is renamed to 'TestFixedBugs'. Change-Id: I43aac3f75f2bd850567d08e8b008d91aeb717064 Reviewed-on: https://go-review.googlesource.com/c/go/+/247904 Run-TryBot: Robert Findley TryBot-Result: Gobot Gobot Reviewed-by: Robert Griesemer --- src/go/types/check_test.go | 32 ++++++++++--- src/go/types/fixedbugs/issue23203a.src | 14 ++++++ src/go/types/fixedbugs/issue23203b.src | 14 ++++++ src/go/types/fixedbugs/issue26390.src | 13 ++++++ src/go/types/fixedbugs/issue28251.src | 65 +++++++++++++++++++++++++++ src/go/types/fixedbugs/issue6977.src | 82 ++++++++++++++++++++++++++++++++++ src/go/types/testdata/issue23203a.src | 14 ------ src/go/types/testdata/issue23203b.src | 14 ------ src/go/types/testdata/issue26390.src | 11 ----- src/go/types/testdata/issue28251.src | 65 --------------------------- src/go/types/testdata/issue6977.src | 82 ---------------------------------- 11 files changed, 214 insertions(+), 192 deletions(-) create mode 100644 src/go/types/fixedbugs/issue23203a.src create mode 100644 src/go/types/fixedbugs/issue23203b.src create mode 100644 src/go/types/fixedbugs/issue26390.src create mode 100644 src/go/types/fixedbugs/issue28251.src create mode 100644 src/go/types/fixedbugs/issue6977.src 
delete mode 100644 src/go/types/testdata/issue23203a.src delete mode 100644 src/go/types/testdata/issue23203b.src delete mode 100644 src/go/types/testdata/issue26390.src delete mode 100644 src/go/types/testdata/issue28251.src delete mode 100644 src/go/types/testdata/issue6977.src diff --git a/src/go/types/check_test.go b/src/go/types/check_test.go index 89122d75ff..e01c3de13b 100644 --- a/src/go/types/check_test.go +++ b/src/go/types/check_test.go @@ -34,6 +34,7 @@ import ( "go/token" "internal/testenv" "io/ioutil" + "path/filepath" "regexp" "strings" "testing" @@ -93,11 +94,6 @@ var tests = [][]string{ {"testdata/issues.src"}, {"testdata/blank.src"}, {"testdata/issue25008b.src", "testdata/issue25008a.src"}, // order (b before a) is crucial! - {"testdata/issue26390.src"}, // stand-alone test to ensure case is triggered - {"testdata/issue23203a.src"}, - {"testdata/issue23203b.src"}, - {"testdata/issue28251.src"}, - {"testdata/issue6977.src"}, } var fset = token.NewFileSet() @@ -259,7 +255,7 @@ func checkFiles(t *testing.T, testfiles []string) { // typecheck and collect typechecker errors var conf Config // special case for importC.src - if len(testfiles) == 1 && testfiles[0] == "testdata/importC.src" { + if len(testfiles) == 1 && strings.HasSuffix(testfiles[0], "importC.src") { conf.FakeImportC = true } conf.Importer = importer.Default() @@ -316,3 +312,27 @@ func TestCheck(t *testing.T) { checkFiles(t, files) } } + +func TestFixedBugs(t *testing.T) { testDir(t, "fixedbugs") } + +func testDir(t *testing.T, dir string) { + testenv.MustHaveGoBuild(t) + + fis, err := ioutil.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + for _, fi := range fis { + testname := filepath.Base(fi.Name()) + testname = strings.TrimSuffix(testname, filepath.Ext(testname)) + t.Run(testname, func(t *testing.T) { + filename := filepath.Join(dir, fi.Name()) + if fi.IsDir() { + t.Errorf("skipped directory %q", filename) + return + } + checkFiles(t, []string{filename}) + }) + } +} diff --git 
a/src/go/types/fixedbugs/issue23203a.src b/src/go/types/fixedbugs/issue23203a.src new file mode 100644 index 0000000000..48cb5889cd --- /dev/null +++ b/src/go/types/fixedbugs/issue23203a.src @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "unsafe" + +type T struct{} + +func (T) m1() {} +func (T) m2([unsafe.Sizeof(T.m1)]int) {} + +func main() {} diff --git a/src/go/types/fixedbugs/issue23203b.src b/src/go/types/fixedbugs/issue23203b.src new file mode 100644 index 0000000000..638ec6c5ce --- /dev/null +++ b/src/go/types/fixedbugs/issue23203b.src @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "unsafe" + +type T struct{} + +func (T) m2([unsafe.Sizeof(T.m1)]int) {} +func (T) m1() {} + +func main() {} diff --git a/src/go/types/fixedbugs/issue26390.src b/src/go/types/fixedbugs/issue26390.src new file mode 100644 index 0000000000..9e0101f581 --- /dev/null +++ b/src/go/types/fixedbugs/issue26390.src @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// stand-alone test to ensure case is triggered + +package issue26390 + +type A = T + +func (t *T) m() *A { return t } + +type T struct{} diff --git a/src/go/types/fixedbugs/issue28251.src b/src/go/types/fixedbugs/issue28251.src new file mode 100644 index 0000000000..cd79e0e8b5 --- /dev/null +++ b/src/go/types/fixedbugs/issue28251.src @@ -0,0 +1,65 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This file contains test cases for various forms of +// method receiver declarations, per the spec clarification +// https://golang.org/cl/142757. + +package issue28251 + +// test case from issue28251 +type T struct{} + +type T0 = *T + +func (T0) m() {} + +func _() { (&T{}).m() } + +// various alternative forms +type ( + T1 = (((T))) +) + +func ((*(T1))) m1() {} +func _() { (T{}).m2() } +func _() { (&T{}).m2() } + +type ( + T2 = (((T3))) + T3 = T +) + +func (T2) m2() {} +func _() { (T{}).m2() } +func _() { (&T{}).m2() } + +type ( + T4 = ((*(T5))) + T5 = T +) + +func (T4) m4() {} +func _() { (T{}).m4 /* ERROR "cannot call pointer method m4 on T" */ () } +func _() { (&T{}).m4() } + +type ( + T6 = (((T7))) + T7 = (*(T8)) + T8 = T +) + +func (T6) m6() {} +func _() { (T{}).m6 /* ERROR "cannot call pointer method m6 on T" */ () } +func _() { (&T{}).m6() } + +type ( + T9 = *T10 + T10 = *T11 + T11 = T +) + +func (T9 /* ERROR invalid receiver \*\*T */ ) m9() {} +func _() { (T{}).m9 /* ERROR has no field or method m9 */ () } +func _() { (&T{}).m9 /* ERROR has no field or method m9 */ () } diff --git a/src/go/types/fixedbugs/issue6977.src b/src/go/types/fixedbugs/issue6977.src new file mode 100644 index 0000000000..8f4e9ba2b2 --- /dev/null +++ b/src/go/types/fixedbugs/issue6977.src @@ -0,0 +1,82 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +import "io" + +// Alan's initial report. + +type I interface { f(); String() string } +type J interface { g(); String() string } + +type IJ1 = interface { I; J } +type IJ2 = interface { f(); g(); String() string } + +var _ = (*IJ1)(nil) == (*IJ2)(nil) // static assert that IJ1 and IJ2 are identical types + +// The canonical example. + +type ReadWriteCloser interface { io.ReadCloser; io.WriteCloser } + +// Some more cases. 
+ +type M interface { m() } +type M32 interface { m() int32 } +type M64 interface { m() int64 } + +type U1 interface { m() } +type U2 interface { m(); M } +type U3 interface { M; m() } +type U4 interface { M; M; M } +type U5 interface { U1; U2; U3; U4 } + +type U6 interface { m(); m /* ERROR duplicate method */ () } +type U7 interface { M32 /* ERROR duplicate method */ ; m() } +type U8 interface { m(); M32 /* ERROR duplicate method */ } +type U9 interface { M32; M64 /* ERROR duplicate method */ } + +// Verify that repeated embedding of the same interface(s) +// eliminates duplicate methods early (rather than at the +// end) to prevent exponential memory and time use. +// Without early elimination, computing T29 may take dozens +// of minutes. +type ( + T0 interface { m() } + T1 interface { T0; T0 } + T2 interface { T1; T1 } + T3 interface { T2; T2 } + T4 interface { T3; T3 } + T5 interface { T4; T4 } + T6 interface { T5; T5 } + T7 interface { T6; T6 } + T8 interface { T7; T7 } + T9 interface { T8; T8 } + + T10 interface { T9; T9 } + T11 interface { T10; T10 } + T12 interface { T11; T11 } + T13 interface { T12; T12 } + T14 interface { T13; T13 } + T15 interface { T14; T14 } + T16 interface { T15; T15 } + T17 interface { T16; T16 } + T18 interface { T17; T17 } + T19 interface { T18; T18 } + + T20 interface { T19; T19 } + T21 interface { T20; T20 } + T22 interface { T21; T21 } + T23 interface { T22; T22 } + T24 interface { T23; T23 } + T25 interface { T24; T24 } + T26 interface { T25; T25 } + T27 interface { T26; T26 } + T28 interface { T27; T27 } + T29 interface { T28; T28 } +) + +// Verify that m is present. +var x T29 +var _ = x.m diff --git a/src/go/types/testdata/issue23203a.src b/src/go/types/testdata/issue23203a.src deleted file mode 100644 index 48cb5889cd..0000000000 --- a/src/go/types/testdata/issue23203a.src +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import "unsafe" - -type T struct{} - -func (T) m1() {} -func (T) m2([unsafe.Sizeof(T.m1)]int) {} - -func main() {} diff --git a/src/go/types/testdata/issue23203b.src b/src/go/types/testdata/issue23203b.src deleted file mode 100644 index 638ec6c5ce..0000000000 --- a/src/go/types/testdata/issue23203b.src +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import "unsafe" - -type T struct{} - -func (T) m2([unsafe.Sizeof(T.m1)]int) {} -func (T) m1() {} - -func main() {} diff --git a/src/go/types/testdata/issue26390.src b/src/go/types/testdata/issue26390.src deleted file mode 100644 index b8e67e9bdd..0000000000 --- a/src/go/types/testdata/issue26390.src +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package issue26390 - -type A = T - -func (t *T) m() *A { return t } - -type T struct{} diff --git a/src/go/types/testdata/issue28251.src b/src/go/types/testdata/issue28251.src deleted file mode 100644 index cd79e0e8b5..0000000000 --- a/src/go/types/testdata/issue28251.src +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file contains test cases for various forms of -// method receiver declarations, per the spec clarification -// https://golang.org/cl/142757. 
- -package issue28251 - -// test case from issue28251 -type T struct{} - -type T0 = *T - -func (T0) m() {} - -func _() { (&T{}).m() } - -// various alternative forms -type ( - T1 = (((T))) -) - -func ((*(T1))) m1() {} -func _() { (T{}).m2() } -func _() { (&T{}).m2() } - -type ( - T2 = (((T3))) - T3 = T -) - -func (T2) m2() {} -func _() { (T{}).m2() } -func _() { (&T{}).m2() } - -type ( - T4 = ((*(T5))) - T5 = T -) - -func (T4) m4() {} -func _() { (T{}).m4 /* ERROR "cannot call pointer method m4 on T" */ () } -func _() { (&T{}).m4() } - -type ( - T6 = (((T7))) - T7 = (*(T8)) - T8 = T -) - -func (T6) m6() {} -func _() { (T{}).m6 /* ERROR "cannot call pointer method m6 on T" */ () } -func _() { (&T{}).m6() } - -type ( - T9 = *T10 - T10 = *T11 - T11 = T -) - -func (T9 /* ERROR invalid receiver \*\*T */ ) m9() {} -func _() { (T{}).m9 /* ERROR has no field or method m9 */ () } -func _() { (&T{}).m9 /* ERROR has no field or method m9 */ () } diff --git a/src/go/types/testdata/issue6977.src b/src/go/types/testdata/issue6977.src deleted file mode 100644 index 8f4e9ba2b2..0000000000 --- a/src/go/types/testdata/issue6977.src +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package p - -import "io" - -// Alan's initial report. - -type I interface { f(); String() string } -type J interface { g(); String() string } - -type IJ1 = interface { I; J } -type IJ2 = interface { f(); g(); String() string } - -var _ = (*IJ1)(nil) == (*IJ2)(nil) // static assert that IJ1 and IJ2 are identical types - -// The canonical example. - -type ReadWriteCloser interface { io.ReadCloser; io.WriteCloser } - -// Some more cases. 
- -type M interface { m() } -type M32 interface { m() int32 } -type M64 interface { m() int64 } - -type U1 interface { m() } -type U2 interface { m(); M } -type U3 interface { M; m() } -type U4 interface { M; M; M } -type U5 interface { U1; U2; U3; U4 } - -type U6 interface { m(); m /* ERROR duplicate method */ () } -type U7 interface { M32 /* ERROR duplicate method */ ; m() } -type U8 interface { m(); M32 /* ERROR duplicate method */ } -type U9 interface { M32; M64 /* ERROR duplicate method */ } - -// Verify that repeated embedding of the same interface(s) -// eliminates duplicate methods early (rather than at the -// end) to prevent exponential memory and time use. -// Without early elimination, computing T29 may take dozens -// of minutes. -type ( - T0 interface { m() } - T1 interface { T0; T0 } - T2 interface { T1; T1 } - T3 interface { T2; T2 } - T4 interface { T3; T3 } - T5 interface { T4; T4 } - T6 interface { T5; T5 } - T7 interface { T6; T6 } - T8 interface { T7; T7 } - T9 interface { T8; T8 } - - T10 interface { T9; T9 } - T11 interface { T10; T10 } - T12 interface { T11; T11 } - T13 interface { T12; T12 } - T14 interface { T13; T13 } - T15 interface { T14; T14 } - T16 interface { T15; T15 } - T17 interface { T16; T16 } - T18 interface { T17; T17 } - T19 interface { T18; T18 } - - T20 interface { T19; T19 } - T21 interface { T20; T20 } - T22 interface { T21; T21 } - T23 interface { T22; T22 } - T24 interface { T23; T23 } - T25 interface { T24; T24 } - T26 interface { T25; T25 } - T27 interface { T26; T26 } - T28 interface { T27; T27 } - T29 interface { T28; T28 } -) - -// Verify that m is present. -var x T29 -var _ = x.m -- cgit v1.2.3-54-g00ecf From a98fe2632cf144d6ed41daf7255aa912f0e430db Mon Sep 17 00:00:00 2001 From: "Bryan C. 
Mills" Date: Mon, 24 Aug 2020 21:34:33 -0400 Subject: cmd/go/internal/modload: address issues missed in CL 244774 For #36460 Change-Id: I5e9a42d64e36679b881e96145833e66cf77b98a5 Reviewed-on: https://go-review.googlesource.com/c/go/+/250338 Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Michael Matloob --- src/cmd/go/internal/modload/modfile.go | 4 ++-- src/cmd/go/internal/modload/mvs.go | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go index 0b135c5fb5..a45c4a63be 100644 --- a/src/cmd/go/internal/modload/modfile.go +++ b/src/cmd/go/internal/modload/modfile.go @@ -345,7 +345,7 @@ type retraction struct { // goModSummary returns a summary of the go.mod file for module m, // taking into account any replacements for m, exclusions of its dependencies, -// and or vendoring. +// and/or vendoring. // // goModSummary cannot be used on the Target module, as its requirements // may change. @@ -405,7 +405,7 @@ func goModSummary(m module.Version) (*modFileSummary, error) { return cached{nil, module.VersionError(actual, errors.New("parsing go.mod: missing module line"))} } - // In theory we should only allow mpath to be unequal to mod.Path here if the + // In theory we should only allow mpath to be unequal to m.Path here if the // version that we fetched lacks an explicit go.mod file: if the go.mod file // is explicit, then it should match exactly (to ensure that imports of other // packages within the module are interpreted correctly). 
Unfortunately, we diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go index d023ab5094..24856260d4 100644 --- a/src/cmd/go/internal/modload/mvs.go +++ b/src/cmd/go/internal/modload/mvs.go @@ -14,7 +14,6 @@ import ( "cmd/go/internal/modfetch" "cmd/go/internal/mvs" - "cmd/go/internal/par" "golang.org/x/mod/module" "golang.org/x/mod/semver" @@ -24,7 +23,6 @@ import ( // with any exclusions or replacements applied internally. type mvsReqs struct { buildList []module.Version - cache par.Cache // module.Version → Required method results } // Reqs returns the current module requirement graph. -- cgit v1.2.3-54-g00ecf From 2013f70256e55f5f6f826757333aae42b4667561 Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Tue, 25 Aug 2020 10:39:02 -0400 Subject: runtime: add lock partial order edge (fin -> wbufSpans) runfinq may have write barriers, thus it may need to take wbufSpans on any write. Fixes #41021 Change-Id: Ib69e20994b5d7d1526ad53d6ddb5e2e83bf2ed00 Reviewed-on: https://go-review.googlesource.com/c/go/+/250464 Run-TryBot: Michael Pratt TryBot-Result: Gobot Gobot Reviewed-by: Dan Scales --- src/runtime/lockrank.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go index b23cf767be..042f10b1d3 100644 --- a/src/runtime/lockrank.go +++ b/src/runtime/lockrank.go @@ -230,7 +230,7 @@ var lockPartialOrder [][]lockRank = [][]lockRank{ lockRankStackLarge: {lockRankSysmon, lockRankAssistQueue, lockRankSched, lockRankItab, lockRankHchan, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankSpanSetSpine, lockRankGscan}, lockRankDefer: {}, lockRankSudog: {lockRankNotifyList, lockRankHchan}, - lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankAllg, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, 
lockRankProf, lockRankRoot, lockRankGscan, lockRankDefer, lockRankSudog}, + lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankAllg, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, lockRankProf, lockRankRoot, lockRankGscan, lockRankDefer, lockRankSudog}, lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan, lockRankMspanSpecial, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankDefer, lockRankSudog, lockRankWbufSpans, lockRankSpanSetSpine}, lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan}, lockRankGlobalAlloc: {lockRankProf, lockRankSpanSetSpine, lockRankMheap, lockRankMheapSpecial}, -- cgit v1.2.3-54-g00ecf From 7615b20d06500fe5c95c061f6ff32e0c97639a60 Mon Sep 17 00:00:00 2001 From: "Paul E. Murphy" Date: Thu, 20 Aug 2020 15:06:23 -0500 Subject: cmd/compile: generate subfic on ppc64 This merges an lis + subf into subfic, and for 32b constants lwa + subf into oris + ori + subf. The carry bit is no longer used in code generation, therefore I think we can clobber it as needed. Note, lowered borrow/carry arithmetic is self-contained and thus is not affected. A few extra rules are added to ensure early transformations to SUBFCconst don't trip up earlier rules, fold constant operations, or otherwise simplify lowering. 
Likewise, tests are added to ensure all rules are hit. Generic constant folding catches trivial cases, however some lowering rules insert arithmetic which can introduce new opportunities (e.g BitLen or Slicemask). I couldn't find a specific benchmark to demonstrate noteworthy improvements, but this is generating subfic in many of the default bent test binaries, so we are at least saving a little code space. Change-Id: Iad7c6e5767eaa9dc24dc1c989bd1c8cfe1982012 Reviewed-on: https://go-review.googlesource.com/c/go/+/249461 Run-TryBot: Lynn Boger TryBot-Result: Gobot Gobot Reviewed-by: Carlos Eduardo Seo --- src/cmd/compile/internal/ppc64/ssa.go | 8 + src/cmd/compile/internal/ssa/gen/PPC64.rules | 31 +- src/cmd/compile/internal/ssa/gen/PPC64Ops.go | 1 + src/cmd/compile/internal/ssa/opGen.go | 15 + src/cmd/compile/internal/ssa/rewritePPC64.go | 571 ++++++++++++++++++++++++++- test/codegen/arithmetic.go | 42 ++ test/codegen/mathbits.go | 14 + test/codegen/slices.go | 21 + 8 files changed, 686 insertions(+), 17 deletions(-) diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index 9c4c01e935..f8d9ac2379 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -649,6 +649,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() + case ssa.OpPPC64SUBFCconst: + p := s.Prog(v.Op.Asm()) + p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpPPC64ANDCCconst: p := s.Prog(v.Op.Asm()) p.Reg = v.Args[0].Reg() diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index 509cfe1c4f..e5fb1e98c2 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -110,13 +110,21 @@ // Rotate generation with non-const shift // these match 
patterns from math/bits/RotateLeft[32|64], but there could be others (ADD (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) => (ROTL x y) +(ADD (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst [64] (ANDconst [63] y)))) => (ROTL x y) ( OR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) => (ROTL x y) +( OR (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst [64] (ANDconst [63] y)))) => (ROTL x y) (XOR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) => (ROTL x y) +(XOR (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst [64] (ANDconst [63] y)))) => (ROTL x y) + +(ADD (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst [32] (ANDconst [31] y)))) => (ROTLW x y) (ADD (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) => (ROTLW x y) +( OR (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst [32] (ANDconst [31] y)))) => (ROTLW x y) ( OR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) => (ROTLW x y) +(XOR (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst [32] (ANDconst [31] y)))) => (ROTLW x y) (XOR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) => (ROTLW x y) + // Lowering rotates (RotateLeft32 x y) => (ROTLW x y) (RotateLeft64 x y) => (ROTL x y) @@ -192,11 +200,15 @@ (Rsh64Ux64 x (AND y (MOVDconst [63]))) => (SRD x (ANDconst [63] y)) (Rsh64Ux64 x (ANDconst [63] y)) => (SRD x (ANDconst [63] y)) (Rsh64Ux64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) => (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) +(Rsh64Ux64 x (SUBFCconst [64] (ANDconst [63] y))) => (SRD x (SUBFCconst [64] (ANDconst [63] y))) (Rsh64Ux64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) => (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) +(Rsh64Ux64 x (SUBFCconst [64] (AND y (MOVDconst [63])))) => (SRD x (SUBFCconst [64] (ANDconst [63] y))) (Rsh64x64 x (AND y (MOVDconst [63]))) => (SRAD x (ANDconst [63] y)) (Rsh64x64 x (ANDconst [63] y)) => (SRAD x (ANDconst [63] y)) (Rsh64x64 x 
(SUB (MOVDconst [64]) (ANDconst [63] y))) => (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) +(Rsh64x64 x (SUBFCconst [64] (ANDconst [63] y))) => (SRAD x (SUBFCconst [64] (ANDconst [63] y))) (Rsh64x64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) => (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) +(Rsh64x64 x (SUBFCconst [64] (AND y (MOVDconst [63])))) => (SRAD x (SUBFCconst [64] (ANDconst [63] y))) (Lsh64x64 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64])))) (Rsh64x64 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64])))) @@ -208,12 +220,16 @@ (Rsh32Ux64 x (AND y (MOVDconst [31]))) => (SRW x (ANDconst [31] y)) (Rsh32Ux64 x (ANDconst [31] y)) => (SRW x (ANDconst [31] y)) (Rsh32Ux64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) => (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) +(Rsh32Ux64 x (SUBFCconst [32] (ANDconst [31] y))) => (SRW x (SUBFCconst [32] (ANDconst [31] y))) (Rsh32Ux64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) => (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) +(Rsh32Ux64 x (SUBFCconst [32] (AND y (MOVDconst [31])))) => (SRW x (SUBFCconst [32] (ANDconst [31] y))) (Rsh32x64 x (AND y (MOVDconst [31]))) => (SRAW x (ANDconst [31] y)) (Rsh32x64 x (ANDconst [31] y)) => (SRAW x (ANDconst [31] y)) (Rsh32x64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) => (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) +(Rsh32x64 x (SUBFCconst [32] (ANDconst [31] y))) => (SRAW x (SUBFCconst [32] (ANDconst [31] y))) (Rsh32x64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) => (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) +(Rsh32x64 x (SUBFCconst [32] (AND y (MOVDconst [31])))) => (SRAW x (SUBFCconst [32] (ANDconst [31] y))) (Rsh32x64 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32])))) (Rsh32Ux64 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32])))) @@ -299,8 +315,8 @@ (Ctz16 x) => (POPCNTW (MOVHZreg (ANDN (ADDconst [-1] x) x))) (Ctz8 x) => (POPCNTB (MOVBZreg (ANDN (ADDconst 
[-1] x) x))) -(BitLen64 x) => (SUB (MOVDconst [64]) (CNTLZD x)) -(BitLen32 x) => (SUB (MOVDconst [32]) (CNTLZW x)) +(BitLen64 x) => (SUBFCconst [64] (CNTLZD x)) +(BitLen32 x) => (SUBFCconst [32] (CNTLZW x)) (PopCount64 ...) => (POPCNTD ...) (PopCount32 x) => (POPCNTW (MOVWZreg x)) @@ -770,10 +786,19 @@ (ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x) (ADDconst [0] x) => x (SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x) -// TODO deal with subtract-from-const (ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x) +// Subtract from (with carry, but ignored) constant. +// Note, these clobber the carry bit. +(SUB (MOVDconst [c]) x) && is32Bit(c) => (SUBFCconst [c] x) +(SUBFCconst [c] (NEG x)) => (ADDconst [c] x) +(SUBFCconst [c] (SUBFCconst [d] x)) && is32Bit(c-d) => (ADDconst [c-d] x) +(SUBFCconst [0] x) => (NEG x) +(ADDconst [c] (SUBFCconst [d] x)) && is32Bit(c+d) => (SUBFCconst [c+d] x) +(NEG (ADDconst [c] x)) && is32Bit(-c) => (SUBFCconst [-c] x) +(NEG (SUBFCconst [c] x)) && is32Bit(-c) => (ADDconst [-c] x) + // Use register moves instead of stores and loads to move int<=>float values // Common with math Float64bits, Float64frombits (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) => (MFVSRD x) diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go index f91222446c..44f6a74c63 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go @@ -175,6 +175,7 @@ func init() { {name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1 {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1 {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1 + {name: "SUBFCconst", argLength: 1, reg: gp11, asm: "SUBC", aux: "Int64"}, // auxInt - arg0 (with carry) {name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1 
{name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1 diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index e181174d11..45401898c8 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1828,6 +1828,7 @@ const ( OpPPC64FADD OpPPC64FADDS OpPPC64SUB + OpPPC64SUBFCconst OpPPC64FSUB OpPPC64FSUBS OpPPC64MULLD @@ -24313,6 +24314,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SUBFCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASUBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, { name: "FSUB", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 1a0b03e81c..152cdfdf4d 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -568,6 +568,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64MOVWstorezero(v) case OpPPC64MTVSRD: return rewriteValuePPC64_OpPPC64MTVSRD(v) + case OpPPC64NEG: + return rewriteValuePPC64_OpPPC64NEG(v) case OpPPC64NOR: return rewriteValuePPC64_OpPPC64NOR(v) case OpPPC64NotEqual: @@ -596,6 +598,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64SRW(v) case OpPPC64SUB: return rewriteValuePPC64_OpPPC64SUB(v) + case OpPPC64SUBFCconst: + return rewriteValuePPC64_OpPPC64SUBFCconst(v) case OpPPC64XOR: return rewriteValuePPC64_OpPPC64XOR(v) case OpPPC64XORconst: @@ -1021,15 +1025,14 @@ func rewriteValuePPC64_OpBitLen32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (BitLen32 x) - // result: (SUB (MOVDconst [32]) (CNTLZW x)) + // result: (SUBFCconst [32] (CNTLZW x)) for 
{ x := v_0 - v.reset(OpPPC64SUB) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v0.AuxInt = int64ToAuxInt(32) - v1 := b.NewValue0(v.Pos, OpPPC64CNTLZW, typ.Int) - v1.AddArg(x) - v.AddArg2(v0, v1) + v.reset(OpPPC64SUBFCconst) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpPPC64CNTLZW, typ.Int) + v0.AddArg(x) + v.AddArg(v0) return true } } @@ -1038,15 +1041,14 @@ func rewriteValuePPC64_OpBitLen64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (BitLen64 x) - // result: (SUB (MOVDconst [64]) (CNTLZD x)) + // result: (SUBFCconst [64] (CNTLZD x)) for { x := v_0 - v.reset(OpPPC64SUB) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v0.AuxInt = int64ToAuxInt(64) - v1 := b.NewValue0(v.Pos, OpPPC64CNTLZD, typ.Int) - v1.AddArg(x) - v.AddArg2(v0, v1) + v.reset(OpPPC64SUBFCconst) + v.AuxInt = int64ToAuxInt(64) + v0 := b.NewValue0(v.Pos, OpPPC64CNTLZD, typ.Int) + v0.AddArg(x) + v.AddArg(v0) return true } } @@ -3957,6 +3959,76 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { } break } + // match: (ADD (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst [64] (ANDconst [63] y)))) + // result: (ROTL x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64SLD { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 { + continue + } + y := v_0_1.Args[0] + if v_1.Op != OpPPC64SRD { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] { + continue + } + v.reset(OpPPC64ROTL) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADD (SLW x (ANDconst [31] y)) (SRW x 
(SUBFCconst [32] (ANDconst [31] y)))) + // result: (ROTLW x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64SLW { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 { + continue + } + y := v_0_1.Args[0] + if v_1.Op != OpPPC64SRW { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] { + continue + } + v.reset(OpPPC64ROTLW) + v.AddArg2(x, y) + return true + } + break + } // match: (ADD (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) // result: (ROTLW x y) for { @@ -4069,6 +4141,24 @@ func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool { v.AddArg(x) return true } + // match: (ADDconst [c] (SUBFCconst [d] x)) + // cond: is32Bit(c+d) + // result: (SUBFCconst [c+d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64SUBFCconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c + d)) { + break + } + v.reset(OpPPC64SUBFCconst) + v.AuxInt = int64ToAuxInt(c + d) + v.AddArg(x) + return true + } return false } func rewriteValuePPC64_OpPPC64AND(v *Value) bool { @@ -10336,6 +10426,44 @@ func rewriteValuePPC64_OpPPC64MTVSRD(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64NEG(v *Value) bool { + v_0 := v.Args[0] + // match: (NEG (ADDconst [c] x)) + // cond: is32Bit(-c) + // result: (SUBFCconst [-c] x) + for { + if v_0.Op != OpPPC64ADDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(-c)) { + break + } + v.reset(OpPPC64SUBFCconst) + v.AuxInt = int64ToAuxInt(-c) + v.AddArg(x) + return true 
+ } + // match: (NEG (SUBFCconst [c] x)) + // cond: is32Bit(-c) + // result: (ADDconst [-c] x) + for { + if v_0.Op != OpPPC64SUBFCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(-c)) { + break + } + v.reset(OpPPC64ADDconst) + v.AuxInt = int64ToAuxInt(-c) + v.AddArg(x) + return true + } + return false +} func rewriteValuePPC64_OpPPC64NOR(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -10510,6 +10638,76 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } break } + // match: ( OR (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst [64] (ANDconst [63] y)))) + // result: (ROTL x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64SLD { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 { + continue + } + y := v_0_1.Args[0] + if v_1.Op != OpPPC64SRD { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] { + continue + } + v.reset(OpPPC64ROTL) + v.AddArg2(x, y) + return true + } + break + } + // match: ( OR (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst [32] (ANDconst [31] y)))) + // result: (ROTLW x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64SLW { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 { + continue + } + y := v_0_1.Args[0] + if v_1.Op != OpPPC64SRW { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUBFCconst || 
v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] { + continue + } + v.reset(OpPPC64ROTLW) + v.AddArg2(x, y) + return true + } + break + } // match: ( OR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) // result: (ROTLW x y) for { @@ -12109,6 +12307,69 @@ func rewriteValuePPC64_OpPPC64SUB(v *Value) bool { v.AddArg(x) return true } + // match: (SUB (MOVDconst [c]) x) + // cond: is32Bit(c) + // result: (SUBFCconst [c] x) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(c)) { + break + } + v.reset(OpPPC64SUBFCconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SUBFCconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SUBFCconst [c] (NEG x)) + // result: (ADDconst [c] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64NEG { + break + } + x := v_0.Args[0] + v.reset(OpPPC64ADDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SUBFCconst [c] (SUBFCconst [d] x)) + // cond: is32Bit(c-d) + // result: (ADDconst [c-d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64SUBFCconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c - d)) { + break + } + v.reset(OpPPC64ADDconst) + v.AuxInt = int64ToAuxInt(c - d) + v.AddArg(x) + return true + } + // match: (SUBFCconst [0] x) + // result: (NEG x) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.reset(OpPPC64NEG) + v.AddArg(x) + return true + } return false } func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { @@ -12204,6 +12465,76 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { } break } + // match: (XOR (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst [64] (ANDconst [63] y)))) + // 
result: (ROTL x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64SLD { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 { + continue + } + y := v_0_1.Args[0] + if v_1.Op != OpPPC64SRD { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] { + continue + } + v.reset(OpPPC64ROTL) + v.AddArg2(x, y) + return true + } + break + } + // match: (XOR (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst [32] (ANDconst [31] y)))) + // result: (ROTLW x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64SLW { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 { + continue + } + y := v_0_1.Args[0] + if v_1.Op != OpPPC64SRW { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] { + continue + } + v.reset(OpPPC64ROTLW) + v.AddArg2(x, y) + return true + } + break + } // match: (XOR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) // result: (ROTLW x y) for { @@ -13175,6 +13506,28 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { v.AddArg2(x, v0) return true } + // match: (Rsh32Ux64 x (SUBFCconst [32] (ANDconst [31] y))) + // 
result: (SRW x (SUBFCconst [32] (ANDconst [31] y))) + for { + x := v_0 + if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 31 { + break + } + y := v_1_0.Args[0] + v.reset(OpPPC64SRW) + v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) + v0.AuxInt = int64ToAuxInt(32) + v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v1.AuxInt = int64ToAuxInt(31) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) // result: (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) for { @@ -13212,6 +13565,37 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { } break } + // match: (Rsh32Ux64 x (SUBFCconst [32] (AND y (MOVDconst [31])))) + // result: (SRW x (SUBFCconst [32] (ANDconst [31] y))) + for { + x := v_0 + if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 31 { + continue + } + v.reset(OpPPC64SRW) + v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) + v0.AuxInt = int64ToAuxInt(32) + v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v1.AuxInt = int64ToAuxInt(31) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } + break + } // match: (Rsh32Ux64 x y) // result: (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32])))) for { @@ -13482,6 +13866,28 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { v.AddArg2(x, v0) return true } + // match: (Rsh32x64 x (SUBFCconst [32] (ANDconst [31] y))) + // result: (SRAW x 
(SUBFCconst [32] (ANDconst [31] y))) + for { + x := v_0 + if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 31 { + break + } + y := v_1_0.Args[0] + v.reset(OpPPC64SRAW) + v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) + v0.AuxInt = int64ToAuxInt(32) + v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v1.AuxInt = int64ToAuxInt(31) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } // match: (Rsh32x64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) // result: (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) for { @@ -13519,6 +13925,37 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { } break } + // match: (Rsh32x64 x (SUBFCconst [32] (AND y (MOVDconst [31])))) + // result: (SRAW x (SUBFCconst [32] (ANDconst [31] y))) + for { + x := v_0 + if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 31 { + continue + } + v.reset(OpPPC64SRAW) + v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) + v0.AuxInt = int64ToAuxInt(32) + v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v1.AuxInt = int64ToAuxInt(31) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } + break + } // match: (Rsh32x64 x y) // result: (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32])))) for { @@ -13787,6 +14224,28 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { v.AddArg2(x, v0) return true } + // match: (Rsh64Ux64 x (SUBFCconst [64] (ANDconst [63] y))) + // result: (SRD x (SUBFCconst [64] 
(ANDconst [63] y))) + for { + x := v_0 + if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 63 { + break + } + y := v_1_0.Args[0] + v.reset(OpPPC64SRD) + v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) + v0.AuxInt = int64ToAuxInt(64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v1.AuxInt = int64ToAuxInt(63) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) // result: (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) for { @@ -13824,6 +14283,37 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { } break } + // match: (Rsh64Ux64 x (SUBFCconst [64] (AND y (MOVDconst [63])))) + // result: (SRD x (SUBFCconst [64] (ANDconst [63] y))) + for { + x := v_0 + if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 63 { + continue + } + v.reset(OpPPC64SRD) + v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) + v0.AuxInt = int64ToAuxInt(64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v1.AuxInt = int64ToAuxInt(63) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } + break + } // match: (Rsh64Ux64 x y) // result: (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64])))) for { @@ -14094,6 +14584,28 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { v.AddArg2(x, v0) return true } + // match: (Rsh64x64 x (SUBFCconst [64] (ANDconst [63] y))) + // result: (SRAD x (SUBFCconst [64] (ANDconst [63] y))) + 
for { + x := v_0 + if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 63 { + break + } + y := v_1_0.Args[0] + v.reset(OpPPC64SRAD) + v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) + v0.AuxInt = int64ToAuxInt(64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v1.AuxInt = int64ToAuxInt(63) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } // match: (Rsh64x64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) // result: (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) for { @@ -14131,6 +14643,37 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { } break } + // match: (Rsh64x64 x (SUBFCconst [64] (AND y (MOVDconst [63])))) + // result: (SRAD x (SUBFCconst [64] (ANDconst [63] y))) + for { + x := v_0 + if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 63 { + continue + } + v.reset(OpPPC64SRAD) + v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) + v0.AuxInt = int64ToAuxInt(64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v1.AuxInt = int64ToAuxInt(63) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } + break + } // match: (Rsh64x64 x y) // result: (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64])))) for { diff --git a/test/codegen/arithmetic.go b/test/codegen/arithmetic.go index afd4d66bd9..0bdb66a376 100644 --- a/test/codegen/arithmetic.go +++ b/test/codegen/arithmetic.go @@ -42,6 +42,48 @@ func SubMem(arr []int, b, c, d int) int { return arr[0] - 
arr[1] } +func SubFromConst(a int) int { + // ppc64le: `SUBC\tR[0-9]+,\s[$]40,\sR` + // ppc64: `SUBC\tR[0-9]+,\s[$]40,\sR` + b := 40 - a + return b +} + +func SubFromConstNeg(a int) int { + // ppc64le: `ADD\t[$]40,\sR[0-9]+,\sR` + // ppc64: `ADD\t[$]40,\sR[0-9]+,\sR` + c := 40 - (-a) + return c +} + +func SubSubFromConst(a int) int { + // ppc64le: `ADD\t[$]20,\sR[0-9]+,\sR` + // ppc64: `ADD\t[$]20,\sR[0-9]+,\sR` + c := 40 - (20 - a) + return c +} + +func AddSubFromConst(a int) int { + // ppc64le: `SUBC\tR[0-9]+,\s[$]60,\sR` + // ppc64: `SUBC\tR[0-9]+,\s[$]60,\sR` + c := 40 + (20 - a) + return c +} + +func NegSubFromConst(a int) int { + // ppc64le: `ADD\t[$]-20,\sR[0-9]+,\sR` + // ppc64: `ADD\t[$]-20,\sR[0-9]+,\sR` + c := -(20 - a) + return c +} + +func NegAddFromConstNeg(a int) int { + // ppc64le: `SUBC\tR[0-9]+,\s[$]40,\sR` + // ppc64: `SUBC\tR[0-9]+,\s[$]40,\sR` + c := -(-40 + a) + return c +} + // -------------------- // // Multiplication // // -------------------- // diff --git a/test/codegen/mathbits.go b/test/codegen/mathbits.go index 942605de55..4c35f26997 100644 --- a/test/codegen/mathbits.go +++ b/test/codegen/mathbits.go @@ -76,9 +76,17 @@ func Len64(n uint64) int { // arm:"CLZ" arm64:"CLZ" // mips:"CLZ" // wasm:"I64Clz" + // ppc64le:"SUBC","CNTLZD" + // ppc64:"SUBC","CNTLZD" return bits.Len64(n) } +func SubFromLen64(n uint64) int { + // ppc64le:"CNTLZD",-"SUBC" + // ppc64:"CNTLZD",-"SUBC" + return 64 - bits.Len64(n) +} + func Len32(n uint32) int { // amd64:"BSRQ","LEAQ",-"CMOVQEQ" // s390x:"FLOGR" @@ -291,6 +299,12 @@ func TrailingZeros64(n uint64) int { return bits.TrailingZeros64(n) } +func TrailingZeros64Subtract(n uint64) int { + // ppc64le/power8:"NEG","SUBC","ANDN","POPCNTD" + // ppc64le/power9:"SUBC","CNTTZD" + return bits.TrailingZeros64(1 - n) +} + func TrailingZeros32(n uint32) int { // amd64:"BTSQ\\t\\$32","BSFQ" // arm:"CLZ" diff --git a/test/codegen/slices.go b/test/codegen/slices.go index 40e857f9f6..38e8a62f4b 100644 --- 
a/test/codegen/slices.go +++ b/test/codegen/slices.go @@ -347,3 +347,24 @@ func InitNotSmallSliceLiteral() []int { 42, } } + +// --------------------------------------- // +// Test PPC64 SUBFCconst folding rules // +// triggered by slice operations. // +// --------------------------------------- // + +func SliceWithConstCompare(a []int, b int) []int { + var c []int = []int{1, 2, 3, 4, 5} + if b+len(a) < len(c) { + // ppc64le:-"NEG" + // ppc64:-"NEG" + return c[b:] + } + return a +} + +func SliceWithSubtractBound(a []int, b int) []int { + // ppc64le:"SUBC",-"NEG" + // ppc64:"SUBC",-"NEG" + return a[(3 - b):] +} -- cgit v1.2.3-54-g00ecf From 29634436fd741a7c685bf8f242b6fd62f093d1ad Mon Sep 17 00:00:00 2001 From: Benjamin Barenblat Date: Thu, 27 Aug 2020 16:12:18 -0400 Subject: cmd/cgo: ensure GCC does not use ANSI escape sequences in errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit cgo parses GCC’s error messages to classify C identifiers referenced from Go programs (are they integer constants? type names?). If GCC tries to colorize its errors, cgo can’t figure out what GCC is saying. GCC avoids escape sequences in this scenario by default, but the default behavior can be overridden in at least two places: - The user can set `CGO_COPTS=-fdiagnostics-color`. - Whoever compiled GCC can configure GCC itself to always colorize output. The most reliable way to ensure that GCC doesn’t colorize output is to append `-fdiagnostics-color=never` to the GCC command line; do so. 
Fixes #40415 Change-Id: Id4bdf8d92fac8b038340b4264f726e8fe38875b4 Reviewed-on: https://go-review.googlesource.com/c/go/+/248398 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/cmd/cgo/gcc.go | 28 +++++++++++++++++++++------- src/cmd/dist/test.go | 5 +++-- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go index a59534ebd0..9179b5490e 100644 --- a/src/cmd/cgo/gcc.go +++ b/src/cmd/cgo/gcc.go @@ -369,7 +369,18 @@ func (p *Package) guessKinds(f *File) []*Name { fmt.Fprintf(&b, "#line 1 \"completed\"\n"+ "int __cgo__1 = __cgo__2;\n") - stderr := p.gccErrors(b.Bytes()) + // We need to parse the output from this gcc command, so ensure that it + // doesn't have any ANSI escape sequences in it. (TERM=dumb is + // insufficient; if the user specifies CGO_CFLAGS=-fdiagnostics-color, + // GCC will ignore TERM, and GCC can also be configured at compile-time + // to ignore TERM.) + stderr := p.gccErrors(b.Bytes(), "-fdiagnostics-color=never") + if strings.Contains(stderr, "unrecognized command line option") { + // We're using an old version of GCC that doesn't understand + // -fdiagnostics-color. Those versions can't print color anyway, + // so just rerun without that option. + stderr = p.gccErrors(b.Bytes()) + } if stderr == "" { fatalf("%s produced no output\non input:\n%s", p.gccBaseCmd()[0], b.Bytes()) } @@ -1970,22 +1981,25 @@ func (p *Package) gccDefines(stdin []byte) string { // gccErrors runs gcc over the C program stdin and returns // the errors that gcc prints. That is, this function expects // gcc to fail. -func (p *Package) gccErrors(stdin []byte) string { +func (p *Package) gccErrors(stdin []byte, extraArgs ...string) string { // TODO(rsc): require failure args := p.gccCmd() // Optimization options can confuse the error messages; remove them. 
- nargs := make([]string, 0, len(args)) + nargs := make([]string, 0, len(args)+len(extraArgs)) for _, arg := range args { if !strings.HasPrefix(arg, "-O") { nargs = append(nargs, arg) } } - // Force -O0 optimization but keep the trailing "-" at the end. - nargs = append(nargs, "-O0") - nl := len(nargs) - nargs[nl-2], nargs[nl-1] = nargs[nl-1], nargs[nl-2] + // Force -O0 optimization and append extra arguments, but keep the + // trailing "-" at the end. + li := len(nargs) - 1 + last := nargs[li] + nargs[li] = "-O0" + nargs = append(nargs, extraArgs...) + nargs = append(nargs, last) if *debugGcc { fmt.Fprintf(os.Stderr, "$ %s < Date: Fri, 21 Aug 2020 11:09:45 -0700 Subject: runtime: framepointers are no longer an experiment - hard code them I think they are no longer experimental status. Might as well promote them to permanent. Change-Id: Id1259601b3dd2061dd60df86ee48080bfb575d2f Reviewed-on: https://go-review.googlesource.com/c/go/+/249857 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: David Chase --- src/cmd/compile/internal/gc/pgen.go | 4 ++-- src/cmd/compile/internal/ssa/regalloc.go | 2 +- src/cmd/internal/obj/arm64/obj7.go | 10 +++++----- src/cmd/internal/obj/link.go | 7 +++---- src/cmd/internal/obj/sym.go | 1 - src/cmd/internal/obj/x86/asm6.go | 6 +++--- src/cmd/internal/obj/x86/obj6.go | 2 +- src/cmd/internal/objabi/util.go | 7 ++----- src/cmd/link/internal/ld/lib.go | 8 -------- src/runtime/cgocall.go | 9 ++------- src/runtime/proc.go | 3 --- src/runtime/runtime2.go | 8 +++++--- src/runtime/stack.go | 6 +----- src/runtime/traceback.go | 6 +++--- 14 files changed, 28 insertions(+), 51 deletions(-) diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index ca8cccf4ae..74262595b0 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -507,7 +507,7 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var { if Ctxt.FixedFrameSize() == 0 { offs -= int64(Widthptr) } - if 
objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) || objabi.GOARCH == "arm64" { + if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" { // There is a word space for FP on ARM64 even if the frame pointer is disabled offs -= int64(Widthptr) } @@ -703,7 +703,7 @@ func stackOffset(slot ssa.LocalSlot) int32 { if Ctxt.FixedFrameSize() == 0 { base -= int64(Widthptr) } - if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) || objabi.GOARCH == "arm64" { + if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" { // There is a word space for FP on ARM64 even if the frame pointer is disabled base -= int64(Widthptr) } diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index a2be7bb596..64c6aed3e7 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -588,7 +588,7 @@ func (s *regAllocState) init(f *Func) { if s.f.Config.hasGReg { s.allocatable &^= 1 << s.GReg } - if s.f.Config.ctxt.Framepointer_enabled && s.f.Config.FPReg >= 0 { + if objabi.Framepointer_enabled && s.f.Config.FPReg >= 0 { s.allocatable &^= 1 << uint(s.f.Config.FPReg) } if s.f.Config.LinkReg != -1 { diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go index 0d74430053..f54429fabe 100644 --- a/src/cmd/internal/obj/arm64/obj7.go +++ b/src/cmd/internal/obj/arm64/obj7.go @@ -621,7 +621,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { prologueEnd.Pos = prologueEnd.Pos.WithXlogue(src.PosPrologueEnd) - if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) { + if objabi.Framepointer_enabled { q1 = obj.Appendp(q1, c.newprog) q1.Pos = p.Pos q1.As = AMOVD @@ -764,7 +764,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.To.Reg = REGSP p.Spadj = -c.autosize - if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) { + if objabi.Framepointer_enabled { p = obj.Appendp(p, c.newprog) p.As = ASUB p.From.Type = 
obj.TYPE_CONST @@ -777,7 +777,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } else { /* want write-back pre-indexed SP+autosize -> SP, loading REGLINK*/ - if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) { + if objabi.Framepointer_enabled { p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Reg = REGSP @@ -865,7 +865,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } case obj.ADUFFCOPY: - if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) { + if objabi.Framepointer_enabled { // ADR ret_addr, R27 // STP (FP, R27), -24(SP) // SUB 24, SP, FP @@ -918,7 +918,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } case obj.ADUFFZERO: - if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) { + if objabi.Framepointer_enabled { // ADR ret_addr, R27 // STP (FP, R27), -24(SP) // SUB 24, SP, FP diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 311e5ae2e8..1fc90db864 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -682,10 +682,9 @@ type Link struct { GenAbstractFunc func(fn *LSym) Errors int - InParallel bool // parallel backend phase in effect - Framepointer_enabled bool - UseBASEntries bool // use Base Address Selection Entries in location lists and PC ranges - IsAsm bool // is the source assembly language, which may contain surprising idioms (e.g., call tables) + InParallel bool // parallel backend phase in effect + UseBASEntries bool // use Base Address Selection Entries in location lists and PC ranges + IsAsm bool // is the source assembly language, which may contain surprising idioms (e.g., call tables) // state for writing objects Text []*LSym diff --git a/src/cmd/internal/obj/sym.go b/src/cmd/internal/obj/sym.go index 34f61b7f62..d58877ee15 100644 --- a/src/cmd/internal/obj/sym.go +++ b/src/cmd/internal/obj/sym.go @@ -53,7 +53,6 @@ func Linknew(arch *LinkArch) *Link { } ctxt.Flag_optimize = true - 
ctxt.Framepointer_enabled = objabi.Framepointer_enabled(objabi.GOOS, arch.Name) return ctxt } diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go index 82a2e6adc4..a530636373 100644 --- a/src/cmd/internal/obj/x86/asm6.go +++ b/src/cmd/internal/obj/x86/asm6.go @@ -4833,12 +4833,12 @@ func (ab *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { ctxt.Diag("directly calling duff when dynamically linking Go") } - if ctxt.Framepointer_enabled && yt.zcase == Zcallduff && ctxt.Arch.Family == sys.AMD64 { + if yt.zcase == Zcallduff && ctxt.Arch.Family == sys.AMD64 { // Maintain BP around call, since duffcopy/duffzero can't do it // (the call jumps into the middle of the function). // This makes it possible to see call sites for duffcopy/duffzero in // BP-based profiling tools like Linux perf (which is the - // whole point of obj.Framepointer_enabled). + // whole point of maintaining frame pointers in Go). // MOVQ BP, -16(SP) // LEAQ -16(SP), BP ab.Put(bpduff1) @@ -4852,7 +4852,7 @@ func (ab *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { r.Siz = 4 ab.PutInt32(0) - if ctxt.Framepointer_enabled && yt.zcase == Zcallduff && ctxt.Arch.Family == sys.AMD64 { + if yt.zcase == Zcallduff && ctxt.Arch.Family == sys.AMD64 { // Pop BP pushed above. 
// MOVQ 0(BP), BP ab.Put(bpduff2) diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go index c1e5bea055..016c247ff5 100644 --- a/src/cmd/internal/obj/x86/obj6.go +++ b/src/cmd/internal/obj/x86/obj6.go @@ -582,7 +582,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } var bpsize int - if ctxt.Arch.Family == sys.AMD64 && ctxt.Framepointer_enabled && + if ctxt.Arch.Family == sys.AMD64 && !p.From.Sym.NoFrame() && // (1) below !(autoffset == 0 && p.From.Sym.NoSplit()) && // (2) below !(autoffset == 0 && !hasCall) { // (3) below diff --git a/src/cmd/internal/objabi/util.go b/src/cmd/internal/objabi/util.go index f7873a42b9..6c5a9ba441 100644 --- a/src/cmd/internal/objabi/util.go +++ b/src/cmd/internal/objabi/util.go @@ -133,9 +133,8 @@ func init() { } } -func Framepointer_enabled(goos, goarch string) bool { - return framepointer_enabled != 0 && (goarch == "amd64" || goarch == "arm64" && (goos == "linux" || goos == "darwin")) -} +// Note: must agree with runtime.framepointer_enabled. +var Framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64" && (GOOS == "linux" || GOOS == "darwin") func addexp(s string) { // Could do general integer parsing here, but the runtime copy doesn't yet. 
@@ -159,7 +158,6 @@ func addexp(s string) { } var ( - framepointer_enabled int = 1 Fieldtrack_enabled int Preemptibleloops_enabled int Staticlockranking_enabled int @@ -174,7 +172,6 @@ var exper = []struct { val *int }{ {"fieldtrack", &Fieldtrack_enabled}, - {"framepointer", &framepointer_enabled}, {"preemptibleloops", &Preemptibleloops_enabled}, {"staticlockranking", &Staticlockranking_enabled}, } diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 09c7bbfb53..d6ee437bca 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -775,14 +775,6 @@ func (ctxt *Link) linksetup() { sb.SetSize(0) sb.AddUint8(uint8(objabi.GOARM)) } - - if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) { - fpe := ctxt.loader.LookupOrCreateSym("runtime.framepointer_enabled", 0) - sb := ctxt.loader.MakeSymbolUpdater(fpe) - sb.SetType(sym.SNOPTRDATA) - sb.SetSize(0) - sb.AddUint8(1) - } } else { // If OTOH the module does not contain the runtime package, // create a local symbol for the moduledata. diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go index 099aa540e0..427ed0ffb9 100644 --- a/src/runtime/cgocall.go +++ b/src/runtime/cgocall.go @@ -286,13 +286,8 @@ func cgocallbackg1(ctxt uintptr) { // Additional two words (16-byte alignment) are for saving FP. cb = (*args)(unsafe.Pointer(sp + 7*sys.PtrSize)) case "amd64": - // On amd64, stack frame is two words, plus caller PC. - if framepointer_enabled { - // In this case, there's also saved BP. - cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize)) - break - } - cb = (*args)(unsafe.Pointer(sp + 3*sys.PtrSize)) + // On amd64, stack frame is two words, plus caller PC and BP. + cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize)) case "386": // On 386, stack frame is three words, plus caller PC. 
cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize)) diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 5e38b3194c..341d52aea8 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -5459,9 +5459,6 @@ func setMaxThreads(in int) (out int) { } func haveexperiment(name string) bool { - if name == "framepointer" { - return framepointer_enabled // set by linker - } x := sys.Goexperiment for x != "" { xname := "" diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 64c6cc7198..a3157037e7 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -329,7 +329,7 @@ type gobuf struct { ctxt unsafe.Pointer ret sys.Uintreg lr uintptr - bp uintptr // for GOEXPERIMENT=framepointer + bp uintptr // for framepointer-enabled architectures } // sudog represents a g in a wait list, such as for sending/receiving @@ -1046,8 +1046,7 @@ var ( isIntel bool lfenceBeforeRdtsc bool - goarm uint8 // set by cmd/link on arm systems - framepointer_enabled bool // set by cmd/link + goarm uint8 // set by cmd/link on arm systems ) // Set by the linker so the runtime can determine the buildmode. @@ -1055,3 +1054,6 @@ var ( islibrary bool // -buildmode=c-shared isarchive bool // -buildmode=c-archive ) + +// Must agree with cmd/internal/objabi.Framepointer_enabled. +const framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64" && (GOOS == "linux" || GOOS == "darwin") diff --git a/src/runtime/stack.go b/src/runtime/stack.go index 0e930f60db..403b3c313e 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -648,12 +648,8 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool { } // Adjust saved base pointer if there is one. + // TODO what about arm64 frame pointer adjustment? 
if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize { - if !framepointer_enabled { - print("runtime: found space for saved base pointer, but no framepointer experiment\n") - print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n") - throw("bad frame layout") - } if stackDebug >= 3 { print(" saved bp\n") } diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index 7850eceafa..94f4a44976 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -269,9 +269,9 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in frame.varp -= sys.RegSize } - // If framepointer_enabled and there's a frame, then - // there's a saved bp here. - if frame.varp > frame.sp && (framepointer_enabled && GOARCH == "amd64" || GOARCH == "arm64") { + // For architectures with frame pointers, if there's + // a frame, then there's a saved frame pointer here. + if frame.varp > frame.sp && (GOARCH == "amd64" || GOARCH == "arm64") { frame.varp -= sys.RegSize } -- cgit v1.2.3-54-g00ecf From 8247da36621986a282c96b2abc236bbda2fdef41 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Fri, 17 Jul 2020 12:12:59 -0700 Subject: cmd/compile,cmd/asm: simplify recording of branch targets We currently use two fields to store the targets of branches. Some phases use p.To.Val, some use p.Pcond. Rewrite so that every branch instruction uses p.To.Val. p.From.Val is also used in rare instances. Introduce a Pool link for use by arm/arm64, instead of repurposing Pcond. This is a cleanup CL in preparation for some stack frame CLs. 
Change-Id: I9055bf0a1d986aff421e47951a1dedc301c846f8 Reviewed-on: https://go-review.googlesource.com/c/go/+/243318 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/amd64/ssa.go | 4 ++-- src/cmd/compile/internal/gc/gsubr.go | 2 +- src/cmd/compile/internal/gc/ssa.go | 2 +- src/cmd/compile/internal/s390x/ssa.go | 4 ++-- src/cmd/compile/internal/x86/ssa.go | 4 ++-- src/cmd/internal/obj/arm/asm5.go | 16 ++++++++-------- src/cmd/internal/obj/arm/obj5.go | 10 +++++----- src/cmd/internal/obj/arm64/asm7.go | 30 +++++++++++++++--------------- src/cmd/internal/obj/arm64/obj7.go | 16 ++++++++-------- src/cmd/internal/obj/link.go | 17 +++++++++++++++-- src/cmd/internal/obj/mips/asm0.go | 24 ++++++++++++------------ src/cmd/internal/obj/mips/obj0.go | 14 +++++++------- src/cmd/internal/obj/pass.go | 17 +++++++---------- src/cmd/internal/obj/ppc64/asm9.go | 18 +++++++++--------- src/cmd/internal/obj/ppc64/obj9.go | 12 ++++++------ src/cmd/internal/obj/riscv/obj.go | 28 ++++++++++++++-------------- src/cmd/internal/obj/s390x/asmz.go | 18 +++++++++--------- src/cmd/internal/obj/s390x/objz.go | 10 +++++----- src/cmd/internal/obj/util.go | 6 ++---- src/cmd/internal/obj/x86/asm6.go | 12 ++++++------ src/cmd/internal/obj/x86/obj6.go | 12 ++++++------ 21 files changed, 142 insertions(+), 134 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 9d8a0920b3..4ac877986c 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -319,8 +319,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // TODO(khr): issue only the -1 fixup code we need. // For instance, if only the quotient is used, no point in zeroing the remainder. 
- j1.To.Val = n1 - j2.To.Val = s.Pc() + j1.To.SetTarget(n1) + j2.To.SetTarget(s.Pc()) } case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU: diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index 15a84a8a43..480d411f49 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -342,6 +342,6 @@ func Patch(p *obj.Prog, to *obj.Prog) { if p.To.Type != obj.TYPE_BRANCH { Fatalf("patch: not a branch") } - p.To.Val = to + p.To.SetTarget(to) p.To.Offset = to.Pc } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 104dd403ea..52083d999e 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -6182,7 +6182,7 @@ func genssa(f *ssa.Func, pp *Progs) { // Resolve branches, and relax DefaultStmt into NotStmt for _, br := range s.Branches { - br.P.To.Val = s.bstart[br.B.ID] + br.P.To.SetTarget(s.bstart[br.B.ID]) if br.P.Pos.IsStmt() != src.PosIsStmt { br.P.Pos = br.P.Pos.WithNotStmt() } else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt { diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go index 4cf4b70a32..00d253c95a 100644 --- a/src/cmd/compile/internal/s390x/ssa.go +++ b/src/cmd/compile/internal/s390x/ssa.go @@ -338,8 +338,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { n.To.Reg = dividend } - j.To.Val = n - j2.To.Val = s.Pc() + j.To.SetTarget(n) + j2.To.SetTarget(s.Pc()) } case ssa.OpS390XADDconst, ssa.OpS390XADDWconst: opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt) diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go index 2de978c28a..c21ac32297 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -261,8 +261,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { n.To.Reg = x86.REG_DX } 
- j.To.Val = n - j2.To.Val = s.Pc() + j.To.SetTarget(n) + j2.To.SetTarget(s.Pc()) } case ssa.Op386HMULL, ssa.Op386HMULLU: diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go index 7b7e42ee2e..269a4223d5 100644 --- a/src/cmd/internal/obj/arm/asm5.go +++ b/src/cmd/internal/obj/arm/asm5.go @@ -644,7 +644,7 @@ func (c *ctxt5) flushpool(p *obj.Prog, skip int, force int) bool { q := c.newprog() q.As = AB q.To.Type = obj.TYPE_BRANCH - q.Pcond = p.Link + q.To.SetTarget(p.Link) q.Link = c.blitrl q.Pos = p.Pos c.blitrl = q @@ -705,7 +705,7 @@ func (c *ctxt5) addpool(p *obj.Prog, a *obj.Addr) { if t.Rel == nil { for q := c.blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */ if q.Rel == nil && q.To == t.To { - p.Pcond = q + p.Pool = q return } } @@ -724,8 +724,8 @@ func (c *ctxt5) addpool(p *obj.Prog, a *obj.Addr) { c.elitrl = q c.pool.size += 4 - // Store the link to the pool entry in Pcond. - p.Pcond = q + // Store the link to the pool entry in Pool. + p.Pool = q } func (c *ctxt5) regoff(a *obj.Addr) int32 { @@ -1584,8 +1584,8 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) { break } - if p.Pcond != nil { - v = int32((p.Pcond.Pc - c.pc) - 8) + if p.To.Target() != nil { + v = int32((p.To.Target().Pc - c.pc) - 8) } o1 |= (uint32(v) >> 2) & 0xffffff @@ -3023,7 +3023,7 @@ func (c *ctxt5) omvr(p *obj.Prog, a *obj.Addr, dr int) uint32 { func (c *ctxt5) omvl(p *obj.Prog, a *obj.Addr, dr int) uint32 { var o1 uint32 - if p.Pcond == nil { + if p.Pool == nil { c.aclass(a) v := immrot(^uint32(c.instoffset)) if v == 0 { @@ -3035,7 +3035,7 @@ func (c *ctxt5) omvl(p *obj.Prog, a *obj.Addr, dr int) uint32 { o1 |= uint32(v) o1 |= (uint32(dr) & 15) << 12 } else { - v := int32(p.Pcond.Pc - p.Pc - 8) + v := int32(p.Pool.Pc - p.Pc - 8) o1 = c.olr(v, REGPC, dr, int(p.Scond)&C_SCOND) } diff --git a/src/cmd/internal/obj/arm/obj5.go b/src/cmd/internal/obj/arm/obj5.go index 86831f2b44..4d9187b530 100644 --- a/src/cmd/internal/obj/arm/obj5.go +++ 
b/src/cmd/internal/obj/arm/obj5.go @@ -406,7 +406,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { mov.To.Reg = REG_R2 // B.NE branch target is MOVW above - bne.Pcond = mov + bne.To.SetTarget(mov) // ADD $(autosize+4), R13, R3 p = obj.Appendp(mov, newprog) @@ -428,7 +428,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p = obj.Appendp(p, newprog) p.As = ABNE p.To.Type = obj.TYPE_BRANCH - p.Pcond = end + p.To.SetTarget(end) // ADD $4, R13, R4 p = obj.Appendp(p, newprog) @@ -452,7 +452,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p = obj.Appendp(p, newprog) p.As = AB p.To.Type = obj.TYPE_BRANCH - p.Pcond = end + p.To.SetTarget(end) // reset for subsequent passes p = end @@ -741,7 +741,7 @@ func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { movw.To.Type = obj.TYPE_REG movw.To.Reg = REG_R3 - bls.Pcond = movw + bls.To.SetTarget(movw) // BL runtime.morestack call := obj.Appendp(movw, c.newprog) @@ -762,7 +762,7 @@ func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { b := obj.Appendp(pcdata, c.newprog) b.As = obj.AJMP b.To.Type = obj.TYPE_BRANCH - b.Pcond = c.cursym.Func.Text.Link + b.To.SetTarget(c.cursym.Func.Text.Link) b.Spadj = +framesize return end diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 7a5a8ff38c..0fa0c20b6a 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -977,8 +977,8 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { o = c.oplook(p) /* very large branches */ - if (o.type_ == 7 || o.type_ == 39 || o.type_ == 40) && p.Pcond != nil { // 7: BEQ and like, 39: CBZ and like, 40: TBZ and like - otxt := p.Pcond.Pc - pc + if (o.type_ == 7 || o.type_ == 39 || o.type_ == 40) && p.To.Target() != nil { // 7: BEQ and like, 39: CBZ and like, 40: TBZ and like + otxt := p.To.Target().Pc - pc var toofar bool switch o.type_ { case 7, 39: 
// branch instruction encodes 19 bits @@ -992,14 +992,14 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.Link = q q.As = AB q.To.Type = obj.TYPE_BRANCH - q.Pcond = p.Pcond - p.Pcond = q + q.To.SetTarget(p.To.Target()) + p.To.SetTarget(q) q = c.newprog() q.Link = p.Link p.Link = q q.As = AB q.To.Type = obj.TYPE_BRANCH - q.Pcond = q.Link.Link + q.To.SetTarget(q.Link.Link) bflag = 1 } } @@ -1123,7 +1123,7 @@ func (c *ctxt7) flushpool(p *obj.Prog, skip int) { q := c.newprog() q.As = AB q.To.Type = obj.TYPE_BRANCH - q.Pcond = p.Link + q.To.SetTarget(p.Link) q.Link = c.blitrl q.Pos = p.Pos c.blitrl = q @@ -1249,7 +1249,7 @@ func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) { for q := c.blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */ if q.To == t.To { - p.Pcond = q + p.Pool = q return } } @@ -1266,7 +1266,7 @@ func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) { c.elitrl = q c.pool.size = -c.pool.size & (funcAlign - 1) c.pool.size += uint32(sz) - p.Pcond = q + p.Pool = q } func (c *ctxt7) regoff(a *obj.Addr) uint32 { @@ -6042,15 +6042,15 @@ func (c *ctxt7) opimm(p *obj.Prog, a obj.As) uint32 { func (c *ctxt7) brdist(p *obj.Prog, preshift int, flen int, shift int) int64 { v := int64(0) t := int64(0) - if p.Pcond != nil { - v = (p.Pcond.Pc >> uint(preshift)) - (c.pc >> uint(preshift)) + if p.To.Target() != nil { + v = (p.To.Target().Pc >> uint(preshift)) - (c.pc >> uint(preshift)) if (v & ((1 << uint(shift)) - 1)) != 0 { c.ctxt.Diag("misaligned label\n%v", p) } v >>= uint(shift) t = int64(1) << uint(flen-1) if v < -t || v >= t { - c.ctxt.Diag("branch too far %#x vs %#x [%p]\n%v\n%v", v, t, c.blitrl, p, p.Pcond) + c.ctxt.Diag("branch too far %#x vs %#x [%p]\n%v\n%v", v, t, c.blitrl, p, p.To.Target()) panic("branch too far") } } @@ -6526,7 +6526,7 @@ func (c *ctxt7) oaddi(p *obj.Prog, o1 int32, v int32, r int, rt int) uint32 { */ func (c *ctxt7) omovlit(as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32 { var o1 int32 - if p.Pcond == 
nil { /* not in literal pool */ + if p.Pool == nil { /* not in literal pool */ c.aclass(a) c.ctxt.Logf("omovlit add %d (%#x)\n", c.instoffset, uint64(c.instoffset)) @@ -6552,11 +6552,11 @@ func (c *ctxt7) omovlit(as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32 { w = 1 /* 64-bit SIMD/FP */ case AMOVD: - if p.Pcond.As == ADWORD { + if p.Pool.As == ADWORD { w = 1 /* 64-bit */ - } else if p.Pcond.To.Offset < 0 { + } else if p.Pool.To.Offset < 0 { w = 2 /* 32-bit, sign-extended to 64-bit */ - } else if p.Pcond.To.Offset >= 0 { + } else if p.Pool.To.Offset >= 0 { w = 0 /* 32-bit, zero-extended to 64-bit */ } else { c.ctxt.Diag("invalid operand %v in %v", a, p) diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go index f54429fabe..56da854f16 100644 --- a/src/cmd/internal/obj/arm64/obj7.go +++ b/src/cmd/internal/obj/arm64/obj7.go @@ -187,9 +187,9 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { movlr.To.Type = obj.TYPE_REG movlr.To.Reg = REG_R3 if q != nil { - q.Pcond = movlr + q.To.SetTarget(movlr) } - bls.Pcond = movlr + bls.To.SetTarget(movlr) debug := movlr if false { @@ -220,7 +220,7 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { jmp := obj.Appendp(pcdata, c.newprog) jmp.As = AB jmp.To.Type = obj.TYPE_BRANCH - jmp.Pcond = c.cursym.Func.Text.Link + jmp.To.SetTarget(c.cursym.Func.Text.Link) jmp.Spadj = +framesize return end @@ -697,7 +697,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { mov.To.Reg = REG_R2 // CBNZ branches to the MOV above - cbnz.Pcond = mov + cbnz.To.SetTarget(mov) // ADD $(autosize+8), SP, R3 q = obj.Appendp(mov, c.newprog) @@ -719,7 +719,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, c.newprog) q.As = ABNE q.To.Type = obj.TYPE_BRANCH - q.Pcond = end + q.To.SetTarget(end) // ADD $8, SP, R4 q = obj.Appendp(q, c.newprog) @@ -743,7 +743,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, 
newprog obj.ProgAlloc) { q = obj.Appendp(q, c.newprog) q.As = AB q.To.Type = obj.TYPE_BRANCH - q.Pcond = end + q.To.SetTarget(end) } case obj.ARET: @@ -913,7 +913,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q5.Reg = REGSP q5.To.Type = obj.TYPE_REG q5.To.Reg = REGFP - q1.Pcond = q5 + q1.From.SetTarget(q5) p = q5 } @@ -966,7 +966,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q5.Reg = REGSP q5.To.Type = obj.TYPE_REG q5.To.Reg = REGFP - q1.Pcond = q5 + q1.From.SetTarget(q5) p = q5 } } diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 1fc90db864..1d4217b5f5 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -237,6 +237,19 @@ const ( TYPE_REGLIST ) +func (a *Addr) Target() *Prog { + if a.Type == TYPE_BRANCH && a.Val != nil { + return a.Val.(*Prog) + } + return nil +} +func (a *Addr) SetTarget(t *Prog) { + if a.Type != TYPE_BRANCH { + panic("setting branch target when type is not TYPE_BRANCH") + } + a.Val = t +} + // Prog describes a single machine instruction. // // The general instruction form is: @@ -255,7 +268,7 @@ const ( // to avoid too much changes in a single swing. // (1) scheme is enough to express any kind of operand combination. // -// Jump instructions use the Pcond field to point to the target instruction, +// Jump instructions use the To.Val field to point to the target *Prog, // which must be in the same linked list as the jump instruction. // // The Progs for a given function are arranged in a list linked through the Link field. 
@@ -274,7 +287,7 @@ type Prog struct { From Addr // first source operand RestArgs []Addr // can pack any operands that not fit into {Prog.From, Prog.To} To Addr // destination operand (second is RegTo2 below) - Pcond *Prog // target of conditional jump + Pool *Prog // constant pool entry, for arm,arm64 back ends Forwd *Prog // for x86 back end Rel *Prog // for x86, arm back ends Pc int64 // for back ends or assembler: virtual or actual program counter, depending on phase diff --git a/src/cmd/internal/obj/mips/asm0.go b/src/cmd/internal/obj/mips/asm0.go index faa827da9f..6107974745 100644 --- a/src/cmd/internal/obj/mips/asm0.go +++ b/src/cmd/internal/obj/mips/asm0.go @@ -460,8 +460,8 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { o = c.oplook(p) // very large conditional branches - if o.type_ == 6 && p.Pcond != nil { - otxt = p.Pcond.Pc - pc + if o.type_ == 6 && p.To.Target() != nil { + otxt = p.To.Target().Pc - pc if otxt < -(1<<17)+10 || otxt >= (1<<17)-10 { q = c.newprog() q.Link = p.Link @@ -469,15 +469,15 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.As = AJMP q.Pos = p.Pos q.To.Type = obj.TYPE_BRANCH - q.Pcond = p.Pcond - p.Pcond = q + q.To.SetTarget(p.To.Target()) + p.To.SetTarget(q) q = c.newprog() q.Link = p.Link p.Link = q q.As = AJMP q.Pos = p.Pos q.To.Type = obj.TYPE_BRANCH - q.Pcond = q.Link.Link + q.To.SetTarget(q.Link.Link) c.addnop(p.Link) c.addnop(p) @@ -1230,10 +1230,10 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { case 6: /* beq r1,[r2],sbra */ v := int32(0) - if p.Pcond == nil { + if p.To.Target() == nil { v = int32(-4) >> 2 } else { - v = int32(p.Pcond.Pc-p.Pc-4) >> 2 + v = int32(p.To.Target().Pc-p.Pc-4) >> 2 } if (v<<16)>>16 != v { c.ctxt.Diag("short branch too far\n%v", p) @@ -1285,25 +1285,25 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { if c.aclass(&p.To) == C_SBRA && p.To.Sym == nil && p.As == AJMP { // use PC-relative branch for short branches // 
BEQ R0, R0, sbra - if p.Pcond == nil { + if p.To.Target() == nil { v = int32(-4) >> 2 } else { - v = int32(p.Pcond.Pc-p.Pc-4) >> 2 + v = int32(p.To.Target().Pc-p.Pc-4) >> 2 } if (v<<16)>>16 == v { o1 = OP_IRR(c.opirr(ABEQ), uint32(v), uint32(REGZERO), uint32(REGZERO)) break } } - if p.Pcond == nil { + if p.To.Target() == nil { v = int32(p.Pc) >> 2 } else { - v = int32(p.Pcond.Pc) >> 2 + v = int32(p.To.Target().Pc) >> 2 } o1 = OP_JMP(c.opirr(p.As), uint32(v)) if p.To.Sym == nil { p.To.Sym = c.cursym.Func.Text.From.Sym - p.To.Offset = p.Pcond.Pc + p.To.Offset = p.To.Target().Pc } rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc) diff --git a/src/cmd/internal/obj/mips/obj0.go b/src/cmd/internal/obj/mips/obj0.go index 77cad979a6..f19facc00c 100644 --- a/src/cmd/internal/obj/mips/obj0.go +++ b/src/cmd/internal/obj/mips/obj0.go @@ -227,11 +227,11 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } else { p.Mark |= BRANCH } - q1 := p.Pcond + q1 := p.To.Target() if q1 != nil { for q1.As == obj.ANOP { q1 = q1.Link - p.Pcond = q1 + p.To.SetTarget(q1) } if q1.Mark&LEAF == 0 { @@ -424,8 +424,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, newprog) q.As = obj.ANOP - p1.Pcond = q - p2.Pcond = q + p1.To.SetTarget(q) + p2.To.SetTarget(q) } case ARET: @@ -778,7 +778,7 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.To.Type = obj.TYPE_REG p.To.Reg = REG_R3 if q != nil { - q.Pcond = p + q.To.SetTarget(p) p.Mark |= LABEL } @@ -805,14 +805,14 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.As = AJMP p.To.Type = obj.TYPE_BRANCH - p.Pcond = c.cursym.Func.Text.Link + p.To.SetTarget(c.cursym.Func.Text.Link) p.Mark |= BRANCH // placeholder for q1's jump target p = obj.Appendp(p, c.newprog) p.As = obj.ANOP // zero-width place holder - q1.Pcond = p + q1.To.SetTarget(p) return p } diff --git a/src/cmd/internal/obj/pass.go b/src/cmd/internal/obj/pass.go index 
4f156d969b..09d520b4e9 100644 --- a/src/cmd/internal/obj/pass.go +++ b/src/cmd/internal/obj/pass.go @@ -36,8 +36,8 @@ package obj // In the case of an infinite loop, brloop returns nil. func brloop(p *Prog) *Prog { c := 0 - for q := p; q != nil; q = q.Pcond { - if q.As != AJMP || q.Pcond == nil { + for q := p; q != nil; q = q.To.Target() { + if q.As != AJMP || q.To.Target() == nil { return q } c++ @@ -132,8 +132,6 @@ func linkpatch(ctxt *Link, sym *LSym, newprog ProgAlloc) { continue } if p.To.Val != nil { - // TODO: Remove To.Val.(*Prog) in favor of p->pcond. - p.Pcond = p.To.Val.(*Prog) continue } @@ -158,8 +156,7 @@ func linkpatch(ctxt *Link, sym *LSym, newprog ProgAlloc) { p.To.Type = TYPE_NONE } - p.To.Val = q - p.Pcond = q + p.To.SetTarget(q) } if !ctxt.Flag_optimize { @@ -168,12 +165,12 @@ func linkpatch(ctxt *Link, sym *LSym, newprog ProgAlloc) { // Collapse series of jumps to jumps. for p := sym.Func.Text; p != nil; p = p.Link { - if p.Pcond == nil { + if p.To.Target() == nil { continue } - p.Pcond = brloop(p.Pcond) - if p.Pcond != nil && p.To.Type == TYPE_BRANCH { - p.To.Offset = p.Pcond.Pc + p.To.SetTarget(brloop(p.To.Target())) + if p.To.Target() != nil && p.To.Type == TYPE_BRANCH { + p.To.Offset = p.To.Target().Pc } } } diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go index 3c82477fc4..98b453de6c 100644 --- a/src/cmd/internal/obj/ppc64/asm9.go +++ b/src/cmd/internal/obj/ppc64/asm9.go @@ -725,22 +725,22 @@ func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { o = c.oplook(p) // very large conditional branches - if (o.type_ == 16 || o.type_ == 17) && p.Pcond != nil { - otxt = p.Pcond.Pc - pc + if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil { + otxt = p.To.Target().Pc - pc if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 { q = c.newprog() q.Link = p.Link p.Link = q q.As = ABR q.To.Type = obj.TYPE_BRANCH - q.Pcond = p.Pcond - p.Pcond = q + q.To.SetTarget(p.To.Target()) + p.To.SetTarget(q) q = 
c.newprog() q.Link = p.Link p.Link = q q.As = ABR q.To.Type = obj.TYPE_BRANCH - q.Pcond = q.Link.Link + q.To.SetTarget(q.Link.Link) //addnop(p->link); //addnop(p); @@ -2630,8 +2630,8 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { case 11: /* br/bl lbra */ v := int32(0) - if p.Pcond != nil { - v = int32(p.Pcond.Pc - p.Pc) + if p.To.Target() != nil { + v = int32(p.To.Target().Pc - p.Pc) if v&03 != 0 { c.ctxt.Diag("odd branch target address\n%v", p) v &^= 03 @@ -2781,8 +2781,8 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { } } v := int32(0) - if p.Pcond != nil { - v = int32(p.Pcond.Pc - p.Pc) + if p.To.Target() != nil { + v = int32(p.To.Target().Pc - p.Pc) } if v&03 != 0 { c.ctxt.Diag("odd branch target address\n%v", p) diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go index 749f7066de..c012762a18 100644 --- a/src/cmd/internal/obj/ppc64/obj9.go +++ b/src/cmd/internal/obj/ppc64/obj9.go @@ -556,7 +556,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ABVS: p.Mark |= BRANCH q = p - q1 = p.Pcond + q1 = p.To.Target() if q1 != nil { // NOPs are not removed due to #40689. 
@@ -841,8 +841,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, c.newprog) q.As = obj.ANOP - p1.Pcond = q - p2.Pcond = q + p1.To.SetTarget(q) + p2.To.SetTarget(q) } case obj.ARET: @@ -1153,7 +1153,7 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.To.Type = obj.TYPE_REG p.To.Reg = REG_R5 if q != nil { - q.Pcond = p + q.To.SetTarget(p) } p = c.ctxt.EmitEntryStackMap(c.cursym, p, c.newprog) @@ -1248,13 +1248,13 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p = obj.Appendp(p, c.newprog) p.As = ABR p.To.Type = obj.TYPE_BRANCH - p.Pcond = p0.Link + p.To.SetTarget(p0.Link) // placeholder for q1's jump target p = obj.Appendp(p, c.newprog) p.As = obj.ANOP // zero-width place holder - q1.Pcond = p + q1.To.SetTarget(p) return p } diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go index 2eb2935b31..d2816487e4 100644 --- a/src/cmd/internal/obj/riscv/obj.go +++ b/src/cmd/internal/obj/riscv/obj.go @@ -634,7 +634,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { getargp.Reg = 0 getargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X12} - bneadj.Pcond = getargp + bneadj.To.SetTarget(getargp) calcargp := obj.Appendp(getargp, newprog) calcargp.As = AADDI @@ -647,7 +647,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { testargp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X12} testargp.Reg = REG_X13 testargp.To.Type = obj.TYPE_BRANCH - testargp.Pcond = endadj + testargp.To.SetTarget(endadj) adjargp := obj.Appendp(testargp, newprog) adjargp.As = AADDI @@ -665,7 +665,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { godone.As = AJAL godone.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO} godone.To.Type = obj.TYPE_BRANCH - godone.Pcond = endadj + godone.To.SetTarget(endadj) } // Update stack-based offsets. 
@@ -890,27 +890,27 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if p.To.Type != obj.TYPE_BRANCH { panic("assemble: instruction with branch-like opcode lacks destination") } - offset := p.Pcond.Pc - p.Pc + offset := p.To.Target().Pc - p.Pc if offset < -4096 || 4096 <= offset { // Branch is long. Replace it with a jump. jmp := obj.Appendp(p, newprog) jmp.As = AJAL jmp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO} jmp.To = obj.Addr{Type: obj.TYPE_BRANCH} - jmp.Pcond = p.Pcond + jmp.To.SetTarget(p.To.Target()) p.As = InvertBranch(p.As) - p.Pcond = jmp.Link + p.To.SetTarget(jmp.Link) // We may have made previous branches too long, // so recheck them. rescan = true } case AJAL: - if p.Pcond == nil { + if p.To.Target() == nil { panic("intersymbol jumps should be expressed as AUIPC+JALR") } - offset := p.Pcond.Pc - p.Pc + offset := p.To.Target().Pc - p.Pc if offset < -(1<<20) || (1<<20) <= offset { // Replace with 2-instruction sequence. This assumes // that TMP is not live across J instructions, since @@ -946,16 +946,16 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { case ABEQ, ABEQZ, ABGE, ABGEU, ABGEZ, ABGT, ABGTU, ABGTZ, ABLE, ABLEU, ABLEZ, ABLT, ABLTU, ABLTZ, ABNE, ABNEZ, AJAL: switch p.To.Type { case obj.TYPE_BRANCH: - p.To.Type, p.To.Offset = obj.TYPE_CONST, p.Pcond.Pc-p.Pc + p.To.Type, p.To.Offset = obj.TYPE_CONST, p.To.Target().Pc-p.Pc case obj.TYPE_MEM: panic("unhandled type") } case AAUIPC: if p.From.Type == obj.TYPE_BRANCH { - low, high, err := Split32BitImmediate(p.Pcond.Pc - p.Pc) + low, high, err := Split32BitImmediate(p.To.Target().Pc - p.Pc) if err != nil { - ctxt.Diag("%v: jump displacement %d too large", p, p.Pcond.Pc-p.Pc) + ctxt.Diag("%v: jump displacement %d too large", p, p.To.Target().Pc-p.Pc) } p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: high, Sym: cursym} p.Link.From.Offset = low @@ -1098,7 +1098,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog 
obj.ProgA p.To.Sym = ctxt.Lookup("runtime.morestack") } if to_more != nil { - to_more.Pcond = p + to_more.To.SetTarget(p) } p = jalrToSym(ctxt, p, newprog, REG_X5) @@ -1107,12 +1107,12 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgA p.As = AJAL p.To = obj.Addr{Type: obj.TYPE_BRANCH} p.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO} - p.Pcond = cursym.Func.Text.Link + p.To.SetTarget(cursym.Func.Text.Link) // placeholder for to_done's jump target p = obj.Appendp(p, newprog) p.As = obj.ANOP // zero-width place holder - to_done.Pcond = p + to_done.To.SetTarget(p) return p } diff --git a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go index 29182ea805..68f01f1c5d 100644 --- a/src/cmd/internal/obj/s390x/asmz.go +++ b/src/cmd/internal/obj/s390x/asmz.go @@ -3001,8 +3001,8 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 11: // br/bl v := int32(0) - if p.Pcond != nil { - v = int32((p.Pcond.Pc - p.Pc) >> 1) + if p.To.Target() != nil { + v = int32((p.To.Target().Pc - p.Pc) >> 1) } if p.As == ABR && p.To.Sym == nil && int32(int16(v)) == v { @@ -3122,8 +3122,8 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 16: // conditional branch v := int32(0) - if p.Pcond != nil { - v = int32((p.Pcond.Pc - p.Pc) >> 1) + if p.To.Target() != nil { + v = int32((p.To.Target().Pc - p.Pc) >> 1) } mask := uint32(c.branchMask(p)) if p.To.Sym == nil && int32(int16(v)) == v { @@ -3440,7 +3440,7 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 41: // branch on count r1 := p.From.Reg - ri2 := (p.Pcond.Pc - p.Pc) >> 1 + ri2 := (p.To.Target().Pc - p.Pc) >> 1 if int64(int16(ri2)) != ri2 { c.ctxt.Diag("branch target too far away") } @@ -3885,8 +3885,8 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 89: // compare and branch reg reg var v int32 - if p.Pcond != nil { - v = int32((p.Pcond.Pc - p.Pc) >> 1) + if p.To.Target() != nil { + v = int32((p.To.Target().Pc - p.Pc) >> 1) } // Some instructions take a 
mask as the first argument. @@ -3930,8 +3930,8 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 90: // compare and branch reg $constant var v int32 - if p.Pcond != nil { - v = int32((p.Pcond.Pc - p.Pc) >> 1) + if p.To.Target() != nil { + v = int32((p.To.Target().Pc - p.Pc) >> 1) } // Some instructions take a mask as the first argument. diff --git a/src/cmd/internal/obj/s390x/objz.go b/src/cmd/internal/obj/s390x/objz.go index ef6335d849..625bb0f7b4 100644 --- a/src/cmd/internal/obj/s390x/objz.go +++ b/src/cmd/internal/obj/s390x/objz.go @@ -454,8 +454,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, c.newprog) q.As = obj.ANOP - p1.Pcond = q - p2.Pcond = q + p1.To.SetTarget(q) + p2.To.SetTarget(q) } case obj.ARET: @@ -679,14 +679,14 @@ func (c *ctxtz) stacksplitPost(p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog, // MOVD LR, R5 p = obj.Appendp(pcdata, c.newprog) - pPre.Pcond = p + pPre.To.SetTarget(p) p.As = AMOVD p.From.Type = obj.TYPE_REG p.From.Reg = REG_LR p.To.Type = obj.TYPE_REG p.To.Reg = REG_R5 if pPreempt != nil { - pPreempt.Pcond = p + pPreempt.To.SetTarget(p) } // BL runtime.morestack(SB) @@ -709,7 +709,7 @@ func (c *ctxtz) stacksplitPost(p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog, p.As = ABR p.To.Type = obj.TYPE_BRANCH - p.Pcond = c.cursym.Func.Text.Link + p.To.SetTarget(c.cursym.Func.Text.Link) return p } diff --git a/src/cmd/internal/obj/util.go b/src/cmd/internal/obj/util.go index d020026445..a30ccf0564 100644 --- a/src/cmd/internal/obj/util.go +++ b/src/cmd/internal/obj/util.go @@ -251,10 +251,8 @@ func WriteDconv(w io.Writer, p *Prog, a *Addr) { case TYPE_BRANCH: if a.Sym != nil { fmt.Fprintf(w, "%s(SB)", a.Sym.Name) - } else if p != nil && p.Pcond != nil { - fmt.Fprint(w, p.Pcond.Pc) - } else if a.Val != nil { - fmt.Fprint(w, a.Val.(*Prog).Pc) + } else if a.Target() != nil { + fmt.Fprint(w, a.Target().Pc) } else { fmt.Fprintf(w, "%d(PC)", a.Offset) } diff --git 
a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go index a530636373..fb99c620ad 100644 --- a/src/cmd/internal/obj/x86/asm6.go +++ b/src/cmd/internal/obj/x86/asm6.go @@ -1855,7 +1855,7 @@ func spadjop(ctxt *obj.Link, l, q obj.As) obj.As { // no standalone or macro-fused jump will straddle or end on a 32 byte boundary // by inserting NOPs before the jumps func isJump(p *obj.Prog) bool { - return p.Pcond != nil || p.As == obj.AJMP || p.As == obj.ACALL || + return p.To.Target() != nil || p.As == obj.AJMP || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO } @@ -1867,7 +1867,7 @@ func lookForJCC(p *obj.Prog) *obj.Prog { for q = p.Link; q != nil && (q.As == obj.APCDATA || q.As == obj.AFUNCDATA || q.As == obj.ANOP); q = q.Link { } - if q == nil || q.Pcond == nil || p.As == obj.AJMP || p.As == obj.ACALL { + if q == nil || q.To.Target() == nil || p.As == obj.AJMP || p.As == obj.ACALL { return nil } @@ -2051,8 +2051,8 @@ func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { } for p := s.Func.Text; p != nil; p = p.Link { - if p.To.Type == obj.TYPE_BRANCH && p.Pcond == nil { - p.Pcond = p + if p.To.Type == obj.TYPE_BRANCH && p.To.Target() == nil { + p.To.SetTarget(p) } if p.As == AADJSP { p.To.Type = obj.TYPE_REG @@ -2088,7 +2088,7 @@ func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { for p := s.Func.Text; p != nil; p = p.Link { count++ p.Back = branchShort // use short branches first time through - if q := p.Pcond; q != nil && (q.Back&branchShort != 0) { + if q := p.To.Target(); q != nil && (q.Back&branchShort != 0) { p.Back |= branchBackwards q.Back |= branchLoopHead } @@ -4886,7 +4886,7 @@ func (ab *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { // TODO: Check in input, preserve in brchain. // Fill in backward jump now. 
- q = p.Pcond + q = p.To.Target() if q == nil { ctxt.Diag("jmp/branch/loop without target") diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go index 016c247ff5..18a6afcd77 100644 --- a/src/cmd/internal/obj/x86/obj6.go +++ b/src/cmd/internal/obj/x86/obj6.go @@ -765,7 +765,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } // Set jne branch target. - jne.Pcond = p + jne.To.SetTarget(p) // CMPQ panic_argp(BX), DI p = obj.Appendp(p, newprog) @@ -783,7 +783,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p = obj.Appendp(p, newprog) p.As = AJNE p.To.Type = obj.TYPE_BRANCH - p.Pcond = end + p.To.SetTarget(end) // MOVQ SP, panic_argp(BX) p = obj.Appendp(p, newprog) @@ -801,7 +801,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p = obj.Appendp(p, newprog) p.As = obj.AJMP p.To.Type = obj.TYPE_BRANCH - p.Pcond = end + p.To.SetTarget(end) // Reset p for following code. p = end @@ -1144,12 +1144,12 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA jmp := obj.Appendp(pcdata, newprog) jmp.As = obj.AJMP jmp.To.Type = obj.TYPE_BRANCH - jmp.Pcond = cursym.Func.Text.Link + jmp.To.SetTarget(cursym.Func.Text.Link) jmp.Spadj = +framesize - jls.Pcond = call + jls.To.SetTarget(call) if q1 != nil { - q1.Pcond = call + q1.To.SetTarget(call) } return end -- cgit v1.2.3-54-g00ecf From cdc635547fc436dc49c91ddb172b0e101febd3d7 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 30 Jun 2020 15:59:40 -0700 Subject: cmd/compile: invalidate zero-use values during rewrite This helps remove uses that aren't needed any more. That in turn helps other rules with Uses==1 conditions fire. 
Update #39918 Change-Id: I68635b675472f1d59e59604e4d34b949a0016533 Reviewed-on: https://go-review.googlesource.com/c/go/+/249463 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Josh Bleecher Snyder --- src/cmd/compile/internal/ssa/decompose.go | 8 +++++--- src/cmd/compile/internal/ssa/lower.go | 2 +- src/cmd/compile/internal/ssa/opt.go | 2 +- src/cmd/compile/internal/ssa/rewrite.go | 22 +++++++++++++++++++++- src/cmd/compile/internal/ssa/softfloat.go | 2 +- src/cmd/compile/internal/ssa/value.go | 20 ++++++++++++++++++++ 6 files changed, 49 insertions(+), 7 deletions(-) diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go index 6e72e3825c..ab27ba85ae 100644 --- a/src/cmd/compile/internal/ssa/decompose.go +++ b/src/cmd/compile/internal/ssa/decompose.go @@ -23,9 +23,11 @@ func decomposeBuiltIn(f *Func) { } // Decompose other values - applyRewrite(f, rewriteBlockdec, rewriteValuedec) + // Note: deadcode is false because we need to keep the original + // values around so the name component resolution below can still work. + applyRewrite(f, rewriteBlockdec, rewriteValuedec, leaveDeadValues) if f.Config.RegSize == 4 { - applyRewrite(f, rewriteBlockdec64, rewriteValuedec64) + applyRewrite(f, rewriteBlockdec64, rewriteValuedec64, leaveDeadValues) } // Split up named values into their components. 
@@ -215,7 +217,7 @@ func decomposeInterfacePhi(v *Value) { } func decomposeArgs(f *Func) { - applyRewrite(f, rewriteBlockdecArgs, rewriteValuedecArgs) + applyRewrite(f, rewriteBlockdecArgs, rewriteValuedecArgs, removeDeadValues) } func decomposeUser(f *Func) { diff --git a/src/cmd/compile/internal/ssa/lower.go b/src/cmd/compile/internal/ssa/lower.go index ab0fa803bf..f332b2e028 100644 --- a/src/cmd/compile/internal/ssa/lower.go +++ b/src/cmd/compile/internal/ssa/lower.go @@ -7,7 +7,7 @@ package ssa // convert to machine-dependent ops func lower(f *Func) { // repeat rewrites until we find no more rewrites - applyRewrite(f, f.Config.lowerBlock, f.Config.lowerValue) + applyRewrite(f, f.Config.lowerBlock, f.Config.lowerValue, removeDeadValues) } // checkLower checks for unlowered opcodes and fails if we find one. diff --git a/src/cmd/compile/internal/ssa/opt.go b/src/cmd/compile/internal/ssa/opt.go index 6e91fd7da3..128e614175 100644 --- a/src/cmd/compile/internal/ssa/opt.go +++ b/src/cmd/compile/internal/ssa/opt.go @@ -6,5 +6,5 @@ package ssa // machine-independent optimization func opt(f *Func) { - applyRewrite(f, rewriteBlockgeneric, rewriteValuegeneric) + applyRewrite(f, rewriteBlockgeneric, rewriteValuegeneric, removeDeadValues) } diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index fb35691296..3df9ad24f9 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -20,7 +20,15 @@ import ( "path/filepath" ) -func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter) { +type deadValueChoice bool + +const ( + leaveDeadValues deadValueChoice = false + removeDeadValues = true +) + +// deadcode indicates that rewrite should try to remove any values that become dead. 
+func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter, deadcode deadValueChoice) { // repeat rewrites until we find no more rewrites pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block pendingLines.clear() @@ -56,6 +64,18 @@ func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter) { *v0 = *v v0.Args = append([]*Value{}, v.Args...) // make a new copy, not aliasing } + if v.Uses == 0 && v.removeable() { + if v.Op != OpInvalid && deadcode == removeDeadValues { + // Reset any values that are now unused, so that we decrement + // the use count of all of its arguments. + // Not quite a deadcode pass, because it does not handle cycles. + // But it should help Uses==1 rules to fire. + v.reset(OpInvalid) + change = true + } + // No point rewriting values which aren't used. + continue + } vchange := phielimValue(v) if vchange && debug > 1 { diff --git a/src/cmd/compile/internal/ssa/softfloat.go b/src/cmd/compile/internal/ssa/softfloat.go index 4b578b133b..8db4334fef 100644 --- a/src/cmd/compile/internal/ssa/softfloat.go +++ b/src/cmd/compile/internal/ssa/softfloat.go @@ -72,7 +72,7 @@ func softfloat(f *Func) { if newInt64 && f.Config.RegSize == 4 { // On 32bit arch, decompose Uint64 introduced in the switch above. decomposeBuiltIn(f) - applyRewrite(f, rewriteBlockdec64, rewriteValuedec64) + applyRewrite(f, rewriteBlockdec64, rewriteValuedec64, removeDeadValues) } } diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index 7ead0ff300..7fc33772d3 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -460,3 +460,23 @@ func (v *Value) LackingPos() bool { return v.Op == OpVarDef || v.Op == OpVarKill || v.Op == OpVarLive || v.Op == OpPhi || (v.Op == OpFwdRef || v.Op == OpCopy) && v.Type == types.TypeMem } + +// removeable reports whether the value v can be removed from the SSA graph entirely +// if its use count drops to 0. 
+func (v *Value) removeable() bool { + if v.Type.IsVoid() { + // Void ops, like nil pointer checks, must stay. + return false + } + if v.Type.IsMemory() { + // All memory ops aren't needed here, but we do need + // to keep calls at least (because they might have + // synchronization operations we can't see). + return false + } + if v.Op.HasSideEffects() { + // These are mostly synchronization operations. + return false + } + return true +} -- cgit v1.2.3-54-g00ecf From 4f76fe86756841befb6574ce4bf04113d14389d4 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 26 Aug 2020 17:26:05 -0700 Subject: cmd/go, testing, os: fail test that calls os.Exit(0) This catches cases where a test calls code that calls os.Exit(0), thereby skipping all subsequent tests. Fixes #29062 Change-Id: If9478972f40189e27623557e7141469ca4234d89 Reviewed-on: https://go-review.googlesource.com/c/go/+/250977 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. Mills --- doc/go1.16.html | 12 +++- src/cmd/go/internal/test/flagdefs_test.go | 11 ++- src/cmd/go/internal/test/genflags.go | 7 +- src/cmd/go/internal/test/test.go | 3 +- src/cmd/go/testdata/script/test_exit.txt | 114 ++++++++++++++++++++++++++++++ src/internal/testlog/exit.go | 33 +++++++++ src/os/proc.go | 8 +++ src/testing/internal/testdeps/deps.go | 5 ++ src/testing/testing.go | 10 +++ 9 files changed, 195 insertions(+), 8 deletions(-) create mode 100644 src/cmd/go/testdata/script/test_exit.txt create mode 100644 src/internal/testlog/exit.go diff --git a/doc/go1.16.html b/doc/go1.16.html index c82b3b9276..805234bdab 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -52,6 +52,16 @@ Do not send CLs removing the interior tags from such phrases. TODO: write and link to tutorial or blog post

+

+ When using go test, a test that + calls os.Exit(0) during execution of a test function + will now be considered to fail. + This will help catch cases in which a test calls code that calls + os.Exit(0) and thereby stops running all future tests. + If a TestMain function calls os.Exit(0) + that is still considered to be a passing test. +

+

TODO

@@ -101,7 +111,7 @@ Do not send CLs removing the interior tags from such phrases.

net

-

+

The case of I/O on a closed network connection, or I/O on a network connection that is closed before any of the I/O completes, can now be detected using the new ErrClosed error. diff --git a/src/cmd/go/internal/test/flagdefs_test.go b/src/cmd/go/internal/test/flagdefs_test.go index 7562415298..ab5440b380 100644 --- a/src/cmd/go/internal/test/flagdefs_test.go +++ b/src/cmd/go/internal/test/flagdefs_test.go @@ -16,9 +16,14 @@ func TestPassFlagToTestIncludesAllTestFlags(t *testing.T) { return } name := strings.TrimPrefix(f.Name, "test.") - if name != "testlogfile" && !passFlagToTest[name] { - t.Errorf("passFlagToTest missing entry for %q (flag test.%s)", name, name) - t.Logf("(Run 'go generate cmd/go/internal/test' if it should be added.)") + switch name { + case "testlogfile", "paniconexit0": + // These are internal flags. + default: + if !passFlagToTest[name] { + t.Errorf("passFlagToTest missing entry for %q (flag test.%s)", name, name) + t.Logf("(Run 'go generate cmd/go/internal/test' if it should be added.)") + } } }) diff --git a/src/cmd/go/internal/test/genflags.go b/src/cmd/go/internal/test/genflags.go index 512fa1671e..5e83d53980 100644 --- a/src/cmd/go/internal/test/genflags.go +++ b/src/cmd/go/internal/test/genflags.go @@ -62,9 +62,10 @@ func testFlags() []string { } name := strings.TrimPrefix(f.Name, "test.") - if name == "testlogfile" { - // test.testlogfile is “for use only by cmd/go” - } else { + switch name { + case "testlogfile", "paniconexit0": + // These flags are only for use by cmd/go. + default: names = append(names, name) } }) diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index 3aee6939d2..1ea6d2881e 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -1164,7 +1164,8 @@ func (c *runCache) builderRunTest(b *work.Builder, ctx context.Context, a *work. 
if !c.disableCache && len(execCmd) == 0 { testlogArg = []string{"-test.testlogfile=" + a.Objdir + "testlog.txt"} } - args := str.StringList(execCmd, a.Deps[0].BuiltTarget(), testlogArg, testArgs) + panicArg := "-test.paniconexit0" + args := str.StringList(execCmd, a.Deps[0].BuiltTarget(), testlogArg, panicArg, testArgs) if testCoverProfile != "" { // Write coverage to temporary profile, for merging later. diff --git a/src/cmd/go/testdata/script/test_exit.txt b/src/cmd/go/testdata/script/test_exit.txt new file mode 100644 index 0000000000..23a2429d1e --- /dev/null +++ b/src/cmd/go/testdata/script/test_exit.txt @@ -0,0 +1,114 @@ +# Builds and runs test binaries, so skip in short mode. +[short] skip + +env GO111MODULE=on + +# If a test invoked by 'go test' exits with a zero status code, +# it will panic. +! go test ./zero +! stdout ^ok +! stdout 'exit status' +stdout 'panic' +stdout ^FAIL + +# If a test exits with a non-zero status code, 'go test' fails normally. +! go test ./one +! stdout ^ok +stdout 'exit status' +! stdout 'panic' +stdout ^FAIL + +# Ensure that other flags still do the right thing. +go test -list=. ./zero +stdout ExitZero + +! go test -bench=. ./zero +stdout 'panic' + +# 'go test' with no args streams output without buffering. Ensure that it still +# catches a zero exit with missing output. +cd zero +! go test +stdout 'panic' +cd ../normal +go test +stdout ^ok +cd .. + +# If a TestMain exits with a zero status code, 'go test' shouldn't +# complain about that. It's a common way to skip testing a package +# entirely. +go test ./main_zero +! stdout 'skipping all tests' +stdout ^ok + +# With -v, we'll see the warning from TestMain. +go test -v ./main_zero +stdout 'skipping all tests' +stdout ^ok + +# Listing all tests won't actually give a result if TestMain exits. That's okay, +# because this is how TestMain works. If we decide to support -list even when +# TestMain is used to skip entire packages, we can change this test case. +go test -list=. 
./main_zero +stdout 'skipping all tests' +! stdout TestNotListed + +-- go.mod -- +module m + +-- ./normal/normal.go -- +package normal +-- ./normal/normal_test.go -- +package normal + +import "testing" + +func TestExitZero(t *testing.T) { +} + +-- ./zero/zero.go -- +package zero +-- ./zero/zero_test.go -- +package zero + +import ( + "os" + "testing" +) + +func TestExitZero(t *testing.T) { + os.Exit(0) +} + +-- ./one/one.go -- +package one +-- ./one/one_test.go -- +package one + +import ( + "os" + "testing" +) + +func TestExitOne(t *testing.T) { + os.Exit(1) +} + +-- ./main_zero/zero.go -- +package zero +-- ./main_zero/zero_test.go -- +package zero + +import ( + "fmt" + "os" + "testing" +) + +func TestMain(m *testing.M) { + fmt.Println("skipping all tests") + os.Exit(0) +} + +func TestNotListed(t *testing.T) {} diff --git a/src/internal/testlog/exit.go b/src/internal/testlog/exit.go new file mode 100644 index 0000000000..e15defdb5b --- /dev/null +++ b/src/internal/testlog/exit.go @@ -0,0 +1,33 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testlog + +import "sync" + +// PanicOnExit0 reports whether to panic on a call to os.Exit(0). +// This is in the testlog package because, like other definitions in +// package testlog, it is a hook between the testing package and the +// os package. This is used to ensure that an early call to os.Exit(0) +// does not cause a test to pass. +func PanicOnExit0() bool { + panicOnExit0.mu.Lock() + defer panicOnExit0.mu.Unlock() + return panicOnExit0.val +} + +// panicOnExit0 is the flag used for PanicOnExit0. This uses a lock +// because the value can be cleared via a timer call that may race +// with calls to os.Exit +var panicOnExit0 struct { + mu sync.Mutex + val bool +} + +// SetPanicOnExit0 sets panicOnExit0 to v. 
+func SetPanicOnExit0(v bool) { + panicOnExit0.mu.Lock() + defer panicOnExit0.mu.Unlock() + panicOnExit0.val = v +} diff --git a/src/os/proc.go b/src/os/proc.go index 7364d631f2..cbd5a6aad9 100644 --- a/src/os/proc.go +++ b/src/os/proc.go @@ -7,6 +7,7 @@ package os import ( + "internal/testlog" "runtime" "syscall" ) @@ -60,6 +61,13 @@ func Getgroups() ([]int, error) { // For portability, the status code should be in the range [0, 125]. func Exit(code int) { if code == 0 { + if testlog.PanicOnExit0() { + // We were told to panic on calls to os.Exit(0). + // This is used to fail tests that make an early + // unexpected call to os.Exit(0). + panic("unexpected call to os.Exit(0) during test") + } + // Give race detector a chance to fail the program. // Racy programs do not have the right to finish successfully. runtime_beforeExit() diff --git a/src/testing/internal/testdeps/deps.go b/src/testing/internal/testdeps/deps.go index af08dd768a..3608d33294 100644 --- a/src/testing/internal/testdeps/deps.go +++ b/src/testing/internal/testdeps/deps.go @@ -121,3 +121,8 @@ func (TestDeps) StopTestLog() error { log.w = nil return err } + +// SetPanicOnExit0 tells the os package whether to panic on os.Exit(0). 
+func (TestDeps) SetPanicOnExit0(v bool) { + testlog.SetPanicOnExit0(v) +} diff --git a/src/testing/testing.go b/src/testing/testing.go index bf83df8863..d0334243f4 100644 --- a/src/testing/testing.go +++ b/src/testing/testing.go @@ -294,6 +294,7 @@ func Init() { blockProfileRate = flag.Int("test.blockprofilerate", 1, "set blocking profile `rate` (see runtime.SetBlockProfileRate)") mutexProfile = flag.String("test.mutexprofile", "", "write a mutex contention profile to the named file after execution") mutexProfileFraction = flag.Int("test.mutexprofilefraction", 1, "if >= 0, calls runtime.SetMutexProfileFraction()") + panicOnExit0 = flag.Bool("test.paniconexit0", false, "panic on call to os.Exit(0)") traceFile = flag.String("test.trace", "", "write an execution trace to `file`") timeout = flag.Duration("test.timeout", 0, "panic test binary after duration `d` (default 0, timeout disabled)") cpuListStr = flag.String("test.cpu", "", "comma-separated `list` of cpu counts to run each test with") @@ -320,6 +321,7 @@ var ( blockProfileRate *int mutexProfile *string mutexProfileFraction *int + panicOnExit0 *bool traceFile *string timeout *time.Duration cpuListStr *string @@ -1261,6 +1263,7 @@ func (f matchStringOnly) WriteProfileTo(string, io.Writer, int) error { return e func (f matchStringOnly) ImportPath() string { return "" } func (f matchStringOnly) StartTestLog(io.Writer) {} func (f matchStringOnly) StopTestLog() error { return errMain } +func (f matchStringOnly) SetPanicOnExit0(bool) {} // Main is an internal function, part of the implementation of the "go test" command. // It was exported because it is cross-package and predates "internal" packages. 
@@ -1296,6 +1299,7 @@ type M struct { type testDeps interface { ImportPath() string MatchString(pat, str string) (bool, error) + SetPanicOnExit0(bool) StartCPUProfile(io.Writer) error StopCPUProfile() StartTestLog(io.Writer) @@ -1521,11 +1525,17 @@ func (m *M) before() { m.deps.StartTestLog(f) testlogFile = f } + if *panicOnExit0 { + m.deps.SetPanicOnExit0(true) + } } // after runs after all testing. func (m *M) after() { m.afterOnce.Do(func() { + if *panicOnExit0 { + m.deps.SetPanicOnExit0(false) + } m.writeProfiles() }) } -- cgit v1.2.3-54-g00ecf From 42fd1306cea2dc2ff91bd5208d9593721ab5a30f Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Mon, 15 Jun 2020 11:08:36 -0700 Subject: cmd/compile: clean up equality generation We're using sort.SliceStable, so no need to keep track of indexes as well. Use a more robust test for whether a node is a call. Add a test that we're actually reordering comparisons. This test fails without the alg.go changes in this CL because eqstring uses OCALLFUNC instead of OCALL for its data comparisons. Update #8606 Change-Id: Ieeec33434c72e3aa328deb11cc415cfda05632e2 Reviewed-on: https://go-review.googlesource.com/c/go/+/237921 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/alg.go | 31 ++++++++----------- test/fixedbugs/issue8606b.go | 63 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+), 19 deletions(-) create mode 100644 test/fixedbugs/issue8606b.go diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index e2e2374717..2b63700569 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -646,17 +646,11 @@ func geneq(t *types.Type) *obj.LSym { // Build a list of conditions to satisfy. // The conditions are a list-of-lists. Conditions are reorderable // within each inner list. The outer lists must be evaluated in order. 
- // Even within each inner list, track their order so that we can preserve - // aspects of that order. (TODO: latter part needed?) - type nodeIdx struct { - n *Node - idx int - } - var conds [][]nodeIdx - conds = append(conds, []nodeIdx{}) + var conds [][]*Node + conds = append(conds, []*Node{}) and := func(n *Node) { i := len(conds) - 1 - conds[i] = append(conds[i], nodeIdx{n: n, idx: len(conds[i])}) + conds[i] = append(conds[i], n) } // Walk the struct using memequal for runs of AMEM @@ -674,7 +668,7 @@ func geneq(t *types.Type) *obj.LSym { if !IsRegularMemory(f.Type) { if EqCanPanic(f.Type) { // Enforce ordering by starting a new set of reorderable conditions. - conds = append(conds, []nodeIdx{}) + conds = append(conds, []*Node{}) } p := nodSym(OXDOT, np, f.Sym) q := nodSym(OXDOT, nq, f.Sym) @@ -688,7 +682,7 @@ func geneq(t *types.Type) *obj.LSym { } if EqCanPanic(f.Type) { // Also enforce ordering after something that can panic. - conds = append(conds, []nodeIdx{}) + conds = append(conds, []*Node{}) } i++ continue @@ -713,14 +707,13 @@ func geneq(t *types.Type) *obj.LSym { // Sort conditions to put runtime calls last. // Preserve the rest of the ordering. - var flatConds []nodeIdx + var flatConds []*Node for _, c := range conds { + isCall := func(n *Node) bool { + return n.Op == OCALL || n.Op == OCALLFUNC + } sort.SliceStable(c, func(i, j int) bool { - x, y := c[i], c[j] - if (x.n.Op != OCALL) == (y.n.Op != OCALL) { - return x.idx < y.idx - } - return x.n.Op != OCALL + return !isCall(c[i]) && isCall(c[j]) }) flatConds = append(flatConds, c...) 
} @@ -729,9 +722,9 @@ func geneq(t *types.Type) *obj.LSym { if len(flatConds) == 0 { cond = nodbool(true) } else { - cond = flatConds[0].n + cond = flatConds[0] for _, c := range flatConds[1:] { - cond = nod(OANDAND, cond, c.n) + cond = nod(OANDAND, cond, c) } } diff --git a/test/fixedbugs/issue8606b.go b/test/fixedbugs/issue8606b.go new file mode 100644 index 0000000000..448ea566f0 --- /dev/null +++ b/test/fixedbugs/issue8606b.go @@ -0,0 +1,63 @@ +// run + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is an optimization check. We want to make sure that we compare +// string lengths, and other scalar fields, before checking string +// contents. There's no way to verify this in the language, and +// codegen tests in test/codegen can't really detect ordering +// optimizations like this. Instead, we generate invalid strings with +// bad backing store pointers but nonzero length, so we can check that +// the backing store never gets compared. +// +// We use two different bad strings so that pointer comparisons of +// backing store pointers fail. + +package main + +import ( + "fmt" + "reflect" + "unsafe" +) + +func bad1() string { + s := "foo" + (*reflect.StringHeader)(unsafe.Pointer(&s)).Data = 1 // write bad value to data ptr + return s +} +func bad2() string { + s := "foo" + (*reflect.StringHeader)(unsafe.Pointer(&s)).Data = 2 // write bad value to data ptr + return s +} + +type SI struct { + s string + i int +} + +type SS struct { + s string + t string +} + +func main() { + for _, test := range []struct { + a, b interface{} + }{ + {SI{s: bad1(), i: 1}, SI{s: bad2(), i: 2}}, + {SS{s: bad1(), t: "a"}, SS{s: bad2(), t: "aa"}}, + {SS{s: "a", t: bad1()}, SS{s: "b", t: bad2()}}, + // This one would panic because the length of both strings match, and we check + // the body of the bad strings before the body of the good strings. 
+ //{SS{s: bad1(), t: "a"}, SS{s: bad2(), t: "b"}}, + } { + if test.a == test.b { + panic(fmt.Sprintf("values %#v and %#v should not be equal", test.a, test.b)) + } + } + +} -- cgit v1.2.3-54-g00ecf From 26ad27bb026a34149ad0b1d7d56723c8b5dee3d7 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 27 Aug 2020 23:34:32 +0000 Subject: Revert "cmd/compile,cmd/asm: simplify recording of branch targets" This reverts CL 243318. Reason for revert: Seems to be crashing some builders. Change-Id: I2ffc59bc5535be60b884b281c8d0eff4647dc756 Reviewed-on: https://go-review.googlesource.com/c/go/+/251169 Reviewed-by: Bryan C. Mills --- src/cmd/compile/internal/amd64/ssa.go | 4 ++-- src/cmd/compile/internal/gc/gsubr.go | 2 +- src/cmd/compile/internal/gc/ssa.go | 2 +- src/cmd/compile/internal/s390x/ssa.go | 4 ++-- src/cmd/compile/internal/x86/ssa.go | 4 ++-- src/cmd/internal/obj/arm/asm5.go | 16 ++++++++-------- src/cmd/internal/obj/arm/obj5.go | 10 +++++----- src/cmd/internal/obj/arm64/asm7.go | 30 +++++++++++++++--------------- src/cmd/internal/obj/arm64/obj7.go | 16 ++++++++-------- src/cmd/internal/obj/link.go | 17 ++--------------- src/cmd/internal/obj/mips/asm0.go | 24 ++++++++++++------------ src/cmd/internal/obj/mips/obj0.go | 14 +++++++------- src/cmd/internal/obj/pass.go | 17 ++++++++++------- src/cmd/internal/obj/ppc64/asm9.go | 18 +++++++++--------- src/cmd/internal/obj/ppc64/obj9.go | 12 ++++++------ src/cmd/internal/obj/riscv/obj.go | 28 ++++++++++++++-------------- src/cmd/internal/obj/s390x/asmz.go | 18 +++++++++--------- src/cmd/internal/obj/s390x/objz.go | 10 +++++----- src/cmd/internal/obj/util.go | 6 ++++-- src/cmd/internal/obj/x86/asm6.go | 12 ++++++------ src/cmd/internal/obj/x86/obj6.go | 12 ++++++------ 21 files changed, 134 insertions(+), 142 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 4ac877986c..9d8a0920b3 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ 
b/src/cmd/compile/internal/amd64/ssa.go @@ -319,8 +319,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // TODO(khr): issue only the -1 fixup code we need. // For instance, if only the quotient is used, no point in zeroing the remainder. - j1.To.SetTarget(n1) - j2.To.SetTarget(s.Pc()) + j1.To.Val = n1 + j2.To.Val = s.Pc() } case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU: diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index 480d411f49..15a84a8a43 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -342,6 +342,6 @@ func Patch(p *obj.Prog, to *obj.Prog) { if p.To.Type != obj.TYPE_BRANCH { Fatalf("patch: not a branch") } - p.To.SetTarget(to) + p.To.Val = to p.To.Offset = to.Pc } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 52083d999e..104dd403ea 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -6182,7 +6182,7 @@ func genssa(f *ssa.Func, pp *Progs) { // Resolve branches, and relax DefaultStmt into NotStmt for _, br := range s.Branches { - br.P.To.SetTarget(s.bstart[br.B.ID]) + br.P.To.Val = s.bstart[br.B.ID] if br.P.Pos.IsStmt() != src.PosIsStmt { br.P.Pos = br.P.Pos.WithNotStmt() } else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt { diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go index 00d253c95a..4cf4b70a32 100644 --- a/src/cmd/compile/internal/s390x/ssa.go +++ b/src/cmd/compile/internal/s390x/ssa.go @@ -338,8 +338,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { n.To.Reg = dividend } - j.To.SetTarget(n) - j2.To.SetTarget(s.Pc()) + j.To.Val = n + j2.To.Val = s.Pc() } case ssa.OpS390XADDconst, ssa.OpS390XADDWconst: opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt) diff --git a/src/cmd/compile/internal/x86/ssa.go 
b/src/cmd/compile/internal/x86/ssa.go index c21ac32297..2de978c28a 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -261,8 +261,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { n.To.Reg = x86.REG_DX } - j.To.SetTarget(n) - j2.To.SetTarget(s.Pc()) + j.To.Val = n + j2.To.Val = s.Pc() } case ssa.Op386HMULL, ssa.Op386HMULLU: diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go index 269a4223d5..7b7e42ee2e 100644 --- a/src/cmd/internal/obj/arm/asm5.go +++ b/src/cmd/internal/obj/arm/asm5.go @@ -644,7 +644,7 @@ func (c *ctxt5) flushpool(p *obj.Prog, skip int, force int) bool { q := c.newprog() q.As = AB q.To.Type = obj.TYPE_BRANCH - q.To.SetTarget(p.Link) + q.Pcond = p.Link q.Link = c.blitrl q.Pos = p.Pos c.blitrl = q @@ -705,7 +705,7 @@ func (c *ctxt5) addpool(p *obj.Prog, a *obj.Addr) { if t.Rel == nil { for q := c.blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */ if q.Rel == nil && q.To == t.To { - p.Pool = q + p.Pcond = q return } } @@ -724,8 +724,8 @@ func (c *ctxt5) addpool(p *obj.Prog, a *obj.Addr) { c.elitrl = q c.pool.size += 4 - // Store the link to the pool entry in Pool. - p.Pool = q + // Store the link to the pool entry in Pcond. 
+ p.Pcond = q } func (c *ctxt5) regoff(a *obj.Addr) int32 { @@ -1584,8 +1584,8 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) { break } - if p.To.Target() != nil { - v = int32((p.To.Target().Pc - c.pc) - 8) + if p.Pcond != nil { + v = int32((p.Pcond.Pc - c.pc) - 8) } o1 |= (uint32(v) >> 2) & 0xffffff @@ -3023,7 +3023,7 @@ func (c *ctxt5) omvr(p *obj.Prog, a *obj.Addr, dr int) uint32 { func (c *ctxt5) omvl(p *obj.Prog, a *obj.Addr, dr int) uint32 { var o1 uint32 - if p.Pool == nil { + if p.Pcond == nil { c.aclass(a) v := immrot(^uint32(c.instoffset)) if v == 0 { @@ -3035,7 +3035,7 @@ func (c *ctxt5) omvl(p *obj.Prog, a *obj.Addr, dr int) uint32 { o1 |= uint32(v) o1 |= (uint32(dr) & 15) << 12 } else { - v := int32(p.Pool.Pc - p.Pc - 8) + v := int32(p.Pcond.Pc - p.Pc - 8) o1 = c.olr(v, REGPC, dr, int(p.Scond)&C_SCOND) } diff --git a/src/cmd/internal/obj/arm/obj5.go b/src/cmd/internal/obj/arm/obj5.go index 4d9187b530..86831f2b44 100644 --- a/src/cmd/internal/obj/arm/obj5.go +++ b/src/cmd/internal/obj/arm/obj5.go @@ -406,7 +406,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { mov.To.Reg = REG_R2 // B.NE branch target is MOVW above - bne.To.SetTarget(mov) + bne.Pcond = mov // ADD $(autosize+4), R13, R3 p = obj.Appendp(mov, newprog) @@ -428,7 +428,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p = obj.Appendp(p, newprog) p.As = ABNE p.To.Type = obj.TYPE_BRANCH - p.To.SetTarget(end) + p.Pcond = end // ADD $4, R13, R4 p = obj.Appendp(p, newprog) @@ -452,7 +452,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p = obj.Appendp(p, newprog) p.As = AB p.To.Type = obj.TYPE_BRANCH - p.To.SetTarget(end) + p.Pcond = end // reset for subsequent passes p = end @@ -741,7 +741,7 @@ func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { movw.To.Type = obj.TYPE_REG movw.To.Reg = REG_R3 - bls.To.SetTarget(movw) + bls.Pcond = movw // BL runtime.morestack call := 
obj.Appendp(movw, c.newprog) @@ -762,7 +762,7 @@ func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { b := obj.Appendp(pcdata, c.newprog) b.As = obj.AJMP b.To.Type = obj.TYPE_BRANCH - b.To.SetTarget(c.cursym.Func.Text.Link) + b.Pcond = c.cursym.Func.Text.Link b.Spadj = +framesize return end diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 0fa0c20b6a..7a5a8ff38c 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -977,8 +977,8 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { o = c.oplook(p) /* very large branches */ - if (o.type_ == 7 || o.type_ == 39 || o.type_ == 40) && p.To.Target() != nil { // 7: BEQ and like, 39: CBZ and like, 40: TBZ and like - otxt := p.To.Target().Pc - pc + if (o.type_ == 7 || o.type_ == 39 || o.type_ == 40) && p.Pcond != nil { // 7: BEQ and like, 39: CBZ and like, 40: TBZ and like + otxt := p.Pcond.Pc - pc var toofar bool switch o.type_ { case 7, 39: // branch instruction encodes 19 bits @@ -992,14 +992,14 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.Link = q q.As = AB q.To.Type = obj.TYPE_BRANCH - q.To.SetTarget(p.To.Target()) - p.To.SetTarget(q) + q.Pcond = p.Pcond + p.Pcond = q q = c.newprog() q.Link = p.Link p.Link = q q.As = AB q.To.Type = obj.TYPE_BRANCH - q.To.SetTarget(q.Link.Link) + q.Pcond = q.Link.Link bflag = 1 } } @@ -1123,7 +1123,7 @@ func (c *ctxt7) flushpool(p *obj.Prog, skip int) { q := c.newprog() q.As = AB q.To.Type = obj.TYPE_BRANCH - q.To.SetTarget(p.Link) + q.Pcond = p.Link q.Link = c.blitrl q.Pos = p.Pos c.blitrl = q @@ -1249,7 +1249,7 @@ func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) { for q := c.blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */ if q.To == t.To { - p.Pool = q + p.Pcond = q return } } @@ -1266,7 +1266,7 @@ func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) { c.elitrl = q c.pool.size = -c.pool.size & (funcAlign - 1) c.pool.size += 
uint32(sz) - p.Pool = q + p.Pcond = q } func (c *ctxt7) regoff(a *obj.Addr) uint32 { @@ -6042,15 +6042,15 @@ func (c *ctxt7) opimm(p *obj.Prog, a obj.As) uint32 { func (c *ctxt7) brdist(p *obj.Prog, preshift int, flen int, shift int) int64 { v := int64(0) t := int64(0) - if p.To.Target() != nil { - v = (p.To.Target().Pc >> uint(preshift)) - (c.pc >> uint(preshift)) + if p.Pcond != nil { + v = (p.Pcond.Pc >> uint(preshift)) - (c.pc >> uint(preshift)) if (v & ((1 << uint(shift)) - 1)) != 0 { c.ctxt.Diag("misaligned label\n%v", p) } v >>= uint(shift) t = int64(1) << uint(flen-1) if v < -t || v >= t { - c.ctxt.Diag("branch too far %#x vs %#x [%p]\n%v\n%v", v, t, c.blitrl, p, p.To.Target()) + c.ctxt.Diag("branch too far %#x vs %#x [%p]\n%v\n%v", v, t, c.blitrl, p, p.Pcond) panic("branch too far") } } @@ -6526,7 +6526,7 @@ func (c *ctxt7) oaddi(p *obj.Prog, o1 int32, v int32, r int, rt int) uint32 { */ func (c *ctxt7) omovlit(as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32 { var o1 int32 - if p.Pool == nil { /* not in literal pool */ + if p.Pcond == nil { /* not in literal pool */ c.aclass(a) c.ctxt.Logf("omovlit add %d (%#x)\n", c.instoffset, uint64(c.instoffset)) @@ -6552,11 +6552,11 @@ func (c *ctxt7) omovlit(as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32 { w = 1 /* 64-bit SIMD/FP */ case AMOVD: - if p.Pool.As == ADWORD { + if p.Pcond.As == ADWORD { w = 1 /* 64-bit */ - } else if p.Pool.To.Offset < 0 { + } else if p.Pcond.To.Offset < 0 { w = 2 /* 32-bit, sign-extended to 64-bit */ - } else if p.Pool.To.Offset >= 0 { + } else if p.Pcond.To.Offset >= 0 { w = 0 /* 32-bit, zero-extended to 64-bit */ } else { c.ctxt.Diag("invalid operand %v in %v", a, p) diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go index 56da854f16..f54429fabe 100644 --- a/src/cmd/internal/obj/arm64/obj7.go +++ b/src/cmd/internal/obj/arm64/obj7.go @@ -187,9 +187,9 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { movlr.To.Type = 
obj.TYPE_REG movlr.To.Reg = REG_R3 if q != nil { - q.To.SetTarget(movlr) + q.Pcond = movlr } - bls.To.SetTarget(movlr) + bls.Pcond = movlr debug := movlr if false { @@ -220,7 +220,7 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { jmp := obj.Appendp(pcdata, c.newprog) jmp.As = AB jmp.To.Type = obj.TYPE_BRANCH - jmp.To.SetTarget(c.cursym.Func.Text.Link) + jmp.Pcond = c.cursym.Func.Text.Link jmp.Spadj = +framesize return end @@ -697,7 +697,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { mov.To.Reg = REG_R2 // CBNZ branches to the MOV above - cbnz.To.SetTarget(mov) + cbnz.Pcond = mov // ADD $(autosize+8), SP, R3 q = obj.Appendp(mov, c.newprog) @@ -719,7 +719,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, c.newprog) q.As = ABNE q.To.Type = obj.TYPE_BRANCH - q.To.SetTarget(end) + q.Pcond = end // ADD $8, SP, R4 q = obj.Appendp(q, c.newprog) @@ -743,7 +743,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, c.newprog) q.As = AB q.To.Type = obj.TYPE_BRANCH - q.To.SetTarget(end) + q.Pcond = end } case obj.ARET: @@ -913,7 +913,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q5.Reg = REGSP q5.To.Type = obj.TYPE_REG q5.To.Reg = REGFP - q1.From.SetTarget(q5) + q1.Pcond = q5 p = q5 } @@ -966,7 +966,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q5.Reg = REGSP q5.To.Type = obj.TYPE_REG q5.To.Reg = REGFP - q1.From.SetTarget(q5) + q1.Pcond = q5 p = q5 } } diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 1d4217b5f5..1fc90db864 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -237,19 +237,6 @@ const ( TYPE_REGLIST ) -func (a *Addr) Target() *Prog { - if a.Type == TYPE_BRANCH && a.Val != nil { - return a.Val.(*Prog) - } - return nil -} -func (a *Addr) SetTarget(t *Prog) { - if a.Type != TYPE_BRANCH { - 
panic("setting branch target when type is not TYPE_BRANCH") - } - a.Val = t -} - // Prog describes a single machine instruction. // // The general instruction form is: @@ -268,7 +255,7 @@ func (a *Addr) SetTarget(t *Prog) { // to avoid too much changes in a single swing. // (1) scheme is enough to express any kind of operand combination. // -// Jump instructions use the To.Val field to point to the target *Prog, +// Jump instructions use the Pcond field to point to the target instruction, // which must be in the same linked list as the jump instruction. // // The Progs for a given function are arranged in a list linked through the Link field. @@ -287,7 +274,7 @@ type Prog struct { From Addr // first source operand RestArgs []Addr // can pack any operands that not fit into {Prog.From, Prog.To} To Addr // destination operand (second is RegTo2 below) - Pool *Prog // constant pool entry, for arm,arm64 back ends + Pcond *Prog // target of conditional jump Forwd *Prog // for x86 back end Rel *Prog // for x86, arm back ends Pc int64 // for back ends or assembler: virtual or actual program counter, depending on phase diff --git a/src/cmd/internal/obj/mips/asm0.go b/src/cmd/internal/obj/mips/asm0.go index 6107974745..faa827da9f 100644 --- a/src/cmd/internal/obj/mips/asm0.go +++ b/src/cmd/internal/obj/mips/asm0.go @@ -460,8 +460,8 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { o = c.oplook(p) // very large conditional branches - if o.type_ == 6 && p.To.Target() != nil { - otxt = p.To.Target().Pc - pc + if o.type_ == 6 && p.Pcond != nil { + otxt = p.Pcond.Pc - pc if otxt < -(1<<17)+10 || otxt >= (1<<17)-10 { q = c.newprog() q.Link = p.Link @@ -469,15 +469,15 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.As = AJMP q.Pos = p.Pos q.To.Type = obj.TYPE_BRANCH - q.To.SetTarget(p.To.Target()) - p.To.SetTarget(q) + q.Pcond = p.Pcond + p.Pcond = q q = c.newprog() q.Link = p.Link p.Link = q q.As = AJMP q.Pos = p.Pos q.To.Type = 
obj.TYPE_BRANCH - q.To.SetTarget(q.Link.Link) + q.Pcond = q.Link.Link c.addnop(p.Link) c.addnop(p) @@ -1230,10 +1230,10 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { case 6: /* beq r1,[r2],sbra */ v := int32(0) - if p.To.Target() == nil { + if p.Pcond == nil { v = int32(-4) >> 2 } else { - v = int32(p.To.Target().Pc-p.Pc-4) >> 2 + v = int32(p.Pcond.Pc-p.Pc-4) >> 2 } if (v<<16)>>16 != v { c.ctxt.Diag("short branch too far\n%v", p) @@ -1285,25 +1285,25 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { if c.aclass(&p.To) == C_SBRA && p.To.Sym == nil && p.As == AJMP { // use PC-relative branch for short branches // BEQ R0, R0, sbra - if p.To.Target() == nil { + if p.Pcond == nil { v = int32(-4) >> 2 } else { - v = int32(p.To.Target().Pc-p.Pc-4) >> 2 + v = int32(p.Pcond.Pc-p.Pc-4) >> 2 } if (v<<16)>>16 == v { o1 = OP_IRR(c.opirr(ABEQ), uint32(v), uint32(REGZERO), uint32(REGZERO)) break } } - if p.To.Target() == nil { + if p.Pcond == nil { v = int32(p.Pc) >> 2 } else { - v = int32(p.To.Target().Pc) >> 2 + v = int32(p.Pcond.Pc) >> 2 } o1 = OP_JMP(c.opirr(p.As), uint32(v)) if p.To.Sym == nil { p.To.Sym = c.cursym.Func.Text.From.Sym - p.To.Offset = p.To.Target().Pc + p.To.Offset = p.Pcond.Pc } rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc) diff --git a/src/cmd/internal/obj/mips/obj0.go b/src/cmd/internal/obj/mips/obj0.go index f19facc00c..77cad979a6 100644 --- a/src/cmd/internal/obj/mips/obj0.go +++ b/src/cmd/internal/obj/mips/obj0.go @@ -227,11 +227,11 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } else { p.Mark |= BRANCH } - q1 := p.To.Target() + q1 := p.Pcond if q1 != nil { for q1.As == obj.ANOP { q1 = q1.Link - p.To.SetTarget(q1) + p.Pcond = q1 } if q1.Mark&LEAF == 0 { @@ -424,8 +424,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, newprog) q.As = obj.ANOP - p1.To.SetTarget(q) - p2.To.SetTarget(q) + p1.Pcond = q + p2.Pcond = q } case ARET: @@ -778,7 
+778,7 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.To.Type = obj.TYPE_REG p.To.Reg = REG_R3 if q != nil { - q.To.SetTarget(p) + q.Pcond = p p.Mark |= LABEL } @@ -805,14 +805,14 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.As = AJMP p.To.Type = obj.TYPE_BRANCH - p.To.SetTarget(c.cursym.Func.Text.Link) + p.Pcond = c.cursym.Func.Text.Link p.Mark |= BRANCH // placeholder for q1's jump target p = obj.Appendp(p, c.newprog) p.As = obj.ANOP // zero-width place holder - q1.To.SetTarget(p) + q1.Pcond = p return p } diff --git a/src/cmd/internal/obj/pass.go b/src/cmd/internal/obj/pass.go index 09d520b4e9..4f156d969b 100644 --- a/src/cmd/internal/obj/pass.go +++ b/src/cmd/internal/obj/pass.go @@ -36,8 +36,8 @@ package obj // In the case of an infinite loop, brloop returns nil. func brloop(p *Prog) *Prog { c := 0 - for q := p; q != nil; q = q.To.Target() { - if q.As != AJMP || q.To.Target() == nil { + for q := p; q != nil; q = q.Pcond { + if q.As != AJMP || q.Pcond == nil { return q } c++ @@ -132,6 +132,8 @@ func linkpatch(ctxt *Link, sym *LSym, newprog ProgAlloc) { continue } if p.To.Val != nil { + // TODO: Remove To.Val.(*Prog) in favor of p->pcond. + p.Pcond = p.To.Val.(*Prog) continue } @@ -156,7 +158,8 @@ func linkpatch(ctxt *Link, sym *LSym, newprog ProgAlloc) { p.To.Type = TYPE_NONE } - p.To.SetTarget(q) + p.To.Val = q + p.Pcond = q } if !ctxt.Flag_optimize { @@ -165,12 +168,12 @@ func linkpatch(ctxt *Link, sym *LSym, newprog ProgAlloc) { // Collapse series of jumps to jumps. 
for p := sym.Func.Text; p != nil; p = p.Link { - if p.To.Target() == nil { + if p.Pcond == nil { continue } - p.To.SetTarget(brloop(p.To.Target())) - if p.To.Target() != nil && p.To.Type == TYPE_BRANCH { - p.To.Offset = p.To.Target().Pc + p.Pcond = brloop(p.Pcond) + if p.Pcond != nil && p.To.Type == TYPE_BRANCH { + p.To.Offset = p.Pcond.Pc } } } diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go index 98b453de6c..3c82477fc4 100644 --- a/src/cmd/internal/obj/ppc64/asm9.go +++ b/src/cmd/internal/obj/ppc64/asm9.go @@ -725,22 +725,22 @@ func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { o = c.oplook(p) // very large conditional branches - if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil { - otxt = p.To.Target().Pc - pc + if (o.type_ == 16 || o.type_ == 17) && p.Pcond != nil { + otxt = p.Pcond.Pc - pc if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 { q = c.newprog() q.Link = p.Link p.Link = q q.As = ABR q.To.Type = obj.TYPE_BRANCH - q.To.SetTarget(p.To.Target()) - p.To.SetTarget(q) + q.Pcond = p.Pcond + p.Pcond = q q = c.newprog() q.Link = p.Link p.Link = q q.As = ABR q.To.Type = obj.TYPE_BRANCH - q.To.SetTarget(q.Link.Link) + q.Pcond = q.Link.Link //addnop(p->link); //addnop(p); @@ -2630,8 +2630,8 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { case 11: /* br/bl lbra */ v := int32(0) - if p.To.Target() != nil { - v = int32(p.To.Target().Pc - p.Pc) + if p.Pcond != nil { + v = int32(p.Pcond.Pc - p.Pc) if v&03 != 0 { c.ctxt.Diag("odd branch target address\n%v", p) v &^= 03 @@ -2781,8 +2781,8 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { } } v := int32(0) - if p.To.Target() != nil { - v = int32(p.To.Target().Pc - p.Pc) + if p.Pcond != nil { + v = int32(p.Pcond.Pc - p.Pc) } if v&03 != 0 { c.ctxt.Diag("odd branch target address\n%v", p) diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go index c012762a18..749f7066de 100644 --- 
a/src/cmd/internal/obj/ppc64/obj9.go +++ b/src/cmd/internal/obj/ppc64/obj9.go @@ -556,7 +556,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ABVS: p.Mark |= BRANCH q = p - q1 = p.To.Target() + q1 = p.Pcond if q1 != nil { // NOPs are not removed due to #40689. @@ -841,8 +841,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, c.newprog) q.As = obj.ANOP - p1.To.SetTarget(q) - p2.To.SetTarget(q) + p1.Pcond = q + p2.Pcond = q } case obj.ARET: @@ -1153,7 +1153,7 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.To.Type = obj.TYPE_REG p.To.Reg = REG_R5 if q != nil { - q.To.SetTarget(p) + q.Pcond = p } p = c.ctxt.EmitEntryStackMap(c.cursym, p, c.newprog) @@ -1248,13 +1248,13 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p = obj.Appendp(p, c.newprog) p.As = ABR p.To.Type = obj.TYPE_BRANCH - p.To.SetTarget(p0.Link) + p.Pcond = p0.Link // placeholder for q1's jump target p = obj.Appendp(p, c.newprog) p.As = obj.ANOP // zero-width place holder - q1.To.SetTarget(p) + q1.Pcond = p return p } diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go index d2816487e4..2eb2935b31 100644 --- a/src/cmd/internal/obj/riscv/obj.go +++ b/src/cmd/internal/obj/riscv/obj.go @@ -634,7 +634,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { getargp.Reg = 0 getargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X12} - bneadj.To.SetTarget(getargp) + bneadj.Pcond = getargp calcargp := obj.Appendp(getargp, newprog) calcargp.As = AADDI @@ -647,7 +647,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { testargp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X12} testargp.Reg = REG_X13 testargp.To.Type = obj.TYPE_BRANCH - testargp.To.SetTarget(endadj) + testargp.Pcond = endadj adjargp := obj.Appendp(testargp, newprog) adjargp.As = AADDI @@ -665,7 +665,7 @@ func preprocess(ctxt *obj.Link, cursym 
*obj.LSym, newprog obj.ProgAlloc) { godone.As = AJAL godone.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO} godone.To.Type = obj.TYPE_BRANCH - godone.To.SetTarget(endadj) + godone.Pcond = endadj } // Update stack-based offsets. @@ -890,27 +890,27 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if p.To.Type != obj.TYPE_BRANCH { panic("assemble: instruction with branch-like opcode lacks destination") } - offset := p.To.Target().Pc - p.Pc + offset := p.Pcond.Pc - p.Pc if offset < -4096 || 4096 <= offset { // Branch is long. Replace it with a jump. jmp := obj.Appendp(p, newprog) jmp.As = AJAL jmp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO} jmp.To = obj.Addr{Type: obj.TYPE_BRANCH} - jmp.To.SetTarget(p.To.Target()) + jmp.Pcond = p.Pcond p.As = InvertBranch(p.As) - p.To.SetTarget(jmp.Link) + p.Pcond = jmp.Link // We may have made previous branches too long, // so recheck them. rescan = true } case AJAL: - if p.To.Target() == nil { + if p.Pcond == nil { panic("intersymbol jumps should be expressed as AUIPC+JALR") } - offset := p.To.Target().Pc - p.Pc + offset := p.Pcond.Pc - p.Pc if offset < -(1<<20) || (1<<20) <= offset { // Replace with 2-instruction sequence. 
This assumes // that TMP is not live across J instructions, since @@ -946,16 +946,16 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { case ABEQ, ABEQZ, ABGE, ABGEU, ABGEZ, ABGT, ABGTU, ABGTZ, ABLE, ABLEU, ABLEZ, ABLT, ABLTU, ABLTZ, ABNE, ABNEZ, AJAL: switch p.To.Type { case obj.TYPE_BRANCH: - p.To.Type, p.To.Offset = obj.TYPE_CONST, p.To.Target().Pc-p.Pc + p.To.Type, p.To.Offset = obj.TYPE_CONST, p.Pcond.Pc-p.Pc case obj.TYPE_MEM: panic("unhandled type") } case AAUIPC: if p.From.Type == obj.TYPE_BRANCH { - low, high, err := Split32BitImmediate(p.To.Target().Pc - p.Pc) + low, high, err := Split32BitImmediate(p.Pcond.Pc - p.Pc) if err != nil { - ctxt.Diag("%v: jump displacement %d too large", p, p.To.Target().Pc-p.Pc) + ctxt.Diag("%v: jump displacement %d too large", p, p.Pcond.Pc-p.Pc) } p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: high, Sym: cursym} p.Link.From.Offset = low @@ -1098,7 +1098,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgA p.To.Sym = ctxt.Lookup("runtime.morestack") } if to_more != nil { - to_more.To.SetTarget(p) + to_more.Pcond = p } p = jalrToSym(ctxt, p, newprog, REG_X5) @@ -1107,12 +1107,12 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgA p.As = AJAL p.To = obj.Addr{Type: obj.TYPE_BRANCH} p.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO} - p.To.SetTarget(cursym.Func.Text.Link) + p.Pcond = cursym.Func.Text.Link // placeholder for to_done's jump target p = obj.Appendp(p, newprog) p.As = obj.ANOP // zero-width place holder - to_done.To.SetTarget(p) + to_done.Pcond = p return p } diff --git a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go index 68f01f1c5d..29182ea805 100644 --- a/src/cmd/internal/obj/s390x/asmz.go +++ b/src/cmd/internal/obj/s390x/asmz.go @@ -3001,8 +3001,8 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 11: // br/bl v := int32(0) - if p.To.Target() != nil { - v = int32((p.To.Target().Pc - 
p.Pc) >> 1) + if p.Pcond != nil { + v = int32((p.Pcond.Pc - p.Pc) >> 1) } if p.As == ABR && p.To.Sym == nil && int32(int16(v)) == v { @@ -3122,8 +3122,8 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 16: // conditional branch v := int32(0) - if p.To.Target() != nil { - v = int32((p.To.Target().Pc - p.Pc) >> 1) + if p.Pcond != nil { + v = int32((p.Pcond.Pc - p.Pc) >> 1) } mask := uint32(c.branchMask(p)) if p.To.Sym == nil && int32(int16(v)) == v { @@ -3440,7 +3440,7 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 41: // branch on count r1 := p.From.Reg - ri2 := (p.To.Target().Pc - p.Pc) >> 1 + ri2 := (p.Pcond.Pc - p.Pc) >> 1 if int64(int16(ri2)) != ri2 { c.ctxt.Diag("branch target too far away") } @@ -3885,8 +3885,8 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 89: // compare and branch reg reg var v int32 - if p.To.Target() != nil { - v = int32((p.To.Target().Pc - p.Pc) >> 1) + if p.Pcond != nil { + v = int32((p.Pcond.Pc - p.Pc) >> 1) } // Some instructions take a mask as the first argument. @@ -3930,8 +3930,8 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 90: // compare and branch reg $constant var v int32 - if p.To.Target() != nil { - v = int32((p.To.Target().Pc - p.Pc) >> 1) + if p.Pcond != nil { + v = int32((p.Pcond.Pc - p.Pc) >> 1) } // Some instructions take a mask as the first argument. 
diff --git a/src/cmd/internal/obj/s390x/objz.go b/src/cmd/internal/obj/s390x/objz.go index 625bb0f7b4..ef6335d849 100644 --- a/src/cmd/internal/obj/s390x/objz.go +++ b/src/cmd/internal/obj/s390x/objz.go @@ -454,8 +454,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, c.newprog) q.As = obj.ANOP - p1.To.SetTarget(q) - p2.To.SetTarget(q) + p1.Pcond = q + p2.Pcond = q } case obj.ARET: @@ -679,14 +679,14 @@ func (c *ctxtz) stacksplitPost(p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog, // MOVD LR, R5 p = obj.Appendp(pcdata, c.newprog) - pPre.To.SetTarget(p) + pPre.Pcond = p p.As = AMOVD p.From.Type = obj.TYPE_REG p.From.Reg = REG_LR p.To.Type = obj.TYPE_REG p.To.Reg = REG_R5 if pPreempt != nil { - pPreempt.To.SetTarget(p) + pPreempt.Pcond = p } // BL runtime.morestack(SB) @@ -709,7 +709,7 @@ func (c *ctxtz) stacksplitPost(p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog, p.As = ABR p.To.Type = obj.TYPE_BRANCH - p.To.SetTarget(c.cursym.Func.Text.Link) + p.Pcond = c.cursym.Func.Text.Link return p } diff --git a/src/cmd/internal/obj/util.go b/src/cmd/internal/obj/util.go index a30ccf0564..d020026445 100644 --- a/src/cmd/internal/obj/util.go +++ b/src/cmd/internal/obj/util.go @@ -251,8 +251,10 @@ func WriteDconv(w io.Writer, p *Prog, a *Addr) { case TYPE_BRANCH: if a.Sym != nil { fmt.Fprintf(w, "%s(SB)", a.Sym.Name) - } else if a.Target() != nil { - fmt.Fprint(w, a.Target().Pc) + } else if p != nil && p.Pcond != nil { + fmt.Fprint(w, p.Pcond.Pc) + } else if a.Val != nil { + fmt.Fprint(w, a.Val.(*Prog).Pc) } else { fmt.Fprintf(w, "%d(PC)", a.Offset) } diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go index fb99c620ad..a530636373 100644 --- a/src/cmd/internal/obj/x86/asm6.go +++ b/src/cmd/internal/obj/x86/asm6.go @@ -1855,7 +1855,7 @@ func spadjop(ctxt *obj.Link, l, q obj.As) obj.As { // no standalone or macro-fused jump will straddle or end on a 32 byte boundary // by inserting NOPs before the 
jumps func isJump(p *obj.Prog) bool { - return p.To.Target() != nil || p.As == obj.AJMP || p.As == obj.ACALL || + return p.Pcond != nil || p.As == obj.AJMP || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO } @@ -1867,7 +1867,7 @@ func lookForJCC(p *obj.Prog) *obj.Prog { for q = p.Link; q != nil && (q.As == obj.APCDATA || q.As == obj.AFUNCDATA || q.As == obj.ANOP); q = q.Link { } - if q == nil || q.To.Target() == nil || p.As == obj.AJMP || p.As == obj.ACALL { + if q == nil || q.Pcond == nil || p.As == obj.AJMP || p.As == obj.ACALL { return nil } @@ -2051,8 +2051,8 @@ func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { } for p := s.Func.Text; p != nil; p = p.Link { - if p.To.Type == obj.TYPE_BRANCH && p.To.Target() == nil { - p.To.SetTarget(p) + if p.To.Type == obj.TYPE_BRANCH && p.Pcond == nil { + p.Pcond = p } if p.As == AADJSP { p.To.Type = obj.TYPE_REG @@ -2088,7 +2088,7 @@ func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { for p := s.Func.Text; p != nil; p = p.Link { count++ p.Back = branchShort // use short branches first time through - if q := p.To.Target(); q != nil && (q.Back&branchShort != 0) { + if q := p.Pcond; q != nil && (q.Back&branchShort != 0) { p.Back |= branchBackwards q.Back |= branchLoopHead } @@ -4886,7 +4886,7 @@ func (ab *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { // TODO: Check in input, preserve in brchain. // Fill in backward jump now. - q = p.To.Target() + q = p.Pcond if q == nil { ctxt.Diag("jmp/branch/loop without target") diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go index 18a6afcd77..016c247ff5 100644 --- a/src/cmd/internal/obj/x86/obj6.go +++ b/src/cmd/internal/obj/x86/obj6.go @@ -765,7 +765,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } // Set jne branch target. 
- jne.To.SetTarget(p) + jne.Pcond = p // CMPQ panic_argp(BX), DI p = obj.Appendp(p, newprog) @@ -783,7 +783,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p = obj.Appendp(p, newprog) p.As = AJNE p.To.Type = obj.TYPE_BRANCH - p.To.SetTarget(end) + p.Pcond = end // MOVQ SP, panic_argp(BX) p = obj.Appendp(p, newprog) @@ -801,7 +801,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p = obj.Appendp(p, newprog) p.As = obj.AJMP p.To.Type = obj.TYPE_BRANCH - p.To.SetTarget(end) + p.Pcond = end // Reset p for following code. p = end @@ -1144,12 +1144,12 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA jmp := obj.Appendp(pcdata, newprog) jmp.As = obj.AJMP jmp.To.Type = obj.TYPE_BRANCH - jmp.To.SetTarget(cursym.Func.Text.Link) + jmp.Pcond = cursym.Func.Text.Link jmp.Spadj = +framesize - jls.To.SetTarget(call) + jls.Pcond = call if q1 != nil { - q1.To.SetTarget(call) + q1.Pcond = call } return end -- cgit v1.2.3-54-g00ecf From 18ea6f597c031d5f3c5160217db72d80cb62f689 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Thu, 27 Aug 2020 21:30:24 -0400 Subject: testing: restore os.Exit(0) after every call to (*M).Run cmd/go.TestScript/test_main_twice demonstrates a program that invokes (*M).Run twice in a row. If we only restore os.Exit(0) in m.afterOnce, we will fail to restore it after the second run and fail the test process despite both runs passing. Updates #29062 Updates #23129 Change-Id: Id22ec68f1708e4583c8dda14a8ba0efae7178b85 Reviewed-on: https://go-review.googlesource.com/c/go/+/251262 Run-TryBot: Bryan C. 
Mills Reviewed-by: Ian Lance Taylor TryBot-Result: Gobot Gobot --- src/testing/testing.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/testing/testing.go b/src/testing/testing.go index d0334243f4..01743969ee 100644 --- a/src/testing/testing.go +++ b/src/testing/testing.go @@ -1533,11 +1533,15 @@ func (m *M) before() { // after runs after all testing. func (m *M) after() { m.afterOnce.Do(func() { - if *panicOnExit0 { - m.deps.SetPanicOnExit0(false) - } m.writeProfiles() }) + + // Restore PanicOnExit0 after every run, because we set it to true before + // every run. Otherwise, if m.Run is called multiple times the behavior of + // os.Exit(0) will not be restored after the second run. + if *panicOnExit0 { + m.deps.SetPanicOnExit0(false) + } } func (m *M) writeProfiles() { -- cgit v1.2.3-54-g00ecf From 3b6c812f3defa90bfd7dbddf24bc6135adc5a7eb Mon Sep 17 00:00:00 2001 From: Santiago De la Cruz <51337247+xhit@users.noreply.github.com> Date: Thu, 27 Aug 2020 04:00:59 +0000 Subject: doc: add linux/riscv64 valid combination Mention valid combination GOOS=linux and GOARCH=riscv64 in the "Installing Go from source" document. 
Updates #27532 Change-Id: I8c1406087132f5c82a2eee5dbcda95d53c64d263 GitHub-Last-Rev: ee74ff9517bd428b75400956d5b7f774ff800ae5 GitHub-Pull-Request: golang/go#41063 Reviewed-on: https://go-review.googlesource.com/c/go/+/250997 Reviewed-by: Tobias Klauser --- doc/install-source.html | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/install-source.html b/doc/install-source.html index f8cda1dc21..cbf4eac70b 100644 --- a/doc/install-source.html +++ b/doc/install-source.html @@ -600,6 +600,9 @@ The valid combinations of $GOOS and $GOARCH are: linux mips64le +linux riscv64 + + linux s390x -- cgit v1.2.3-54-g00ecf From bd6dfe9a3e381e6aae7aa11787ba70fcb77909ff Mon Sep 17 00:00:00 2001 From: surechen Date: Wed, 13 May 2020 17:03:36 +0800 Subject: math/big: add a comment for SetMantExp Change-Id: I9ff5d1767cf70648c2251268e5e815944a7cb371 Reviewed-on: https://go-review.googlesource.com/c/go/+/233737 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Robert Griesemer --- src/math/big/float.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/math/big/float.go b/src/math/big/float.go index da964eef3e..42050e2c39 100644 --- a/src/math/big/float.go +++ b/src/math/big/float.go @@ -322,10 +322,11 @@ func (z *Float) SetMantExp(mant *Float, exp int) *Float { mant.validate() } z.Copy(mant) - if z.form != finite { - return z + + if z.form == finite { + // 0 < |mant| < +Inf + z.setExpAndRound(int64(z.exp)+int64(exp), 0) } - z.setExpAndRound(int64(z.exp)+int64(exp), 0) return z } -- cgit v1.2.3-54-g00ecf From 5d0b35ca98f57b378e8b45839721d60c06603af2 Mon Sep 17 00:00:00 2001 From: Xiangdong Ji Date: Fri, 12 Jun 2020 16:27:08 +0000 Subject: cmd/asm: Always use go-style arrangement specifiers on ARM64 Fixing several error message and comment texts of the ARM64 assembler to use arrangement specifiers of Go's assembly style. 
Change-Id: Icdbb14fba7aaede40d57d0d754795b050366a1ab Reviewed-on: https://go-review.googlesource.com/c/go/+/237859 Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot Reviewed-by: Cherry Zhang --- src/cmd/asm/internal/asm/testdata/arm64enc.s | 4 ++-- src/cmd/internal/obj/arm64/asm7.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/cmd/asm/internal/asm/testdata/arm64enc.s b/src/cmd/asm/internal/asm/testdata/arm64enc.s index 56cf51c303..e802ee76f5 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64enc.s +++ b/src/cmd/asm/internal/asm/testdata/arm64enc.s @@ -591,7 +591,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 FMOVS R8, F15 // 0f01271e FMOVD F2, F9 // 4940601e FMOVS F4, F27 // 9b40201e - //TODO VFMOV $3.125, V8.2D // 28f5006f + //TODO VFMOV $3.125, V8.D2 // 28f5006f FMSUBS F13, F21, F13, F19 // b3d50d1f FMSUBD F11, F7, F15, F31 // ff9d4b1f //TODO VFMUL V9.S[2], F21, F19 // b39a895f @@ -648,7 +648,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 FSUBS F25, F23, F0 // e03a391e FSUBD F11, F13, F24 // b8396b1e //TODO SCVTFSS F30, F20 // d4db215e - //TODO VSCVTF V7.2S, V17.2S // f1d8210e + //TODO VSCVTF V7.S2, V17.S2 // f1d8210e SCVTFWS R3, F16 // 7000221e SCVTFWD R20, F4 // 8402621e SCVTFS R16, F12 // 0c02229e diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 7a5a8ff38c..bc27740469 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -4801,7 +4801,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { Q = 1 b = 15 } else { - c.ctxt.Diag("invalid arrangement, should be 8B or 16B: %v", p) + c.ctxt.Diag("invalid arrangement, should be B8 or B16: %v", p) break } -- cgit v1.2.3-54-g00ecf From 55cf84b0778c382967b46f3f2ffd402fba005a26 Mon Sep 17 00:00:00 2001 From: Xiangdong Ji Date: Thu, 18 Jun 2020 03:36:12 +0000 Subject: cmd/compile: Install testcases for flag constant Ops Flag constant Ops on arm and arm64 are under refactoring, this change adds a couple of testcases 
that verify the behavior of 'noov' branches. Updates #39505 Updates #38740 Updates #39303 Change-Id: I493344b52276900cd296c32da494d72932dfc9be Reviewed-on: https://go-review.googlesource.com/c/go/+/238677 Reviewed-by: Cherry Zhang Reviewed-by: Keith Randall Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot --- test/fixedbugs/issue39505.go | 31 +++++++ test/fixedbugs/issue39505b.go | 183 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 214 insertions(+) create mode 100644 test/fixedbugs/issue39505.go create mode 100644 test/fixedbugs/issue39505b.go diff --git a/test/fixedbugs/issue39505.go b/test/fixedbugs/issue39505.go new file mode 100644 index 0000000000..711b562867 --- /dev/null +++ b/test/fixedbugs/issue39505.go @@ -0,0 +1,31 @@ +// compile + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func f() { + if len([]int{})-1 < len([]int{}) { + } + + var st struct { + i int + } + g := func() string { + return "" + } + h := func(string) string { + return g() + g() + } + s, i := "", 0 + + st.i = len(s) + i = len(h(s[i+0:i+1])) + len(s[len(s)+1:i+1]) + s = s[(len(s[i+1:len(s)+1])+1):len(h(""))+1] + (s[i+1 : len([]int{})+i]) + i = 1 + len([]int{len([]string{s[i+len([]int{}) : len(s)+i]})}) + + var ch chan int + ch <- len(h("")) - len(s) +} diff --git a/test/fixedbugs/issue39505b.go b/test/fixedbugs/issue39505b.go new file mode 100644 index 0000000000..ecf1ab64f4 --- /dev/null +++ b/test/fixedbugs/issue39505b.go @@ -0,0 +1,183 @@ +// run + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func main() { + ff := []func(){lt_f1, lt_f2, lt_f3, lt_f4, lt_f5, lt_f6, lt_f7, lt_f8, lt_f9, + gt_f1, gt_f2, gt_f3, le_f1, le_f2, le_f3, ge_f1, ge_f2, ge_f3} + + for _, f := range ff { + f() + } +} + +func lt_f1() { + const c = 1 + var a = 0 + var v *int = &a + if *v-c < len([]int{}) { + } else { + panic("bad") + } +} + +func lt_f2() { + const c = 10 + var a = 0 + var v *int = &a + if *v+c < len([]int{}) { + panic("bad") + } +} + +func lt_f3() { + const c = -10 + var a = 0 + var v *int = &a + if *v|0xff+c < len([]int{}) { + panic("bad") + } +} + +func lt_f4() { + const c = 10 + var a = 0 + var v *int = &a + if *v|0x0f+c < len([]int{}) { + panic("bad") + } +} + +func lt_f5() { + const c int32 = 1 + var a int32 = 0 + var v *int32 = &a + if *v-c < int32(len([]int32{})) { + } else { + panic("bad") + } +} + +func lt_f6() { + const c int32 = 10 + var a int32 = 0 + var v *int32 = &a + if *v+c < int32(len([]int32{})) { + panic("bad") + } +} + +func lt_f7() { + const c int32 = -10 + var a int32 = 0 + var v *int32 = &a + if *v|0xff+c < int32(len([]int{})) { + panic("bad") + } +} + +func lt_f8() { + const c int32 = 10 + var a int32 = 0 + var v *int32 = &a + if *v|0x0f+c < int32(len([]int{})) { + panic("bad") + } +} + +func lt_f9() { + const c int32 = -10 + var a int32 = 0 + var v *int32 = &a + if *v|0x0a+c < int32(len([]int{})) { + panic("bad") + } +} + +func gt_f1() { + const c = 1 + var a = 0 + var v *int = &a + if len([]int{}) > *v-c { + } else { + panic("bad") + } +} + +func gt_f2() { + const c = 10 + var a = 0 + var v *int = &a + if len([]int{}) > *v|0x0f+c { + panic("bad") + } +} + +func gt_f3() { + const c int32 = 10 + var a int32 = 0 + var v *int32 = &a + if int32(len([]int{})) > *v|0x0f+c { + panic("bad") + } +} + +func le_f1() { + const c = -10 + var a = 0 + var v *int = &a + if *v|0xff+c <= len([]int{}) { + panic("bad") + } +} + +func le_f2() { + const c = 0xf + var a = 0 + var v *int = &a + if *v|0xf-c <= len([]int{}) { + } else { + 
panic("bad") + } +} + +func le_f3() { + const c int32 = -10 + var a int32 = 0 + var v *int32 = &a + if *v|0xff+c <= int32(len([]int{})) { + panic("bad") + } +} + +func ge_f1() { + const c = -10 + var a = 0 + var v *int = &a + if len([]int{}) >= *v|0xff+c { + panic("bad") + } +} + +func ge_f2() { + const c int32 = 10 + var a int32 = 0 + var v *int32 = &a + if int32(len([]int{})) >= *v|0x0f+c { + panic("bad") + } +} + +func ge_f3() { + const c = -10 + var a = 0 + var v *int = &a + if len([]int{}) >= *v|0x0a+c { + } else { + panic("bad") + } +} -- cgit v1.2.3-54-g00ecf From ae7b6a3b779c4d6de96f59efbfed0b899c3ff6df Mon Sep 17 00:00:00 2001 From: Xiangdong Ji Date: Sat, 22 Feb 2020 08:12:54 +0000 Subject: math/big: tune addVW/subVW performance on arm64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add an optimization for addVW and subVW over large-sized vectors, it switches from add/sub with carry to copy the rest of the vector when we are done with carries. Consistent performance improvement are observed on various arm64 machines. Add additional tests and benchmarks to increase the test coverage. TestFunVWExt: Testing with various types of input vector, using the result from go-version addVW/subVW as golden reference. BenchmarkAddVWext and BenchmarkSubVWext: Benchmarking using input vector having all 1s or all 0s, for evaluating the overhead of worst case. 1. Perf. 
comparison over randomly generated input vectors: Server 1: name old time/op new time/op delta AddVW/1 12.3ns ± 3% 12.0ns ± 0% -2.60% (p=0.001 n=10+8) AddVW/2 12.5ns ± 2% 12.3ns ± 0% -1.84% (p=0.001 n=10+8) AddVW/3 12.6ns ± 2% 12.3ns ± 0% -1.91% (p=0.009 n=10+10) AddVW/4 13.1ns ± 3% 12.7ns ± 0% -2.98% (p=0.006 n=10+8) AddVW/5 14.4ns ± 1% 13.9ns ± 0% -3.81% (p=0.000 n=10+10) AddVW/10 11.7ns ± 0% 11.7ns ± 0% ~ (all equal) AddVW/100 47.8ns ± 0% 29.9ns ± 2% -37.38% (p=0.000 n=10+9) AddVW/1000 446ns ± 0% 207ns ± 0% -53.59% (p=0.000 n=10+10) AddVW/10000 4.35µs ± 1% 2.92µs ± 0% -32.85% (p=0.000 n=10+10) AddVW/100000 43.6µs ± 0% 29.7µs ± 0% -31.92% (p=0.000 n=8+10) SubVW/1 12.6ns ± 0% 12.3ns ± 2% -2.22% (p=0.000 n=7+10) SubVW/2 12.7ns ± 0% 12.6ns ± 1% -0.39% (p=0.046 n=8+10) SubVW/3 12.7ns ± 1% 12.6ns ± 1% ~ (p=0.410 n=10+10) SubVW/4 13.3ns ± 3% 13.1ns ± 3% ~ (p=0.072 n=10+10) SubVW/5 14.2ns ± 0% 14.1ns ± 1% -0.63% (p=0.046 n=8+10) SubVW/10 11.7ns ± 0% 11.7ns ± 0% ~ (all equal) SubVW/100 47.8ns ± 0% 33.1ns ±19% -30.71% (p=0.000 n=10+10) SubVW/1000 446ns ± 0% 207ns ± 0% -53.59% (p=0.000 n=10+10) SubVW/10000 4.33µs ± 1% 2.92µs ± 0% -32.66% (p=0.000 n=10+6) SubVW/100000 43.4µs ± 0% 29.6µs ± 0% -31.90% (p=0.000 n=10+9) Server 2: name old time/op new time/op delta AddVW/1 5.49ns ± 0% 5.53ns ± 2% ~ (p=1.000 n=9+10) AddVW/2 5.96ns ± 2% 5.92ns ± 1% -0.69% (p=0.039 n=10+10) AddVW/3 6.72ns ± 0% 6.73ns ± 0% ~ (p=0.078 n=10+10) AddVW/4 7.07ns ± 0% 6.75ns ± 2% -4.55% (p=0.000 n=10+10) AddVW/5 8.14ns ± 0% 8.17ns ± 0% +0.46% (p=0.003 n=8+8) AddVW/10 10.0ns ± 0% 10.1ns ± 1% +0.70% (p=0.003 n=10+10) AddVW/100 43.0ns ± 0% 33.5ns ± 0% -22.09% (p=0.000 n=9+9) AddVW/1000 394ns ± 0% 278ns ± 0% -29.44% (p=0.000 n=10+10) AddVW/10000 4.18µs ± 0% 3.14µs ± 0% -24.81% (p=0.000 n=8+8) AddVW/100000 68.3µs ± 3% 62.1µs ± 5% -9.13% (p=0.000 n=10+10) SubVW/1 5.37ns ± 2% 5.42ns ± 1% ~ (p=0.990 n=10+10) SubVW/2 5.89ns ± 0% 5.92ns ± 1% +0.58% (p=0.000 n=8+10) SubVW/3 6.64ns ± 1% 6.82ns ± 3% +2.63% (p=0.000 
n=9+10) SubVW/4 7.17ns ± 0% 6.69ns ± 2% -6.74% (p=0.000 n=10+9) SubVW/5 8.22ns ± 0% 8.18ns ± 0% -0.46% (p=0.001 n=8+9) SubVW/10 10.0ns ± 1% 10.1ns ± 1% ~ (p=0.341 n=10+10) SubVW/100 43.0ns ± 0% 33.5ns ± 0% -22.09% (p=0.000 n=7+10) SubVW/1000 394ns ± 0% 278ns ± 0% -29.44% (p=0.000 n=10+10) SubVW/10000 4.18µs ± 0% 3.15µs ± 0% -24.62% (p=0.000 n=9+9) SubVW/100000 67.7µs ± 4% 62.4µs ± 2% -7.92% (p=0.000 n=10+10) 2. Perf. comparison over input vectors of all 1s or all 0s Server 1: name old time/op new time/op delta AddVWext/1 12.6ns ± 0% 12.0ns ± 0% -4.76% (p=0.000 n=6+10) AddVWext/2 12.7ns ± 0% 12.4ns ± 1% -2.52% (p=0.000 n=10+10) AddVWext/3 12.7ns ± 0% 12.4ns ± 0% -2.36% (p=0.000 n=9+7) AddVWext/4 13.2ns ± 4% 12.7ns ± 0% -3.71% (p=0.001 n=10+9) AddVWext/5 14.6ns ± 0% 13.9ns ± 0% -4.79% (p=0.000 n=10+8) AddVWext/10 11.7ns ± 0% 11.7ns ± 0% ~ (all equal) AddVWext/100 47.8ns ± 0% 47.4ns ± 0% -0.84% (p=0.000 n=10+10) AddVWext/1000 446ns ± 0% 399ns ± 0% -10.54% (p=0.000 n=10+10) AddVWext/10000 4.34µs ± 1% 3.90µs ± 0% -10.12% (p=0.000 n=10+10) AddVWext/100000 43.9µs ± 1% 39.4µs ± 0% -10.18% (p=0.000 n=10+10) SubVWext/1 12.6ns ± 0% 12.3ns ± 2% -2.70% (p=0.000 n=7+10) SubVWext/2 12.6ns ± 1% 12.6ns ± 2% ~ (p=0.234 n=10+10) SubVWext/3 12.7ns ± 0% 12.6ns ± 2% -0.71% (p=0.033 n=10+10) SubVWext/4 13.4ns ± 0% 13.1ns ± 3% -2.01% (p=0.006 n=8+10) SubVWext/5 14.2ns ± 0% 14.1ns ± 1% -0.85% (p=0.003 n=10+10) SubVWext/10 11.7ns ± 0% 11.7ns ± 0% ~ (all equal) SubVWext/100 47.8ns ± 0% 47.4ns ± 0% -0.84% (p=0.000 n=10+10) SubVWext/1000 446ns ± 0% 399ns ± 0% -10.54% (p=0.000 n=10+10) SubVWext/10000 4.33µs ± 1% 3.90µs ± 0% -10.02% (p=0.000 n=10+10) SubVWext/100000 43.5µs ± 0% 39.5µs ± 1% -9.16% (p=0.000 n=7+10) Server 2: name old time/op new time/op delta AddVWext/1 5.48ns ± 0% 5.43ns ± 1% -0.97% (p=0.000 n=9+9) AddVWext/2 5.99ns ± 2% 5.93ns ± 1% ~ (p=0.054 n=10+10) AddVWext/3 6.74ns ± 0% 6.79ns ± 1% +0.80% (p=0.000 n=9+10) AddVWext/4 7.18ns ± 0% 7.21ns ± 1% +0.36% (p=0.034 n=9+10) AddVWext/5 
7.93ns ± 3% 8.18ns ± 0% +3.18% (p=0.000 n=10+8) AddVWext/10 10.0ns ± 0% 10.1ns ± 1% +0.60% (p=0.011 n=10+10) AddVWext/100 43.0ns ± 0% 47.7ns ± 0% +10.93% (p=0.000 n=9+10) AddVWext/1000 394ns ± 0% 399ns ± 0% +1.27% (p=0.000 n=10+10) AddVWext/10000 4.18µs ± 0% 4.50µs ± 0% +7.73% (p=0.000 n=9+10) AddVWext/100000 67.6µs ± 2% 68.4µs ± 3% ~ (p=0.139 n=9+8) SubVWext/1 5.46ns ± 1% 5.43ns ± 0% -0.55% (p=0.002 n=9+9) SubVWext/2 5.89ns ± 0% 5.93ns ± 1% +0.68% (p=0.000 n=8+10) SubVWext/3 6.72ns ± 1% 6.79ns ± 1% +1.07% (p=0.000 n=10+10) SubVWext/4 6.98ns ± 1% 7.21ns ± 0% +3.25% (p=0.000 n=10+10) SubVWext/5 8.22ns ± 0% 7.99ns ± 3% -2.83% (p=0.000 n=8+10) SubVWext/10 10.0ns ± 1% 10.1ns ± 1% ~ (p=0.239 n=10+10) SubVWext/100 43.0ns ± 0% 47.7ns ± 0% +10.93% (p=0.000 n=8+10) SubVWext/1000 394ns ± 0% 399ns ± 0% +1.27% (p=0.000 n=10+10) SubVWext/10000 4.18µs ± 0% 4.51µs ± 0% +7.86% (p=0.000 n=8+8) SubVWext/100000 68.3µs ± 2% 68.0µs ± 3% ~ (p=0.515 n=10+8) Change-Id: I134a5194b8a2deaaebbaa2b771baf72846971d58 Reviewed-on: https://go-review.googlesource.com/c/go/+/229739 Reviewed-by: Cherry Zhang Reviewed-by: Robert Griesemer Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot --- src/math/big/arith_arm64.s | 113 +++++++++++++++++++++++++++++++++++++-------- src/math/big/arith_test.go | 96 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 189 insertions(+), 20 deletions(-) diff --git a/src/math/big/arith_arm64.s b/src/math/big/arith_arm64.s index 18e513e2c3..da6e408e19 100644 --- a/src/math/big/arith_arm64.s +++ b/src/math/big/arith_arm64.s @@ -109,13 +109,59 @@ done: MOVD R0, c+72(FP) RET +#define vwOneOp(instr, op1) \ + MOVD.P 8(R1), R4; \ + instr op1, R4; \ + MOVD.P R4, 8(R3); + +// handle the first 1~4 elements before starting iteration in addVW/subVW +#define vwPreIter(instr1, instr2, counter, target) \ + vwOneOp(instr1, R2); \ + SUB $1, counter; \ + CBZ counter, target; \ + vwOneOp(instr2, $0); \ + SUB $1, counter; \ + CBZ counter, target; \ + vwOneOp(instr2, $0); \ + SUB $1, 
counter; \ + CBZ counter, target; \ + vwOneOp(instr2, $0); + +// do one iteration of add or sub in addVW/subVW +#define vwOneIter(instr, counter, exit) \ + CBZ counter, exit; \ // careful not to touch the carry flag + LDP.P 32(R1), (R4, R5); \ + LDP -16(R1), (R6, R7); \ + instr $0, R4, R8; \ + instr $0, R5, R9; \ + instr $0, R6, R10; \ + instr $0, R7, R11; \ + STP.P (R8, R9), 32(R3); \ + STP (R10, R11), -16(R3); \ + SUB $4, counter; + +// do one iteration of copy in addVW/subVW +#define vwOneIterCopy(counter, exit) \ + CBZ counter, exit; \ + LDP.P 32(R1), (R4, R5); \ + LDP -16(R1), (R6, R7); \ + STP.P (R4, R5), 32(R3); \ + STP (R6, R7), -16(R3); \ + SUB $4, counter; // func addVW(z, x []Word, y Word) (c Word) +// The 'large' branch handles large 'z'. It checks the carry flag on every iteration +// and switches to copy if we are done with carries. The copying is skipped as well +// if 'x' and 'z' happen to share the same underlying storage. +// The overhead of the checking and branching is visible when 'z' are small (~5%), +// so set a threshold of 32, and remain the small-sized part entirely untouched. 
TEXT ·addVW(SB),NOSPLIT,$0 MOVD z+0(FP), R3 MOVD z_len+8(FP), R0 MOVD x+24(FP), R1 MOVD y+48(FP), R2 + CMP $32, R0 + BGE large // large-sized 'z' and 'x' CBZ R0, len0 // the length of z is 0 MOVD.P 8(R1), R4 ADDS R2, R4 // z[0] = x[0] + y, set carry @@ -135,29 +181,46 @@ two: // do it twice STP.P (R8, R9), 16(R3) SUB $2, R0 loop: // do four times per round - CBZ R0, len1 // careful not to touch the carry flag - LDP.P 32(R1), (R4, R5) - LDP -16(R1), (R6, R7) - ADCS $0, R4, R8 - ADCS $0, R5, R9 - ADCS $0, R6, R10 - ADCS $0, R7, R11 - STP.P (R8, R9), 32(R3) - STP (R10, R11), -16(R3) - SUB $4, R0 + vwOneIter(ADCS, R0, len1) B loop len1: CSET HS, R2 // extract carry flag len0: MOVD R2, c+56(FP) +done: RET +large: + AND $0x3, R0, R10 + AND $~0x3, R0 + // unrolling for the first 1~4 elements to avoid saving the carry + // flag in each step, adjust $R0 if we unrolled 4 elements + vwPreIter(ADDS, ADCS, R10, add4) + SUB $4, R0 +add4: + BCC copy + vwOneIter(ADCS, R0, len1) + B add4 +copy: + MOVD ZR, c+56(FP) + CMP R1, R3 + BEQ done +copy_4: // no carry flag, copy the rest + vwOneIterCopy(R0, done) + B copy_4 // func subVW(z, x []Word, y Word) (c Word) +// The 'large' branch handles large 'z'. It checks the carry flag on every iteration +// and switches to copy if we are done with carries. The copying is skipped as well +// if 'x' and 'z' happen to share the same underlying storage. +// The overhead of the checking and branching is visible when 'z' are small (~5%), +// so set a threshold of 32, and remain the small-sized part entirely untouched. 
TEXT ·subVW(SB),NOSPLIT,$0 MOVD z+0(FP), R3 MOVD z_len+8(FP), R0 MOVD x+24(FP), R1 MOVD y+48(FP), R2 + CMP $32, R0 + BGE large // large-sized 'z' and 'x' CBZ R0, len0 // the length of z is 0 MOVD.P 8(R1), R4 SUBS R2, R4 // z[0] = x[0] - y, set carry @@ -177,22 +240,32 @@ two: // do it twice STP.P (R8, R9), 16(R3) SUB $2, R0 loop: // do four times per round - CBZ R0, len1 // careful not to touch the carry flag - LDP.P 32(R1), (R4, R5) - LDP -16(R1), (R6, R7) - SBCS $0, R4, R8 - SBCS $0, R5, R9 - SBCS $0, R6, R10 - SBCS $0, R7, R11 - STP.P (R8, R9), 32(R3) - STP (R10, R11), -16(R3) - SUB $4, R0 + vwOneIter(SBCS, R0, len1) B loop len1: CSET LO, R2 // extract carry flag len0: MOVD R2, c+56(FP) +done: RET +large: + AND $0x3, R0, R10 + AND $~0x3, R0 + // unrolling for the first 1~4 elements to avoid saving the carry + // flag in each step, adjust $R0 if we unrolled 4 elements + vwPreIter(SUBS, SBCS, R10, sub4) + SUB $4, R0 +sub4: + BCS copy + vwOneIter(SBCS, R0, len1) + B sub4 +copy: + MOVD ZR, c+56(FP) + CMP R1, R3 + BEQ done +copy_4: // no carry flag, copy the rest + vwOneIterCopy(R0, done) + B copy_4 // func shlVU(z, x []Word, s uint) (c Word) // This implementation handles the shift operation from the high word to the low word, diff --git a/src/math/big/arith_test.go b/src/math/big/arith_test.go index e2b982c89c..fc205934c5 100644 --- a/src/math/big/arith_test.go +++ b/src/math/big/arith_test.go @@ -179,6 +179,23 @@ func testFunVW(t *testing.T, msg string, f funVW, a argVW) { } } +func testFunVWext(t *testing.T, msg string, f funVW, f_g funVW, a argVW) { + // using the result of addVW_g/subVW_g as golden + z_g := make(nat, len(a.z)) + c_g := f_g(z_g, a.x, a.y) + c := f(a.z, a.x, a.y) + + for i, zi := range a.z { + if zi != z_g[i] { + t.Errorf("%s\n\tgot z[%d] = %#x; want %#x", msg, i, zi, z_g[i]) + break + } + } + if c != c_g { + t.Errorf("%s\n\tgot c = %#x; want %#x", msg, c, c_g) + } +} + func makeFunVW(f func(z, x []Word, s uint) (c Word)) funVW { return func(z, x 
[]Word, s Word) (c Word) { return f(z, x, uint(s)) @@ -213,6 +230,49 @@ func TestFunVW(t *testing.T) { } } +// Construct a vector comprising the same word, usually '0' or 'maximum uint' +func makeWordVec(e Word, n int) []Word { + v := make([]Word, n) + for i := range v { + v[i] = e + } + return v +} + +// Extended testing to addVW and subVW using various kinds of input data. +// We utilize the results of addVW_g and subVW_g as golden reference to check +// correctness. +func TestFunVWExt(t *testing.T) { + // 32 is the current threshold that triggers an optimized version of + // calculation for large-sized vector, ensure we have sizes around it tested. + var vwSizes = []int{0, 1, 3, 4, 5, 8, 9, 23, 31, 32, 33, 34, 35, 36, 50, 120} + for _, n := range vwSizes { + // vector of random numbers, using the result of addVW_g/subVW_g as golden + x := rndV(n) + y := rndW() + z := make(nat, n) + arg := argVW{z, x, y, 0} + testFunVWext(t, "addVW, random inputs", addVW, addVW_g, arg) + testFunVWext(t, "subVW, random inputs", subVW, subVW_g, arg) + + // vector of random numbers, but make 'x' and 'z' share storage + arg = argVW{x, x, y, 0} + testFunVWext(t, "addVW, random inputs, sharing storage", addVW, addVW_g, arg) + testFunVWext(t, "subVW, random inputs, sharing storage", subVW, subVW_g, arg) + + // vector of maximum uint, to force carry flag set in each 'add' + y = ^Word(0) + x = makeWordVec(y, n) + arg = argVW{z, x, y, 0} + testFunVWext(t, "addVW, vector of max uint", addVW, addVW_g, arg) + + // vector of '0', to force carry flag set in each 'sub' + x = makeWordVec(0, n) + arg = argVW{z, x, 1, 0} + testFunVWext(t, "subVW, vector of zero", subVW, subVW_g, arg) + } +} + type argVU struct { d []Word // d is a Word slice, the input parameters x and z come from this array. l uint // l is the length of the input parameters x and z. 
@@ -299,6 +359,24 @@ func BenchmarkAddVW(b *testing.B) { } } +// Benchmarking addVW using vector of maximum uint to force carry flag set +func BenchmarkAddVWext(b *testing.B) { + for _, n := range benchSizes { + if isRaceBuilder && n > 1e3 { + continue + } + y := ^Word(0) + x := makeWordVec(y, n) + z := make([]Word, n) + b.Run(fmt.Sprint(n), func(b *testing.B) { + b.SetBytes(int64(n * _S)) + for i := 0; i < b.N; i++ { + addVW(z, x, y) + } + }) + } +} + func BenchmarkSubVW(b *testing.B) { for _, n := range benchSizes { if isRaceBuilder && n > 1e3 { @@ -316,6 +394,24 @@ func BenchmarkSubVW(b *testing.B) { } } +// Benchmarking subVW using vector of zero to force carry flag set +func BenchmarkSubVWext(b *testing.B) { + for _, n := range benchSizes { + if isRaceBuilder && n > 1e3 { + continue + } + x := makeWordVec(0, n) + y := Word(1) + z := make([]Word, n) + b.Run(fmt.Sprint(n), func(b *testing.B) { + b.SetBytes(int64(n * _S)) + for i := 0; i < b.N; i++ { + subVW(z, x, y) + } + }) + } +} + type funVWW func(z, x []Word, y, r Word) (c Word) type argVWW struct { z, x nat -- cgit v1.2.3-54-g00ecf From 42e09dc1ba1e820af44b2cbd4db0d60abb5559a2 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Sun, 12 Jul 2020 22:36:34 -0400 Subject: go/types: factor out usage of implicit type There was some duplication of logic interpreting the implicit type of an operand in assignableTo and convertUntyped. Factor out this logic to a new 'implicitType' function, which returns the implicit type of an untyped operand when used in a context where a target type is expected. I believe this resolves some comments about code duplication. There is other similar code in assignable, assignableTo, and convertUntypes, but I found it to to be sufficiently semantically distinct to not warrant factoring out. 
Change-Id: I199298a2e58fcf05344318fca0226b460c57867d Reviewed-on: https://go-review.googlesource.com/c/go/+/242084 Run-TryBot: Robert Findley TryBot-Result: Gobot Gobot Reviewed-by: Robert Griesemer --- src/go/types/api_test.go | 20 +++---- src/go/types/assignments.go | 4 +- src/go/types/expr.go | 125 ++++++++++++++++++++++++-------------------- src/go/types/operand.go | 43 ++++----------- 4 files changed, 88 insertions(+), 104 deletions(-) diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go index 6c129cd01b..75cebc9826 100644 --- a/src/go/types/api_test.go +++ b/src/go/types/api_test.go @@ -1243,11 +1243,9 @@ func TestConvertibleTo(t *testing.T) { {newDefined(new(Struct)), new(Struct), true}, {newDefined(Typ[Int]), new(Struct), false}, {Typ[UntypedInt], Typ[Int], true}, - // TODO (rFindley): the below behavior is undefined as non-constant untyped - // string values are not permitted by the spec. But we should consider - // changing this case to return 'true', to have more reasonable behavior in - // cases where the API is used for constant expressions. - {Typ[UntypedString], Typ[String], false}, + // Untyped string values are not permitted by the spec, so the below + // behavior is undefined. + {Typ[UntypedString], Typ[String], true}, } { if got := ConvertibleTo(test.v, test.t); got != test.want { t.Errorf("ConvertibleTo(%v, %v) = %t, want %t", test.v, test.t, got, test.want) @@ -1266,13 +1264,11 @@ func TestAssignableTo(t *testing.T) { {newDefined(new(Struct)), new(Struct), true}, {Typ[UntypedBool], Typ[Bool], true}, {Typ[UntypedString], Typ[Bool], false}, - // TODO (rFindley): the below behavior is undefined as AssignableTo is - // intended for non-constant values (and neither UntypedString or - // UntypedInt assignments arise during normal type checking). But as - // described in TestConvertibleTo above, we should consider changing this - // behavior. 
- {Typ[UntypedString], Typ[String], false}, - {Typ[UntypedInt], Typ[Int], false}, + // Neither untyped string nor untyped numeric assignments arise during + // normal type checking, so the below behavior is technically undefined by + // the spec. + {Typ[UntypedString], Typ[String], true}, + {Typ[UntypedInt], Typ[Int], true}, } { if got := AssignableTo(test.v, test.t); got != test.want { t.Errorf("AssignableTo(%v, %v) = %t, want %t", test.v, test.t, got, test.want) diff --git a/src/go/types/assignments.go b/src/go/types/assignments.go index 9697e504cd..4e8ec278fc 100644 --- a/src/go/types/assignments.go +++ b/src/go/types/assignments.go @@ -34,8 +34,8 @@ func (check *Checker) assignment(x *operand, T Type, context string) { // spec: "If an untyped constant is assigned to a variable of interface // type or the blank identifier, the constant is first converted to type // bool, rune, int, float64, complex128 or string respectively, depending - // on whether the value is a boolean, rune, integer, floating-point, complex, - // or string constant." + // on whether the value is a boolean, rune, integer, floating-point, + // complex, or string constant." if T == nil || IsInterface(T) { if T == nil && x.typ == Typ[UntypedNil] { check.errorf(x.pos(), "use of untyped nil in %s", context) diff --git a/src/go/types/expr.go b/src/go/types/expr.go index 8503a521f6..94d98f0fbb 100644 --- a/src/go/types/expr.go +++ b/src/go/types/expr.go @@ -506,8 +506,6 @@ func (check *Checker) canConvertUntyped(x *operand, target Type) error { if x.mode == invalid || isTyped(x.typ) || target == Typ[Invalid] { return nil } - // TODO(gri) Sloppy code - clean up. This function is central - // to assignment and expression checking. 
if isUntyped(target) { // both x and target are untyped @@ -519,80 +517,91 @@ func (check *Checker) canConvertUntyped(x *operand, target Type) error { check.updateExprType(x.expr, target, false) } } else if xkind != tkind { - goto Error + return check.newErrorf(x.pos(), "cannot convert %s to %s", x, target) } return nil } - // typed target + if t, ok := target.Underlying().(*Basic); ok && x.mode == constant_ { + if err := check.isRepresentable(x, t); err != nil { + return err + } + // Expression value may have been rounded - update if needed. + check.updateExprVal(x.expr, x.val) + } else { + newTarget := check.implicitType(x, target) + if newTarget == nil { + return check.newErrorf(x.pos(), "cannot convert %s to %s", x, target) + } + target = newTarget + } + x.typ = target + // Even though implicitType can return UntypedNil, this value is final: the + // predeclared identifier nil has no type. + check.updateExprType(x.expr, target, true) + return nil +} + +// implicitType returns the implicit type of x when used in a context where the +// target type is expected. If no such implicit conversion is possible, it +// returns nil. +func (check *Checker) implicitType(x *operand, target Type) Type { + assert(isUntyped(x.typ)) switch t := target.Underlying().(type) { case *Basic: - if x.mode == constant_ { - if err := check.isRepresentable(x, t); err != nil { - return err + assert(x.mode != constant_) + // Non-constant untyped values may appear as the + // result of comparisons (untyped bool), intermediate + // (delayed-checked) rhs operands of shifts, and as + // the value nil. + switch x.typ.(*Basic).kind { + case UntypedBool: + if !isBoolean(target) { + return nil } - // expression value may have been rounded - update if needed - check.updateExprVal(x.expr, x.val) - } else { - // Non-constant untyped values may appear as the - // result of comparisons (untyped bool), intermediate - // (delayed-checked) rhs operands of shifts, and as - // the value nil. 
- switch x.typ.(*Basic).kind { - case UntypedBool: - if !isBoolean(target) { - goto Error - } - case UntypedInt, UntypedRune, UntypedFloat, UntypedComplex: - if !isNumeric(target) { - goto Error - } - case UntypedString: - // Non-constant untyped string values are not - // permitted by the spec and should not occur. - unreachable() - case UntypedNil: - // Unsafe.Pointer is a basic type that includes nil. - if !hasNil(target) { - goto Error - } - default: - goto Error + case UntypedInt, UntypedRune, UntypedFloat, UntypedComplex: + if !isNumeric(target) { + return nil + } + case UntypedString: + // Non-constant untyped string values are not permitted by the spec and + // should not occur during normal typechecking passes, but this path is + // reachable via the AssignableTo API. + if !isString(target) { + return nil } + case UntypedNil: + // Unsafe.Pointer is a basic type that includes nil. + if !hasNil(target) { + return nil + } + default: + return nil } case *Interface: - // Update operand types to the default type rather then - // the target (interface) type: values must have concrete - // dynamic types. If the value is nil, keep it untyped - // (this is important for tools such as go vet which need - // the dynamic type for argument checking of say, print + // Values must have concrete dynamic types. 
If the value is nil, + // keep it untyped (this is important for tools such as go vet which + // need the dynamic type for argument checking of say, print // functions) if x.isNil() { - target = Typ[UntypedNil] - } else { - // cannot assign untyped values to non-empty interfaces - check.completeInterface(t) - if !t.Empty() { - goto Error - } - target = Default(x.typ) + return Typ[UntypedNil] + } + // cannot assign untyped values to non-empty interfaces + check.completeInterface(t) + if !t.Empty() { + return nil } + return Default(x.typ) case *Pointer, *Signature, *Slice, *Map, *Chan: if !x.isNil() { - goto Error + return nil } - // keep nil untyped - see comment for interfaces, above - target = Typ[UntypedNil] + // Keep nil untyped - see comment for interfaces, above. + return Typ[UntypedNil] default: - goto Error + return nil } - - x.typ = target - check.updateExprType(x.expr, target, true) // UntypedNils are final - return nil - -Error: - return check.newErrorf(x.pos(), "cannot convert %s to %s", x, target) + return target } func (check *Checker) comparison(x, y *operand, op token.Token) { diff --git a/src/go/types/operand.go b/src/go/types/operand.go index 80d11e2f21..6fbfe09627 100644 --- a/src/go/types/operand.go +++ b/src/go/types/operand.go @@ -205,15 +205,11 @@ func (x *operand) isNil() bool { return x.mode == value && x.typ == Typ[UntypedNil] } -// TODO(gri) The functions operand.assignableTo, checker.convertUntyped, -// checker.representable, and checker.assignment are -// overlapping in functionality. Need to simplify and clean up. - -// assignableTo reports whether x is assignable to a variable of type T. -// If the result is false and a non-nil reason is provided, it may be set -// to a more detailed explanation of the failure (result != ""). -// The check parameter may be nil if assignableTo is invoked through -// an exported API call, i.e., when all methods have been type-checked. 
+// assignableTo reports whether x is assignable to a variable of type T. If the +// result is false and a non-nil reason is provided, it may be set to a more +// detailed explanation of the failure (result != ""). The check parameter may +// be nil if assignableTo is invoked through an exported API call, i.e., when +// all methods have been type-checked. func (x *operand) assignableTo(check *Checker, T Type, reason *string) bool { if x.mode == invalid || T == Typ[Invalid] { return true // avoid spurious errors @@ -229,34 +225,17 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) bool { Vu := V.Underlying() Tu := T.Underlying() - // x is an untyped value representable by a value of type T - // TODO(gri) This is borrowing from checker.convertUntyped and - // checker.representable. Need to clean up. + // x is an untyped value representable by a value of type T. if isUntyped(Vu) { - switch t := Tu.(type) { - case *Basic: - if x.isNil() && t.kind == UnsafePointer { - return true - } - if x.mode == constant_ { - return representableConst(x.val, check, t, nil) - } - // The result of a comparison is an untyped boolean, - // but may not be a constant. - if Vb, _ := Vu.(*Basic); Vb != nil { - return Vb.kind == UntypedBool && isBoolean(Tu) - } - case *Interface: - check.completeInterface(t) - return x.isNil() || t.Empty() - case *Pointer, *Signature, *Slice, *Map, *Chan: - return x.isNil() + if t, ok := Tu.(*Basic); ok && x.mode == constant_ { + return representableConst(x.val, check, t, nil) } + return check.implicitType(x, Tu) != nil } // Vu is typed - // x's type V and T have identical underlying types - // and at least one of V or T is not a named type + // x's type V and T have identical underlying types and at least one of V or + // T is not a named type. 
if check.identical(Vu, Tu) && (!isNamed(V) || !isNamed(T)) { return true } -- cgit v1.2.3-54-g00ecf From 45e12e95e683d0720f8c93ba059a87b2b0e58e09 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Fri, 28 Aug 2020 12:10:31 -0400 Subject: go/types: add tests for conversion of non-constant untyped values This was fixed by CL 242084. Retroactively add some tests that would have failed before the fix. Also, remove some existing duplicate tests. Change-Id: I95f7a215d4a9651ded6d739f89c574f33f573c60 Reviewed-on: https://go-review.googlesource.com/c/go/+/251397 Run-TryBot: Robert Findley TryBot-Result: Gobot Gobot Reviewed-by: Robert Griesemer --- src/go/types/testdata/shifts.src | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/src/go/types/testdata/shifts.src b/src/go/types/testdata/shifts.src index ebc95ba4d7..c9a38ae169 100644 --- a/src/go/types/testdata/shifts.src +++ b/src/go/types/testdata/shifts.src @@ -193,14 +193,27 @@ func shifts6() { _ = float32(1.0 /* ERROR "must be integer" */ < Date: Fri, 28 Aug 2020 10:34:16 -0700 Subject: testing: run a Cleanup registered by a Cleanup Fixes #41085 Change-Id: Ieafc60cbc8e09f1935d38b1767b084d78dae5cb4 Reviewed-on: https://go-review.googlesource.com/c/go/+/251457 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. 
Mills --- src/testing/sub_test.go | 27 ++++++++++++++++++ src/testing/testing.go | 76 ++++++++++++++++++++++++++++++------------------- 2 files changed, 74 insertions(+), 29 deletions(-) diff --git a/src/testing/sub_test.go b/src/testing/sub_test.go index 8eb0084b1c..51fc0ccc39 100644 --- a/src/testing/sub_test.go +++ b/src/testing/sub_test.go @@ -928,3 +928,30 @@ func TestCleanupParallelSubtests(t *T) { t.Errorf("unexpected cleanup count; got %d want 1", ranCleanup) } } + +func TestNestedCleanup(t *T) { + ranCleanup := 0 + t.Run("test", func(t *T) { + t.Cleanup(func() { + if ranCleanup != 2 { + t.Errorf("unexpected cleanup count in first cleanup: got %d want 2", ranCleanup) + } + ranCleanup++ + }) + t.Cleanup(func() { + if ranCleanup != 0 { + t.Errorf("unexpected cleanup count in second cleanup: got %d want 0", ranCleanup) + } + ranCleanup++ + t.Cleanup(func() { + if ranCleanup != 1 { + t.Errorf("unexpected cleanup count in nested cleanup: got %d want 1", ranCleanup) + } + ranCleanup++ + }) + }) + }) + if ranCleanup != 3 { + t.Errorf("unexpected cleanup count: got %d want 3", ranCleanup) + } +} diff --git a/src/testing/testing.go b/src/testing/testing.go index 01743969ee..f4f0060523 100644 --- a/src/testing/testing.go +++ b/src/testing/testing.go @@ -403,7 +403,7 @@ type common struct { skipped bool // Test of benchmark has been skipped. done bool // Test is finished and all subtests have completed. helpers map[string]struct{} // functions to be skipped when writing file/line info - cleanup func() // optional function to be called at the end of the test + cleanups []func() // optional functions to be called at the end of the test cleanupName string // Name of the cleanup function. cleanupPc []uintptr // The stack trace at the point where Cleanup was called. @@ -855,28 +855,31 @@ func (c *common) Helper() { // subtests complete. Cleanup functions will be called in last added, // first called order. 
func (c *common) Cleanup(f func()) { - c.mu.Lock() - defer c.mu.Unlock() - oldCleanup := c.cleanup - oldCleanupPc := c.cleanupPc - c.cleanup = func() { - if oldCleanup != nil { - defer func() { - c.mu.Lock() - c.cleanupPc = oldCleanupPc - c.mu.Unlock() - oldCleanup() - }() - } + var pc [maxStackLen]uintptr + // Skip two extra frames to account for this function and runtime.Callers itself. + n := runtime.Callers(2, pc[:]) + cleanupPc := pc[:n] + + fn := func() { + defer func() { + c.mu.Lock() + defer c.mu.Unlock() + c.cleanupName = "" + c.cleanupPc = nil + }() + + name := callerName(0) c.mu.Lock() - c.cleanupName = callerName(0) + c.cleanupName = name + c.cleanupPc = cleanupPc c.mu.Unlock() + f() } - var pc [maxStackLen]uintptr - // Skip two extra frames to account for this function and runtime.Callers itself. - n := runtime.Callers(2, pc[:]) - c.cleanupPc = pc[:n] + + c.mu.Lock() + defer c.mu.Unlock() + c.cleanups = append(c.cleanups, fn) } var tempDirReplacer struct { @@ -934,22 +937,37 @@ const ( // If catchPanic is true, this will catch panics, and return the recovered // value if any. func (c *common) runCleanup(ph panicHandling) (panicVal interface{}) { - c.mu.Lock() - cleanup := c.cleanup - c.cleanup = nil - c.mu.Unlock() - if cleanup == nil { - return nil - } - if ph == recoverAndReturnPanic { defer func() { panicVal = recover() }() } - cleanup() - return nil + // Make sure that if a cleanup function panics, + // we still run the remaining cleanup functions. 
+ defer func() { + c.mu.Lock() + recur := len(c.cleanups) > 0 + c.mu.Unlock() + if recur { + c.runCleanup(normalPanic) + } + }() + + for { + var cleanup func() + c.mu.Lock() + if len(c.cleanups) > 0 { + last := len(c.cleanups) - 1 + cleanup = c.cleanups[last] + c.cleanups = c.cleanups[:last] + } + c.mu.Unlock() + if cleanup == nil { + return nil + } + cleanup() + } } // callerName gives the function name (qualified with a package path) -- cgit v1.2.3-54-g00ecf From a58a8d2e97d605f9f115a0e77ba09cd36bb82ba6 Mon Sep 17 00:00:00 2001 From: zdjones Date: Thu, 16 Jul 2020 12:33:25 +0100 Subject: test: document specifying individual test files as operands MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current command will run this entire set of tests, which takes a noticeable amount of time. Contributors may wish to run only a subset of these tests to save time/compute (e.g. when iterating on a CL that failed tests in that subset). Listing file(s) as operands to the command will run only those tests. Change-Id: I1874c43681a594190bc40b61cee0b8d321be73f8 Reviewed-on: https://go-review.googlesource.com/c/go/+/242997 Reviewed-by: Daniel Martí --- test/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/README.md b/test/README.md index 068dc1b22b..432d36b653 100644 --- a/test/README.md +++ b/test/README.md @@ -6,6 +6,10 @@ To run just these tests, execute: ../bin/go run run.go +To run just tests from specified files in this directory, execute: + + ../bin/go run run.go -- file1.go file2.go ... + Standard library tests should be written as regular Go tests in the appropriate package. The tool chain and runtime also have regular Go tests in their packages. 
-- cgit v1.2.3-54-g00ecf From c8ea03828b0645b1fd5725888e44873b75fcfbb6 Mon Sep 17 00:00:00 2001 From: Ariel Mashraki Date: Wed, 22 Apr 2020 22:17:56 +0300 Subject: text/template: add CommentNode to template parse tree MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #34652 Change-Id: Icf6e3eda593fed826736f34f95a9d66f5450cc98 Reviewed-on: https://go-review.googlesource.com/c/go/+/229398 Reviewed-by: Daniel Martí Run-TryBot: Daniel Martí TryBot-Result: Gobot Gobot --- api/next.txt | 14 ++++++++++++++ doc/go1.16.html | 10 ++++++++++ src/html/template/escape.go | 2 ++ src/html/template/template_test.go | 16 ++++++++++++++++ src/text/template/exec.go | 1 + src/text/template/parse/lex.go | 8 +++++++- src/text/template/parse/lex_test.go | 7 +++++-- src/text/template/parse/node.go | 33 +++++++++++++++++++++++++++++++++ src/text/template/parse/parse.go | 22 +++++++++++++++++++--- src/text/template/parse/parse_test.go | 25 +++++++++++++++++++++++++ 10 files changed, 132 insertions(+), 6 deletions(-) diff --git a/api/next.txt b/api/next.txt index fe7509bf82..076f39ec34 100644 --- a/api/next.txt +++ b/api/next.txt @@ -3,3 +3,17 @@ pkg unicode, var Chorasmian *RangeTable pkg unicode, var Dives_Akuru *RangeTable pkg unicode, var Khitan_Small_Script *RangeTable pkg unicode, var Yezidi *RangeTable +pkg text/template/parse, const NodeComment = 20 +pkg text/template/parse, const NodeComment NodeType +pkg text/template/parse, const ParseComments = 1 +pkg text/template/parse, const ParseComments Mode +pkg text/template/parse, method (*CommentNode) Copy() Node +pkg text/template/parse, method (*CommentNode) String() string +pkg text/template/parse, method (CommentNode) Position() Pos +pkg text/template/parse, method (CommentNode) Type() NodeType +pkg text/template/parse, type CommentNode struct +pkg text/template/parse, type CommentNode struct, Text string +pkg text/template/parse, type CommentNode struct, embedded NodeType +pkg 
text/template/parse, type CommentNode struct, embedded Pos +pkg text/template/parse, type Mode uint +pkg text/template/parse, type Tree struct, Mode Mode diff --git a/doc/go1.16.html b/doc/go1.16.html index 805234bdab..7738cbdada 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -121,6 +121,16 @@ Do not send CLs removing the interior tags from such phrases. with "use of closed network connection".

+ +

text/template/parse

+ +

+ A new CommentNode + was added to the parse tree. The Mode + field in the parse.Tree enables access to it. +

+ +

unicode

diff --git a/src/html/template/escape.go b/src/html/template/escape.go index f12dafa870..8739735cb7 100644 --- a/src/html/template/escape.go +++ b/src/html/template/escape.go @@ -124,6 +124,8 @@ func (e *escaper) escape(c context, n parse.Node) context { switch n := n.(type) { case *parse.ActionNode: return e.escapeAction(c, n) + case *parse.CommentNode: + return c case *parse.IfNode: return e.escapeBranch(c, &n.BranchNode, "if") case *parse.ListNode: diff --git a/src/html/template/template_test.go b/src/html/template/template_test.go index 86bd4db444..1f2c888bbe 100644 --- a/src/html/template/template_test.go +++ b/src/html/template/template_test.go @@ -10,6 +10,7 @@ import ( . "html/template" "strings" "testing" + "text/template/parse" ) func TestTemplateClone(t *testing.T) { @@ -160,6 +161,21 @@ func TestStringsInScriptsWithJsonContentTypeAreCorrectlyEscaped(t *testing.T) { } } +func TestSkipEscapeComments(t *testing.T) { + c := newTestCase(t) + tr := parse.New("root") + tr.Mode = parse.ParseComments + newT, err := tr.Parse("{{/* A comment */}}{{ 1 }}{{/* Another comment */}}", "", "", make(map[string]*parse.Tree)) + if err != nil { + t.Fatalf("Cannot parse template text: %v", err) + } + c.root, err = c.root.AddParseTree("root", newT) + if err != nil { + t.Fatalf("Cannot add parse tree to template: %v", err) + } + c.mustExecute(c.root, nil, "1") +} + type testCase struct { t *testing.T root *Template diff --git a/src/text/template/exec.go b/src/text/template/exec.go index ac3e741390..7ac5175006 100644 --- a/src/text/template/exec.go +++ b/src/text/template/exec.go @@ -256,6 +256,7 @@ func (s *state) walk(dot reflect.Value, node parse.Node) { if len(node.Pipe.Decl) == 0 { s.printValue(node, val) } + case *parse.CommentNode: case *parse.IfNode: s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList) case *parse.ListNode: diff --git a/src/text/template/parse/lex.go b/src/text/template/parse/lex.go index 30371f2862..e41373a002 100644 --- 
a/src/text/template/parse/lex.go +++ b/src/text/template/parse/lex.go @@ -41,6 +41,7 @@ const ( itemBool // boolean constant itemChar // printable ASCII character; grab bag for comma etc. itemCharConstant // character constant + itemComment // comment text itemComplex // complex constant (1+2i); imaginary is just a number itemAssign // equals ('=') introducing an assignment itemDeclare // colon-equals (':=') introducing a declaration @@ -112,6 +113,7 @@ type lexer struct { leftDelim string // start of action rightDelim string // end of action trimRightDelim string // end of action with trim marker + emitComment bool // emit itemComment tokens. pos Pos // current position in the input start Pos // start position of this item width Pos // width of last rune read from input @@ -203,7 +205,7 @@ func (l *lexer) drain() { } // lex creates a new scanner for the input string. -func lex(name, input, left, right string) *lexer { +func lex(name, input, left, right string, emitComment bool) *lexer { if left == "" { left = leftDelim } @@ -216,6 +218,7 @@ func lex(name, input, left, right string) *lexer { leftDelim: left, rightDelim: right, trimRightDelim: rightTrimMarker + right, + emitComment: emitComment, items: make(chan item), line: 1, startLine: 1, @@ -323,6 +326,9 @@ func lexComment(l *lexer) stateFn { if !delim { return l.errorf("comment ends before closing delimiter") } + if l.emitComment { + l.emit(itemComment) + } if trimSpace { l.pos += trimMarkerLen } diff --git a/src/text/template/parse/lex_test.go b/src/text/template/parse/lex_test.go index 563c4fc1cb..f6d5f285ed 100644 --- a/src/text/template/parse/lex_test.go +++ b/src/text/template/parse/lex_test.go @@ -15,6 +15,7 @@ var itemName = map[itemType]string{ itemBool: "bool", itemChar: "char", itemCharConstant: "charconst", + itemComment: "comment", itemComplex: "complex", itemDeclare: ":=", itemEOF: "EOF", @@ -90,6 +91,7 @@ var lexTests = []lexTest{ {"text", `now is the time`, []item{mkItem(itemText, "now is the 
time"), tEOF}}, {"text with comment", "hello-{{/* this is a comment */}}-world", []item{ mkItem(itemText, "hello-"), + mkItem(itemComment, "/* this is a comment */"), mkItem(itemText, "-world"), tEOF, }}, @@ -311,6 +313,7 @@ var lexTests = []lexTest{ }}, {"trimming spaces before and after comment", "hello- {{- /* hello */ -}} -world", []item{ mkItem(itemText, "hello-"), + mkItem(itemComment, "/* hello */"), mkItem(itemText, "-world"), tEOF, }}, @@ -389,7 +392,7 @@ var lexTests = []lexTest{ // collect gathers the emitted items into a slice. func collect(t *lexTest, left, right string) (items []item) { - l := lex(t.name, t.input, left, right) + l := lex(t.name, t.input, left, right, true) for { item := l.nextItem() items = append(items, item) @@ -529,7 +532,7 @@ func TestPos(t *testing.T) { func TestShutdown(t *testing.T) { // We need to duplicate template.Parse here to hold on to the lexer. const text = "erroneous{{define}}{{else}}1234" - lexer := lex("foo", text, "{{", "}}") + lexer := lex("foo", text, "{{", "}}", false) _, err := New("root").parseLexer(lexer) if err == nil { t.Fatalf("expected error") diff --git a/src/text/template/parse/node.go b/src/text/template/parse/node.go index dddc7752a2..177482f9b2 100644 --- a/src/text/template/parse/node.go +++ b/src/text/template/parse/node.go @@ -70,6 +70,7 @@ const ( NodeTemplate // A template invocation action. NodeVariable // A $ variable. NodeWith // A with action. + NodeComment // A comment. ) // Nodes. @@ -149,6 +150,38 @@ func (t *TextNode) Copy() Node { return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)} } +// CommentNode holds a comment. +type CommentNode struct { + NodeType + Pos + tr *Tree + Text string // Comment text. 
+} + +func (t *Tree) newComment(pos Pos, text string) *CommentNode { + return &CommentNode{tr: t, NodeType: NodeComment, Pos: pos, Text: text} +} + +func (c *CommentNode) String() string { + var sb strings.Builder + c.writeTo(&sb) + return sb.String() +} + +func (c *CommentNode) writeTo(sb *strings.Builder) { + sb.WriteString("{{") + sb.WriteString(c.Text) + sb.WriteString("}}") +} + +func (c *CommentNode) tree() *Tree { + return c.tr +} + +func (c *CommentNode) Copy() Node { + return &CommentNode{tr: c.tr, NodeType: NodeComment, Pos: c.Pos, Text: c.Text} +} + // PipeNode holds a pipeline with optional declaration type PipeNode struct { NodeType diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go index c9b80f4a24..496d8bfa1d 100644 --- a/src/text/template/parse/parse.go +++ b/src/text/template/parse/parse.go @@ -21,6 +21,7 @@ type Tree struct { Name string // name of the template represented by the tree. ParseName string // name of the top-level template during parsing, for error messages. Root *ListNode // top-level root of the tree. + Mode Mode // parsing mode. text string // text parsed to create the template (or its parent) // Parsing only; cleared after parse. funcs []map[string]interface{} @@ -29,8 +30,16 @@ type Tree struct { peekCount int vars []string // variables defined at the moment. treeSet map[string]*Tree + mode Mode } +// A mode value is a set of flags (or 0). Modes control parser behavior. +type Mode uint + +const ( + ParseComments Mode = 1 << iota // parse comments and add them to AST +) + // Copy returns a copy of the Tree. Any parsing state is discarded. 
func (t *Tree) Copy() *Tree { if t == nil { @@ -220,7 +229,8 @@ func (t *Tree) stopParse() { func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) { defer t.recover(&err) t.ParseName = t.Name - t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim), treeSet) + emitComment := t.Mode&ParseComments != 0 + t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim, emitComment), treeSet) t.text = text t.parse() t.add() @@ -240,12 +250,14 @@ func (t *Tree) add() { } } -// IsEmptyTree reports whether this tree (node) is empty of everything but space. +// IsEmptyTree reports whether this tree (node) is empty of everything but space or comments. func IsEmptyTree(n Node) bool { switch n := n.(type) { case nil: return true case *ActionNode: + case *CommentNode: + return true case *IfNode: case *ListNode: for _, node := range n.Nodes { @@ -276,6 +288,7 @@ func (t *Tree) parse() { if t.nextNonSpace().typ == itemDefine { newT := New("definition") // name will be updated once we know it. newT.text = t.text + newT.Mode = t.Mode newT.ParseName = t.ParseName newT.startParse(t.funcs, t.lex, t.treeSet) newT.parseDefinition() @@ -331,13 +344,15 @@ func (t *Tree) itemList() (list *ListNode, next Node) { } // textOrAction: -// text | action +// text | comment | action func (t *Tree) textOrAction() Node { switch token := t.nextNonSpace(); token.typ { case itemText: return t.newText(token.pos, token.val) case itemLeftDelim: return t.action() + case itemComment: + return t.newComment(token.pos, token.val) default: t.unexpected(token, "input") } @@ -539,6 +554,7 @@ func (t *Tree) blockControl() Node { block := New(name) // name will be updated once we know it. 
block.text = t.text + block.Mode = t.Mode block.ParseName = t.ParseName block.startParse(t.funcs, t.lex, t.treeSet) var end Node diff --git a/src/text/template/parse/parse_test.go b/src/text/template/parse/parse_test.go index 4e09a7852c..d9c13c5d95 100644 --- a/src/text/template/parse/parse_test.go +++ b/src/text/template/parse/parse_test.go @@ -348,6 +348,30 @@ func TestParseCopy(t *testing.T) { testParse(true, t) } +func TestParseWithComments(t *testing.T) { + textFormat = "%q" + defer func() { textFormat = "%s" }() + tests := [...]parseTest{ + {"comment", "{{/*\n\n\n*/}}", noError, "{{/*\n\n\n*/}}"}, + {"comment trim left", "x \r\n\t{{- /* hi */}}", noError, `"x"{{/* hi */}}`}, + {"comment trim right", "{{/* hi */ -}}\n\n\ty", noError, `{{/* hi */}}"y"`}, + {"comment trim left and right", "x \r\n\t{{- /* */ -}}\n\n\ty", noError, `"x"{{/* */}}"y"`}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + tr := New(test.name) + tr.Mode = ParseComments + tmpl, err := tr.Parse(test.input, "", "", make(map[string]*Tree)) + if err != nil { + t.Errorf("%q: expected error; got none", test.name) + } + if result := tmpl.Root.String(); result != test.result { + t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result) + } + }) + } +} + type isEmptyTest struct { name string input string @@ -358,6 +382,7 @@ var isEmptyTests = []isEmptyTest{ {"empty", ``, true}, {"nonempty", `hello`, false}, {"spaces only", " \t\n \t\n", true}, + {"comment only", "{{/* comment */}}", true}, {"definition", `{{define "x"}}something{{end}}`, true}, {"definitions and space", "{{define `x`}}something{{end}}\n\n{{define `y`}}something{{end}}\n\n", true}, {"definitions and text", "{{define `x`}}something{{end}}\nx\n{{define `y`}}something{{end}}\ny\n", false}, -- cgit v1.2.3-54-g00ecf From 49bae984955bd57302895beba2ecd8b9d5cd764c Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Thu, 27 Aug 2020 16:31:30 -0700 Subject: test: add test that 
gccgo failed to compile For #38125 Change-Id: Id6ef10d74f0f9dbad2851531e0fe019cd145cf7c Reviewed-on: https://go-review.googlesource.com/c/go/+/251168 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Cherry Zhang --- test/fixedbugs/issue38125.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 test/fixedbugs/issue38125.go diff --git a/test/fixedbugs/issue38125.go b/test/fixedbugs/issue38125.go new file mode 100644 index 0000000000..1207aecd39 --- /dev/null +++ b/test/fixedbugs/issue38125.go @@ -0,0 +1,22 @@ +// compile + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// gccgo mishandled embedded methods of type aliases. + +package p + +type I int + +func (I) M() {} + +type T = struct { + I +} + +func F() { + _ = T.M + _ = struct { I }.M +} -- cgit v1.2.3-54-g00ecf From 27a30186abc18a8fc22b8ab40e3ee9f29d81c1d4 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Sat, 29 Aug 2020 03:25:21 +0700 Subject: cmd/compile,runtime: skip zero'ing order array for select statements The order array was zero initialized by the compiler, but ends up being overwritten by the runtime anyway. So let the runtime takes full responsibility for initializing, save us one instruction per select. 
Fixes #40399 Change-Id: Iec1eca27ad7180d4fcb3cc9ef97348206b7fe6b8 Reviewed-on: https://go-review.googlesource.com/c/go/+/251517 Run-TryBot: Cuong Manh Le TryBot-Result: Gobot Gobot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/select.go | 4 +--- src/runtime/select.go | 1 + test/codegen/select.go | 20 ++++++++++++++++++++ 3 files changed, 22 insertions(+), 3 deletions(-) create mode 100644 test/codegen/select.go diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 3812a0e1fa..97e0424ce0 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -251,10 +251,8 @@ func walkselectcases(cases *Nodes) []*Node { r = typecheck(r, ctxStmt) init = append(init, r) + // No initialization for order; runtime.selectgo is responsible for that. order := temp(types.NewArray(types.Types[TUINT16], 2*int64(ncas))) - r = nod(OAS, order, nil) - r = typecheck(r, ctxStmt) - init = append(init, r) var pc0, pcs *Node if flag_race { diff --git a/src/runtime/select.go b/src/runtime/select.go index 80768b285b..a506747910 100644 --- a/src/runtime/select.go +++ b/src/runtime/select.go @@ -118,6 +118,7 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo scases := cas1[:ncases:ncases] pollorder := order1[:ncases:ncases] lockorder := order1[ncases:][:ncases:ncases] + // NOTE: pollorder/lockorder's underlying array was not zero-initialized by compiler. // Even when raceenabled is true, there might be select // statements in packages compiled without -race (e.g., diff --git a/test/codegen/select.go b/test/codegen/select.go new file mode 100644 index 0000000000..4426924b36 --- /dev/null +++ b/test/codegen/select.go @@ -0,0 +1,20 @@ +// asmcheck + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package codegen + +func f() { + ch1 := make(chan int) + ch2 := make(chan int) + for { + // amd64:-`MOVQ\t[$]0, ""..autotmp_3` + select { + case <-ch1: + case <-ch2: + default: + } + } +} -- cgit v1.2.3-54-g00ecf From 4fc3896e7933e31822caa50e024d4e139befc75f Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada <6warashi9@gmail.com> Date: Mon, 17 Aug 2020 20:37:51 +0900 Subject: database/sql: shortestIdleTimeLocked correct min comparison When zero or less, maxIdleTime and maxLifetime means unlimited. Helper function shortestIdleTimeLocked must not return the minimum of the two until both are verified to be greater then zero. Fixes #40841 Change-Id: I1130332baf4ad259cd90c10f4221f5def8510655 Reviewed-on: https://go-review.googlesource.com/c/go/+/248817 Reviewed-by: Daniel Theophanes --- src/database/sql/sql.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go index 0b85db66b9..e3580698fd 100644 --- a/src/database/sql/sql.go +++ b/src/database/sql/sql.go @@ -869,6 +869,13 @@ func (db *DB) maxIdleConnsLocked() int { } func (db *DB) shortestIdleTimeLocked() time.Duration { + if db.maxIdleTime <= 0 { + return db.maxLifetime + } + if db.maxLifetime <= 0 { + return db.maxIdleTime + } + min := db.maxIdleTime if min > db.maxLifetime { min = db.maxLifetime -- cgit v1.2.3-54-g00ecf From ba0fab3cb731fe9a383bd61c3480cccfe32bb1f4 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Sat, 29 Aug 2020 14:11:18 -0700 Subject: debug/elf: run relocation tests in parallel Also don't restart DWARF reading from beginning when we are testing multiple entries. Also reformat relocationTests slice to use indexed literals. 
Change-Id: Ia5f17214483394d0ef033be516df61f0bdc521b6 Reviewed-on: https://go-review.googlesource.com/c/go/+/251637 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Emmanuel Odeke --- src/debug/elf/file_test.go | 550 ++++++++++++++++++++++++--------------------- 1 file changed, 296 insertions(+), 254 deletions(-) diff --git a/src/debug/elf/file_test.go b/src/debug/elf/file_test.go index b13d13ebf0..4da580da5a 100644 --- a/src/debug/elf/file_test.go +++ b/src/debug/elf/file_test.go @@ -304,367 +304,409 @@ var relocationTests = []relocationTest{ { "testdata/go-relocation-test-gcc441-x86-64.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "GNU C 4.4.1", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "go-relocation-test.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: uint64(0x6), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C 4.4.1", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "go-relocation-test.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: uint64(0x6), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-gcc441-x86.obj", []relocationTestEntry{ - {0, 
&dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "GNU C 4.4.1", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "t.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: uint64(0x5), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C 4.4.1", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "t.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: uint64(0x5), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-gcc424-x86-64.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "GNU C 4.2.4 (Ubuntu 4.2.4-1ubuntu4)", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc424.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: uint64(0x6), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + { + entry: &dwarf.Entry{ + Offset: 
0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C 4.2.4 (Ubuntu 4.2.4-1ubuntu4)", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc424.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: uint64(0x6), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-gcc482-aarch64.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "GNU C 4.8.2 -g -fstack-protector", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc482.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: int64(0x24), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C 4.8.2 -g -fstack-protector", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc482.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: int64(0x24), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrStmtList, Val: 
int64(0), Class: dwarf.ClassLinePtr}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-gcc492-arm.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "GNU C 4.9.2 20141224 (prerelease) -march=armv7-a -mfloat-abi=hard -mfpu=vfpv3-d16 -mtls-dialect=gnu -g", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc492.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrCompDir, Val: "/root/go/src/debug/elf/testdata", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: int64(0x28), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C 4.9.2 20141224 (prerelease) -march=armv7-a -mfloat-abi=hard -mfpu=vfpv3-d16 -mtls-dialect=gnu -g", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc492.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/root/go/src/debug/elf/testdata", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: int64(0x28), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-clang-arm.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "Debian clang version 3.5.0-10 (tags/RELEASE_350/final) (based on LLVM 3.5.0)", Class: dwarf.ClassString}, - {Attr: 
dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrStmtList, Val: int64(0x0), Class: dwarf.ClassLinePtr}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: int64(48), Class: dwarf.ClassConstant}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "Debian clang version 3.5.0-10 (tags/RELEASE_350/final) (based on LLVM 3.5.0)", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrStmtList, Val: int64(0x0), Class: dwarf.ClassLinePtr}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: int64(48), Class: dwarf.ClassConstant}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-gcc5-ppc.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "GNU C11 5.0.0 20150116 (experimental) -Asystem=linux -Asystem=unix -Asystem=posix -g", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc5-ppc.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: int64(0x44), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: 
[]dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C11 5.0.0 20150116 (experimental) -Asystem=linux -Asystem=unix -Asystem=posix -g", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc5-ppc.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: int64(0x44), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-gcc482-ppc64le.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "GNU C 4.8.2 -Asystem=linux -Asystem=unix -Asystem=posix -msecure-plt -mtune=power8 -mcpu=power7 -gdwarf-2 -fstack-protector", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc482-ppc64le.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: uint64(0x24), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C 4.8.2 -Asystem=linux -Asystem=unix -Asystem=posix -msecure-plt -mtune=power8 -mcpu=power7 -gdwarf-2 -fstack-protector", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc482-ppc64le.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: 
dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: uint64(0x24), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-gcc492-mips64.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "GNU C 4.9.2 -meb -mabi=64 -march=mips3 -mtune=mips64 -mllsc -mno-shared -g", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: int64(100), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C 4.9.2 -meb -mabi=64 -march=mips3 -mtune=mips64 -mllsc -mno-shared -g", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: int64(100), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-gcc531-s390x.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "GNU C11 5.3.1 20160316 -march=zEC12 -m64 -mzarch -g 
-fstack-protector-strong", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: int64(58), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C11 5.3.1 20160316 -march=zEC12 -m64 -mzarch -g -fstack-protector-strong", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: int64(58), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-gcc620-sparc64.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "GNU C11 6.2.0 20160914 -mcpu=v9 -g -fstack-protector-strong", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: int64(0x2c), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + 
Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C11 6.2.0 20160914 -mcpu=v9 -g -fstack-protector-strong", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: int64(0x2c), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-gcc492-mipsle.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "GNU C 4.9.2 -mel -march=mips2 -mtune=mips32 -mllsc -mno-shared -mabi=32 -g", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: int64(0x58), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C 4.9.2 -mel -march=mips2 -mtune=mips32 -mllsc -mno-shared -mabi=32 -g", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: int64(0x58), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrStmtList, 
Val: int64(0), Class: dwarf.ClassLinePtr}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-gcc540-mips.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "GNU C11 5.4.0 20160609 -meb -mips32 -mtune=mips32r2 -mfpxx -mllsc -mno-shared -mabi=32 -g -gdwarf-2", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: uint64(0x5c), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C11 5.4.0 20160609 -meb -mips32 -mtune=mips32r2 -mfpxx -mllsc -mno-shared -mabi=32 -g -gdwarf-2", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: uint64(0x5c), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-gcc493-mips64le.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "GNU C 4.9.3 -mel -mabi=64 -mllsc -mno-shared -g -fstack-protector-strong", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: 
"hello.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: int64(100), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C 4.9.3 -mel -mabi=64 -mllsc -mno-shared -g -fstack-protector-strong", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: int64(100), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-gcc720-riscv64.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "GNU C11 7.2.0 -march=rv64imafdc -mabi=lp64d -g -gdwarf-2", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: uint64(0x2c), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C11 7.2.0 -march=rv64imafdc -mabi=lp64d -g -gdwarf-2", Class: dwarf.ClassString}, + {Attr: 
dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrHighpc, Val: uint64(0x2c), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + }, }, - }}, + }, }, }, { "testdata/go-relocation-test-clang-x86.obj", []relocationTestEntry{ - {0, &dwarf.Entry{ - Offset: 0xb, - Tag: dwarf.TagCompileUnit, - Children: true, - Field: []dwarf.Field{ - {Attr: dwarf.AttrProducer, Val: "clang version google3-trunk (trunk r209387)", Class: dwarf.ClassString}, - {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrName, Val: "go-relocation-test-clang.c", Class: dwarf.ClassString}, - {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, - {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "clang version google3-trunk (trunk r209387)", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "go-relocation-test-clang.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + }, }, - }}, + }, }, }, { "testdata/gcc-amd64-openbsd-debug-with-rela.obj", []relocationTestEntry{ - {203, &dwarf.Entry{ - Offset: 0xc62, - Tag: dwarf.TagMember, - Children: false, - Field: []dwarf.Field{ - {Attr: dwarf.AttrName, Val: "it_interval", Class: dwarf.ClassString}, - {Attr: dwarf.AttrDeclFile, Val: int64(7), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrDeclLine, Val: int64(236), Class: dwarf.ClassConstant}, - {Attr: 
dwarf.AttrType, Val: dwarf.Offset(0xb7f), Class: dwarf.ClassReference}, - {Attr: dwarf.AttrDataMemberLoc, Val: []byte{0x23, 0x0}, Class: dwarf.ClassExprLoc}, + { + entryNumber: 203, + entry: &dwarf.Entry{ + Offset: 0xc62, + Tag: dwarf.TagMember, + Children: false, + Field: []dwarf.Field{ + {Attr: dwarf.AttrName, Val: "it_interval", Class: dwarf.ClassString}, + {Attr: dwarf.AttrDeclFile, Val: int64(7), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrDeclLine, Val: int64(236), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrType, Val: dwarf.Offset(0xb7f), Class: dwarf.ClassReference}, + {Attr: dwarf.AttrDataMemberLoc, Val: []byte{0x23, 0x0}, Class: dwarf.ClassExprLoc}, + }, }, - }}, - {204, &dwarf.Entry{ - Offset: 0xc70, - Tag: dwarf.TagMember, - Children: false, - Field: []dwarf.Field{ - {Attr: dwarf.AttrName, Val: "it_value", Class: dwarf.ClassString}, - {Attr: dwarf.AttrDeclFile, Val: int64(7), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrDeclLine, Val: int64(237), Class: dwarf.ClassConstant}, - {Attr: dwarf.AttrType, Val: dwarf.Offset(0xb7f), Class: dwarf.ClassReference}, - {Attr: dwarf.AttrDataMemberLoc, Val: []byte{0x23, 0x10}, Class: dwarf.ClassExprLoc}, + }, + { + entryNumber: 204, + entry: &dwarf.Entry{ + Offset: 0xc70, + Tag: dwarf.TagMember, + Children: false, + Field: []dwarf.Field{ + {Attr: dwarf.AttrName, Val: "it_value", Class: dwarf.ClassString}, + {Attr: dwarf.AttrDeclFile, Val: int64(7), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrDeclLine, Val: int64(237), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrType, Val: dwarf.Offset(0xb7f), Class: dwarf.ClassReference}, + {Attr: dwarf.AttrDataMemberLoc, Val: []byte{0x23, 0x10}, Class: dwarf.ClassExprLoc}, + }, }, - }}, + }, }, }, } func TestDWARFRelocations(t *testing.T) { - for i, test := range relocationTests { - f, err := Open(test.file) - if err != nil { - t.Error(err) - continue - } - dwarf, err := f.DWARF() - if err != nil { - t.Error(err) - continue - } - for _, testEntry := range 
test.entries { - reader := dwarf.Reader() - for j := 0; j < testEntry.entryNumber; j++ { - entry, err := reader.Next() - if entry == nil || err != nil { - t.Errorf("Failed to skip to entry %d: %v", testEntry.entryNumber, err) - continue - } + for _, test := range relocationTests { + test := test + t.Run(test.file, func(t *testing.T) { + t.Parallel() + f, err := Open(test.file) + if err != nil { + t.Fatal(err) } - entry, err := reader.Next() + dwarf, err := f.DWARF() if err != nil { - t.Error(err) - continue + t.Fatal(err) } - if !reflect.DeepEqual(testEntry.entry, entry) { - t.Errorf("#%d/%d: mismatch: got:%#v want:%#v", i, testEntry.entryNumber, entry, testEntry.entry) - continue + reader := dwarf.Reader() + idx := 0 + for _, testEntry := range test.entries { + if testEntry.entryNumber < idx { + t.Fatalf("internal test error: %d < %d", testEntry.entryNumber, idx) + } + for ; idx < testEntry.entryNumber; idx++ { + entry, err := reader.Next() + if entry == nil || err != nil { + t.Fatalf("Failed to skip to entry %d: %v", testEntry.entryNumber, err) + } + } + entry, err := reader.Next() + idx++ + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEntry.entry, entry) { + t.Errorf("entry %d mismatch: got:%#v want:%#v", testEntry.entryNumber, entry, testEntry.entry) + } } - } + }) } } -- cgit v1.2.3-54-g00ecf From 9e70564f639252aade60369b51a121f3325e9d6c Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Fri, 28 Aug 2020 17:10:32 +0000 Subject: cmd/compile,cmd/asm: simplify recording of branch targets, take 2 We currently use two fields to store the targets of branches. Some phases use p.To.Val, some use p.Pcond. Rewrite so that every branch instruction uses p.To.Val. p.From.Val is also used in rare instances. Introduce a Pool link for use by arm/arm64, instead of repurposing Pcond. This is a cleanup CL in preparation for some stack frame CLs. 
Change-Id: If8239177e4b1ea2bccd0608eb39553d23210d405 Reviewed-on: https://go-review.googlesource.com/c/go/+/251437 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/amd64/ssa.go | 4 ++-- src/cmd/compile/internal/gc/gsubr.go | 2 +- src/cmd/compile/internal/gc/ssa.go | 2 +- src/cmd/compile/internal/s390x/ssa.go | 4 ++-- src/cmd/compile/internal/x86/ssa.go | 4 ++-- src/cmd/internal/obj/arm/asm5.go | 16 ++++++++-------- src/cmd/internal/obj/arm/obj5.go | 10 +++++----- src/cmd/internal/obj/arm64/asm7.go | 36 ++++++++++++++++++++--------------- src/cmd/internal/obj/arm64/obj7.go | 16 ++++++++-------- src/cmd/internal/obj/link.go | 17 +++++++++++++++-- src/cmd/internal/obj/mips/asm0.go | 24 +++++++++++------------ src/cmd/internal/obj/mips/obj0.go | 14 +++++++------- src/cmd/internal/obj/pass.go | 17 +++++++---------- src/cmd/internal/obj/ppc64/asm9.go | 18 +++++++++--------- src/cmd/internal/obj/ppc64/obj9.go | 12 ++++++------ src/cmd/internal/obj/riscv/obj.go | 29 ++++++++++++++-------------- src/cmd/internal/obj/s390x/asmz.go | 18 +++++++++--------- src/cmd/internal/obj/s390x/objz.go | 10 +++++----- src/cmd/internal/obj/util.go | 6 ++---- src/cmd/internal/obj/x86/asm6.go | 12 ++++++------ src/cmd/internal/obj/x86/obj6.go | 12 ++++++------ 21 files changed, 149 insertions(+), 134 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 9d8a0920b3..4ac877986c 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -319,8 +319,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // TODO(khr): issue only the -1 fixup code we need. // For instance, if only the quotient is used, no point in zeroing the remainder. 
- j1.To.Val = n1 - j2.To.Val = s.Pc() + j1.To.SetTarget(n1) + j2.To.SetTarget(s.Pc()) } case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU: diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index 15a84a8a43..480d411f49 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -342,6 +342,6 @@ func Patch(p *obj.Prog, to *obj.Prog) { if p.To.Type != obj.TYPE_BRANCH { Fatalf("patch: not a branch") } - p.To.Val = to + p.To.SetTarget(to) p.To.Offset = to.Pc } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 104dd403ea..52083d999e 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -6182,7 +6182,7 @@ func genssa(f *ssa.Func, pp *Progs) { // Resolve branches, and relax DefaultStmt into NotStmt for _, br := range s.Branches { - br.P.To.Val = s.bstart[br.B.ID] + br.P.To.SetTarget(s.bstart[br.B.ID]) if br.P.Pos.IsStmt() != src.PosIsStmt { br.P.Pos = br.P.Pos.WithNotStmt() } else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt { diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go index 4cf4b70a32..00d253c95a 100644 --- a/src/cmd/compile/internal/s390x/ssa.go +++ b/src/cmd/compile/internal/s390x/ssa.go @@ -338,8 +338,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { n.To.Reg = dividend } - j.To.Val = n - j2.To.Val = s.Pc() + j.To.SetTarget(n) + j2.To.SetTarget(s.Pc()) } case ssa.OpS390XADDconst, ssa.OpS390XADDWconst: opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt) diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go index 2de978c28a..c21ac32297 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -261,8 +261,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { n.To.Reg = x86.REG_DX } 
- j.To.Val = n - j2.To.Val = s.Pc() + j.To.SetTarget(n) + j2.To.SetTarget(s.Pc()) } case ssa.Op386HMULL, ssa.Op386HMULLU: diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go index 7b7e42ee2e..269a4223d5 100644 --- a/src/cmd/internal/obj/arm/asm5.go +++ b/src/cmd/internal/obj/arm/asm5.go @@ -644,7 +644,7 @@ func (c *ctxt5) flushpool(p *obj.Prog, skip int, force int) bool { q := c.newprog() q.As = AB q.To.Type = obj.TYPE_BRANCH - q.Pcond = p.Link + q.To.SetTarget(p.Link) q.Link = c.blitrl q.Pos = p.Pos c.blitrl = q @@ -705,7 +705,7 @@ func (c *ctxt5) addpool(p *obj.Prog, a *obj.Addr) { if t.Rel == nil { for q := c.blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */ if q.Rel == nil && q.To == t.To { - p.Pcond = q + p.Pool = q return } } @@ -724,8 +724,8 @@ func (c *ctxt5) addpool(p *obj.Prog, a *obj.Addr) { c.elitrl = q c.pool.size += 4 - // Store the link to the pool entry in Pcond. - p.Pcond = q + // Store the link to the pool entry in Pool. + p.Pool = q } func (c *ctxt5) regoff(a *obj.Addr) int32 { @@ -1584,8 +1584,8 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) { break } - if p.Pcond != nil { - v = int32((p.Pcond.Pc - c.pc) - 8) + if p.To.Target() != nil { + v = int32((p.To.Target().Pc - c.pc) - 8) } o1 |= (uint32(v) >> 2) & 0xffffff @@ -3023,7 +3023,7 @@ func (c *ctxt5) omvr(p *obj.Prog, a *obj.Addr, dr int) uint32 { func (c *ctxt5) omvl(p *obj.Prog, a *obj.Addr, dr int) uint32 { var o1 uint32 - if p.Pcond == nil { + if p.Pool == nil { c.aclass(a) v := immrot(^uint32(c.instoffset)) if v == 0 { @@ -3035,7 +3035,7 @@ func (c *ctxt5) omvl(p *obj.Prog, a *obj.Addr, dr int) uint32 { o1 |= uint32(v) o1 |= (uint32(dr) & 15) << 12 } else { - v := int32(p.Pcond.Pc - p.Pc - 8) + v := int32(p.Pool.Pc - p.Pc - 8) o1 = c.olr(v, REGPC, dr, int(p.Scond)&C_SCOND) } diff --git a/src/cmd/internal/obj/arm/obj5.go b/src/cmd/internal/obj/arm/obj5.go index 86831f2b44..4d9187b530 100644 --- a/src/cmd/internal/obj/arm/obj5.go +++ 
b/src/cmd/internal/obj/arm/obj5.go @@ -406,7 +406,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { mov.To.Reg = REG_R2 // B.NE branch target is MOVW above - bne.Pcond = mov + bne.To.SetTarget(mov) // ADD $(autosize+4), R13, R3 p = obj.Appendp(mov, newprog) @@ -428,7 +428,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p = obj.Appendp(p, newprog) p.As = ABNE p.To.Type = obj.TYPE_BRANCH - p.Pcond = end + p.To.SetTarget(end) // ADD $4, R13, R4 p = obj.Appendp(p, newprog) @@ -452,7 +452,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p = obj.Appendp(p, newprog) p.As = AB p.To.Type = obj.TYPE_BRANCH - p.Pcond = end + p.To.SetTarget(end) // reset for subsequent passes p = end @@ -741,7 +741,7 @@ func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { movw.To.Type = obj.TYPE_REG movw.To.Reg = REG_R3 - bls.Pcond = movw + bls.To.SetTarget(movw) // BL runtime.morestack call := obj.Appendp(movw, c.newprog) @@ -762,7 +762,7 @@ func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { b := obj.Appendp(pcdata, c.newprog) b.As = obj.AJMP b.To.Type = obj.TYPE_BRANCH - b.Pcond = c.cursym.Func.Text.Link + b.To.SetTarget(c.cursym.Func.Text.Link) b.Spadj = +framesize return end diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index bc27740469..65f7898332 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -977,8 +977,8 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { o = c.oplook(p) /* very large branches */ - if (o.type_ == 7 || o.type_ == 39 || o.type_ == 40) && p.Pcond != nil { // 7: BEQ and like, 39: CBZ and like, 40: TBZ and like - otxt := p.Pcond.Pc - pc + if (o.type_ == 7 || o.type_ == 39 || o.type_ == 40) && p.To.Target() != nil { // 7: BEQ and like, 39: CBZ and like, 40: TBZ and like + otxt := p.To.Target().Pc - pc var toofar bool switch o.type_ { case 7, 39: 
// branch instruction encodes 19 bits @@ -992,14 +992,14 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.Link = q q.As = AB q.To.Type = obj.TYPE_BRANCH - q.Pcond = p.Pcond - p.Pcond = q + q.To.SetTarget(p.To.Target()) + p.To.SetTarget(q) q = c.newprog() q.Link = p.Link p.Link = q q.As = AB q.To.Type = obj.TYPE_BRANCH - q.Pcond = q.Link.Link + q.To.SetTarget(q.Link.Link) bflag = 1 } } @@ -1123,7 +1123,7 @@ func (c *ctxt7) flushpool(p *obj.Prog, skip int) { q := c.newprog() q.As = AB q.To.Type = obj.TYPE_BRANCH - q.Pcond = p.Link + q.To.SetTarget(p.Link) q.Link = c.blitrl q.Pos = p.Pos c.blitrl = q @@ -1249,7 +1249,7 @@ func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) { for q := c.blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */ if q.To == t.To { - p.Pcond = q + p.Pool = q return } } @@ -1266,7 +1266,7 @@ func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) { c.elitrl = q c.pool.size = -c.pool.size & (funcAlign - 1) c.pool.size += uint32(sz) - p.Pcond = q + p.Pool = q } func (c *ctxt7) regoff(a *obj.Addr) uint32 { @@ -6042,15 +6042,21 @@ func (c *ctxt7) opimm(p *obj.Prog, a obj.As) uint32 { func (c *ctxt7) brdist(p *obj.Prog, preshift int, flen int, shift int) int64 { v := int64(0) t := int64(0) - if p.Pcond != nil { - v = (p.Pcond.Pc >> uint(preshift)) - (c.pc >> uint(preshift)) + q := p.To.Target() + if q == nil { + // TODO: don't use brdist for this case, as it isn't a branch. + // (Calls from omovlit, and maybe adr/adrp opcodes as well.) 
+ q = p.Pool + } + if q != nil { + v = (q.Pc >> uint(preshift)) - (c.pc >> uint(preshift)) if (v & ((1 << uint(shift)) - 1)) != 0 { c.ctxt.Diag("misaligned label\n%v", p) } v >>= uint(shift) t = int64(1) << uint(flen-1) if v < -t || v >= t { - c.ctxt.Diag("branch too far %#x vs %#x [%p]\n%v\n%v", v, t, c.blitrl, p, p.Pcond) + c.ctxt.Diag("branch too far %#x vs %#x [%p]\n%v\n%v", v, t, c.blitrl, p, q) panic("branch too far") } } @@ -6526,7 +6532,7 @@ func (c *ctxt7) oaddi(p *obj.Prog, o1 int32, v int32, r int, rt int) uint32 { */ func (c *ctxt7) omovlit(as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32 { var o1 int32 - if p.Pcond == nil { /* not in literal pool */ + if p.Pool == nil { /* not in literal pool */ c.aclass(a) c.ctxt.Logf("omovlit add %d (%#x)\n", c.instoffset, uint64(c.instoffset)) @@ -6552,11 +6558,11 @@ func (c *ctxt7) omovlit(as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32 { w = 1 /* 64-bit SIMD/FP */ case AMOVD: - if p.Pcond.As == ADWORD { + if p.Pool.As == ADWORD { w = 1 /* 64-bit */ - } else if p.Pcond.To.Offset < 0 { + } else if p.Pool.To.Offset < 0 { w = 2 /* 32-bit, sign-extended to 64-bit */ - } else if p.Pcond.To.Offset >= 0 { + } else if p.Pool.To.Offset >= 0 { w = 0 /* 32-bit, zero-extended to 64-bit */ } else { c.ctxt.Diag("invalid operand %v in %v", a, p) diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go index f54429fabe..56da854f16 100644 --- a/src/cmd/internal/obj/arm64/obj7.go +++ b/src/cmd/internal/obj/arm64/obj7.go @@ -187,9 +187,9 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { movlr.To.Type = obj.TYPE_REG movlr.To.Reg = REG_R3 if q != nil { - q.Pcond = movlr + q.To.SetTarget(movlr) } - bls.Pcond = movlr + bls.To.SetTarget(movlr) debug := movlr if false { @@ -220,7 +220,7 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { jmp := obj.Appendp(pcdata, c.newprog) jmp.As = AB jmp.To.Type = obj.TYPE_BRANCH - jmp.Pcond = c.cursym.Func.Text.Link + 
jmp.To.SetTarget(c.cursym.Func.Text.Link) jmp.Spadj = +framesize return end @@ -697,7 +697,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { mov.To.Reg = REG_R2 // CBNZ branches to the MOV above - cbnz.Pcond = mov + cbnz.To.SetTarget(mov) // ADD $(autosize+8), SP, R3 q = obj.Appendp(mov, c.newprog) @@ -719,7 +719,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, c.newprog) q.As = ABNE q.To.Type = obj.TYPE_BRANCH - q.Pcond = end + q.To.SetTarget(end) // ADD $8, SP, R4 q = obj.Appendp(q, c.newprog) @@ -743,7 +743,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, c.newprog) q.As = AB q.To.Type = obj.TYPE_BRANCH - q.Pcond = end + q.To.SetTarget(end) } case obj.ARET: @@ -913,7 +913,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q5.Reg = REGSP q5.To.Type = obj.TYPE_REG q5.To.Reg = REGFP - q1.Pcond = q5 + q1.From.SetTarget(q5) p = q5 } @@ -966,7 +966,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q5.Reg = REGSP q5.To.Type = obj.TYPE_REG q5.To.Reg = REGFP - q1.Pcond = q5 + q1.From.SetTarget(q5) p = q5 } } diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 1fc90db864..1d4217b5f5 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -237,6 +237,19 @@ const ( TYPE_REGLIST ) +func (a *Addr) Target() *Prog { + if a.Type == TYPE_BRANCH && a.Val != nil { + return a.Val.(*Prog) + } + return nil +} +func (a *Addr) SetTarget(t *Prog) { + if a.Type != TYPE_BRANCH { + panic("setting branch target when type is not TYPE_BRANCH") + } + a.Val = t +} + // Prog describes a single machine instruction. // // The general instruction form is: @@ -255,7 +268,7 @@ const ( // to avoid too much changes in a single swing. // (1) scheme is enough to express any kind of operand combination. 
// -// Jump instructions use the Pcond field to point to the target instruction, +// Jump instructions use the To.Val field to point to the target *Prog, // which must be in the same linked list as the jump instruction. // // The Progs for a given function are arranged in a list linked through the Link field. @@ -274,7 +287,7 @@ type Prog struct { From Addr // first source operand RestArgs []Addr // can pack any operands that not fit into {Prog.From, Prog.To} To Addr // destination operand (second is RegTo2 below) - Pcond *Prog // target of conditional jump + Pool *Prog // constant pool entry, for arm,arm64 back ends Forwd *Prog // for x86 back end Rel *Prog // for x86, arm back ends Pc int64 // for back ends or assembler: virtual or actual program counter, depending on phase diff --git a/src/cmd/internal/obj/mips/asm0.go b/src/cmd/internal/obj/mips/asm0.go index faa827da9f..6107974745 100644 --- a/src/cmd/internal/obj/mips/asm0.go +++ b/src/cmd/internal/obj/mips/asm0.go @@ -460,8 +460,8 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { o = c.oplook(p) // very large conditional branches - if o.type_ == 6 && p.Pcond != nil { - otxt = p.Pcond.Pc - pc + if o.type_ == 6 && p.To.Target() != nil { + otxt = p.To.Target().Pc - pc if otxt < -(1<<17)+10 || otxt >= (1<<17)-10 { q = c.newprog() q.Link = p.Link @@ -469,15 +469,15 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.As = AJMP q.Pos = p.Pos q.To.Type = obj.TYPE_BRANCH - q.Pcond = p.Pcond - p.Pcond = q + q.To.SetTarget(p.To.Target()) + p.To.SetTarget(q) q = c.newprog() q.Link = p.Link p.Link = q q.As = AJMP q.Pos = p.Pos q.To.Type = obj.TYPE_BRANCH - q.Pcond = q.Link.Link + q.To.SetTarget(q.Link.Link) c.addnop(p.Link) c.addnop(p) @@ -1230,10 +1230,10 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { case 6: /* beq r1,[r2],sbra */ v := int32(0) - if p.Pcond == nil { + if p.To.Target() == nil { v = int32(-4) >> 2 } else { - v = int32(p.Pcond.Pc-p.Pc-4) >> 2 
+ v = int32(p.To.Target().Pc-p.Pc-4) >> 2 } if (v<<16)>>16 != v { c.ctxt.Diag("short branch too far\n%v", p) @@ -1285,25 +1285,25 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { if c.aclass(&p.To) == C_SBRA && p.To.Sym == nil && p.As == AJMP { // use PC-relative branch for short branches // BEQ R0, R0, sbra - if p.Pcond == nil { + if p.To.Target() == nil { v = int32(-4) >> 2 } else { - v = int32(p.Pcond.Pc-p.Pc-4) >> 2 + v = int32(p.To.Target().Pc-p.Pc-4) >> 2 } if (v<<16)>>16 == v { o1 = OP_IRR(c.opirr(ABEQ), uint32(v), uint32(REGZERO), uint32(REGZERO)) break } } - if p.Pcond == nil { + if p.To.Target() == nil { v = int32(p.Pc) >> 2 } else { - v = int32(p.Pcond.Pc) >> 2 + v = int32(p.To.Target().Pc) >> 2 } o1 = OP_JMP(c.opirr(p.As), uint32(v)) if p.To.Sym == nil { p.To.Sym = c.cursym.Func.Text.From.Sym - p.To.Offset = p.Pcond.Pc + p.To.Offset = p.To.Target().Pc } rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc) diff --git a/src/cmd/internal/obj/mips/obj0.go b/src/cmd/internal/obj/mips/obj0.go index 77cad979a6..f19facc00c 100644 --- a/src/cmd/internal/obj/mips/obj0.go +++ b/src/cmd/internal/obj/mips/obj0.go @@ -227,11 +227,11 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } else { p.Mark |= BRANCH } - q1 := p.Pcond + q1 := p.To.Target() if q1 != nil { for q1.As == obj.ANOP { q1 = q1.Link - p.Pcond = q1 + p.To.SetTarget(q1) } if q1.Mark&LEAF == 0 { @@ -424,8 +424,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, newprog) q.As = obj.ANOP - p1.Pcond = q - p2.Pcond = q + p1.To.SetTarget(q) + p2.To.SetTarget(q) } case ARET: @@ -778,7 +778,7 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.To.Type = obj.TYPE_REG p.To.Reg = REG_R3 if q != nil { - q.Pcond = p + q.To.SetTarget(p) p.Mark |= LABEL } @@ -805,14 +805,14 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.As = AJMP p.To.Type = obj.TYPE_BRANCH - p.Pcond = 
c.cursym.Func.Text.Link + p.To.SetTarget(c.cursym.Func.Text.Link) p.Mark |= BRANCH // placeholder for q1's jump target p = obj.Appendp(p, c.newprog) p.As = obj.ANOP // zero-width place holder - q1.Pcond = p + q1.To.SetTarget(p) return p } diff --git a/src/cmd/internal/obj/pass.go b/src/cmd/internal/obj/pass.go index 4f156d969b..09d520b4e9 100644 --- a/src/cmd/internal/obj/pass.go +++ b/src/cmd/internal/obj/pass.go @@ -36,8 +36,8 @@ package obj // In the case of an infinite loop, brloop returns nil. func brloop(p *Prog) *Prog { c := 0 - for q := p; q != nil; q = q.Pcond { - if q.As != AJMP || q.Pcond == nil { + for q := p; q != nil; q = q.To.Target() { + if q.As != AJMP || q.To.Target() == nil { return q } c++ @@ -132,8 +132,6 @@ func linkpatch(ctxt *Link, sym *LSym, newprog ProgAlloc) { continue } if p.To.Val != nil { - // TODO: Remove To.Val.(*Prog) in favor of p->pcond. - p.Pcond = p.To.Val.(*Prog) continue } @@ -158,8 +156,7 @@ func linkpatch(ctxt *Link, sym *LSym, newprog ProgAlloc) { p.To.Type = TYPE_NONE } - p.To.Val = q - p.Pcond = q + p.To.SetTarget(q) } if !ctxt.Flag_optimize { @@ -168,12 +165,12 @@ func linkpatch(ctxt *Link, sym *LSym, newprog ProgAlloc) { // Collapse series of jumps to jumps. 
for p := sym.Func.Text; p != nil; p = p.Link { - if p.Pcond == nil { + if p.To.Target() == nil { continue } - p.Pcond = brloop(p.Pcond) - if p.Pcond != nil && p.To.Type == TYPE_BRANCH { - p.To.Offset = p.Pcond.Pc + p.To.SetTarget(brloop(p.To.Target())) + if p.To.Target() != nil && p.To.Type == TYPE_BRANCH { + p.To.Offset = p.To.Target().Pc } } } diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go index 3c82477fc4..98b453de6c 100644 --- a/src/cmd/internal/obj/ppc64/asm9.go +++ b/src/cmd/internal/obj/ppc64/asm9.go @@ -725,22 +725,22 @@ func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { o = c.oplook(p) // very large conditional branches - if (o.type_ == 16 || o.type_ == 17) && p.Pcond != nil { - otxt = p.Pcond.Pc - pc + if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil { + otxt = p.To.Target().Pc - pc if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 { q = c.newprog() q.Link = p.Link p.Link = q q.As = ABR q.To.Type = obj.TYPE_BRANCH - q.Pcond = p.Pcond - p.Pcond = q + q.To.SetTarget(p.To.Target()) + p.To.SetTarget(q) q = c.newprog() q.Link = p.Link p.Link = q q.As = ABR q.To.Type = obj.TYPE_BRANCH - q.Pcond = q.Link.Link + q.To.SetTarget(q.Link.Link) //addnop(p->link); //addnop(p); @@ -2630,8 +2630,8 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { case 11: /* br/bl lbra */ v := int32(0) - if p.Pcond != nil { - v = int32(p.Pcond.Pc - p.Pc) + if p.To.Target() != nil { + v = int32(p.To.Target().Pc - p.Pc) if v&03 != 0 { c.ctxt.Diag("odd branch target address\n%v", p) v &^= 03 @@ -2781,8 +2781,8 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { } } v := int32(0) - if p.Pcond != nil { - v = int32(p.Pcond.Pc - p.Pc) + if p.To.Target() != nil { + v = int32(p.To.Target().Pc - p.Pc) } if v&03 != 0 { c.ctxt.Diag("odd branch target address\n%v", p) diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go index 749f7066de..c012762a18 100644 --- 
a/src/cmd/internal/obj/ppc64/obj9.go +++ b/src/cmd/internal/obj/ppc64/obj9.go @@ -556,7 +556,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ABVS: p.Mark |= BRANCH q = p - q1 = p.Pcond + q1 = p.To.Target() if q1 != nil { // NOPs are not removed due to #40689. @@ -841,8 +841,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, c.newprog) q.As = obj.ANOP - p1.Pcond = q - p2.Pcond = q + p1.To.SetTarget(q) + p2.To.SetTarget(q) } case obj.ARET: @@ -1153,7 +1153,7 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.To.Type = obj.TYPE_REG p.To.Reg = REG_R5 if q != nil { - q.Pcond = p + q.To.SetTarget(p) } p = c.ctxt.EmitEntryStackMap(c.cursym, p, c.newprog) @@ -1248,13 +1248,13 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p = obj.Appendp(p, c.newprog) p.As = ABR p.To.Type = obj.TYPE_BRANCH - p.Pcond = p0.Link + p.To.SetTarget(p0.Link) // placeholder for q1's jump target p = obj.Appendp(p, c.newprog) p.As = obj.ANOP // zero-width place holder - q1.Pcond = p + q1.To.SetTarget(p) return p } diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go index 2eb2935b31..77d383b290 100644 --- a/src/cmd/internal/obj/riscv/obj.go +++ b/src/cmd/internal/obj/riscv/obj.go @@ -634,7 +634,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { getargp.Reg = 0 getargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X12} - bneadj.Pcond = getargp + bneadj.To.SetTarget(getargp) calcargp := obj.Appendp(getargp, newprog) calcargp.As = AADDI @@ -647,7 +647,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { testargp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X12} testargp.Reg = REG_X13 testargp.To.Type = obj.TYPE_BRANCH - testargp.Pcond = endadj + testargp.To.SetTarget(endadj) adjargp := obj.Appendp(testargp, newprog) adjargp.As = AADDI @@ -665,7 +665,7 @@ func preprocess(ctxt *obj.Link, cursym 
*obj.LSym, newprog obj.ProgAlloc) { godone.As = AJAL godone.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO} godone.To.Type = obj.TYPE_BRANCH - godone.Pcond = endadj + godone.To.SetTarget(endadj) } // Update stack-based offsets. @@ -890,27 +890,27 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if p.To.Type != obj.TYPE_BRANCH { panic("assemble: instruction with branch-like opcode lacks destination") } - offset := p.Pcond.Pc - p.Pc + offset := p.To.Target().Pc - p.Pc if offset < -4096 || 4096 <= offset { // Branch is long. Replace it with a jump. jmp := obj.Appendp(p, newprog) jmp.As = AJAL jmp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO} jmp.To = obj.Addr{Type: obj.TYPE_BRANCH} - jmp.Pcond = p.Pcond + jmp.To.SetTarget(p.To.Target()) p.As = InvertBranch(p.As) - p.Pcond = jmp.Link + p.To.SetTarget(jmp.Link) // We may have made previous branches too long, // so recheck them. rescan = true } case AJAL: - if p.Pcond == nil { + if p.To.Target() == nil { panic("intersymbol jumps should be expressed as AUIPC+JALR") } - offset := p.Pcond.Pc - p.Pc + offset := p.To.Target().Pc - p.Pc if offset < -(1<<20) || (1<<20) <= offset { // Replace with 2-instruction sequence. This assumes // that TMP is not live across J instructions, since @@ -925,6 +925,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { // fixed up in the next loop. 
p.As = AAUIPC p.From = obj.Addr{Type: obj.TYPE_BRANCH, Sym: p.From.Sym} + p.From.SetTarget(p.To.Target()) p.Reg = 0 p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP} @@ -946,16 +947,16 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { case ABEQ, ABEQZ, ABGE, ABGEU, ABGEZ, ABGT, ABGTU, ABGTZ, ABLE, ABLEU, ABLEZ, ABLT, ABLTU, ABLTZ, ABNE, ABNEZ, AJAL: switch p.To.Type { case obj.TYPE_BRANCH: - p.To.Type, p.To.Offset = obj.TYPE_CONST, p.Pcond.Pc-p.Pc + p.To.Type, p.To.Offset = obj.TYPE_CONST, p.To.Target().Pc-p.Pc case obj.TYPE_MEM: panic("unhandled type") } case AAUIPC: if p.From.Type == obj.TYPE_BRANCH { - low, high, err := Split32BitImmediate(p.Pcond.Pc - p.Pc) + low, high, err := Split32BitImmediate(p.From.Target().Pc - p.Pc) if err != nil { - ctxt.Diag("%v: jump displacement %d too large", p, p.Pcond.Pc-p.Pc) + ctxt.Diag("%v: jump displacement %d too large", p, p.To.Target().Pc-p.Pc) } p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: high, Sym: cursym} p.Link.From.Offset = low @@ -1098,7 +1099,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgA p.To.Sym = ctxt.Lookup("runtime.morestack") } if to_more != nil { - to_more.Pcond = p + to_more.To.SetTarget(p) } p = jalrToSym(ctxt, p, newprog, REG_X5) @@ -1107,12 +1108,12 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgA p.As = AJAL p.To = obj.Addr{Type: obj.TYPE_BRANCH} p.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO} - p.Pcond = cursym.Func.Text.Link + p.To.SetTarget(cursym.Func.Text.Link) // placeholder for to_done's jump target p = obj.Appendp(p, newprog) p.As = obj.ANOP // zero-width place holder - to_done.Pcond = p + to_done.To.SetTarget(p) return p } diff --git a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go index 29182ea805..68f01f1c5d 100644 --- a/src/cmd/internal/obj/s390x/asmz.go +++ b/src/cmd/internal/obj/s390x/asmz.go @@ -3001,8 +3001,8 @@ func (c *ctxtz) asmout(p *obj.Prog, asm 
*[]byte) { case 11: // br/bl v := int32(0) - if p.Pcond != nil { - v = int32((p.Pcond.Pc - p.Pc) >> 1) + if p.To.Target() != nil { + v = int32((p.To.Target().Pc - p.Pc) >> 1) } if p.As == ABR && p.To.Sym == nil && int32(int16(v)) == v { @@ -3122,8 +3122,8 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 16: // conditional branch v := int32(0) - if p.Pcond != nil { - v = int32((p.Pcond.Pc - p.Pc) >> 1) + if p.To.Target() != nil { + v = int32((p.To.Target().Pc - p.Pc) >> 1) } mask := uint32(c.branchMask(p)) if p.To.Sym == nil && int32(int16(v)) == v { @@ -3440,7 +3440,7 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 41: // branch on count r1 := p.From.Reg - ri2 := (p.Pcond.Pc - p.Pc) >> 1 + ri2 := (p.To.Target().Pc - p.Pc) >> 1 if int64(int16(ri2)) != ri2 { c.ctxt.Diag("branch target too far away") } @@ -3885,8 +3885,8 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 89: // compare and branch reg reg var v int32 - if p.Pcond != nil { - v = int32((p.Pcond.Pc - p.Pc) >> 1) + if p.To.Target() != nil { + v = int32((p.To.Target().Pc - p.Pc) >> 1) } // Some instructions take a mask as the first argument. @@ -3930,8 +3930,8 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 90: // compare and branch reg $constant var v int32 - if p.Pcond != nil { - v = int32((p.Pcond.Pc - p.Pc) >> 1) + if p.To.Target() != nil { + v = int32((p.To.Target().Pc - p.Pc) >> 1) } // Some instructions take a mask as the first argument. 
diff --git a/src/cmd/internal/obj/s390x/objz.go b/src/cmd/internal/obj/s390x/objz.go index ef6335d849..625bb0f7b4 100644 --- a/src/cmd/internal/obj/s390x/objz.go +++ b/src/cmd/internal/obj/s390x/objz.go @@ -454,8 +454,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, c.newprog) q.As = obj.ANOP - p1.Pcond = q - p2.Pcond = q + p1.To.SetTarget(q) + p2.To.SetTarget(q) } case obj.ARET: @@ -679,14 +679,14 @@ func (c *ctxtz) stacksplitPost(p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog, // MOVD LR, R5 p = obj.Appendp(pcdata, c.newprog) - pPre.Pcond = p + pPre.To.SetTarget(p) p.As = AMOVD p.From.Type = obj.TYPE_REG p.From.Reg = REG_LR p.To.Type = obj.TYPE_REG p.To.Reg = REG_R5 if pPreempt != nil { - pPreempt.Pcond = p + pPreempt.To.SetTarget(p) } // BL runtime.morestack(SB) @@ -709,7 +709,7 @@ func (c *ctxtz) stacksplitPost(p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog, p.As = ABR p.To.Type = obj.TYPE_BRANCH - p.Pcond = c.cursym.Func.Text.Link + p.To.SetTarget(c.cursym.Func.Text.Link) return p } diff --git a/src/cmd/internal/obj/util.go b/src/cmd/internal/obj/util.go index d020026445..a30ccf0564 100644 --- a/src/cmd/internal/obj/util.go +++ b/src/cmd/internal/obj/util.go @@ -251,10 +251,8 @@ func WriteDconv(w io.Writer, p *Prog, a *Addr) { case TYPE_BRANCH: if a.Sym != nil { fmt.Fprintf(w, "%s(SB)", a.Sym.Name) - } else if p != nil && p.Pcond != nil { - fmt.Fprint(w, p.Pcond.Pc) - } else if a.Val != nil { - fmt.Fprint(w, a.Val.(*Prog).Pc) + } else if a.Target() != nil { + fmt.Fprint(w, a.Target().Pc) } else { fmt.Fprintf(w, "%d(PC)", a.Offset) } diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go index a530636373..fb99c620ad 100644 --- a/src/cmd/internal/obj/x86/asm6.go +++ b/src/cmd/internal/obj/x86/asm6.go @@ -1855,7 +1855,7 @@ func spadjop(ctxt *obj.Link, l, q obj.As) obj.As { // no standalone or macro-fused jump will straddle or end on a 32 byte boundary // by inserting NOPs before the 
jumps func isJump(p *obj.Prog) bool { - return p.Pcond != nil || p.As == obj.AJMP || p.As == obj.ACALL || + return p.To.Target() != nil || p.As == obj.AJMP || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO } @@ -1867,7 +1867,7 @@ func lookForJCC(p *obj.Prog) *obj.Prog { for q = p.Link; q != nil && (q.As == obj.APCDATA || q.As == obj.AFUNCDATA || q.As == obj.ANOP); q = q.Link { } - if q == nil || q.Pcond == nil || p.As == obj.AJMP || p.As == obj.ACALL { + if q == nil || q.To.Target() == nil || p.As == obj.AJMP || p.As == obj.ACALL { return nil } @@ -2051,8 +2051,8 @@ func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { } for p := s.Func.Text; p != nil; p = p.Link { - if p.To.Type == obj.TYPE_BRANCH && p.Pcond == nil { - p.Pcond = p + if p.To.Type == obj.TYPE_BRANCH && p.To.Target() == nil { + p.To.SetTarget(p) } if p.As == AADJSP { p.To.Type = obj.TYPE_REG @@ -2088,7 +2088,7 @@ func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { for p := s.Func.Text; p != nil; p = p.Link { count++ p.Back = branchShort // use short branches first time through - if q := p.Pcond; q != nil && (q.Back&branchShort != 0) { + if q := p.To.Target(); q != nil && (q.Back&branchShort != 0) { p.Back |= branchBackwards q.Back |= branchLoopHead } @@ -4886,7 +4886,7 @@ func (ab *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { // TODO: Check in input, preserve in brchain. // Fill in backward jump now. - q = p.Pcond + q = p.To.Target() if q == nil { ctxt.Diag("jmp/branch/loop without target") diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go index 016c247ff5..18a6afcd77 100644 --- a/src/cmd/internal/obj/x86/obj6.go +++ b/src/cmd/internal/obj/x86/obj6.go @@ -765,7 +765,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } // Set jne branch target. 
- jne.Pcond = p + jne.To.SetTarget(p) // CMPQ panic_argp(BX), DI p = obj.Appendp(p, newprog) @@ -783,7 +783,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p = obj.Appendp(p, newprog) p.As = AJNE p.To.Type = obj.TYPE_BRANCH - p.Pcond = end + p.To.SetTarget(end) // MOVQ SP, panic_argp(BX) p = obj.Appendp(p, newprog) @@ -801,7 +801,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p = obj.Appendp(p, newprog) p.As = obj.AJMP p.To.Type = obj.TYPE_BRANCH - p.Pcond = end + p.To.SetTarget(end) // Reset p for following code. p = end @@ -1144,12 +1144,12 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA jmp := obj.Appendp(pcdata, newprog) jmp.As = obj.AJMP jmp.To.Type = obj.TYPE_BRANCH - jmp.Pcond = cursym.Func.Text.Link + jmp.To.SetTarget(cursym.Func.Text.Link) jmp.Spadj = +framesize - jls.Pcond = call + jls.To.SetTarget(call) if q1 != nil { - q1.Pcond = call + q1.To.SetTarget(call) } return end -- cgit v1.2.3-54-g00ecf From f0c7e3e9463069f60b3d31696860f6fb75aa3e87 Mon Sep 17 00:00:00 2001 From: David Finkel Date: Sat, 25 Apr 2020 13:32:06 -0400 Subject: cmd/compile: adjust some AMD64 rewrite rules to use typed aux fields Remove an extra int32-representable check when deciding to use an int32 constant as an immediate value. Comment out a broken optimization that relies on MaxUint32 being representable by a signed int32. It never triggers and when fixed, the signedness of the auxint prevents other optimization passes from handling it properly, thus causing segfaults in the runtime. Remove a couple offset representable in 32-bits checks on 32-bit aux vals. 
toolstash-check clean Change-Id: I148b53403fde523c90d692cb90e412460664b439 Reviewed-on: https://go-review.googlesource.com/c/go/+/230458 Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/AMD64.rules | 835 ++--- src/cmd/compile/internal/ssa/rewriteAMD64.go | 4227 +++++++++++++------------- 2 files changed, 2515 insertions(+), 2547 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 5111ef79d3..8898fe55eb 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -436,69 +436,69 @@ // Absorb InvertFlags (CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond)) - -> (CMOVQ(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond) + => (CMOVQ(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond) (CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond)) - -> (CMOVL(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond) + => (CMOVL(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond) (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond)) - -> (CMOVW(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond) + => (CMOVW(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond) // Absorb constants generated during lower -(CMOV(QEQ|QLE|QGE|QCC|QLS|LEQ|LLE|LGE|LCC|LLS|WEQ|WLE|WGE|WCC|WLS) _ x (FlagEQ)) -> x -(CMOV(QNE|QLT|QGT|QCS|QHI|LNE|LLT|LGT|LCS|LHI|WNE|WLT|WGT|WCS|WHI) y _ (FlagEQ)) -> y -(CMOV(QNE|QGT|QGE|QHI|QCC|LNE|LGT|LGE|LHI|LCC|WNE|WGT|WGE|WHI|WCC) _ x (FlagGT_UGT)) -> x -(CMOV(QEQ|QLE|QLT|QLS|QCS|LEQ|LLE|LLT|LLS|LCS|WEQ|WLE|WLT|WLS|WCS) y _ (FlagGT_UGT)) -> y -(CMOV(QNE|QGT|QGE|QLS|QCS|LNE|LGT|LGE|LLS|LCS|WNE|WGT|WGE|WLS|WCS) _ x (FlagGT_ULT)) -> x -(CMOV(QEQ|QLE|QLT|QHI|QCC|LEQ|LLE|LLT|LHI|LCC|WEQ|WLE|WLT|WHI|WCC) y _ (FlagGT_ULT)) -> y -(CMOV(QNE|QLT|QLE|QCS|QLS|LNE|LLT|LLE|LCS|LLS|WNE|WLT|WLE|WCS|WLS) _ x (FlagLT_ULT)) -> x -(CMOV(QEQ|QGT|QGE|QHI|QCC|LEQ|LGT|LGE|LHI|LCC|WEQ|WGT|WGE|WHI|WCC) y _ (FlagLT_ULT)) -> y -(CMOV(QNE|QLT|QLE|QHI|QCC|LNE|LLT|LLE|LHI|LCC|WNE|WLT|WLE|WHI|WCC) _ x (FlagLT_UGT)) 
-> x -(CMOV(QEQ|QGT|QGE|QCS|QLS|LEQ|LGT|LGE|LCS|LLS|WEQ|WGT|WGE|WCS|WLS) y _ (FlagLT_UGT)) -> y +(CMOV(QEQ|QLE|QGE|QCC|QLS|LEQ|LLE|LGE|LCC|LLS|WEQ|WLE|WGE|WCC|WLS) _ x (FlagEQ)) => x +(CMOV(QNE|QLT|QGT|QCS|QHI|LNE|LLT|LGT|LCS|LHI|WNE|WLT|WGT|WCS|WHI) y _ (FlagEQ)) => y +(CMOV(QNE|QGT|QGE|QHI|QCC|LNE|LGT|LGE|LHI|LCC|WNE|WGT|WGE|WHI|WCC) _ x (FlagGT_UGT)) => x +(CMOV(QEQ|QLE|QLT|QLS|QCS|LEQ|LLE|LLT|LLS|LCS|WEQ|WLE|WLT|WLS|WCS) y _ (FlagGT_UGT)) => y +(CMOV(QNE|QGT|QGE|QLS|QCS|LNE|LGT|LGE|LLS|LCS|WNE|WGT|WGE|WLS|WCS) _ x (FlagGT_ULT)) => x +(CMOV(QEQ|QLE|QLT|QHI|QCC|LEQ|LLE|LLT|LHI|LCC|WEQ|WLE|WLT|WHI|WCC) y _ (FlagGT_ULT)) => y +(CMOV(QNE|QLT|QLE|QCS|QLS|LNE|LLT|LLE|LCS|LLS|WNE|WLT|WLE|WCS|WLS) _ x (FlagLT_ULT)) => x +(CMOV(QEQ|QGT|QGE|QHI|QCC|LEQ|LGT|LGE|LHI|LCC|WEQ|WGT|WGE|WHI|WCC) y _ (FlagLT_ULT)) => y +(CMOV(QNE|QLT|QLE|QHI|QCC|LNE|LLT|LLE|LHI|LCC|WNE|WLT|WLE|WHI|WCC) _ x (FlagLT_UGT)) => x +(CMOV(QEQ|QGT|QGE|QCS|QLS|LEQ|LGT|LGE|LCS|LLS|WEQ|WGT|WGE|WCS|WLS) y _ (FlagLT_UGT)) => y // Miscellaneous -(IsNonNil p) -> (SETNE (TESTQ p p)) -(IsInBounds idx len) -> (SETB (CMPQ idx len)) -(IsSliceInBounds idx len) -> (SETBE (CMPQ idx len)) -(NilCheck ...) -> (LoweredNilCheck ...) -(GetG ...) -> (LoweredGetG ...) -(GetClosurePtr ...) -> (LoweredGetClosurePtr ...) -(GetCallerPC ...) -> (LoweredGetCallerPC ...) -(GetCallerSP ...) -> (LoweredGetCallerSP ...) - -(HasCPUFeature {s}) -> (SETNE (CMPQconst [0] (LoweredHasCPUFeature {s}))) +(IsNonNil p) => (SETNE (TESTQ p p)) +(IsInBounds idx len) => (SETB (CMPQ idx len)) +(IsSliceInBounds idx len) => (SETBE (CMPQ idx len)) +(NilCheck ...) => (LoweredNilCheck ...) +(GetG ...) => (LoweredGetG ...) +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerPC ...) => (LoweredGetCallerPC ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) + +(HasCPUFeature {s}) => (SETNE (CMPQconst [0] (LoweredHasCPUFeature {s}))) (Addr ...) -> (LEAQ ...) 
-(LocalAddr {sym} base _) -> (LEAQ {sym} base) - -(MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 -> (SETLstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 -> (SETLEstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 -> (SETGstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 -> (SETGEstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 -> (SETEQstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 -> (SETNEstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 -> (SETBstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 -> (SETBEstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 -> (SETAstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 -> (SETAEstore [off] {sym} ptr x mem) +(LocalAddr {sym} base _) => (LEAQ {sym} base) + +(MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 => (SETLstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 => (SETLEstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 => (SETGstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 => (SETGEstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 => (SETEQstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 => (SETNEstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 => (SETBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 => (SETBEstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 => (SETAstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses 
== 1 => (SETAEstore [off] {sym} ptr x mem) // block rewrites -(If (SETL cmp) yes no) -> (LT cmp yes no) -(If (SETLE cmp) yes no) -> (LE cmp yes no) -(If (SETG cmp) yes no) -> (GT cmp yes no) -(If (SETGE cmp) yes no) -> (GE cmp yes no) -(If (SETEQ cmp) yes no) -> (EQ cmp yes no) -(If (SETNE cmp) yes no) -> (NE cmp yes no) -(If (SETB cmp) yes no) -> (ULT cmp yes no) -(If (SETBE cmp) yes no) -> (ULE cmp yes no) -(If (SETA cmp) yes no) -> (UGT cmp yes no) -(If (SETAE cmp) yes no) -> (UGE cmp yes no) -(If (SETO cmp) yes no) -> (OS cmp yes no) +(If (SETL cmp) yes no) => (LT cmp yes no) +(If (SETLE cmp) yes no) => (LE cmp yes no) +(If (SETG cmp) yes no) => (GT cmp yes no) +(If (SETGE cmp) yes no) => (GE cmp yes no) +(If (SETEQ cmp) yes no) => (EQ cmp yes no) +(If (SETNE cmp) yes no) => (NE cmp yes no) +(If (SETB cmp) yes no) => (ULT cmp yes no) +(If (SETBE cmp) yes no) => (ULE cmp yes no) +(If (SETA cmp) yes no) => (UGT cmp yes no) +(If (SETAE cmp) yes no) => (UGE cmp yes no) +(If (SETO cmp) yes no) => (OS cmp yes no) // Special case for floating point - LF/LEF not generated -(If (SETGF cmp) yes no) -> (UGT cmp yes no) -(If (SETGEF cmp) yes no) -> (UGE cmp yes no) -(If (SETEQF cmp) yes no) -> (EQF cmp yes no) -(If (SETNEF cmp) yes no) -> (NEF cmp yes no) +(If (SETGF cmp) yes no) => (UGT cmp yes no) +(If (SETGEF cmp) yes no) => (UGE cmp yes no) +(If (SETEQF cmp) yes no) => (EQF cmp yes no) +(If (SETNEF cmp) yes no) => (NEF cmp yes no) -(If cond yes no) -> (NE (TESTB cond cond) yes no) +(If cond yes no) => (NE (TESTB cond cond) yes no) // Atomic loads. Other than preserving their ordering with respect to other loads, nothing special here. (AtomicLoad8 ...) -> (MOVBatomicload ...) @@ -508,22 +508,22 @@ // Atomic stores. We use XCHG to prevent the hardware reordering a subsequent load. // TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those? 
-(AtomicStore8 ptr val mem) -> (Select1 (XCHGB val ptr mem)) -(AtomicStore32 ptr val mem) -> (Select1 (XCHGL val ptr mem)) -(AtomicStore64 ptr val mem) -> (Select1 (XCHGQ val ptr mem)) -(AtomicStorePtrNoWB ptr val mem) -> (Select1 (XCHGQ val ptr mem)) +(AtomicStore8 ptr val mem) => (Select1 (XCHGB val ptr mem)) +(AtomicStore32 ptr val mem) => (Select1 (XCHGL val ptr mem)) +(AtomicStore64 ptr val mem) => (Select1 (XCHGQ val ptr mem)) +(AtomicStorePtrNoWB ptr val mem) => (Select1 (XCHGQ val ptr mem)) // Atomic exchanges. -(AtomicExchange32 ptr val mem) -> (XCHGL val ptr mem) -(AtomicExchange64 ptr val mem) -> (XCHGQ val ptr mem) +(AtomicExchange32 ptr val mem) => (XCHGL val ptr mem) +(AtomicExchange64 ptr val mem) => (XCHGQ val ptr mem) // Atomic adds. -(AtomicAdd32 ptr val mem) -> (AddTupleFirst32 val (XADDLlock val ptr mem)) -(AtomicAdd64 ptr val mem) -> (AddTupleFirst64 val (XADDQlock val ptr mem)) -(Select0 (AddTupleFirst32 val tuple)) -> (ADDL val (Select0 tuple)) -(Select1 (AddTupleFirst32 _ tuple)) -> (Select1 tuple) -(Select0 (AddTupleFirst64 val tuple)) -> (ADDQ val (Select0 tuple)) -(Select1 (AddTupleFirst64 _ tuple)) -> (Select1 tuple) +(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (XADDLlock val ptr mem)) +(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (XADDQlock val ptr mem)) +(Select0 (AddTupleFirst32 val tuple)) => (ADDL val (Select0 tuple)) +(Select1 (AddTupleFirst32 _ tuple)) => (Select1 tuple) +(Select0 (AddTupleFirst64 val tuple)) => (ADDQ val (Select0 tuple)) +(Select1 (AddTupleFirst64 _ tuple)) => (Select1 tuple) // Atomic compare and swap. (AtomicCompareAndSwap32 ...) -> (CMPXCHGLlock ...) @@ -536,9 +536,9 @@ // Write barrier. (WB ...) -> (LoweredWB ...) 
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 -> (LoweredPanicBoundsA [kind] x y mem) -(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 -> (LoweredPanicBoundsB [kind] x y mem) -(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 -> (LoweredPanicBoundsC [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) // *************************** // Above: lowering rules @@ -547,23 +547,23 @@ // TODO: Should the optimizations be a separate pass? // Fold boolean tests into blocks -(NE (TESTB (SETL cmp) (SETL cmp)) yes no) -> (LT cmp yes no) -(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) -> (LE cmp yes no) -(NE (TESTB (SETG cmp) (SETG cmp)) yes no) -> (GT cmp yes no) -(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) -> (GE cmp yes no) -(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) -> (EQ cmp yes no) -(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) -> (NE cmp yes no) -(NE (TESTB (SETB cmp) (SETB cmp)) yes no) -> (ULT cmp yes no) -(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no) -(NE (TESTB (SETA cmp) (SETA cmp)) yes no) -> (UGT cmp yes no) -(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no) -(NE (TESTB (SETO cmp) (SETO cmp)) yes no) -> (OS cmp yes no) +(NE (TESTB (SETL cmp) (SETL cmp)) yes no) => (LT cmp yes no) +(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE cmp yes no) +(NE (TESTB (SETG cmp) (SETG cmp)) yes no) => (GT cmp yes no) +(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE cmp yes no) +(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ cmp yes no) +(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE cmp yes no) +(NE (TESTB (SETB cmp) (SETB cmp)) yes no) => (ULT cmp yes no) +(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no) +(NE (TESTB (SETA cmp) (SETA cmp)) yes no) => (UGT cmp yes no) 
+(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no) +(NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no) // Unsigned comparisons to 0/1 -(ULT (TEST(Q|L|W|B) x x) yes no) -> (First no yes) -(UGE (TEST(Q|L|W|B) x x) yes no) -> (First yes no) -(SETB (TEST(Q|L|W|B) x x)) -> (ConstBool [0]) -(SETAE (TEST(Q|L|W|B) x x)) -> (ConstBool [1]) +(ULT (TEST(Q|L|W|B) x x) yes no) => (First no yes) +(UGE (TEST(Q|L|W|B) x x) yes no) => (First yes no) +(SETB (TEST(Q|L|W|B) x x)) => (ConstBool [false]) +(SETAE (TEST(Q|L|W|B) x x)) => (ConstBool [true]) // x & 1 != 0 -> x & 1 (SETNE (TEST(B|W)const [1] x)) => (AND(L|L)const [1] x) @@ -574,75 +574,75 @@ // into tests for carry flags. // ULT and SETB check the carry flag; they are identical to CS and SETCS. Same, mutatis // mutandis, for UGE and SETAE, and CC and SETCC. -((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) -> ((ULT|UGE) (BTL x y)) -((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) -> ((ULT|UGE) (BTQ x y)) -((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c) - -> ((ULT|UGE) (BTLconst [log2uint32(c)] x)) -((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c) - -> ((ULT|UGE) (BTQconst [log2(c)] x)) +((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y)) +((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y)) +((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c)) + => ((ULT|UGE) (BTLconst [int8(log32(c))] x)) +((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c)) + => ((ULT|UGE) (BTQconst [int8(log32(c))] x)) ((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) - -> ((ULT|UGE) (BTQconst [log2(c)] x)) -(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) -> (SET(B|AE) (BTL x y)) -(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) -> (SET(B|AE) (BTQ x y)) -(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c) - -> (SET(B|AE) (BTLconst [log2uint32(c)] x)) -(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c) - -> (SET(B|AE) (BTQconst [log2(c)] x)) + => ((ULT|UGE) 
(BTQconst [int8(log2(c))] x)) +(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y)) +(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y)) +(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c)) + => (SET(B|AE) (BTLconst [int8(log32(c))] x)) +(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c)) + => (SET(B|AE) (BTQconst [int8(log32(c))] x)) (SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) - -> (SET(B|AE) (BTQconst [log2(c)] x)) + => (SET(B|AE) (BTQconst [int8(log2(c))] x)) // SET..store variant (SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) - -> (SET(B|AE)store [off] {sym} ptr (BTL x y) mem) + => (SET(B|AE)store [off] {sym} ptr (BTL x y) mem) (SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) - -> (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem) -(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(c) - -> (SET(B|AE)store [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem) -(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(c) - -> (SET(B|AE)store [off] {sym} ptr (BTQconst [log2(c)] x) mem) + => (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(int64(c)) + => (SET(B|AE)store [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(int64(c)) + => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem) (SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c) - -> (SET(B|AE)store [off] {sym} ptr (BTQconst [log2(c)] x) mem) + => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log2(c))] x) mem) // Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules // and further combining shifts. 
-(BT(Q|L)const [c] (SHRQconst [d] x)) && (c+d)<64 -> (BTQconst [c+d] x) -(BT(Q|L)const [c] (SHLQconst [d] x)) && c>d -> (BT(Q|L)const [c-d] x) -(BT(Q|L)const [0] s:(SHRQ x y)) -> (BTQ y x) -(BTLconst [c] (SHRLconst [d] x)) && (c+d)<32 -> (BTLconst [c+d] x) -(BTLconst [c] (SHLLconst [d] x)) && c>d -> (BTLconst [c-d] x) -(BTLconst [0] s:(SHRL x y)) -> (BTL y x) +(BT(Q|L)const [c] (SHRQconst [d] x)) && (c+d)<64 => (BTQconst [c+d] x) +(BT(Q|L)const [c] (SHLQconst [d] x)) && c>d => (BT(Q|L)const [c-d] x) +(BT(Q|L)const [0] s:(SHRQ x y)) => (BTQ y x) +(BTLconst [c] (SHRLconst [d] x)) && (c+d)<32 => (BTLconst [c+d] x) +(BTLconst [c] (SHLLconst [d] x)) && c>d => (BTLconst [c-d] x) +(BTLconst [0] s:(SHRL x y)) => (BTL y x) // Rewrite a & 1 != 1 into a & 1 == 0. // Among other things, this lets us turn (a>>b)&1 != 1 into a bit test. -(SET(NE|EQ) (CMPLconst [1] s:(ANDLconst [1] _))) -> (SET(EQ|NE) (CMPLconst [0] s)) -(SET(NE|EQ)store [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) -> (SET(EQ|NE)store [off] {sym} ptr (CMPLconst [0] s) mem) -(SET(NE|EQ) (CMPQconst [1] s:(ANDQconst [1] _))) -> (SET(EQ|NE) (CMPQconst [0] s)) -(SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) -> (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem) +(SET(NE|EQ) (CMPLconst [1] s:(ANDLconst [1] _))) => (SET(EQ|NE) (CMPLconst [0] s)) +(SET(NE|EQ)store [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPLconst [0] s) mem) +(SET(NE|EQ) (CMPQconst [1] s:(ANDQconst [1] _))) => (SET(EQ|NE) (CMPQconst [0] s)) +(SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem) // Recognize bit setting (a |= 1< (BTS(Q|L) x y) -(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) -> (BTC(Q|L) x y) +(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y) +(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y) // Convert ORconst into BTS, if the code gets 
smaller, with boundary being // (ORL $40,AX is 3 bytes, ORL $80,AX is 6 bytes). -((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(c) && uint64(c) >= 128 - -> (BT(S|C)Qconst [log2(c)] x) -((ORL|XORL)const [c] x) && isUint32PowerOfTwo(c) && uint64(c) >= 128 - -> (BT(S|C)Lconst [log2uint32(c)] x) +((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128 + => (BT(S|C)Qconst [int8(log32(c))] x) +((ORL|XORL)const [c] x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128 + => (BT(S|C)Lconst [int8(log32(c))] x) ((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128 - -> (BT(S|C)Qconst [log2(c)] x) -((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(c) && uint64(c) >= 128 - -> (BT(S|C)Lconst [log2uint32(c)] x) + => (BT(S|C)Qconst [int8(log2(c))] x) +((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128 + => (BT(S|C)Lconst [int8(log32(c))] x) // Recognize bit clearing: a &^= 1< (BTR(Q|L) x y) -(ANDQconst [c] x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128 - -> (BTRQconst [log2(^c)] x) -(ANDLconst [c] x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128 - -> (BTRLconst [log2uint32(^c)] x) +(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y) +(ANDQconst [c] x) && isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128 + => (BTRQconst [int8(log32(^c))] x) +(ANDLconst [c] x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128 + => (BTRLconst [int8(log32(^c))] x) (ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128 - -> (BTRQconst [log2(^c)] x) -(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128 - -> (BTRLconst [log2uint32(^c)] x) + => (BTRQconst [int8(log2(^c))] x) +(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128 + => (BTRLconst [int8(log32(^c))] x) // Special-case bit patterns on first/last bit. 
// generic.rules changes ANDs of high-part/low-part masks into a couple of shifts, @@ -656,84 +656,84 @@ // Special case resetting first/last bit (SHL(L|Q)const [1] (SHR(L|Q)const [1] x)) - -> (BTR(L|Q)const [0] x) + => (BTR(L|Q)const [0] x) (SHRLconst [1] (SHLLconst [1] x)) - -> (BTRLconst [31] x) + => (BTRLconst [31] x) (SHRQconst [1] (SHLQconst [1] x)) - -> (BTRQconst [63] x) + => (BTRQconst [63] x) // Special case testing first/last bit (with double-shift generated by generic.rules) ((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2 - -> ((SETB|SETAE|ULT|UGE) (BTQconst [63] x)) + => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x)) ((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2 - -> ((SETB|SETAE|ULT|UGE) (BTQconst [31] x)) + => ((SETB|SETAE|ULT|UGE) (BTQconst [31] x)) (SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2 - -> (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem) + => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem) (SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2 - -> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem) + => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem) ((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2 - -> ((SETB|SETAE|ULT|UGE) (BTQconst [0] x)) + => ((SETB|SETAE|ULT|UGE) (BTQconst [0] x)) ((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2 - -> ((SETB|SETAE|ULT|UGE) (BTLconst [0] x)) + => ((SETB|SETAE|ULT|UGE) (BTLconst [0] x)) (SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2 - -> (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem) + => (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem) (SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2 - -> (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem) + => 
(SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem) // Special-case manually testing last bit with "a>>63 != 0" (without "&1") ((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2 - -> ((SETB|SETAE|ULT|UGE) (BTQconst [63] x)) + => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x)) ((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2 - -> ((SETB|SETAE|ULT|UGE) (BTLconst [31] x)) + => ((SETB|SETAE|ULT|UGE) (BTLconst [31] x)) (SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2 - -> (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem) + => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem) (SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2 - -> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem) + => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem) // Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1) -(BTS(Q|L)const [c] (BTR(Q|L)const [c] x)) -> (BTS(Q|L)const [c] x) -(BTS(Q|L)const [c] (BTC(Q|L)const [c] x)) -> (BTS(Q|L)const [c] x) -(BTR(Q|L)const [c] (BTS(Q|L)const [c] x)) -> (BTR(Q|L)const [c] x) -(BTR(Q|L)const [c] (BTC(Q|L)const [c] x)) -> (BTR(Q|L)const [c] x) +(BTS(Q|L)const [c] (BTR(Q|L)const [c] x)) => (BTS(Q|L)const [c] x) +(BTS(Q|L)const [c] (BTC(Q|L)const [c] x)) => (BTS(Q|L)const [c] x) +(BTR(Q|L)const [c] (BTS(Q|L)const [c] x)) => (BTR(Q|L)const [c] x) +(BTR(Q|L)const [c] (BTC(Q|L)const [c] x)) => (BTR(Q|L)const [c] x) // Fold boolean negation into SETcc. 
-(XORLconst [1] (SETNE x)) -> (SETEQ x) -(XORLconst [1] (SETEQ x)) -> (SETNE x) -(XORLconst [1] (SETL x)) -> (SETGE x) -(XORLconst [1] (SETGE x)) -> (SETL x) -(XORLconst [1] (SETLE x)) -> (SETG x) -(XORLconst [1] (SETG x)) -> (SETLE x) -(XORLconst [1] (SETB x)) -> (SETAE x) -(XORLconst [1] (SETAE x)) -> (SETB x) -(XORLconst [1] (SETBE x)) -> (SETA x) -(XORLconst [1] (SETA x)) -> (SETBE x) +(XORLconst [1] (SETNE x)) => (SETEQ x) +(XORLconst [1] (SETEQ x)) => (SETNE x) +(XORLconst [1] (SETL x)) => (SETGE x) +(XORLconst [1] (SETGE x)) => (SETL x) +(XORLconst [1] (SETLE x)) => (SETG x) +(XORLconst [1] (SETG x)) => (SETLE x) +(XORLconst [1] (SETB x)) => (SETAE x) +(XORLconst [1] (SETAE x)) => (SETB x) +(XORLconst [1] (SETBE x)) => (SETA x) +(XORLconst [1] (SETA x)) => (SETBE x) // Special case for floating point - LF/LEF not generated -(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) -> (UGT cmp yes no) -(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) -> (UGE cmp yes no) -(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) -> (EQF cmp yes no) -(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF cmp yes no) +(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) => (UGT cmp yes no) +(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE cmp yes no) +(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF cmp yes no) +(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF cmp yes no) // Disabled because it interferes with the pattern match above and makes worse code. 
-// (SETNEF x) -> (ORQ (SETNE x) (SETNAN x)) -// (SETEQF x) -> (ANDQ (SETEQ x) (SETORD x)) +// (SETNEF x) => (ORQ (SETNE x) (SETNAN x)) +// (SETEQF x) => (ANDQ (SETEQ x) (SETORD x)) // fold constants into instructions -(ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x) -(ADDQ x (MOVLconst [c])) && is32Bit(c) -> (ADDQconst [int64(int32(c))] x) -(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x) +(ADDQ x (MOVQconst [c])) && is32Bit(c) => (ADDQconst [int32(c)] x) +(ADDQ x (MOVLconst [c])) => (ADDQconst [c] x) +(ADDL x (MOVLconst [c])) => (ADDLconst [c] x) -(SUBQ x (MOVQconst [c])) && is32Bit(c) -> (SUBQconst x [c]) -(SUBQ (MOVQconst [c]) x) && is32Bit(c) -> (NEGQ (SUBQconst x [c])) -(SUBL x (MOVLconst [c])) -> (SUBLconst x [c]) -(SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst x [c])) +(SUBQ x (MOVQconst [c])) && is32Bit(c) => (SUBQconst x [int32(c)]) +(SUBQ (MOVQconst [c]) x) && is32Bit(c) => (NEGQ (SUBQconst x [int32(c)])) +(SUBL x (MOVLconst [c])) => (SUBLconst x [c]) +(SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst x [c])) -(MULQ x (MOVQconst [c])) && is32Bit(c) -> (MULQconst [c] x) -(MULL x (MOVLconst [c])) -> (MULLconst [c] x) +(MULQ x (MOVQconst [c])) && is32Bit(c) => (MULQconst [int32(c)] x) +(MULL x (MOVLconst [c])) => (MULLconst [c] x) -(ANDQ x (MOVQconst [c])) && is32Bit(c) -> (ANDQconst [c] x) -(ANDL x (MOVLconst [c])) -> (ANDLconst [c] x) +(ANDQ x (MOVQconst [c])) && is32Bit(c) => (ANDQconst [int32(c)] x) +(ANDL x (MOVLconst [c])) => (ANDLconst [c] x) (AND(L|Q)const [c] (AND(L|Q)const [d] x)) => (AND(L|Q)const [c & d] x) (XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) => (XOR(L|Q)const [c ^ d] x) @@ -763,68 +763,70 @@ (ORQconst [c] (BTSQconst [d] x)) && is32Bit(int64(c) | 1< (ORQconst [c | 1< (ORQconst [1< (MULLconst [int64(int32(c * d))] x) -(MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x) -(ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x) -(ORQ x (MOVLconst [c])) -> (ORQconst [c] x) -(ORL x (MOVLconst [c])) -> (ORLconst [c] x) 
+(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x) +(MULQconst [c] (MULQconst [d] x)) && is32Bit(int64(c)*int64(d)) => (MULQconst [c * d] x) -(XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x) -(XORL x (MOVLconst [c])) -> (XORLconst [c] x) +(ORQ x (MOVQconst [c])) && is32Bit(c) => (ORQconst [int32(c)] x) +(ORQ x (MOVLconst [c])) => (ORQconst [c] x) +(ORL x (MOVLconst [c])) => (ORLconst [c] x) -(SHLQ x (MOV(Q|L)const [c])) -> (SHLQconst [c&63] x) -(SHLL x (MOV(Q|L)const [c])) -> (SHLLconst [c&31] x) +(XORQ x (MOVQconst [c])) && is32Bit(c) => (XORQconst [int32(c)] x) +(XORL x (MOVLconst [c])) => (XORLconst [c] x) -(SHRQ x (MOV(Q|L)const [c])) -> (SHRQconst [c&63] x) -(SHRL x (MOV(Q|L)const [c])) -> (SHRLconst [c&31] x) -(SHRW x (MOV(Q|L)const [c])) && c&31 < 16 -> (SHRWconst [c&31] x) -(SHRW _ (MOV(Q|L)const [c])) && c&31 >= 16 -> (MOVLconst [0]) -(SHRB x (MOV(Q|L)const [c])) && c&31 < 8 -> (SHRBconst [c&31] x) -(SHRB _ (MOV(Q|L)const [c])) && c&31 >= 8 -> (MOVLconst [0]) +(SHLQ x (MOV(Q|L)const [c])) => (SHLQconst [int8(c&63)] x) +(SHLL x (MOV(Q|L)const [c])) => (SHLLconst [int8(c&31)] x) + +(SHRQ x (MOV(Q|L)const [c])) => (SHRQconst [int8(c&63)] x) +(SHRL x (MOV(Q|L)const [c])) => (SHRLconst [int8(c&31)] x) +(SHRW x (MOV(Q|L)const [c])) && c&31 < 16 => (SHRWconst [int8(c&31)] x) +(SHRW _ (MOV(Q|L)const [c])) && c&31 >= 16 => (MOVLconst [0]) +(SHRB x (MOV(Q|L)const [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x) +(SHRB _ (MOV(Q|L)const [c])) && c&31 >= 8 => (MOVLconst [0]) + +(SARQ x (MOV(Q|L)const [c])) => (SARQconst [int8(c&63)] x) +(SARL x (MOV(Q|L)const [c])) => (SARLconst [int8(c&31)] x) +(SARW x (MOV(Q|L)const [c])) => (SARWconst [int8(min(int64(c)&31,15))] x) +(SARB x (MOV(Q|L)const [c])) => (SARBconst [int8(min(int64(c)&31,7))] x) -(SARQ x (MOV(Q|L)const [c])) -> (SARQconst [c&63] x) -(SARL x (MOV(Q|L)const [c])) -> (SARLconst [c&31] x) -(SARW x (MOV(Q|L)const [c])) -> (SARWconst [min(c&31,15)] x) -(SARB x (MOV(Q|L)const [c])) -> (SARBconst 
[min(c&31,7)] x) // Operations which don't affect the low 6/5 bits of the shift amount are NOPs. -((SHLQ|SHRQ|SARQ) x (ADDQconst [c] y)) && c & 63 == 0 -> ((SHLQ|SHRQ|SARQ) x y) -((SHLQ|SHRQ|SARQ) x (NEGQ (ADDQconst [c] y))) && c & 63 == 0 -> ((SHLQ|SHRQ|SARQ) x (NEGQ y)) -((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 -> ((SHLQ|SHRQ|SARQ) x y) -((SHLQ|SHRQ|SARQ) x (NEGQ (ANDQconst [c] y))) && c & 63 == 63 -> ((SHLQ|SHRQ|SARQ) x (NEGQ y)) - -((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0 -> ((SHLL|SHRL|SARL) x y) -((SHLL|SHRL|SARL) x (NEGQ (ADDQconst [c] y))) && c & 31 == 0 -> ((SHLL|SHRL|SARL) x (NEGQ y)) -((SHLL|SHRL|SARL) x (ANDQconst [c] y)) && c & 31 == 31 -> ((SHLL|SHRL|SARL) x y) -((SHLL|SHRL|SARL) x (NEGQ (ANDQconst [c] y))) && c & 31 == 31 -> ((SHLL|SHRL|SARL) x (NEGQ y)) - -((SHLQ|SHRQ|SARQ) x (ADDLconst [c] y)) && c & 63 == 0 -> ((SHLQ|SHRQ|SARQ) x y) -((SHLQ|SHRQ|SARQ) x (NEGL (ADDLconst [c] y))) && c & 63 == 0 -> ((SHLQ|SHRQ|SARQ) x (NEGL y)) -((SHLQ|SHRQ|SARQ) x (ANDLconst [c] y)) && c & 63 == 63 -> ((SHLQ|SHRQ|SARQ) x y) -((SHLQ|SHRQ|SARQ) x (NEGL (ANDLconst [c] y))) && c & 63 == 63 -> ((SHLQ|SHRQ|SARQ) x (NEGL y)) - -((SHLL|SHRL|SARL) x (ADDLconst [c] y)) && c & 31 == 0 -> ((SHLL|SHRL|SARL) x y) -((SHLL|SHRL|SARL) x (NEGL (ADDLconst [c] y))) && c & 31 == 0 -> ((SHLL|SHRL|SARL) x (NEGL y)) -((SHLL|SHRL|SARL) x (ANDLconst [c] y)) && c & 31 == 31 -> ((SHLL|SHRL|SARL) x y) -((SHLL|SHRL|SARL) x (NEGL (ANDLconst [c] y))) && c & 31 == 31 -> ((SHLL|SHRL|SARL) x (NEGL y)) +((SHLQ|SHRQ|SARQ) x (ADDQconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y) +((SHLQ|SHRQ|SARQ) x (NEGQ (ADDQconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGQ y)) +((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y) +((SHLQ|SHRQ|SARQ) x (NEGQ (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGQ y)) + +((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y) +((SHLL|SHRL|SARL) x (NEGQ (ADDQconst [c] 
y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGQ y)) +((SHLL|SHRL|SARL) x (ANDQconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y) +((SHLL|SHRL|SARL) x (NEGQ (ANDQconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGQ y)) + +((SHLQ|SHRQ|SARQ) x (ADDLconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y) +((SHLQ|SHRQ|SARQ) x (NEGL (ADDLconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGL y)) +((SHLQ|SHRQ|SARQ) x (ANDLconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y) +((SHLQ|SHRQ|SARQ) x (NEGL (ANDLconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGL y)) + +((SHLL|SHRL|SARL) x (ADDLconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y) +((SHLL|SHRL|SARL) x (NEGL (ADDLconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGL y)) +((SHLL|SHRL|SARL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y) +((SHLL|SHRL|SARL) x (NEGL (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGL y)) // Constant rotate instructions -((ADDQ|ORQ|XORQ) (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c -> (ROLQconst x [c]) -((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c]) +((ADDQ|ORQ|XORQ) (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c => (ROLQconst x [c]) +((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c => (ROLLconst x [c]) -((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c]) -((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 -> (ROLBconst x [c]) +((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 => (ROLWconst x [c]) +((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 => (ROLBconst x [c]) -(ROLQconst [c] (ROLQconst [d] x)) -> (ROLQconst [(c+d)&63] x) -(ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x) -(ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x) -(ROLBconst [c] (ROLBconst [d] x)) 
-> (ROLBconst [(c+d)& 7] x) +(ROLQconst [c] (ROLQconst [d] x)) => (ROLQconst [(c+d)&63] x) +(ROLLconst [c] (ROLLconst [d] x)) => (ROLLconst [(c+d)&31] x) +(ROLWconst [c] (ROLWconst [d] x)) => (ROLWconst [(c+d)&15] x) +(ROLBconst [c] (ROLBconst [d] x)) => (ROLBconst [(c+d)& 7] x) -(RotateLeft8 ...) -> (ROLB ...) -(RotateLeft16 ...) -> (ROLW ...) -(RotateLeft32 ...) -> (ROLL ...) -(RotateLeft64 ...) -> (ROLQ ...) +(RotateLeft8 ...) => (ROLB ...) +(RotateLeft16 ...) => (ROLW ...) +(RotateLeft32 ...) => (ROLL ...) +(RotateLeft64 ...) => (ROLQ ...) // Non-constant rotates. // We want to issue a rotate when the Go source contains code like @@ -837,15 +839,15 @@ // But x >> 64 is 0, not x. So there's an additional mask that is ANDed in // to force the second term to 0. We don't need that mask, but we must match // it in order to strip it out. -(ORQ (SHLQ x y) (ANDQ (SHRQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) -> (ROLQ x y) -(ORQ (SHRQ x y) (ANDQ (SHLQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) -> (RORQ x y) +(ORQ (SHLQ x y) (ANDQ (SHRQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (ROLQ x y) +(ORQ (SHRQ x y) (ANDQ (SHLQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (RORQ x y) -(ORL (SHLL x y) (ANDL (SHRL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) -> (ROLL x y) -(ORL (SHRL x y) (ANDL (SHLL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) -> (RORL x y) +(ORL (SHLL x y) (ANDL (SHRL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (ROLL x y) +(ORL (SHRL x y) (ANDL (SHLL x (NEG(Q|L) y)) (SBBLcarrymask 
(CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (RORL x y) // Help with rotate detection -(CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) -> (FlagLT_ULT) -(CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) -> (FlagLT_ULT) +(CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) => (FlagLT_ULT) +(CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) => (FlagLT_ULT) (ORL (SHLL x (AND(Q|L)const y [15])) (ANDL (SHRW x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16]))) @@ -855,69 +857,74 @@ (ORL (SHRW x (AND(Q|L)const y [15])) (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])))) && v.Type.Size() == 2 - -> (RORW x y) + => (RORW x y) (ORL (SHLL x (AND(Q|L)const y [ 7])) (ANDL (SHRB x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8]))) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])) [ 8])))) && v.Type.Size() == 1 - -> (ROLB x y) + => (ROLB x y) (ORL (SHRB x (AND(Q|L)const y [ 7])) (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])))) && v.Type.Size() == 1 - -> (RORB x y) + => (RORB x y) // rotate left negative = rotate right -(ROLQ x (NEG(Q|L) y)) -> (RORQ x y) -(ROLL x (NEG(Q|L) y)) -> (RORL x y) -(ROLW x (NEG(Q|L) y)) -> (RORW x y) -(ROLB x (NEG(Q|L) y)) -> (RORB x y) +(ROLQ x (NEG(Q|L) y)) => (RORQ x y) +(ROLL x (NEG(Q|L) y)) => (RORL x y) +(ROLW x (NEG(Q|L) y)) => (RORW x y) +(ROLB x (NEG(Q|L) y)) => (RORB x y) // rotate right negative = rotate left -(RORQ x (NEG(Q|L) y)) -> (ROLQ x y) -(RORL x (NEG(Q|L) y)) -> (ROLL x y) -(RORW x (NEG(Q|L) y)) -> (ROLW x y) -(RORB x (NEG(Q|L) y)) -> (ROLB x y) +(RORQ x (NEG(Q|L) y)) => (ROLQ x y) +(RORL x (NEG(Q|L) y)) => (ROLL x y) +(RORW x (NEG(Q|L) y)) => (ROLW x y) +(RORB x (NEG(Q|L) y)) => (ROLB x y) // rotate by constants -(ROLQ x (MOV(Q|L)const [c])) -> (ROLQconst [c&63] x) -(ROLL x (MOV(Q|L)const [c])) -> (ROLLconst [c&31] x) -(ROLW x (MOV(Q|L)const [c])) -> (ROLWconst [c&15] x) -(ROLB x 
(MOV(Q|L)const [c])) -> (ROLBconst [c&7 ] x) +(ROLQ x (MOV(Q|L)const [c])) => (ROLQconst [int8(c&63)] x) +(ROLL x (MOV(Q|L)const [c])) => (ROLLconst [int8(c&31)] x) +(ROLW x (MOV(Q|L)const [c])) => (ROLWconst [int8(c&15)] x) +(ROLB x (MOV(Q|L)const [c])) => (ROLBconst [int8(c&7) ] x) -(RORQ x (MOV(Q|L)const [c])) -> (ROLQconst [(-c)&63] x) -(RORL x (MOV(Q|L)const [c])) -> (ROLLconst [(-c)&31] x) -(RORW x (MOV(Q|L)const [c])) -> (ROLWconst [(-c)&15] x) -(RORB x (MOV(Q|L)const [c])) -> (ROLBconst [(-c)&7 ] x) +(RORQ x (MOV(Q|L)const [c])) => (ROLQconst [int8((-c)&63)] x) +(RORL x (MOV(Q|L)const [c])) => (ROLLconst [int8((-c)&31)] x) +(RORW x (MOV(Q|L)const [c])) => (ROLWconst [int8((-c)&15)] x) +(RORB x (MOV(Q|L)const [c])) => (ROLBconst [int8((-c)&7) ] x) // Constant shift simplifications -((SHLQ|SHRQ|SARQ)const x [0]) -> x -((SHLL|SHRL|SARL)const x [0]) -> x -((SHRW|SARW)const x [0]) -> x -((SHRB|SARB)const x [0]) -> x -((ROLQ|ROLL|ROLW|ROLB)const x [0]) -> x +((SHLQ|SHRQ|SARQ)const x [0]) => x +((SHLL|SHRL|SARL)const x [0]) => x +((SHRW|SARW)const x [0]) => x +((SHRB|SARB)const x [0]) => x +((ROLQ|ROLL|ROLW|ROLB)const x [0]) => x // Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits) // because the x86 instructions are defined to use all 5 bits of the shift even // for the small shifts. I don't think we'll ever generate a weird shift (e.g. // (SHRW x (MOVLconst [24])), but just in case. 
-(CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c]) -(CMPQ (MOVQconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPQconst x [c])) -(CMPL x (MOVLconst [c])) -> (CMPLconst x [c]) -(CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c])) -(CMPW x (MOVLconst [c])) -> (CMPWconst x [int64(int16(c))]) -(CMPW (MOVLconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int16(c))])) -(CMPB x (MOVLconst [c])) -> (CMPBconst x [int64(int8(c))]) -(CMPB (MOVLconst [c]) x) -> (InvertFlags (CMPBconst x [int64(int8(c))])) +(CMPQ x (MOVQconst [c])) && is32Bit(c) => (CMPQconst x [int32(c)]) +(CMPQ (MOVQconst [c]) x) && is32Bit(c) => (InvertFlags (CMPQconst x [int32(c)])) +(CMPL x (MOVLconst [c])) => (CMPLconst x [c]) +(CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c])) +(CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)]) +(CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)])) +(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)]) +(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)])) // Canonicalize the order of arguments to comparisons - helps with CSE. -(CMP(Q|L|W|B) x y) && x.ID > y.ID -> (InvertFlags (CMP(Q|L|W|B) y x)) +(CMP(Q|L|W|B) x y) && x.ID > y.ID => (InvertFlags (CMP(Q|L|W|B) y x)) // Using MOVZX instead of AND is cheaper. -(AND(Q|L)const [ 0xFF] x) -> (MOVBQZX x) -(AND(Q|L)const [0xFFFF] x) -> (MOVWQZX x) -(ANDQconst [0xFFFFFFFF] x) -> (MOVLQZX x) +(AND(Q|L)const [ 0xFF] x) => (MOVBQZX x) +(AND(Q|L)const [0xFFFF] x) => (MOVWQZX x) +// This rule is currently invalid because 0xFFFFFFFF is not representable by a signed int32. +// Commenting out for now, because it also can't trigger because of the is32bit guard on the +// ANDQconst lowering-rule, above, prevents 0xFFFFFFFF from matching (for the same reason) +// Using an alternate form of this rule segfaults some binaries because of +// adverse interactions with other passes. 
+// (ANDQconst [0xFFFFFFFF] x) => (MOVLQZX x) // strength reduction // Assumes that the following costs from https://gmplib.org/~tege/x86-timing.pdf: @@ -928,98 +935,98 @@ // which can require a register-register move // to preserve the original value, // so it must be used with care. -(MUL(Q|L)const [-9] x) -> (NEG(Q|L) (LEA(Q|L)8 x x)) -(MUL(Q|L)const [-5] x) -> (NEG(Q|L) (LEA(Q|L)4 x x)) -(MUL(Q|L)const [-3] x) -> (NEG(Q|L) (LEA(Q|L)2 x x)) -(MUL(Q|L)const [-1] x) -> (NEG(Q|L) x) -(MUL(Q|L)const [ 0] _) -> (MOV(Q|L)const [0]) -(MUL(Q|L)const [ 1] x) -> x -(MUL(Q|L)const [ 3] x) -> (LEA(Q|L)2 x x) -(MUL(Q|L)const [ 5] x) -> (LEA(Q|L)4 x x) -(MUL(Q|L)const [ 7] x) -> (LEA(Q|L)2 x (LEA(Q|L)2 x x)) -(MUL(Q|L)const [ 9] x) -> (LEA(Q|L)8 x x) -(MUL(Q|L)const [11] x) -> (LEA(Q|L)2 x (LEA(Q|L)4 x x)) -(MUL(Q|L)const [13] x) -> (LEA(Q|L)4 x (LEA(Q|L)2 x x)) -(MUL(Q|L)const [19] x) -> (LEA(Q|L)2 x (LEA(Q|L)8 x x)) -(MUL(Q|L)const [21] x) -> (LEA(Q|L)4 x (LEA(Q|L)4 x x)) -(MUL(Q|L)const [25] x) -> (LEA(Q|L)8 x (LEA(Q|L)2 x x)) -(MUL(Q|L)const [27] x) -> (LEA(Q|L)8 (LEA(Q|L)2 x x) (LEA(Q|L)2 x x)) -(MUL(Q|L)const [37] x) -> (LEA(Q|L)4 x (LEA(Q|L)8 x x)) -(MUL(Q|L)const [41] x) -> (LEA(Q|L)8 x (LEA(Q|L)4 x x)) -(MUL(Q|L)const [45] x) -> (LEA(Q|L)8 (LEA(Q|L)4 x x) (LEA(Q|L)4 x x)) -(MUL(Q|L)const [73] x) -> (LEA(Q|L)8 x (LEA(Q|L)8 x x)) -(MUL(Q|L)const [81] x) -> (LEA(Q|L)8 (LEA(Q|L)8 x x) (LEA(Q|L)8 x x)) - -(MUL(Q|L)const [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUB(Q|L) (SHL(Q|L)const [log2(c+1)] x) x) -(MUL(Q|L)const [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (LEA(Q|L)1 (SHL(Q|L)const [log2(c-1)] x) x) -(MUL(Q|L)const [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEA(Q|L)2 (SHL(Q|L)const [log2(c-2)] x) x) -(MUL(Q|L)const [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEA(Q|L)4 (SHL(Q|L)const [log2(c-4)] x) x) -(MUL(Q|L)const [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEA(Q|L)8 (SHL(Q|L)const [log2(c-8)] x) x) -(MUL(Q|L)const [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> 
(SHL(Q|L)const [log2(c/3)] (LEA(Q|L)2 x x)) -(MUL(Q|L)const [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHL(Q|L)const [log2(c/5)] (LEA(Q|L)4 x x)) -(MUL(Q|L)const [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHL(Q|L)const [log2(c/9)] (LEA(Q|L)8 x x)) +(MUL(Q|L)const [-9] x) => (NEG(Q|L) (LEA(Q|L)8 x x)) +(MUL(Q|L)const [-5] x) => (NEG(Q|L) (LEA(Q|L)4 x x)) +(MUL(Q|L)const [-3] x) => (NEG(Q|L) (LEA(Q|L)2 x x)) +(MUL(Q|L)const [-1] x) => (NEG(Q|L) x) +(MUL(Q|L)const [ 0] _) => (MOV(Q|L)const [0]) +(MUL(Q|L)const [ 1] x) => x +(MUL(Q|L)const [ 3] x) => (LEA(Q|L)2 x x) +(MUL(Q|L)const [ 5] x) => (LEA(Q|L)4 x x) +(MUL(Q|L)const [ 7] x) => (LEA(Q|L)2 x (LEA(Q|L)2 x x)) +(MUL(Q|L)const [ 9] x) => (LEA(Q|L)8 x x) +(MUL(Q|L)const [11] x) => (LEA(Q|L)2 x (LEA(Q|L)4 x x)) +(MUL(Q|L)const [13] x) => (LEA(Q|L)4 x (LEA(Q|L)2 x x)) +(MUL(Q|L)const [19] x) => (LEA(Q|L)2 x (LEA(Q|L)8 x x)) +(MUL(Q|L)const [21] x) => (LEA(Q|L)4 x (LEA(Q|L)4 x x)) +(MUL(Q|L)const [25] x) => (LEA(Q|L)8 x (LEA(Q|L)2 x x)) +(MUL(Q|L)const [27] x) => (LEA(Q|L)8 (LEA(Q|L)2 x x) (LEA(Q|L)2 x x)) +(MUL(Q|L)const [37] x) => (LEA(Q|L)4 x (LEA(Q|L)8 x x)) +(MUL(Q|L)const [41] x) => (LEA(Q|L)8 x (LEA(Q|L)4 x x)) +(MUL(Q|L)const [45] x) => (LEA(Q|L)8 (LEA(Q|L)4 x x) (LEA(Q|L)4 x x)) +(MUL(Q|L)const [73] x) => (LEA(Q|L)8 x (LEA(Q|L)8 x x)) +(MUL(Q|L)const [81] x) => (LEA(Q|L)8 (LEA(Q|L)8 x x) (LEA(Q|L)8 x x)) + +(MUL(Q|L)const [c] x) && isPowerOfTwo(int64(c)+1) && c >= 15 => (SUB(Q|L) (SHL(Q|L)const [int8(log2(int64(c)+1))] x) x) +(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-1) && c >= 17 => (LEA(Q|L)1 (SHL(Q|L)const [int8(log32(c-1))] x) x) +(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-2) && c >= 34 => (LEA(Q|L)2 (SHL(Q|L)const [int8(log32(c-2))] x) x) +(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-4) && c >= 68 => (LEA(Q|L)4 (SHL(Q|L)const [int8(log32(c-4))] x) x) +(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-8) && c >= 136 => (LEA(Q|L)8 (SHL(Q|L)const [int8(log32(c-8))] x) x) +(MUL(Q|L)const [c] x) && c%3 == 0 && 
isPowerOfTwo32(c/3) => (SHL(Q|L)const [int8(log32(c/3))] (LEA(Q|L)2 x x)) +(MUL(Q|L)const [c] x) && c%5 == 0 && isPowerOfTwo32(c/5) => (SHL(Q|L)const [int8(log32(c/5))] (LEA(Q|L)4 x x)) +(MUL(Q|L)const [c] x) && c%9 == 0 && isPowerOfTwo32(c/9) => (SHL(Q|L)const [int8(log32(c/9))] (LEA(Q|L)8 x x)) // combine add/shift into LEAQ/LEAL -(ADD(L|Q) x (SHL(L|Q)const [3] y)) -> (LEA(L|Q)8 x y) -(ADD(L|Q) x (SHL(L|Q)const [2] y)) -> (LEA(L|Q)4 x y) -(ADD(L|Q) x (SHL(L|Q)const [1] y)) -> (LEA(L|Q)2 x y) -(ADD(L|Q) x (ADD(L|Q) y y)) -> (LEA(L|Q)2 x y) -(ADD(L|Q) x (ADD(L|Q) x y)) -> (LEA(L|Q)2 y x) +(ADD(L|Q) x (SHL(L|Q)const [3] y)) => (LEA(L|Q)8 x y) +(ADD(L|Q) x (SHL(L|Q)const [2] y)) => (LEA(L|Q)4 x y) +(ADD(L|Q) x (SHL(L|Q)const [1] y)) => (LEA(L|Q)2 x y) +(ADD(L|Q) x (ADD(L|Q) y y)) => (LEA(L|Q)2 x y) +(ADD(L|Q) x (ADD(L|Q) x y)) => (LEA(L|Q)2 y x) // combine ADDQ/ADDQconst into LEAQ1/LEAL1 -(ADD(Q|L)const [c] (ADD(Q|L) x y)) -> (LEA(Q|L)1 [c] x y) -(ADD(Q|L) (ADD(Q|L)const [c] x) y) -> (LEA(Q|L)1 [c] x y) -(ADD(Q|L)const [c] (SHL(Q|L)const [1] x)) -> (LEA(Q|L)1 [c] x x) +(ADD(Q|L)const [c] (ADD(Q|L) x y)) => (LEA(Q|L)1 [c] x y) +(ADD(Q|L) (ADD(Q|L)const [c] x) y) => (LEA(Q|L)1 [c] x y) +(ADD(Q|L)const [c] (SHL(Q|L)const [1] x)) => (LEA(Q|L)1 [c] x x) // fold ADDQ/ADDL into LEAQ/LEAL -(ADD(Q|L)const [c] (LEA(Q|L) [d] {s} x)) && is32Bit(c+d) -> (LEA(Q|L) [c+d] {s} x) -(LEA(Q|L) [c] {s} (ADD(Q|L)const [d] x)) && is32Bit(c+d) -> (LEA(Q|L) [c+d] {s} x) -(LEA(Q|L) [c] {s} (ADD(Q|L) x y)) && x.Op != OpSB && y.Op != OpSB -> (LEA(Q|L)1 [c] {s} x y) -(ADD(Q|L) x (LEA(Q|L) [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEA(Q|L)1 [c] {s} x y) +(ADD(Q|L)const [c] (LEA(Q|L) [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x) +(LEA(Q|L) [c] {s} (ADD(Q|L)const [d] x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x) +(LEA(Q|L) [c] {s} (ADD(Q|L) x y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y) +(ADD(Q|L) x (LEA(Q|L) [c] {s} y)) && x.Op != OpSB && 
y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y) // fold ADDQconst/ADDLconst into LEAQx/LEALx -(ADD(Q|L)const [c] (LEA(Q|L)1 [d] {s} x y)) && is32Bit(c+d) -> (LEA(Q|L)1 [c+d] {s} x y) -(ADD(Q|L)const [c] (LEA(Q|L)2 [d] {s} x y)) && is32Bit(c+d) -> (LEA(Q|L)2 [c+d] {s} x y) -(ADD(Q|L)const [c] (LEA(Q|L)4 [d] {s} x y)) && is32Bit(c+d) -> (LEA(Q|L)4 [c+d] {s} x y) -(ADD(Q|L)const [c] (LEA(Q|L)8 [d] {s} x y)) && is32Bit(c+d) -> (LEA(Q|L)8 [c+d] {s} x y) -(LEA(Q|L)1 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEA(Q|L)1 [c+d] {s} x y) -(LEA(Q|L)2 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEA(Q|L)2 [c+d] {s} x y) -(LEA(Q|L)2 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEA(Q|L)2 [c+2*d] {s} x y) -(LEA(Q|L)4 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEA(Q|L)4 [c+d] {s} x y) -(LEA(Q|L)4 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(c+4*d) && y.Op != OpSB -> (LEA(Q|L)4 [c+4*d] {s} x y) -(LEA(Q|L)8 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEA(Q|L)8 [c+d] {s} x y) -(LEA(Q|L)8 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(c+8*d) && y.Op != OpSB -> (LEA(Q|L)8 [c+8*d] {s} x y) +(ADD(Q|L)const [c] (LEA(Q|L)1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)1 [c+d] {s} x y) +(ADD(Q|L)const [c] (LEA(Q|L)2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)2 [c+d] {s} x y) +(ADD(Q|L)const [c] (LEA(Q|L)4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)4 [c+d] {s} x y) +(ADD(Q|L)const [c] (LEA(Q|L)8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)8 [c+d] {s} x y) +(LEA(Q|L)1 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)1 [c+d] {s} x y) +(LEA(Q|L)2 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)2 [c+d] {s} x y) +(LEA(Q|L)2 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEA(Q|L)2 [c+2*d] {s} x y) +(LEA(Q|L)4 [c] {s} 
(ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)4 [c+d] {s} x y) +(LEA(Q|L)4 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEA(Q|L)4 [c+4*d] {s} x y) +(LEA(Q|L)8 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)8 [c+d] {s} x y) +(LEA(Q|L)8 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEA(Q|L)8 [c+8*d] {s} x y) // fold shifts into LEAQx/LEALx -(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [1] y)) -> (LEA(Q|L)2 [c] {s} x y) -(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [2] y)) -> (LEA(Q|L)4 [c] {s} x y) -(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [3] y)) -> (LEA(Q|L)8 [c] {s} x y) -(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [1] y)) -> (LEA(Q|L)4 [c] {s} x y) -(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [2] y)) -> (LEA(Q|L)8 [c] {s} x y) -(LEA(Q|L)4 [c] {s} x (SHL(Q|L)const [1] y)) -> (LEA(Q|L)8 [c] {s} x y) +(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)2 [c] {s} x y) +(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)4 [c] {s} x y) +(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [3] y)) => (LEA(Q|L)8 [c] {s} x y) +(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)4 [c] {s} x y) +(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)8 [c] {s} x y) +(LEA(Q|L)4 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)8 [c] {s} x y) // reverse ordering of compare instruction -(SETL (InvertFlags x)) -> (SETG x) -(SETG (InvertFlags x)) -> (SETL x) -(SETB (InvertFlags x)) -> (SETA x) -(SETA (InvertFlags x)) -> (SETB x) -(SETLE (InvertFlags x)) -> (SETGE x) -(SETGE (InvertFlags x)) -> (SETLE x) -(SETBE (InvertFlags x)) -> (SETAE x) -(SETAE (InvertFlags x)) -> (SETBE x) -(SETEQ (InvertFlags x)) -> (SETEQ x) -(SETNE (InvertFlags x)) -> (SETNE x) - -(SETLstore [off] {sym} ptr (InvertFlags x) mem) -> (SETGstore [off] {sym} ptr x mem) -(SETGstore [off] {sym} ptr (InvertFlags x) mem) -> (SETLstore [off] {sym} ptr x mem) -(SETBstore [off] {sym} ptr (InvertFlags x) 
mem) -> (SETAstore [off] {sym} ptr x mem) -(SETAstore [off] {sym} ptr (InvertFlags x) mem) -> (SETBstore [off] {sym} ptr x mem) -(SETLEstore [off] {sym} ptr (InvertFlags x) mem) -> (SETGEstore [off] {sym} ptr x mem) -(SETGEstore [off] {sym} ptr (InvertFlags x) mem) -> (SETLEstore [off] {sym} ptr x mem) -(SETBEstore [off] {sym} ptr (InvertFlags x) mem) -> (SETAEstore [off] {sym} ptr x mem) -(SETAEstore [off] {sym} ptr (InvertFlags x) mem) -> (SETBEstore [off] {sym} ptr x mem) -(SETEQstore [off] {sym} ptr (InvertFlags x) mem) -> (SETEQstore [off] {sym} ptr x mem) -(SETNEstore [off] {sym} ptr (InvertFlags x) mem) -> (SETNEstore [off] {sym} ptr x mem) +(SETL (InvertFlags x)) => (SETG x) +(SETG (InvertFlags x)) => (SETL x) +(SETB (InvertFlags x)) => (SETA x) +(SETA (InvertFlags x)) => (SETB x) +(SETLE (InvertFlags x)) => (SETGE x) +(SETGE (InvertFlags x)) => (SETLE x) +(SETBE (InvertFlags x)) => (SETAE x) +(SETAE (InvertFlags x)) => (SETBE x) +(SETEQ (InvertFlags x)) => (SETEQ x) +(SETNE (InvertFlags x)) => (SETNE x) + +(SETLstore [off] {sym} ptr (InvertFlags x) mem) => (SETGstore [off] {sym} ptr x mem) +(SETGstore [off] {sym} ptr (InvertFlags x) mem) => (SETLstore [off] {sym} ptr x mem) +(SETBstore [off] {sym} ptr (InvertFlags x) mem) => (SETAstore [off] {sym} ptr x mem) +(SETAstore [off] {sym} ptr (InvertFlags x) mem) => (SETBstore [off] {sym} ptr x mem) +(SETLEstore [off] {sym} ptr (InvertFlags x) mem) => (SETGEstore [off] {sym} ptr x mem) +(SETGEstore [off] {sym} ptr (InvertFlags x) mem) => (SETLEstore [off] {sym} ptr x mem) +(SETBEstore [off] {sym} ptr (InvertFlags x) mem) => (SETAEstore [off] {sym} ptr x mem) +(SETAEstore [off] {sym} ptr (InvertFlags x) mem) => (SETBEstore [off] {sym} ptr x mem) +(SETEQstore [off] {sym} ptr (InvertFlags x) mem) => (SETEQstore [off] {sym} ptr x mem) +(SETNEstore [off] {sym} ptr (InvertFlags x) mem) => (SETNEstore [off] {sym} ptr x mem) // sign extended loads // Note: The combined instruction must end up in the same block @@ 
-1029,100 +1036,100 @@ // Make sure we don't combine these ops if the load has another use. // This prevents a single load from being split into multiple loads // which then might return different values. See test/atomicload.go. -(MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload [off] {sym} ptr mem) -(MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload [off] {sym} ptr mem) -(MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload [off] {sym} ptr mem) -(MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload [off] {sym} ptr mem) -(MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload [off] {sym} ptr mem) -(MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload [off] {sym} ptr mem) -(MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload [off] {sym} ptr mem) -(MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload [off] {sym} ptr mem) -(MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload [off] {sym} ptr mem) -(MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload [off] {sym} ptr mem) -(MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload [off] {sym} ptr mem) -(MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload [off] {sym} ptr mem) -(MOVWQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload [off] {sym} ptr mem) -(MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload [off] {sym} ptr mem) -(MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLQSXload [off] {sym} ptr mem) 
-(MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLQSXload [off] {sym} ptr mem) -(MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload [off] {sym} ptr mem) -(MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload [off] {sym} ptr mem) - -(MOVLQZX x) && zeroUpper32Bits(x,3) -> x -(MOVWQZX x) && zeroUpper48Bits(x,3) -> x -(MOVBQZX x) && zeroUpper56Bits(x,3) -> x +(MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload [off] {sym} ptr mem) +(MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload [off] {sym} ptr mem) +(MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload [off] {sym} ptr mem) +(MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload [off] {sym} ptr mem) +(MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload [off] {sym} ptr mem) +(MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload [off] {sym} ptr mem) +(MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload [off] {sym} ptr mem) +(MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload [off] {sym} ptr mem) +(MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload [off] {sym} ptr mem) +(MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload [off] {sym} ptr mem) +(MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload [off] {sym} ptr mem) +(MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload [off] {sym} ptr mem) +(MOVWQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload [off] {sym} 
ptr mem) +(MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload [off] {sym} ptr mem) +(MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload [off] {sym} ptr mem) +(MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload [off] {sym} ptr mem) +(MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload [off] {sym} ptr mem) +(MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload [off] {sym} ptr mem) + +(MOVLQZX x) && zeroUpper32Bits(x,3) => x +(MOVWQZX x) && zeroUpper48Bits(x,3) => x +(MOVBQZX x) && zeroUpper56Bits(x,3) => x // replace load from same location as preceding store with zero/sign extension (or copy in case of full width) -(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBQZX x) -(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWQZX x) -(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVLQZX x) -(MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x -(MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBQSX x) -(MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWQSX x) -(MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVLQSX x) +(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQZX x) +(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, 
ptr2) => (MOVWQZX x) +(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQZX x) +(MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x +(MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQSX x) +(MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQSX x) +(MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQSX x) // Fold extensions and ANDs together. -(MOVBQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x) -(MOVWQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xffff] x) -(MOVLQZX (ANDLconst [c] x)) -> (ANDLconst [c] x) -(MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x) -(MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x) -(MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDLconst [c & 0x7fffffff] x) +(MOVBQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x) +(MOVWQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x) +(MOVLQZX (ANDLconst [c] x)) => (ANDLconst [c] x) +(MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x) +(MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x) +(MOVLQSX (ANDLconst [c] x)) && uint32(c) & 0x80000000 == 0 => (ANDLconst [c & 0x7fffffff] x) // Don't extend before storing -(MOVLstore [off] {sym} ptr (MOVLQSX x) mem) -> (MOVLstore [off] {sym} ptr x mem) -(MOVWstore [off] {sym} ptr (MOVWQSX x) mem) -> (MOVWstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr (MOVBQSX x) mem) -> (MOVBstore [off] {sym} ptr x mem) -(MOVLstore [off] {sym} ptr (MOVLQZX x) mem) -> (MOVLstore [off] {sym} ptr x mem) -(MOVWstore [off] {sym} ptr (MOVWQZX x) mem) -> (MOVWstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} 
ptr (MOVBQZX x) mem) -> (MOVBstore [off] {sym} ptr x mem) +(MOVLstore [off] {sym} ptr (MOVLQSX x) mem) => (MOVLstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWQSX x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBQSX x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVLstore [off] {sym} ptr (MOVLQZX x) mem) => (MOVLstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWQZX x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBQZX x) mem) => (MOVBstore [off] {sym} ptr x mem) // fold constants into memory operations // Note that this is not always a good idea because if not all the uses of // the ADDQconst get eliminated, we still have to compute the ADDQconst and we now // have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one. // Nevertheless, let's do it! -(MOV(Q|L|W|B|SS|SD|O)load [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> +(MOV(Q|L|W|B|SS|SD|O)load [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOV(Q|L|W|B|SS|SD|O)load [off1+off2] {sym} ptr mem) -(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> +(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {sym} ptr val mem) -(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) -> +(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {sym} base val mem) -((ADD|SUB|AND|OR|XOR)Qload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) -> +((ADD|SUB|AND|OR|XOR)Qload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => ((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {sym} val base mem) -((ADD|SUB|AND|OR|XOR)Lload [off1] {sym} 
val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) -> +((ADD|SUB|AND|OR|XOR)Lload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => ((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {sym} val base mem) -(CMP(Q|L|W|B)load [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) -> +(CMP(Q|L|W|B)load [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => (CMP(Q|L|W|B)load [off1+off2] {sym} base val mem) -(CMP(Q|L|W|B)constload [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) -> - (CMP(Q|L|W|B)constload [ValAndOff(valoff1).add(off2)] {sym} base mem) +(CMP(Q|L|W|B)constload [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) => + (CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) -((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) -> +((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => ((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem) -((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) -> +((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => ((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem) -((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) -> - ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) -((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) -> - ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) -((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) -> +((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [valoff1] {sym} (ADDQconst [off2] 
base) mem) && ValAndOff(valoff1).canAdd32(off2) => + ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) +((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) => + ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) +((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {sym} base val mem) -((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) -> +((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {sym} base val mem) // Fold constants into stores. -(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) -> - (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) -(MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) && validOff(off) -> - (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) -(MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) && validOff(off) -> - (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) -(MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) && validOff(off) -> - (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) +(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validVal(c) => + (MOVQstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) +(MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) => + (MOVLstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) => + (MOVWstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem) +(MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) => + (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem) // Fold 
address offsets into constant stores. -(MOV(Q|L|W|B)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) -> - (MOV(Q|L|W|B)storeconst [ValAndOff(sc).add(off)] {s} ptr mem) +(MOV(Q|L|W|B)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd32(off) => + (MOV(Q|L|W|B)storeconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) // We need to fold LEAQ into the MOVx ops so that the live variable analysis knows // what variables are being read/written by the ops. diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index cda9df56f4..89d64052fe 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1245,9 +1245,9 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { if v_1.Op != OpAMD64MOVLconst { continue } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64ADDLconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg(x) return true } @@ -1261,17 +1261,17 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { if v_0.Op != OpAMD64SHLLconst { continue } - c := v_0.AuxInt + c := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if v_1.Op != OpAMD64SHRLconst { continue } - d := v_1.AuxInt + d := auxIntToInt8(v_1.AuxInt) if x != v_1.Args[0] || !(d == 32-c) { continue } v.reset(OpAMD64ROLLconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -1286,17 +1286,17 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { if v_0.Op != OpAMD64SHLLconst { continue } - c := v_0.AuxInt + c := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if v_1.Op != OpAMD64SHRWconst { continue } - d := v_1.AuxInt + d := auxIntToInt8(v_1.AuxInt) if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) { continue } v.reset(OpAMD64ROLWconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -1311,17 +1311,17 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { if v_0.Op != OpAMD64SHLLconst { continue } 
- c := v_0.AuxInt + c := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if v_1.Op != OpAMD64SHRBconst { continue } - d := v_1.AuxInt + d := auxIntToInt8(v_1.AuxInt) if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { continue } v.reset(OpAMD64ROLBconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -1332,7 +1332,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 3 { + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 { continue } y := v_1.Args[0] @@ -1347,7 +1347,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 2 { + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 { continue } y := v_1.Args[0] @@ -1362,7 +1362,7 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 { + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 { continue } y := v_1.Args[0] @@ -1420,11 +1420,11 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { if v_0.Op != OpAMD64ADDLconst { continue } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] y := v_1 v.reset(OpAMD64LEAL1) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg2(x, y) return true } @@ -1439,15 +1439,15 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { if v_1.Op != OpAMD64LEAL { continue } - c := v_1.AuxInt - s := v_1.Aux + c := auxIntToInt32(v_1.AuxInt) + s := auxToSym(v_1.Aux) y := v_1.Args[0] if !(x.Op != OpSB && y.Op != OpSB) { continue } v.reset(OpAMD64LEAL1) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -1500,131 +1500,131 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { // 
match: (ADDLconst [c] (ADDL x y)) // result: (LEAL1 [c] x y) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64ADDL { break } y := v_0.Args[1] x := v_0.Args[0] v.reset(OpAMD64LEAL1) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg2(x, y) return true } // match: (ADDLconst [c] (SHLLconst [1] x)) // result: (LEAL1 [c] x x) for { - c := v.AuxInt - if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 1 { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 { break } x := v_0.Args[0] v.reset(OpAMD64LEAL1) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg2(x, x) return true } // match: (ADDLconst [c] (LEAL [d] {s} x)) - // cond: is32Bit(c+d) + // cond: is32Bit(int64(c)+int64(d)) // result: (LEAL [c+d] {s} x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64LEAL { break } - d := v_0.AuxInt - s := v_0.Aux + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) x := v_0.Args[0] - if !(is32Bit(c + d)) { + if !(is32Bit(int64(c) + int64(d))) { break } v.reset(OpAMD64LEAL) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg(x) return true } // match: (ADDLconst [c] (LEAL1 [d] {s} x y)) - // cond: is32Bit(c+d) + // cond: is32Bit(int64(c)+int64(d)) // result: (LEAL1 [c+d] {s} x y) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64LEAL1 { break } - d := v_0.AuxInt - s := v_0.Aux + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) y := v_0.Args[1] x := v_0.Args[0] - if !(is32Bit(c + d)) { + if !(is32Bit(int64(c) + int64(d))) { break } v.reset(OpAMD64LEAL1) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL2 [d] {s} x y)) - // cond: is32Bit(c+d) + // cond: is32Bit(int64(c)+int64(d)) // result: (LEAL2 [c+d] {s} x y) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64LEAL2 { break } - d := v_0.AuxInt - 
s := v_0.Aux + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) y := v_0.Args[1] x := v_0.Args[0] - if !(is32Bit(c + d)) { + if !(is32Bit(int64(c) + int64(d))) { break } v.reset(OpAMD64LEAL2) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL4 [d] {s} x y)) - // cond: is32Bit(c+d) + // cond: is32Bit(int64(c)+int64(d)) // result: (LEAL4 [c+d] {s} x y) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64LEAL4 { break } - d := v_0.AuxInt - s := v_0.Aux + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) y := v_0.Args[1] x := v_0.Args[0] - if !(is32Bit(c + d)) { + if !(is32Bit(int64(c) + int64(d))) { break } v.reset(OpAMD64LEAL4) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (ADDLconst [c] (LEAL8 [d] {s} x y)) - // cond: is32Bit(c+d) + // cond: is32Bit(int64(c)+int64(d)) // result: (LEAL8 [c+d] {s} x y) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64LEAL8 { break } - d := v_0.AuxInt - s := v_0.Aux + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) y := v_0.Args[1] x := v_0.Args[0] - if !(is32Bit(c + d)) { + if !(is32Bit(int64(c) + int64(d))) { break } v.reset(OpAMD64LEAL8) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -1685,23 +1685,23 @@ func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) 
if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64ADDLconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -1736,24 +1736,24 @@ func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (ADDLload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64ADDLload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -1807,24 +1807,24 @@ func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (ADDLmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64ADDLmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = 
symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -1858,39 +1858,35 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v_0 := v.Args[0] // match: (ADDQ x (MOVQconst [c])) // cond: is32Bit(c) - // result: (ADDQconst [c] x) + // result: (ADDQconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpAMD64MOVQconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { continue } v.reset(OpAMD64ADDQconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } break } // match: (ADDQ x (MOVLconst [c])) - // cond: is32Bit(c) - // result: (ADDQconst [int64(int32(c))] x) + // result: (ADDQconst [c] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpAMD64MOVLconst { continue } - c := v_1.AuxInt - if !(is32Bit(c)) { - continue - } + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64ADDQconst) - v.AuxInt = int64(int32(c)) + v.AuxInt = int32ToAuxInt(c) v.AddArg(x) return true } @@ -1904,17 +1900,17 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { if v_0.Op != OpAMD64SHLQconst { continue } - c := v_0.AuxInt + c := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if v_1.Op != OpAMD64SHRQconst { continue } - d := v_1.AuxInt + d := auxIntToInt8(v_1.AuxInt) if x != v_1.Args[0] || !(d == 64-c) { continue } v.reset(OpAMD64ROLQconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -1925,7 +1921,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 { continue } y := v_1.Args[0] @@ -1940,7 +1936,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { + if v_1.Op != OpAMD64SHLQconst || 
auxIntToInt8(v_1.AuxInt) != 2 { continue } y := v_1.Args[0] @@ -1955,7 +1951,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 { continue } y := v_1.Args[0] @@ -2013,11 +2009,11 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { if v_0.Op != OpAMD64ADDQconst { continue } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] y := v_1 v.reset(OpAMD64LEAQ1) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg2(x, y) return true } @@ -2032,15 +2028,15 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { if v_1.Op != OpAMD64LEAQ { continue } - c := v_1.AuxInt - s := v_1.Aux + c := auxIntToInt32(v_1.AuxInt) + s := auxToSym(v_1.Aux) y := v_1.Args[0] if !(x.Op != OpSB && y.Op != OpSB) { continue } v.reset(OpAMD64LEAQ1) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -2118,131 +2114,131 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { // match: (ADDQconst [c] (ADDQ x y)) // result: (LEAQ1 [c] x y) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64ADDQ { break } y := v_0.Args[1] x := v_0.Args[0] v.reset(OpAMD64LEAQ1) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg2(x, y) return true } // match: (ADDQconst [c] (SHLQconst [1] x)) // result: (LEAQ1 [c] x x) for { - c := v.AuxInt - if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 1 { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 { break } x := v_0.Args[0] v.reset(OpAMD64LEAQ1) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg2(x, x) return true } // match: (ADDQconst [c] (LEAQ [d] {s} x)) - // cond: is32Bit(c+d) + // cond: is32Bit(int64(c)+int64(d)) // result: (LEAQ [c+d] {s} x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64LEAQ { 
break } - d := v_0.AuxInt - s := v_0.Aux + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) x := v_0.Args[0] - if !(is32Bit(c + d)) { + if !(is32Bit(int64(c) + int64(d))) { break } v.reset(OpAMD64LEAQ) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg(x) return true } // match: (ADDQconst [c] (LEAQ1 [d] {s} x y)) - // cond: is32Bit(c+d) + // cond: is32Bit(int64(c)+int64(d)) // result: (LEAQ1 [c+d] {s} x y) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64LEAQ1 { break } - d := v_0.AuxInt - s := v_0.Aux + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) y := v_0.Args[1] x := v_0.Args[0] - if !(is32Bit(c + d)) { + if !(is32Bit(int64(c) + int64(d))) { break } v.reset(OpAMD64LEAQ1) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (ADDQconst [c] (LEAQ2 [d] {s} x y)) - // cond: is32Bit(c+d) + // cond: is32Bit(int64(c)+int64(d)) // result: (LEAQ2 [c+d] {s} x y) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64LEAQ2 { break } - d := v_0.AuxInt - s := v_0.Aux + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) y := v_0.Args[1] x := v_0.Args[0] - if !(is32Bit(c + d)) { + if !(is32Bit(int64(c) + int64(d))) { break } v.reset(OpAMD64LEAQ2) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (ADDQconst [c] (LEAQ4 [d] {s} x y)) - // cond: is32Bit(c+d) + // cond: is32Bit(int64(c)+int64(d)) // result: (LEAQ4 [c+d] {s} x y) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64LEAQ4 { break } - d := v_0.AuxInt - s := v_0.Aux + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) y := v_0.Args[1] x := v_0.Args[0] - if !(is32Bit(c + d)) { + if !(is32Bit(int64(c) + int64(d))) { break } v.reset(OpAMD64LEAQ4) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) 
v.AddArg2(x, y) return true } // match: (ADDQconst [c] (LEAQ8 [d] {s} x y)) - // cond: is32Bit(c+d) + // cond: is32Bit(int64(c)+int64(d)) // result: (LEAQ8 [c+d] {s} x y) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64LEAQ8 { break } - d := v_0.AuxInt - s := v_0.Aux + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) y := v_0.Args[1] x := v_0.Args[0] - if !(is32Bit(c + d)) { + if !(is32Bit(int64(c) + int64(d))) { break } v.reset(OpAMD64LEAQ8) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -2305,23 +2301,23 @@ func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64ADDQconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -2356,24 +2352,24 @@ func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (ADDQload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) 
val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64ADDQload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -2427,24 +2423,24 @@ func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (ADDQmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64ADDQmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -2510,24 +2506,24 @@ func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (ADDSDload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64ADDSDload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return 
true } @@ -2613,24 +2609,24 @@ func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (ADDSSload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64ADDSSload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -2695,7 +2691,7 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool { } y := v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { + if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 { continue } x := v_1 @@ -2706,20 +2702,20 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool { break } // match: (ANDL (MOVLconst [c]) x) - // cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 - // result: (BTRLconst [log2uint32(^c)] x) + // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128 + // result: (BTRLconst [int8(log32(^c))] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVLconst { continue } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_1 - if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128) { + if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) { continue } v.reset(OpAMD64BTRLconst) - v.AuxInt = log2uint32(^c) + v.AuxInt = int8ToAuxInt(int8(log32(^c))) v.AddArg(x) return true } @@ -2733,9 +2729,9 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool { if v_1.Op != OpAMD64MOVLconst { continue } - c := v_1.AuxInt + c := 
auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64ANDLconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg(x) return true } @@ -2781,16 +2777,16 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool { func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool { v_0 := v.Args[0] // match: (ANDLconst [c] x) - // cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 - // result: (BTRLconst [log2uint32(^c)] x) + // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128 + // result: (BTRLconst [int8(log32(^c))] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128) { + if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) { break } v.reset(OpAMD64BTRLconst) - v.AuxInt = log2uint32(^c) + v.AuxInt = int8ToAuxInt(int8(log32(^c))) v.AddArg(x) return true } @@ -2825,7 +2821,7 @@ func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool { // match: (ANDLconst [ 0xFF] x) // result: (MOVBQZX x) for { - if v.AuxInt != 0xFF { + if auxIntToInt32(v.AuxInt) != 0xFF { break } x := v_0 @@ -2836,7 +2832,7 @@ func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool { // match: (ANDLconst [0xFFFF] x) // result: (MOVWQZX x) for { - if v.AuxInt != 0xFFFF { + if auxIntToInt32(v.AuxInt) != 0xFFFF { break } x := v_0 @@ -2886,23 +2882,23 @@ func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if 
!(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64ANDLconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -2937,24 +2933,24 @@ func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (ANDLload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64ANDLload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -3008,24 +3004,24 @@ func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (ANDLmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64ANDLmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -3070,7 +3066,7 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool { } y := v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] - if 
v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { + if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { continue } x := v_1 @@ -3082,19 +3078,19 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool { } // match: (ANDQ (MOVQconst [c]) x) // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 - // result: (BTRQconst [log2(^c)] x) + // result: (BTRQconst [int8(log2(^c))] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVQconst { continue } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) { continue } v.reset(OpAMD64BTRQconst) - v.AuxInt = log2(^c) + v.AuxInt = int8ToAuxInt(int8(log2(^c))) v.AddArg(x) return true } @@ -3102,19 +3098,19 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool { } // match: (ANDQ x (MOVQconst [c])) // cond: is32Bit(c) - // result: (ANDQconst [c] x) + // result: (ANDQconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpAMD64MOVQconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { continue } v.reset(OpAMD64ANDQconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -3160,16 +3156,16 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool { func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool { v_0 := v.Args[0] // match: (ANDQconst [c] x) - // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 - // result: (BTRQconst [log2(^c)] x) + // cond: isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128 + // result: (BTRQconst [int8(log32(^c))] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) { + if !(isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128) { break } v.reset(OpAMD64BTRQconst) - v.AuxInt = log2(^c) + v.AuxInt = int8ToAuxInt(int8(log32(^c))) v.AddArg(x) return true } @@ -3208,7 +3204,7 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v 
*Value) bool { // match: (ANDQconst [ 0xFF] x) // result: (MOVBQZX x) for { - if v.AuxInt != 0xFF { + if auxIntToInt32(v.AuxInt) != 0xFF { break } x := v_0 @@ -3219,7 +3215,7 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool { // match: (ANDQconst [0xFFFF] x) // result: (MOVWQZX x) for { - if v.AuxInt != 0xFFFF { + if auxIntToInt32(v.AuxInt) != 0xFFFF { break } x := v_0 @@ -3227,17 +3223,6 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool { v.AddArg(x) return true } - // match: (ANDQconst [0xFFFFFFFF] x) - // result: (MOVLQZX x) - for { - if v.AuxInt != 0xFFFFFFFF { - break - } - x := v_0 - v.reset(OpAMD64MOVLQZX) - v.AddArg(x) - return true - } // match: (ANDQconst [0] _) // result: (MOVQconst [0]) for { @@ -3276,23 +3261,23 @@ func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64ANDQconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -3327,24 +3312,24 @@ func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: 
(ANDQload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64ANDQload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -3398,24 +3383,24 @@ func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (ANDQmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64ANDQmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -3541,23 +3526,23 @@ func rewriteValueAMD64_OpAMD64BTCLconstmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BTCLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (BTCLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := 
v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64BTCLconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -3590,24 +3575,24 @@ func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BTCLmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (BTCLmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64BTCLmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -3692,23 +3677,23 @@ func rewriteValueAMD64_OpAMD64BTCQconstmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BTCQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (BTCQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64BTCQconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym 
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -3741,24 +3726,24 @@ func rewriteValueAMD64_OpAMD64BTCQmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BTCQmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (BTCQmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64BTCQmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -3793,17 +3778,17 @@ func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool { // cond: (c+d)<64 // result: (BTQconst [c+d] x) for { - c := v.AuxInt + c := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64SHRQconst { break } - d := v_0.AuxInt + d := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if !((c + d) < 64) { break } v.reset(OpAMD64BTQconst) - v.AuxInt = c + d + v.AuxInt = int8ToAuxInt(c + d) v.AddArg(x) return true } @@ -3811,24 +3796,24 @@ func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool { // cond: c>d // result: (BTLconst [c-d] x) for { - c := v.AuxInt + c := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64SHLQconst { break } - d := v_0.AuxInt + d := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if !(c > d) { break } v.reset(OpAMD64BTLconst) - v.AuxInt = c - d + v.AuxInt = int8ToAuxInt(c - d) v.AddArg(x) return true } // match: (BTLconst [0] s:(SHRQ x y)) // result: (BTQ y x) for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } s := v_0 @@ -3845,17 +3830,17 @@ func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool { // cond: (c+d)<32 // 
result: (BTLconst [c+d] x) for { - c := v.AuxInt + c := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64SHRLconst { break } - d := v_0.AuxInt + d := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if !((c + d) < 32) { break } v.reset(OpAMD64BTLconst) - v.AuxInt = c + d + v.AuxInt = int8ToAuxInt(c + d) v.AddArg(x) return true } @@ -3863,24 +3848,24 @@ func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool { // cond: c>d // result: (BTLconst [c-d] x) for { - c := v.AuxInt + c := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64SHLLconst { break } - d := v_0.AuxInt + d := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if !(c > d) { break } v.reset(OpAMD64BTLconst) - v.AuxInt = c - d + v.AuxInt = int8ToAuxInt(c - d) v.AddArg(x) return true } // match: (BTLconst [0] s:(SHRL x y)) // result: (BTL y x) for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } s := v_0 @@ -3901,17 +3886,17 @@ func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool { // cond: (c+d)<64 // result: (BTQconst [c+d] x) for { - c := v.AuxInt + c := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64SHRQconst { break } - d := v_0.AuxInt + d := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if !((c + d) < 64) { break } v.reset(OpAMD64BTQconst) - v.AuxInt = c + d + v.AuxInt = int8ToAuxInt(c + d) v.AddArg(x) return true } @@ -3919,24 +3904,24 @@ func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool { // cond: c>d // result: (BTQconst [c-d] x) for { - c := v.AuxInt + c := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64SHLQconst { break } - d := v_0.AuxInt + d := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if !(c > d) { break } v.reset(OpAMD64BTQconst) - v.AuxInt = c - d + v.AuxInt = int8ToAuxInt(c - d) v.AddArg(x) return true } // match: (BTQconst [0] s:(SHRQ x y)) // result: (BTQ y x) for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } s := v_0 @@ -3956,26 +3941,26 @@ func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool { // match: (BTRLconst [c] (BTSLconst [c] x)) // result: (BTRLconst [c] x) for { - c := 
v.AuxInt - if v_0.Op != OpAMD64BTSLconst || v_0.AuxInt != c { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64BTSLconst || auxIntToInt8(v_0.AuxInt) != c { break } x := v_0.Args[0] v.reset(OpAMD64BTRLconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } // match: (BTRLconst [c] (BTCLconst [c] x)) // result: (BTRLconst [c] x) for { - c := v.AuxInt - if v_0.Op != OpAMD64BTCLconst || v_0.AuxInt != c { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c { break } x := v_0.Args[0] v.reset(OpAMD64BTRLconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -4025,23 +4010,23 @@ func rewriteValueAMD64_OpAMD64BTRLconstmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BTRLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (BTRLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64BTRLconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -4074,24 +4059,24 @@ func rewriteValueAMD64_OpAMD64BTRLmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BTRLmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (BTRLmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64BTRLmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -4125,26 +4110,26 @@ func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool { // match: (BTRQconst [c] (BTSQconst [c] x)) // result: (BTRQconst [c] x) for { - c := v.AuxInt - if v_0.Op != OpAMD64BTSQconst || v_0.AuxInt != c { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c { break } x := v_0.Args[0] v.reset(OpAMD64BTRQconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } // match: (BTRQconst [c] (BTCQconst [c] x)) // result: (BTRQconst [c] x) for { - c := v.AuxInt - if v_0.Op != OpAMD64BTCQconst || v_0.AuxInt != c { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c { break } x := v_0.Args[0] v.reset(OpAMD64BTRQconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -4202,23 +4187,23 @@ func rewriteValueAMD64_OpAMD64BTRQconstmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BTRQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (BTRQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if 
!(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64BTRQconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -4251,24 +4236,24 @@ func rewriteValueAMD64_OpAMD64BTRQmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BTRQmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (BTRQmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64BTRQmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -4302,26 +4287,26 @@ func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool { // match: (BTSLconst [c] (BTRLconst [c] x)) // result: (BTSLconst [c] x) for { - c := v.AuxInt - if v_0.Op != OpAMD64BTRLconst || v_0.AuxInt != c { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64BTRLconst || auxIntToInt8(v_0.AuxInt) != c { break } x := v_0.Args[0] v.reset(OpAMD64BTSLconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } // match: (BTSLconst [c] (BTCLconst [c] x)) // result: (BTSLconst [c] x) for { - c := v.AuxInt - if v_0.Op != OpAMD64BTCLconst || v_0.AuxInt != c { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c { break } x := v_0.Args[0] v.reset(OpAMD64BTSLconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -4371,23 +4356,23 @@ func rewriteValueAMD64_OpAMD64BTSLconstmodify(v *Value) bool { v_1 := v.Args[1] v_0 := 
v.Args[0] // match: (BTSLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (BTSLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64BTSLconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -4420,24 +4405,24 @@ func rewriteValueAMD64_OpAMD64BTSLmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BTSLmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (BTSLmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64BTSLmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -4471,26 +4456,26 @@ func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool { // match: (BTSQconst [c] (BTRQconst [c] x)) // result: (BTSQconst [c] x) for { - c := v.AuxInt - if v_0.Op != OpAMD64BTRQconst || v_0.AuxInt != c { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64BTRQconst || 
auxIntToInt8(v_0.AuxInt) != c { break } x := v_0.Args[0] v.reset(OpAMD64BTSQconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } // match: (BTSQconst [c] (BTCQconst [c] x)) // result: (BTSQconst [c] x) for { - c := v.AuxInt - if v_0.Op != OpAMD64BTCQconst || v_0.AuxInt != c { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c { break } x := v_0.Args[0] v.reset(OpAMD64BTSQconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -4548,23 +4533,23 @@ func rewriteValueAMD64_OpAMD64BTSQconstmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BTSQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (BTSQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64BTSQconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -4597,24 +4582,24 @@ func rewriteValueAMD64_OpAMD64BTSQmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BTSQmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (BTSQmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := 
auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64BTSQmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -6741,29 +6726,29 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (CMPB x (MOVLconst [c])) - // result: (CMPBconst x [int64(int8(c))]) + // result: (CMPBconst x [int8(c)]) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64CMPBconst) - v.AuxInt = int64(int8(c)) + v.AuxInt = int8ToAuxInt(int8(c)) v.AddArg(x) return true } // match: (CMPB (MOVLconst [c]) x) - // result: (InvertFlags (CMPBconst x [int64(int8(c))])) + // result: (InvertFlags (CMPBconst x [int8(c)])) for { if v_0.Op != OpAMD64MOVLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_1 v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v0.AuxInt = int64(int8(c)) + v0.AuxInt = int8ToAuxInt(int8(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -7006,23 +6991,23 @@ func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } 
v.reset(OpAMD64CMPBconstload) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -7055,24 +7040,24 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (CMPBload [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64CMPBload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -7133,9 +7118,9 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64CMPLconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg(x) return true } @@ -7145,11 +7130,11 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { if v_0.Op != OpAMD64MOVLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_1 v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v0.AuxInt = c + v0.AuxInt = int32ToAuxInt(c) v0.AddArg(x) v.AddArg(v0) return true @@ -7407,23 +7392,23 @@ func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: 
(CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64CMPLconstload) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -7456,24 +7441,24 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (CMPLload [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64CMPLload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -7529,36 +7514,36 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { b := v.Block // match: (CMPQ x (MOVQconst [c])) // cond: is32Bit(c) - // result: (CMPQconst x [c]) + // result: (CMPQconst x [int32(c)]) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { break } v.reset(OpAMD64CMPQconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } // match: (CMPQ (MOVQconst [c]) x) // cond: is32Bit(c) - // result: (InvertFlags (CMPQconst x [c])) + // result: (InvertFlags (CMPQconst 
x [int32(c)])) for { if v_0.Op != OpAMD64MOVQconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 if !(is32Bit(c)) { break } v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = c + v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -7722,15 +7707,15 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { // match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) // result: (FlagLT_ULT) for { - if v.AuxInt != 32 || v_0.Op != OpAMD64NEGQ { + if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64ADDQconst || v_0_0.AuxInt != -16 { + if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -16 { break } v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0.AuxInt != 15 { + if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 15 { break } v.reset(OpAMD64FlagLT_ULT) @@ -7739,15 +7724,15 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { // match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) // result: (FlagLT_ULT) for { - if v.AuxInt != 32 || v_0.Op != OpAMD64NEGQ { + if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64ADDQconst || v_0_0.AuxInt != -8 { + if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -8 { break } v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0.AuxInt != 7 { + if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 7 { break } v.reset(OpAMD64FlagLT_ULT) @@ -7988,23 +7973,23 @@ func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (CMPQconstload [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // 
result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64CMPQconstload) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -8037,24 +8022,24 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (CMPQload [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64CMPQload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -8109,29 +8094,29 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (CMPW x (MOVLconst [c])) - // result: (CMPWconst x [int64(int16(c))]) + // result: (CMPWconst x [int16(c)]) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64CMPWconst) - v.AuxInt = int64(int16(c)) + v.AuxInt = int16ToAuxInt(int16(c)) v.AddArg(x) return true } // match: (CMPW (MOVLconst [c]) x) - // result: (InvertFlags (CMPWconst x [int64(int16(c))])) + // result: (InvertFlags (CMPWconst 
x [int16(c)])) for { if v_0.Op != OpAMD64MOVLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_1 v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v0.AuxInt = int64(int16(c)) + v0.AuxInt = int16ToAuxInt(int16(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -8374,23 +8359,23 @@ func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64CMPWconstload) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -8423,24 +8408,24 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (CMPWload [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64CMPWload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt 
= int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -8582,24 +8567,24 @@ func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (DIVSDload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64DIVSDload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -8660,24 +8645,24 @@ func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (DIVSSload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64DIVSSload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -8781,22 +8766,22 @@ func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool { func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool { v_0 := v.Args[0] // match: (LEAL [c] {s} (ADDLconst [d] x)) - // cond: is32Bit(c+d) + // cond: is32Bit(int64(c)+int64(d)) // result: (LEAL [c+d] {s} x) for { - c := v.AuxInt 
- s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(is32Bit(c + d)) { + if !(is32Bit(int64(c) + int64(d))) { break } v.reset(OpAMD64LEAL) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg(x) return true } @@ -8804,8 +8789,8 @@ func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool { // cond: x.Op != OpSB && y.Op != OpSB // result: (LEAL1 [c] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDL { break } @@ -8819,8 +8804,8 @@ func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool { continue } v.reset(OpAMD64LEAL1) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -8832,24 +8817,24 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (LEAL1 [c] {s} (ADDLconst [d] x) y) - // cond: is32Bit(c+d) && x.Op != OpSB + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB // result: (LEAL1 [c+d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDLconst { continue } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] y := v_1 - if !(is32Bit(c+d) && x.Op != OpSB) { + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { continue } v.reset(OpAMD64LEAL1) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -8858,17 +8843,17 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool { // match: (LEAL1 [c] {s} x (SHLLconst [1] y)) // result: (LEAL2 [c] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64SHLLconst 
|| v_1.AuxInt != 1 { + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 { continue } y := v_1.Args[0] v.reset(OpAMD64LEAL2) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -8877,17 +8862,17 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool { // match: (LEAL1 [c] {s} x (SHLLconst [2] y)) // result: (LEAL4 [c] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 2 { + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 { continue } y := v_1.Args[0] v.reset(OpAMD64LEAL4) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -8896,17 +8881,17 @@ func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool { // match: (LEAL1 [c] {s} x (SHLLconst [3] y)) // result: (LEAL8 [c] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 3 { + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 { continue } y := v_1.Args[0] v.reset(OpAMD64LEAL8) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -8918,76 +8903,76 @@ func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (LEAL2 [c] {s} (ADDLconst [d] x) y) - // cond: is32Bit(c+d) && x.Op != OpSB + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB // result: (LEAL2 [c+d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] y := v_1 - if !(is32Bit(c+d) && x.Op != OpSB) { + if !(is32Bit(int64(c)+int64(d)) && x.Op != 
OpSB) { break } v.reset(OpAMD64LEAL2) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (ADDLconst [d] y)) - // cond: is32Bit(c+2*d) && y.Op != OpSB + // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB // result: (LEAL2 [c+2*d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) x := v_0 if v_1.Op != OpAMD64ADDLconst { break } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] - if !(is32Bit(c+2*d) && y.Op != OpSB) { + if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) { break } v.reset(OpAMD64LEAL2) - v.AuxInt = c + 2*d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + 2*d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (SHLLconst [1] y)) // result: (LEAL4 [c] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) x := v_0 - if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 { + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 { break } y := v_1.Args[0] v.reset(OpAMD64LEAL4) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (LEAL2 [c] {s} x (SHLLconst [2] y)) // result: (LEAL8 [c] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) x := v_0 - if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 2 { + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 { break } y := v_1.Args[0] v.reset(OpAMD64LEAL8) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -8997,60 +8982,60 @@ func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (LEAL4 [c] {s} (ADDLconst [d] x) y) - // cond: is32Bit(c+d) && x.Op != OpSB + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB // result: (LEAL4 [c+d] {s} x y) for { - c := v.AuxInt - s 
:= v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] y := v_1 - if !(is32Bit(c+d) && x.Op != OpSB) { + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { break } v.reset(OpAMD64LEAL4) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (LEAL4 [c] {s} x (ADDLconst [d] y)) - // cond: is32Bit(c+4*d) && y.Op != OpSB + // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB // result: (LEAL4 [c+4*d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) x := v_0 if v_1.Op != OpAMD64ADDLconst { break } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] - if !(is32Bit(c+4*d) && y.Op != OpSB) { + if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) { break } v.reset(OpAMD64LEAL4) - v.AuxInt = c + 4*d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + 4*d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (LEAL4 [c] {s} x (SHLLconst [1] y)) // result: (LEAL8 [c] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) x := v_0 - if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 { + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 { break } y := v_1.Args[0] v.reset(OpAMD64LEAL8) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -9060,44 +9045,44 @@ func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (LEAL8 [c] {s} (ADDLconst [d] x) y) - // cond: is32Bit(c+d) && x.Op != OpSB + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB // result: (LEAL8 [c+d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] y := v_1 - if 
!(is32Bit(c+d) && x.Op != OpSB) { + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { break } v.reset(OpAMD64LEAL8) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (LEAL8 [c] {s} x (ADDLconst [d] y)) - // cond: is32Bit(c+8*d) && y.Op != OpSB + // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB // result: (LEAL8 [c+8*d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) x := v_0 if v_1.Op != OpAMD64ADDLconst { break } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] - if !(is32Bit(c+8*d) && y.Op != OpSB) { + if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) { break } v.reset(OpAMD64LEAL8) - v.AuxInt = c + 8*d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + 8*d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -9106,22 +9091,22 @@ func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool { func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { v_0 := v.Args[0] // match: (LEAQ [c] {s} (ADDQconst [d] x)) - // cond: is32Bit(c+d) + // cond: is32Bit(int64(c)+int64(d)) // result: (LEAQ [c+d] {s} x) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(is32Bit(c + d)) { + if !(is32Bit(int64(c) + int64(d))) { break } v.reset(OpAMD64LEAQ) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg(x) return true } @@ -9129,8 +9114,8 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { // cond: x.Op != OpSB && y.Op != OpSB // result: (LEAQ1 [c] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQ { break } @@ -9144,8 +9129,8 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { continue } v.reset(OpAMD64LEAQ1) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = 
symToAux(s) v.AddArg2(x, y) return true } @@ -9266,24 +9251,24 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) - // cond: is32Bit(c+d) && x.Op != OpSB + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB // result: (LEAQ1 [c+d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDQconst { continue } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] y := v_1 - if !(is32Bit(c+d) && x.Op != OpSB) { + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { continue } v.reset(OpAMD64LEAQ1) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -9292,17 +9277,17 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) // result: (LEAQ2 [c] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 { continue } y := v_1.Args[0] v.reset(OpAMD64LEAQ2) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -9311,17 +9296,17 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) // result: (LEAQ4 [c] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 { continue } y := v_1.Args[0] v.reset(OpAMD64LEAQ4) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) 
return true } @@ -9330,17 +9315,17 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) // result: (LEAQ8 [c] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 { continue } y := v_1.Args[0] v.reset(OpAMD64LEAQ8) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -9451,76 +9436,76 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) - // cond: is32Bit(c+d) && x.Op != OpSB + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB // result: (LEAQ2 [c+d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] y := v_1 - if !(is32Bit(c+d) && x.Op != OpSB) { + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { break } v.reset(OpAMD64LEAQ2) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) - // cond: is32Bit(c+2*d) && y.Op != OpSB + // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB // result: (LEAQ2 [c+2*d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) x := v_0 if v_1.Op != OpAMD64ADDQconst { break } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] - if !(is32Bit(c+2*d) && y.Op != OpSB) { + if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) { break } v.reset(OpAMD64LEAQ2) - v.AuxInt = c + 2*d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + 2*d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (LEAQ2 [c] {s} x 
(SHLQconst [1] y)) // result: (LEAQ4 [c] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) x := v_0 - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 { break } y := v_1.Args[0] v.reset(OpAMD64LEAQ4) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) // result: (LEAQ8 [c] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) x := v_0 - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 { break } y := v_1.Args[0] v.reset(OpAMD64LEAQ8) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -9614,60 +9599,60 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) - // cond: is32Bit(c+d) && x.Op != OpSB + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB // result: (LEAQ4 [c+d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] y := v_1 - if !(is32Bit(c+d) && x.Op != OpSB) { + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { break } v.reset(OpAMD64LEAQ4) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) - // cond: is32Bit(c+4*d) && y.Op != OpSB + // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB // result: (LEAQ4 [c+4*d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) x := v_0 if v_1.Op != OpAMD64ADDQconst { break } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] - if 
!(is32Bit(c+4*d) && y.Op != OpSB) { + if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) { break } v.reset(OpAMD64LEAQ4) - v.AuxInt = c + 4*d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + 4*d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) // result: (LEAQ8 [c] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) x := v_0 - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 { break } y := v_1.Args[0] v.reset(OpAMD64LEAQ8) - v.AuxInt = c - v.Aux = s + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -9761,44 +9746,44 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) - // cond: is32Bit(c+d) && x.Op != OpSB + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB // result: (LEAQ8 [c+d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] y := v_1 - if !(is32Bit(c+d) && x.Op != OpSB) { + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { break } v.reset(OpAMD64LEAQ8) - v.AuxInt = c + d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) - // cond: is32Bit(c+8*d) && y.Op != OpSB + // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB // result: (LEAQ8 [c+8*d] {s} x y) for { - c := v.AuxInt - s := v.Aux + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) x := v_0 if v_1.Op != OpAMD64ADDQconst { break } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] - if !(is32Bit(c+8*d) && y.Op != OpSB) { + if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) { break } v.reset(OpAMD64LEAQ8) - v.AuxInt = c + 8*d - v.Aux = s + v.AuxInt = int32ToAuxInt(c + 
8*d) + v.Aux = symToAux(s) v.AddArg2(x, y) return true } @@ -9877,8 +9862,8 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { if x.Op != OpAMD64MOVBload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -9887,8 +9872,8 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -9900,8 +9885,8 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { if x.Op != OpAMD64MOVWload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -9910,8 +9895,8 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -9923,8 +9908,8 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { if x.Op != OpAMD64MOVLload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -9933,8 +9918,8 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -9946,8 +9931,8 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { if x.Op != OpAMD64MOVQload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ 
-9956,8 +9941,8 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -9968,13 +9953,13 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { if v_0.Op != OpAMD64ANDLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] if !(c&0x80 == 0) { break } v.reset(OpAMD64ANDLconst) - v.AuxInt = c & 0x7f + v.AuxInt = int32ToAuxInt(c & 0x7f) v.AddArg(x) return true } @@ -9998,14 +9983,14 @@ func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool { // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVBQSX x) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVBstore { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) x := v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { @@ -10050,8 +10035,8 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { if x.Op != OpAMD64MOVBload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -10060,8 +10045,8 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -10073,8 +10058,8 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { if x.Op != OpAMD64MOVWload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -10083,8 +10068,8 @@ func 
rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -10096,8 +10081,8 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { if x.Op != OpAMD64MOVLload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -10106,8 +10091,8 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -10119,8 +10104,8 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { if x.Op != OpAMD64MOVQload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -10129,8 +10114,8 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -10151,10 +10136,10 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { if v_0.Op != OpAMD64ANDLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpAMD64ANDLconst) - v.AuxInt = c & 0xff + v.AuxInt = int32ToAuxInt(c & 0xff) v.AddArg(x) return true } @@ -10226,14 +10211,14 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVBQZX x) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVBstore { break } - off2 := v_1.AuxInt - 
sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) x := v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { @@ -10244,23 +10229,23 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { return true } // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVBload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVBload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -10354,8 +10339,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: y.Uses == 1 // result: (SETLstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64SETL { @@ -10367,8 +10352,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64SETLstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -10376,8 +10361,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: y.Uses == 1 // result: (SETLEstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64SETLE { @@ -10389,8 +10374,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64SETLEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -10398,8 
+10383,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: y.Uses == 1 // result: (SETGstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64SETG { @@ -10411,8 +10396,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64SETGstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -10420,8 +10405,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: y.Uses == 1 // result: (SETGEstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64SETGE { @@ -10433,8 +10418,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64SETGEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -10442,8 +10427,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: y.Uses == 1 // result: (SETEQstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64SETEQ { @@ -10455,8 +10440,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64SETEQstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -10464,8 +10449,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: y.Uses == 1 // result: (SETNEstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64SETNE { @@ -10477,8 +10462,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64SETNEstore) - v.AuxInt = 
off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -10486,8 +10471,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: y.Uses == 1 // result: (SETBstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64SETB { @@ -10499,8 +10484,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -10508,8 +10493,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: y.Uses == 1 // result: (SETBEstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64SETBE { @@ -10521,8 +10506,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64SETBEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -10530,8 +10515,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: y.Uses == 1 // result: (SETAstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64SETA { @@ -10543,8 +10528,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64SETAstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -10552,8 +10537,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: y.Uses == 1 // result: (SETAEstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64SETAE { @@ 
-10565,16 +10550,16 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVBQSX { break @@ -10582,16 +10567,16 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVBQZX { break @@ -10599,72 +10584,64 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVBstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVBstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) - // cond: 
validOff(off) - // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) + // result: (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validOff(off)) { - break - } v.reset(OpAMD64MOVBstoreconst) - v.AuxInt = makeValAndOff(int64(int8(c)), off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem) - // cond: validOff(off) - // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) + // result: (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) mem := v_2 - if !(validOff(off)) { - break - } v.reset(OpAMD64MOVBstoreconst) - v.AuxInt = makeValAndOff(int64(int8(c)), off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -11542,23 +11519,23 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + // cond: ValAndOff(sc).canAdd32(off) + // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) for { - sc := v.AuxInt - s := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off := v_0.AuxInt + off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if 
!(ValAndOff(sc).canAdd(off)) { + if !(ValAndOff(sc).canAdd32(off)) { break } v.reset(OpAMD64MOVBstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(s) v.AddArg2(ptr, mem) return true } @@ -11690,8 +11667,8 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { if x.Op != OpAMD64MOVLload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -11700,8 +11677,8 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -11713,8 +11690,8 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { if x.Op != OpAMD64MOVQload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -11723,25 +11700,25 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } // match: (MOVLQSX (ANDLconst [c] x)) - // cond: c & 0x80000000 == 0 + // cond: uint32(c) & 0x80000000 == 0 // result: (ANDLconst [c & 0x7fffffff] x) for { if v_0.Op != OpAMD64ANDLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(c&0x80000000 == 0) { + if !(uint32(c)&0x80000000 == 0) { break } v.reset(OpAMD64ANDLconst) - v.AuxInt = c & 0x7fffffff + v.AuxInt = int32ToAuxInt(c & 0x7fffffff) v.AddArg(x) return true } @@ -11787,14 +11764,14 @@ func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool { // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // 
result: (MOVLQSX x) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVLstore { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) x := v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { @@ -11839,8 +11816,8 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { if x.Op != OpAMD64MOVLload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -11849,8 +11826,8 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -11862,8 +11839,8 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { if x.Op != OpAMD64MOVQload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -11872,8 +11849,8 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -11894,10 +11871,10 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { if v_0.Op != OpAMD64ANDLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpAMD64ANDLconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg(x) return true } @@ -12045,14 +12022,14 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVLQZX x) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) 
+ sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVLstore { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) x := v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { @@ -12063,23 +12040,23 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { return true } // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVLload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVLload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -12189,8 +12166,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) // result: (MOVLstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVLQSX { break @@ -12198,16 +12175,16 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64MOVLstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) // result: (MOVLstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVLQZX { break @@ -12215,72 +12192,64 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64MOVLstore) - 
v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVLstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) - // cond: validOff(off) - // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) + // result: (MOVLstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validOff(off)) { - break - } v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = makeValAndOff(int64(int32(c)), off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem) - // cond: validOff(off) - // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) + // result: (MOVLstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) mem := v_2 - if !(validOff(off)) { - break - } 
v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = makeValAndOff(int64(int32(c)), off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -13048,23 +13017,23 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + // cond: ValAndOff(sc).canAdd32(off) + // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) for { - sc := v.AuxInt - s := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off := v_0.AuxInt + off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(ValAndOff(sc).canAdd(off)) { + if !(ValAndOff(sc).canAdd32(off)) { break } v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(s) v.AddArg2(ptr, mem) return true } @@ -13193,23 +13162,23 @@ func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVOload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVOload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -13245,24 +13214,24 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { config := 
b.Func.Config typ := &b.Func.Config.Types // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVOstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVOstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } @@ -13434,14 +13403,14 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: x for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVQstore { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) x := v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { @@ -13451,23 +13420,23 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { return true } // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVQload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVQload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -13573,45 +13542,45 
@@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVQstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) - // cond: validValAndOff(c,off) - // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) + // cond: validVal(c) + // result: (MOVQstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) mem := v_2 - if !(validValAndOff(c, off)) { + if !(validVal(c)) { break } v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -14229,23 +14198,23 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { b := v.Block config := b.Func.Config // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + // cond: ValAndOff(sc).canAdd32(off) + // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) for { - sc := v.AuxInt - s := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + s := 
auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off := v_0.AuxInt + off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(ValAndOff(sc).canAdd(off)) { + if !(ValAndOff(sc).canAdd32(off)) { break } v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(s) v.AddArg2(ptr, mem) return true } @@ -14347,23 +14316,23 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVSDload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVSDload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -14413,24 +14382,24 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVSDstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVSDstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } 
@@ -14480,23 +14449,23 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVSSload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVSSload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -14546,24 +14515,24 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVSSstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVSSstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } @@ -14620,8 +14589,8 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { if x.Op != OpAMD64MOVWload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -14630,8 +14599,8 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) 
v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -14643,8 +14612,8 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { if x.Op != OpAMD64MOVLload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -14653,8 +14622,8 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -14666,8 +14635,8 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { if x.Op != OpAMD64MOVQload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -14676,8 +14645,8 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -14688,13 +14657,13 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { if v_0.Op != OpAMD64ANDLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] if !(c&0x8000 == 0) { break } v.reset(OpAMD64ANDLconst) - v.AuxInt = c & 0x7fff + v.AuxInt = int32ToAuxInt(c & 0x7fff) v.AddArg(x) return true } @@ -14729,14 +14698,14 @@ func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool { // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVWQSX x) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVWstore { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := 
auxToSym(v_1.Aux) x := v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { @@ -14781,8 +14750,8 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { if x.Op != OpAMD64MOVWload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -14791,8 +14760,8 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -14804,8 +14773,8 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { if x.Op != OpAMD64MOVLload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -14814,8 +14783,8 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -14827,8 +14796,8 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { if x.Op != OpAMD64MOVQload { break } - off := x.AuxInt - sym := x.Aux + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) mem := x.Args[1] ptr := x.Args[0] if !(x.Uses == 1 && clobber(x)) { @@ -14837,8 +14806,8 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { b = x.Block v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -14859,10 +14828,10 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { if v_0.Op != OpAMD64ANDLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpAMD64ANDLconst) - 
v.AuxInt = c & 0xffff + v.AuxInt = int32ToAuxInt(c & 0xffff) v.AddArg(x) return true } @@ -14899,14 +14868,14 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVWQZX x) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVWstore { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) x := v_1.Args[1] ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { @@ -14917,23 +14886,23 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { return true } // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVWload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVWload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -15026,8 +14995,8 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) // result: (MOVWstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVWQSX { break @@ -15035,16 +15004,16 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64MOVWstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) // result: 
(MOVWstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVWQZX { break @@ -15052,72 +15021,64 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64MOVWstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVWstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) - // cond: validOff(off) - // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) + // result: (MOVWstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validOff(off)) { - break - } v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = makeValAndOff(int64(int16(c)), off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem) - // cond: validOff(off) - // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 
+ // result: (MOVWstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) mem := v_2 - if !(validOff(off)) { - break - } v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = makeValAndOff(int64(int16(c)), off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -15454,23 +15415,23 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + // cond: ValAndOff(sc).canAdd32(off) + // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) for { - sc := v.AuxInt - s := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off := v_0.AuxInt + off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(ValAndOff(sc).canAdd(off)) { + if !(ValAndOff(sc).canAdd32(off)) { break } v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(s) v.AddArg2(ptr, mem) return true } @@ -15602,9 +15563,9 @@ func rewriteValueAMD64_OpAMD64MULL(v *Value) bool { if v_1.Op != OpAMD64MOVLconst { continue } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64MULLconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg(x) return true } @@ -15616,23 +15577,23 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MULLconst [c] (MULLconst [d] x)) - // result: (MULLconst [int64(int32(c * d))] x) + // result: (MULLconst [c * d] x) for { - c := v.AuxInt + c := 
auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MULLconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpAMD64MULLconst) - v.AuxInt = int64(int32(c * d)) + v.AuxInt = int32ToAuxInt(c * d) v.AddArg(x) return true } // match: (MULLconst [-9] x) // result: (NEGL (LEAL8 x x)) for { - if v.AuxInt != -9 { + if auxIntToInt32(v.AuxInt) != -9 { break } x := v_0 @@ -15645,7 +15606,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [-5] x) // result: (NEGL (LEAL4 x x)) for { - if v.AuxInt != -5 { + if auxIntToInt32(v.AuxInt) != -5 { break } x := v_0 @@ -15658,7 +15619,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [-3] x) // result: (NEGL (LEAL2 x x)) for { - if v.AuxInt != -3 { + if auxIntToInt32(v.AuxInt) != -3 { break } x := v_0 @@ -15671,7 +15632,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [-1] x) // result: (NEGL x) for { - if v.AuxInt != -1 { + if auxIntToInt32(v.AuxInt) != -1 { break } x := v_0 @@ -15682,17 +15643,17 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [ 0] _) // result: (MOVLconst [0]) for { - if v.AuxInt != 0 { + if auxIntToInt32(v.AuxInt) != 0 { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (MULLconst [ 1] x) // result: x for { - if v.AuxInt != 1 { + if auxIntToInt32(v.AuxInt) != 1 { break } x := v_0 @@ -15702,7 +15663,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [ 3] x) // result: (LEAL2 x x) for { - if v.AuxInt != 3 { + if auxIntToInt32(v.AuxInt) != 3 { break } x := v_0 @@ -15713,7 +15674,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [ 5] x) // result: (LEAL4 x x) for { - if v.AuxInt != 5 { + if auxIntToInt32(v.AuxInt) != 5 { break } x := v_0 @@ -15724,7 +15685,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [ 7] 
x) // result: (LEAL2 x (LEAL2 x x)) for { - if v.AuxInt != 7 { + if auxIntToInt32(v.AuxInt) != 7 { break } x := v_0 @@ -15737,7 +15698,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [ 9] x) // result: (LEAL8 x x) for { - if v.AuxInt != 9 { + if auxIntToInt32(v.AuxInt) != 9 { break } x := v_0 @@ -15748,7 +15709,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [11] x) // result: (LEAL2 x (LEAL4 x x)) for { - if v.AuxInt != 11 { + if auxIntToInt32(v.AuxInt) != 11 { break } x := v_0 @@ -15761,7 +15722,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [13] x) // result: (LEAL4 x (LEAL2 x x)) for { - if v.AuxInt != 13 { + if auxIntToInt32(v.AuxInt) != 13 { break } x := v_0 @@ -15774,7 +15735,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [19] x) // result: (LEAL2 x (LEAL8 x x)) for { - if v.AuxInt != 19 { + if auxIntToInt32(v.AuxInt) != 19 { break } x := v_0 @@ -15787,7 +15748,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [21] x) // result: (LEAL4 x (LEAL4 x x)) for { - if v.AuxInt != 21 { + if auxIntToInt32(v.AuxInt) != 21 { break } x := v_0 @@ -15800,7 +15761,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [25] x) // result: (LEAL8 x (LEAL2 x x)) for { - if v.AuxInt != 25 { + if auxIntToInt32(v.AuxInt) != 25 { break } x := v_0 @@ -15813,7 +15774,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [27] x) // result: (LEAL8 (LEAL2 x x) (LEAL2 x x)) for { - if v.AuxInt != 27 { + if auxIntToInt32(v.AuxInt) != 27 { break } x := v_0 @@ -15826,7 +15787,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [37] x) // result: (LEAL4 x (LEAL8 x x)) for { - if v.AuxInt != 37 { + if auxIntToInt32(v.AuxInt) != 37 { break } x := v_0 @@ -15839,7 +15800,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // 
match: (MULLconst [41] x) // result: (LEAL8 x (LEAL4 x x)) for { - if v.AuxInt != 41 { + if auxIntToInt32(v.AuxInt) != 41 { break } x := v_0 @@ -15852,7 +15813,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [45] x) // result: (LEAL8 (LEAL4 x x) (LEAL4 x x)) for { - if v.AuxInt != 45 { + if auxIntToInt32(v.AuxInt) != 45 { break } x := v_0 @@ -15865,7 +15826,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [73] x) // result: (LEAL8 x (LEAL8 x x)) for { - if v.AuxInt != 73 { + if auxIntToInt32(v.AuxInt) != 73 { break } x := v_0 @@ -15878,7 +15839,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { // match: (MULLconst [81] x) // result: (LEAL8 (LEAL8 x x) (LEAL8 x x)) for { - if v.AuxInt != 81 { + if auxIntToInt32(v.AuxInt) != 81 { break } x := v_0 @@ -15889,128 +15850,128 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { return true } // match: (MULLconst [c] x) - // cond: isPowerOfTwo(c+1) && c >= 15 - // result: (SUBL (SHLLconst [log2(c+1)] x) x) + // cond: isPowerOfTwo(int64(c)+1) && c >= 15 + // result: (SUBL (SHLLconst [int8(log2(int64(c)+1))] x) x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isPowerOfTwo(c+1) && c >= 15) { + if !(isPowerOfTwo(int64(c)+1) && c >= 15) { break } v.reset(OpAMD64SUBL) v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v0.AuxInt = log2(c + 1) + v0.AuxInt = int8ToAuxInt(int8(log2(int64(c) + 1))) v0.AddArg(x) v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) - // cond: isPowerOfTwo(c-1) && c >= 17 - // result: (LEAL1 (SHLLconst [log2(c-1)] x) x) + // cond: isPowerOfTwo32(c-1) && c >= 17 + // result: (LEAL1 (SHLLconst [int8(log32(c-1))] x) x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isPowerOfTwo(c-1) && c >= 17) { + if !(isPowerOfTwo32(c-1) && c >= 17) { break } v.reset(OpAMD64LEAL1) v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v0.AuxInt = log2(c - 1) + v0.AuxInt = 
int8ToAuxInt(int8(log32(c - 1))) v0.AddArg(x) v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) - // cond: isPowerOfTwo(c-2) && c >= 34 - // result: (LEAL2 (SHLLconst [log2(c-2)] x) x) + // cond: isPowerOfTwo32(c-2) && c >= 34 + // result: (LEAL2 (SHLLconst [int8(log32(c-2))] x) x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isPowerOfTwo(c-2) && c >= 34) { + if !(isPowerOfTwo32(c-2) && c >= 34) { break } v.reset(OpAMD64LEAL2) v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v0.AuxInt = log2(c - 2) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 2))) v0.AddArg(x) v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) - // cond: isPowerOfTwo(c-4) && c >= 68 - // result: (LEAL4 (SHLLconst [log2(c-4)] x) x) + // cond: isPowerOfTwo32(c-4) && c >= 68 + // result: (LEAL4 (SHLLconst [int8(log32(c-4))] x) x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isPowerOfTwo(c-4) && c >= 68) { + if !(isPowerOfTwo32(c-4) && c >= 68) { break } v.reset(OpAMD64LEAL4) v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v0.AuxInt = log2(c - 4) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 4))) v0.AddArg(x) v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) - // cond: isPowerOfTwo(c-8) && c >= 136 - // result: (LEAL8 (SHLLconst [log2(c-8)] x) x) + // cond: isPowerOfTwo32(c-8) && c >= 136 + // result: (LEAL8 (SHLLconst [int8(log32(c-8))] x) x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isPowerOfTwo(c-8) && c >= 136) { + if !(isPowerOfTwo32(c-8) && c >= 136) { break } v.reset(OpAMD64LEAL8) v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v0.AuxInt = log2(c - 8) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 8))) v0.AddArg(x) v.AddArg2(v0, x) return true } // match: (MULLconst [c] x) - // cond: c%3 == 0 && isPowerOfTwo(c/3) - // result: (SHLLconst [log2(c/3)] (LEAL2 x x)) + // cond: c%3 == 0 && isPowerOfTwo32(c/3) + // result: (SHLLconst [int8(log32(c/3))] (LEAL2 x x)) for { - c := v.AuxInt + c := 
auxIntToInt32(v.AuxInt) x := v_0 - if !(c%3 == 0 && isPowerOfTwo(c/3)) { + if !(c%3 == 0 && isPowerOfTwo32(c/3)) { break } v.reset(OpAMD64SHLLconst) - v.AuxInt = log2(c / 3) + v.AuxInt = int8ToAuxInt(int8(log32(c / 3))) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) v0.AddArg2(x, x) v.AddArg(v0) return true } // match: (MULLconst [c] x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) - // result: (SHLLconst [log2(c/5)] (LEAL4 x x)) + // cond: c%5 == 0 && isPowerOfTwo32(c/5) + // result: (SHLLconst [int8(log32(c/5))] (LEAL4 x x)) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(c%5 == 0 && isPowerOfTwo(c/5)) { + if !(c%5 == 0 && isPowerOfTwo32(c/5)) { break } v.reset(OpAMD64SHLLconst) - v.AuxInt = log2(c / 5) + v.AuxInt = int8ToAuxInt(int8(log32(c / 5))) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) v0.AddArg2(x, x) v.AddArg(v0) return true } // match: (MULLconst [c] x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) - // result: (SHLLconst [log2(c/9)] (LEAL8 x x)) + // cond: c%9 == 0 && isPowerOfTwo32(c/9) + // result: (SHLLconst [int8(log32(c/9))] (LEAL8 x x)) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(c%9 == 0 && isPowerOfTwo(c/9)) { + if !(c%9 == 0 && isPowerOfTwo32(c/9)) { break } v.reset(OpAMD64SHLLconst) - v.AuxInt = log2(c / 9) + v.AuxInt = int8ToAuxInt(int8(log32(c / 9))) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) v0.AddArg2(x, x) v.AddArg(v0) @@ -16035,19 +15996,19 @@ func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool { v_0 := v.Args[0] // match: (MULQ x (MOVQconst [c])) // cond: is32Bit(c) - // result: (MULQconst [c] x) + // result: (MULQconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpAMD64MOVQconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { continue } v.reset(OpAMD64MULQconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -16059,27 +16020,27 @@ func rewriteValueAMD64_OpAMD64MULQconst(v 
*Value) bool { v_0 := v.Args[0] b := v.Block // match: (MULQconst [c] (MULQconst [d] x)) - // cond: is32Bit(c*d) + // cond: is32Bit(int64(c)*int64(d)) // result: (MULQconst [c * d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MULQconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(is32Bit(c * d)) { + if !(is32Bit(int64(c) * int64(d))) { break } v.reset(OpAMD64MULQconst) - v.AuxInt = c * d + v.AuxInt = int32ToAuxInt(c * d) v.AddArg(x) return true } // match: (MULQconst [-9] x) // result: (NEGQ (LEAQ8 x x)) for { - if v.AuxInt != -9 { + if auxIntToInt32(v.AuxInt) != -9 { break } x := v_0 @@ -16092,7 +16053,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [-5] x) // result: (NEGQ (LEAQ4 x x)) for { - if v.AuxInt != -5 { + if auxIntToInt32(v.AuxInt) != -5 { break } x := v_0 @@ -16105,7 +16066,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [-3] x) // result: (NEGQ (LEAQ2 x x)) for { - if v.AuxInt != -3 { + if auxIntToInt32(v.AuxInt) != -3 { break } x := v_0 @@ -16118,7 +16079,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [-1] x) // result: (NEGQ x) for { - if v.AuxInt != -1 { + if auxIntToInt32(v.AuxInt) != -1 { break } x := v_0 @@ -16129,17 +16090,17 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [ 0] _) // result: (MOVQconst [0]) for { - if v.AuxInt != 0 { + if auxIntToInt32(v.AuxInt) != 0 { break } v.reset(OpAMD64MOVQconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (MULQconst [ 1] x) // result: x for { - if v.AuxInt != 1 { + if auxIntToInt32(v.AuxInt) != 1 { break } x := v_0 @@ -16149,7 +16110,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [ 3] x) // result: (LEAQ2 x x) for { - if v.AuxInt != 3 { + if auxIntToInt32(v.AuxInt) != 3 { break } x := v_0 @@ -16160,7 +16121,7 @@ func 
rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [ 5] x) // result: (LEAQ4 x x) for { - if v.AuxInt != 5 { + if auxIntToInt32(v.AuxInt) != 5 { break } x := v_0 @@ -16171,7 +16132,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [ 7] x) // result: (LEAQ2 x (LEAQ2 x x)) for { - if v.AuxInt != 7 { + if auxIntToInt32(v.AuxInt) != 7 { break } x := v_0 @@ -16184,7 +16145,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [ 9] x) // result: (LEAQ8 x x) for { - if v.AuxInt != 9 { + if auxIntToInt32(v.AuxInt) != 9 { break } x := v_0 @@ -16195,7 +16156,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [11] x) // result: (LEAQ2 x (LEAQ4 x x)) for { - if v.AuxInt != 11 { + if auxIntToInt32(v.AuxInt) != 11 { break } x := v_0 @@ -16208,7 +16169,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [13] x) // result: (LEAQ4 x (LEAQ2 x x)) for { - if v.AuxInt != 13 { + if auxIntToInt32(v.AuxInt) != 13 { break } x := v_0 @@ -16221,7 +16182,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [19] x) // result: (LEAQ2 x (LEAQ8 x x)) for { - if v.AuxInt != 19 { + if auxIntToInt32(v.AuxInt) != 19 { break } x := v_0 @@ -16234,7 +16195,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [21] x) // result: (LEAQ4 x (LEAQ4 x x)) for { - if v.AuxInt != 21 { + if auxIntToInt32(v.AuxInt) != 21 { break } x := v_0 @@ -16247,7 +16208,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [25] x) // result: (LEAQ8 x (LEAQ2 x x)) for { - if v.AuxInt != 25 { + if auxIntToInt32(v.AuxInt) != 25 { break } x := v_0 @@ -16260,7 +16221,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [27] x) // result: (LEAQ8 (LEAQ2 x x) (LEAQ2 x x)) for { - if v.AuxInt != 27 { + if auxIntToInt32(v.AuxInt) != 27 { break } x := v_0 @@ -16273,7 
+16234,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [37] x) // result: (LEAQ4 x (LEAQ8 x x)) for { - if v.AuxInt != 37 { + if auxIntToInt32(v.AuxInt) != 37 { break } x := v_0 @@ -16286,7 +16247,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [41] x) // result: (LEAQ8 x (LEAQ4 x x)) for { - if v.AuxInt != 41 { + if auxIntToInt32(v.AuxInt) != 41 { break } x := v_0 @@ -16299,7 +16260,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [45] x) // result: (LEAQ8 (LEAQ4 x x) (LEAQ4 x x)) for { - if v.AuxInt != 45 { + if auxIntToInt32(v.AuxInt) != 45 { break } x := v_0 @@ -16312,7 +16273,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [73] x) // result: (LEAQ8 x (LEAQ8 x x)) for { - if v.AuxInt != 73 { + if auxIntToInt32(v.AuxInt) != 73 { break } x := v_0 @@ -16325,7 +16286,7 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { // match: (MULQconst [81] x) // result: (LEAQ8 (LEAQ8 x x) (LEAQ8 x x)) for { - if v.AuxInt != 81 { + if auxIntToInt32(v.AuxInt) != 81 { break } x := v_0 @@ -16336,128 +16297,128 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { return true } // match: (MULQconst [c] x) - // cond: isPowerOfTwo(c+1) && c >= 15 - // result: (SUBQ (SHLQconst [log2(c+1)] x) x) + // cond: isPowerOfTwo(int64(c)+1) && c >= 15 + // result: (SUBQ (SHLQconst [int8(log2(int64(c)+1))] x) x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isPowerOfTwo(c+1) && c >= 15) { + if !(isPowerOfTwo(int64(c)+1) && c >= 15) { break } v.reset(OpAMD64SUBQ) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v0.AuxInt = log2(c + 1) + v0.AuxInt = int8ToAuxInt(int8(log2(int64(c) + 1))) v0.AddArg(x) v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) - // cond: isPowerOfTwo(c-1) && c >= 17 - // result: (LEAQ1 (SHLQconst [log2(c-1)] x) x) + // cond: isPowerOfTwo32(c-1) && c >= 17 + // result: (LEAQ1 (SHLQconst 
[int8(log32(c-1))] x) x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isPowerOfTwo(c-1) && c >= 17) { + if !(isPowerOfTwo32(c-1) && c >= 17) { break } v.reset(OpAMD64LEAQ1) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v0.AuxInt = log2(c - 1) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 1))) v0.AddArg(x) v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) - // cond: isPowerOfTwo(c-2) && c >= 34 - // result: (LEAQ2 (SHLQconst [log2(c-2)] x) x) + // cond: isPowerOfTwo32(c-2) && c >= 34 + // result: (LEAQ2 (SHLQconst [int8(log32(c-2))] x) x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isPowerOfTwo(c-2) && c >= 34) { + if !(isPowerOfTwo32(c-2) && c >= 34) { break } v.reset(OpAMD64LEAQ2) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v0.AuxInt = log2(c - 2) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 2))) v0.AddArg(x) v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) - // cond: isPowerOfTwo(c-4) && c >= 68 - // result: (LEAQ4 (SHLQconst [log2(c-4)] x) x) + // cond: isPowerOfTwo32(c-4) && c >= 68 + // result: (LEAQ4 (SHLQconst [int8(log32(c-4))] x) x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isPowerOfTwo(c-4) && c >= 68) { + if !(isPowerOfTwo32(c-4) && c >= 68) { break } v.reset(OpAMD64LEAQ4) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v0.AuxInt = log2(c - 4) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 4))) v0.AddArg(x) v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) - // cond: isPowerOfTwo(c-8) && c >= 136 - // result: (LEAQ8 (SHLQconst [log2(c-8)] x) x) + // cond: isPowerOfTwo32(c-8) && c >= 136 + // result: (LEAQ8 (SHLQconst [int8(log32(c-8))] x) x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isPowerOfTwo(c-8) && c >= 136) { + if !(isPowerOfTwo32(c-8) && c >= 136) { break } v.reset(OpAMD64LEAQ8) v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v0.AuxInt = log2(c - 8) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 8))) 
v0.AddArg(x) v.AddArg2(v0, x) return true } // match: (MULQconst [c] x) - // cond: c%3 == 0 && isPowerOfTwo(c/3) - // result: (SHLQconst [log2(c/3)] (LEAQ2 x x)) + // cond: c%3 == 0 && isPowerOfTwo32(c/3) + // result: (SHLQconst [int8(log32(c/3))] (LEAQ2 x x)) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(c%3 == 0 && isPowerOfTwo(c/3)) { + if !(c%3 == 0 && isPowerOfTwo32(c/3)) { break } v.reset(OpAMD64SHLQconst) - v.AuxInt = log2(c / 3) + v.AuxInt = int8ToAuxInt(int8(log32(c / 3))) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) v0.AddArg2(x, x) v.AddArg(v0) return true } // match: (MULQconst [c] x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) - // result: (SHLQconst [log2(c/5)] (LEAQ4 x x)) + // cond: c%5 == 0 && isPowerOfTwo32(c/5) + // result: (SHLQconst [int8(log32(c/5))] (LEAQ4 x x)) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(c%5 == 0 && isPowerOfTwo(c/5)) { + if !(c%5 == 0 && isPowerOfTwo32(c/5)) { break } v.reset(OpAMD64SHLQconst) - v.AuxInt = log2(c / 5) + v.AuxInt = int8ToAuxInt(int8(log32(c / 5))) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) v0.AddArg2(x, x) v.AddArg(v0) return true } // match: (MULQconst [c] x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) - // result: (SHLQconst [log2(c/9)] (LEAQ8 x x)) + // cond: c%9 == 0 && isPowerOfTwo32(c/9) + // result: (SHLQconst [int8(log32(c/9))] (LEAQ8 x x)) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(c%9 == 0 && isPowerOfTwo(c/9)) { + if !(c%9 == 0 && isPowerOfTwo32(c/9)) { break } v.reset(OpAMD64SHLQconst) - v.AuxInt = log2(c / 9) + v.AuxInt = int8ToAuxInt(int8(log32(c / 9))) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) v0.AddArg2(x, x) v.AddArg(v0) @@ -16531,24 +16492,24 @@ func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MULSDload [off1+off2] 
{sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MULSDload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -16634,24 +16595,24 @@ func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MULSSload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MULSSload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -16849,7 +16810,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } y := v_0.Args[1] v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVLconst || v_0_0.AuxInt != 1 { + if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 { continue } x := v_1 @@ -16860,20 +16821,20 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { break } // match: (ORL (MOVLconst [c]) x) - // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 - // result: (BTSLconst [log2uint32(c)] x) + // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128 + // result: (BTSLconst [int8(log32(c))] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVLconst { 
continue } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_1 - if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) { + if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) { continue } v.reset(OpAMD64BTSLconst) - v.AuxInt = log2uint32(c) + v.AuxInt = int8ToAuxInt(int8(log32(c))) v.AddArg(x) return true } @@ -16887,9 +16848,9 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if v_1.Op != OpAMD64MOVLconst { continue } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64ORLconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg(x) return true } @@ -16903,17 +16864,17 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if v_0.Op != OpAMD64SHLLconst { continue } - c := v_0.AuxInt + c := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if v_1.Op != OpAMD64SHRLconst { continue } - d := v_1.AuxInt + d := auxIntToInt8(v_1.AuxInt) if x != v_1.Args[0] || !(d == 32-c) { continue } v.reset(OpAMD64ROLLconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -16928,17 +16889,17 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if v_0.Op != OpAMD64SHLLconst { continue } - c := v_0.AuxInt + c := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if v_1.Op != OpAMD64SHRWconst { continue } - d := v_1.AuxInt + d := auxIntToInt8(v_1.AuxInt) if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) { continue } v.reset(OpAMD64ROLWconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -16953,17 +16914,17 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if v_0.Op != OpAMD64SHLLconst { continue } - c := v_0.AuxInt + c := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if v_1.Op != OpAMD64SHRBconst { continue } - d := v_1.AuxInt + d := auxIntToInt8(v_1.AuxInt) if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { continue } v.reset(OpAMD64ROLBconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -16997,7 +16958,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v_1_1_0 
:= v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 32 { + if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 { continue } v_1_1_0_0 := v_1_1_0.Args[0] @@ -17005,11 +16966,11 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -32 { + if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 { continue } v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] { + if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] { continue } v.reset(OpAMD64ROLL) @@ -17047,7 +17008,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 32 { + if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 { continue } v_1_1_0_0 := v_1_1_0.Args[0] @@ -17055,11 +17016,11 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -32 { + if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 { continue } v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] { + if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] { continue } v.reset(OpAMD64ROLL) @@ -17097,7 +17058,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 32 { + if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 { continue } v_1_1_0_0 := v_1_1_0.Args[0] @@ -17105,11 +17066,11 @@ func rewriteValueAMD64_OpAMD64ORL(v 
*Value) bool { continue } v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -32 { + if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 { continue } v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] { + if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] { continue } v.reset(OpAMD64RORL) @@ -17147,7 +17108,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 32 { + if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 { continue } v_1_1_0_0 := v_1_1_0.Args[0] @@ -17155,11 +17116,11 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -32 { + if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 { continue } v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] { + if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] { continue } v.reset(OpAMD64RORL) @@ -17308,7 +17269,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 15 { + if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 { continue } y := v_0_1.Args[0] @@ -17324,11 +17285,11 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDQconst || v_1_1_0.AuxInt != -16 { + if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 { continue } v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != 
OpAMD64ANDQconst || v_1_1_0_0.AuxInt != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { + if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { continue } v.reset(OpAMD64RORW) @@ -17348,7 +17309,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 15 { + if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 { continue } y := v_0_1.Args[0] @@ -17364,11 +17325,11 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDLconst || v_1_1_0.AuxInt != -16 { + if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 { continue } v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0.AuxInt != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { + if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { continue } v.reset(OpAMD64RORW) @@ -17388,7 +17349,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 7 { + if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 { continue } y := v_0_1.Args[0] @@ -17411,15 +17372,15 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpAMD64ADDQconst || v_1_0_1_0.AuxInt != -8 { + if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 { continue } v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDQconst || v_1_0_1_0_0.AuxInt != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { + if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask 
{ continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 8 { + if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 { continue } v_1_1_0_0 := v_1_1_0.Args[0] @@ -17427,11 +17388,11 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -8 { + if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 { continue } v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) { + if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) { continue } v.reset(OpAMD64ROLB) @@ -17452,7 +17413,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 7 { + if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 { continue } y := v_0_1.Args[0] @@ -17475,15 +17436,15 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpAMD64ADDLconst || v_1_0_1_0.AuxInt != -8 { + if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 { continue } v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDLconst || v_1_0_1_0_0.AuxInt != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { + if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 8 { + if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 { continue } v_1_1_0_0 := v_1_1_0.Args[0] @@ -17491,11 +17452,11 @@ func rewriteValueAMD64_OpAMD64ORL(v 
*Value) bool { continue } v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -8 { + if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 { continue } v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) { + if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) { continue } v.reset(OpAMD64ROLB) @@ -17516,7 +17477,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 7 { + if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 { continue } y := v_0_1.Args[0] @@ -17532,11 +17493,11 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDQconst || v_1_1_0.AuxInt != -8 { + if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 { continue } v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0.AuxInt != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { + if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { continue } v.reset(OpAMD64RORB) @@ -17556,7 +17517,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 7 { + if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 { continue } y := v_0_1.Args[0] @@ -17572,11 +17533,11 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDLconst || v_1_1_0.AuxInt != -8 { + if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 { continue } v_1_1_0_0 := 
v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0.AuxInt != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { + if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { continue } v.reset(OpAMD64RORB) @@ -18203,16 +18164,16 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool { v_0 := v.Args[0] // match: (ORLconst [c] x) - // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 - // result: (BTSLconst [log2uint32(c)] x) + // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128 + // result: (BTSLconst [int8(log32(c))] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) { + if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) { break } v.reset(OpAMD64BTSLconst) - v.AuxInt = log2uint32(c) + v.AuxInt = int8ToAuxInt(int8(log32(c))) v.AddArg(x) return true } @@ -18286,23 +18247,23 @@ func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64ORLconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -18337,24 +18298,24 @@ func 
rewriteValueAMD64_OpAMD64ORLload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (ORLload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64ORLload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -18408,24 +18369,24 @@ func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (ORLmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64ORLmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -18468,7 +18429,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } y := v_0.Args[1] v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVQconst || v_0_0.AuxInt != 1 { + if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 { continue } x := v_1 @@ -18480,19 +18441,19 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } // match: (ORQ (MOVQconst [c]) x) // cond: isUint64PowerOfTwo(c) && uint64(c) >= 
128 - // result: (BTSQconst [log2(c)] x) + // result: (BTSQconst [int8(log2(c))] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVQconst { continue } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { continue } v.reset(OpAMD64BTSQconst) - v.AuxInt = log2(c) + v.AuxInt = int8ToAuxInt(int8(log2(c))) v.AddArg(x) return true } @@ -18500,19 +18461,19 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } // match: (ORQ x (MOVQconst [c])) // cond: is32Bit(c) - // result: (ORQconst [c] x) + // result: (ORQconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpAMD64MOVQconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { continue } v.reset(OpAMD64ORQconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -18526,9 +18487,9 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if v_1.Op != OpAMD64MOVLconst { continue } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64ORQconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg(x) return true } @@ -18542,17 +18503,17 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if v_0.Op != OpAMD64SHLQconst { continue } - c := v_0.AuxInt + c := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if v_1.Op != OpAMD64SHRQconst { continue } - d := v_1.AuxInt + d := auxIntToInt8(v_1.AuxInt) if x != v_1.Args[0] || !(d == 64-c) { continue } v.reset(OpAMD64ROLQconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -18586,7 +18547,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 64 { + if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 { continue } v_1_1_0_0 := v_1_1_0.Args[0] @@ -18594,11 +18555,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } 
v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -64 { + if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 { continue } v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] { + if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] { continue } v.reset(OpAMD64ROLQ) @@ -18636,7 +18597,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 64 { + if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 { continue } v_1_1_0_0 := v_1_1_0.Args[0] @@ -18644,11 +18605,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -64 { + if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 { continue } v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] { + if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] { continue } v.reset(OpAMD64ROLQ) @@ -18686,7 +18647,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 64 { + if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 { continue } v_1_1_0_0 := v_1_1_0.Args[0] @@ -18694,11 +18655,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -64 { + if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 { continue } v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if 
v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] { + if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] { continue } v.reset(OpAMD64RORQ) @@ -18736,7 +18697,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 64 { + if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 { continue } v_1_1_0_0 := v_1_1_0.Args[0] @@ -18744,11 +18705,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -64 { + if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 { continue } v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] { + if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] { continue } v.reset(OpAMD64RORQ) @@ -19830,16 +19791,16 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool { v_0 := v.Args[0] // match: (ORQconst [c] x) - // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 - // result: (BTSQconst [log2(c)] x) + // cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128 + // result: (BTSQconst [int8(log32(c))] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { + if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) { break } v.reset(OpAMD64BTSQconst) - v.AuxInt = log2(c) + v.AuxInt = int8ToAuxInt(int8(log32(c))) v.AddArg(x) return true } @@ -19913,23 +19874,23 @@ func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: 
ValAndOff(valoff1).canAdd(off2) - // result: (ORQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64ORQconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -19964,24 +19925,24 @@ func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (ORQload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64ORQload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -20035,24 +19996,24 @@ func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (ORQmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if 
v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64ORQmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -20109,28 +20070,28 @@ func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool { return true } // match: (ROLB x (MOVQconst [c])) - // result: (ROLBconst [c&7 ] x) + // result: (ROLBconst [int8(c&7) ] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64ROLBconst) - v.AuxInt = c & 7 + v.AuxInt = int8ToAuxInt(int8(c & 7)) v.AddArg(x) return true } // match: (ROLB x (MOVLconst [c])) - // result: (ROLBconst [c&7 ] x) + // result: (ROLBconst [int8(c&7) ] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64ROLBconst) - v.AuxInt = c & 7 + v.AuxInt = int8ToAuxInt(int8(c & 7)) v.AddArg(x) return true } @@ -20141,21 +20102,21 @@ func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool { // match: (ROLBconst [c] (ROLBconst [d] x)) // result: (ROLBconst [(c+d)& 7] x) for { - c := v.AuxInt + c := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64ROLBconst { break } - d := v_0.AuxInt + d := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] v.reset(OpAMD64ROLBconst) - v.AuxInt = (c + d) & 7 + v.AuxInt = int8ToAuxInt((c + d) & 7) v.AddArg(x) return true } // match: (ROLBconst x [0]) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -20192,28 +20153,28 @@ func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool { return true } // match: (ROLL x (MOVQconst [c])) - // result: (ROLLconst [c&31] x) + // result: (ROLLconst [int8(c&31)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) 
v.reset(OpAMD64ROLLconst) - v.AuxInt = c & 31 + v.AuxInt = int8ToAuxInt(int8(c & 31)) v.AddArg(x) return true } // match: (ROLL x (MOVLconst [c])) - // result: (ROLLconst [c&31] x) + // result: (ROLLconst [int8(c&31)] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64ROLLconst) - v.AuxInt = c & 31 + v.AuxInt = int8ToAuxInt(int8(c & 31)) v.AddArg(x) return true } @@ -20224,21 +20185,21 @@ func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool { // match: (ROLLconst [c] (ROLLconst [d] x)) // result: (ROLLconst [(c+d)&31] x) for { - c := v.AuxInt + c := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64ROLLconst { break } - d := v_0.AuxInt + d := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] v.reset(OpAMD64ROLLconst) - v.AuxInt = (c + d) & 31 + v.AuxInt = int8ToAuxInt((c + d) & 31) v.AddArg(x) return true } // match: (ROLLconst x [0]) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -20275,28 +20236,28 @@ func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool { return true } // match: (ROLQ x (MOVQconst [c])) - // result: (ROLQconst [c&63] x) + // result: (ROLQconst [int8(c&63)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64ROLQconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) v.AddArg(x) return true } // match: (ROLQ x (MOVLconst [c])) - // result: (ROLQconst [c&63] x) + // result: (ROLQconst [int8(c&63)] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64ROLQconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) v.AddArg(x) return true } @@ -20307,21 +20268,21 @@ func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool { // match: (ROLQconst [c] (ROLQconst [d] x)) // result: (ROLQconst [(c+d)&63] x) for { - c := v.AuxInt + c := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64ROLQconst { break } 
- d := v_0.AuxInt + d := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] v.reset(OpAMD64ROLQconst) - v.AuxInt = (c + d) & 63 + v.AuxInt = int8ToAuxInt((c + d) & 63) v.AddArg(x) return true } // match: (ROLQconst x [0]) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -20358,28 +20319,28 @@ func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool { return true } // match: (ROLW x (MOVQconst [c])) - // result: (ROLWconst [c&15] x) + // result: (ROLWconst [int8(c&15)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64ROLWconst) - v.AuxInt = c & 15 + v.AuxInt = int8ToAuxInt(int8(c & 15)) v.AddArg(x) return true } // match: (ROLW x (MOVLconst [c])) - // result: (ROLWconst [c&15] x) + // result: (ROLWconst [int8(c&15)] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64ROLWconst) - v.AuxInt = c & 15 + v.AuxInt = int8ToAuxInt(int8(c & 15)) v.AddArg(x) return true } @@ -20390,21 +20351,21 @@ func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool { // match: (ROLWconst [c] (ROLWconst [d] x)) // result: (ROLWconst [(c+d)&15] x) for { - c := v.AuxInt + c := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64ROLWconst { break } - d := v_0.AuxInt + d := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] v.reset(OpAMD64ROLWconst) - v.AuxInt = (c + d) & 15 + v.AuxInt = int8ToAuxInt((c + d) & 15) v.AddArg(x) return true } // match: (ROLWconst x [0]) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -20441,28 +20402,28 @@ func rewriteValueAMD64_OpAMD64RORB(v *Value) bool { return true } // match: (RORB x (MOVQconst [c])) - // result: (ROLBconst [(-c)&7 ] x) + // result: (ROLBconst [int8((-c)&7) ] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64ROLBconst) - v.AuxInt = (-c) & 7 + v.AuxInt = 
int8ToAuxInt(int8((-c) & 7)) v.AddArg(x) return true } // match: (RORB x (MOVLconst [c])) - // result: (ROLBconst [(-c)&7 ] x) + // result: (ROLBconst [int8((-c)&7) ] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64ROLBconst) - v.AuxInt = (-c) & 7 + v.AuxInt = int8ToAuxInt(int8((-c) & 7)) v.AddArg(x) return true } @@ -20496,28 +20457,28 @@ func rewriteValueAMD64_OpAMD64RORL(v *Value) bool { return true } // match: (RORL x (MOVQconst [c])) - // result: (ROLLconst [(-c)&31] x) + // result: (ROLLconst [int8((-c)&31)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64ROLLconst) - v.AuxInt = (-c) & 31 + v.AuxInt = int8ToAuxInt(int8((-c) & 31)) v.AddArg(x) return true } // match: (RORL x (MOVLconst [c])) - // result: (ROLLconst [(-c)&31] x) + // result: (ROLLconst [int8((-c)&31)] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64ROLLconst) - v.AuxInt = (-c) & 31 + v.AuxInt = int8ToAuxInt(int8((-c) & 31)) v.AddArg(x) return true } @@ -20551,28 +20512,28 @@ func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool { return true } // match: (RORQ x (MOVQconst [c])) - // result: (ROLQconst [(-c)&63] x) + // result: (ROLQconst [int8((-c)&63)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64ROLQconst) - v.AuxInt = (-c) & 63 + v.AuxInt = int8ToAuxInt(int8((-c) & 63)) v.AddArg(x) return true } // match: (RORQ x (MOVLconst [c])) - // result: (ROLQconst [(-c)&63] x) + // result: (ROLQconst [int8((-c)&63)] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64ROLQconst) - v.AuxInt = (-c) & 63 + v.AuxInt = int8ToAuxInt(int8((-c) & 63)) v.AddArg(x) return true } @@ -20606,28 +20567,28 @@ func rewriteValueAMD64_OpAMD64RORW(v 
*Value) bool { return true } // match: (RORW x (MOVQconst [c])) - // result: (ROLWconst [(-c)&15] x) + // result: (ROLWconst [int8((-c)&15)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64ROLWconst) - v.AuxInt = (-c) & 15 + v.AuxInt = int8ToAuxInt(int8((-c) & 15)) v.AddArg(x) return true } // match: (RORW x (MOVLconst [c])) - // result: (ROLWconst [(-c)&15] x) + // result: (ROLWconst [int8((-c)&15)] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64ROLWconst) - v.AuxInt = (-c) & 15 + v.AuxInt = int8ToAuxInt(int8((-c) & 15)) v.AddArg(x) return true } @@ -20637,28 +20598,28 @@ func rewriteValueAMD64_OpAMD64SARB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SARB x (MOVQconst [c])) - // result: (SARBconst [min(c&31,7)] x) + // result: (SARBconst [int8(min(int64(c)&31,7))] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64SARBconst) - v.AuxInt = min(c&31, 7) + v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7))) v.AddArg(x) return true } // match: (SARB x (MOVLconst [c])) - // result: (SARBconst [min(c&31,7)] x) + // result: (SARBconst [int8(min(int64(c)&31,7))] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64SARBconst) - v.AuxInt = min(c&31, 7) + v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7))) v.AddArg(x) return true } @@ -20669,7 +20630,7 @@ func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool { // match: (SARBconst x [0]) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -20695,28 +20656,28 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SARL x (MOVQconst [c])) - // result: (SARLconst [c&31] x) + // result: (SARLconst [int8(c&31)] x) for { x := v_0 if v_1.Op != 
OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64SARLconst) - v.AuxInt = c & 31 + v.AuxInt = int8ToAuxInt(int8(c & 31)) v.AddArg(x) return true } // match: (SARL x (MOVLconst [c])) - // result: (SARLconst [c&31] x) + // result: (SARLconst [int8(c&31)] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64SARLconst) - v.AuxInt = c & 31 + v.AuxInt = int8ToAuxInt(int8(c & 31)) v.AddArg(x) return true } @@ -20728,7 +20689,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { if v_1.Op != OpAMD64ADDQconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&31 == 0) { break @@ -20750,7 +20711,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { if v_1_0.Op != OpAMD64ADDQconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&31 == 0) { break @@ -20769,7 +20730,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { if v_1.Op != OpAMD64ANDQconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&31 == 31) { break @@ -20791,7 +20752,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { if v_1_0.Op != OpAMD64ANDQconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&31 == 31) { break @@ -20810,7 +20771,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { if v_1.Op != OpAMD64ADDLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&31 == 0) { break @@ -20832,7 +20793,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { if v_1_0.Op != OpAMD64ADDLconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&31 == 0) { break @@ -20851,7 +20812,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { if v_1.Op != OpAMD64ANDLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&31 == 31) { break 
@@ -20873,7 +20834,7 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { if v_1_0.Op != OpAMD64ANDLconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&31 == 31) { break @@ -20891,7 +20852,7 @@ func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool { // match: (SARLconst x [0]) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -20917,28 +20878,28 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SARQ x (MOVQconst [c])) - // result: (SARQconst [c&63] x) + // result: (SARQconst [int8(c&63)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64SARQconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) v.AddArg(x) return true } // match: (SARQ x (MOVLconst [c])) - // result: (SARQconst [c&63] x) + // result: (SARQconst [int8(c&63)] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64SARQconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) v.AddArg(x) return true } @@ -20950,7 +20911,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { if v_1.Op != OpAMD64ADDQconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&63 == 0) { break @@ -20972,7 +20933,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { if v_1_0.Op != OpAMD64ADDQconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&63 == 0) { break @@ -20991,7 +20952,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { if v_1.Op != OpAMD64ANDQconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&63 == 63) { break @@ -21013,7 +20974,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { if v_1_0.Op != OpAMD64ANDQconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := 
v_1_0.Args[0] if !(c&63 == 63) { break @@ -21032,7 +20993,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { if v_1.Op != OpAMD64ADDLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&63 == 0) { break @@ -21054,7 +21015,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { if v_1_0.Op != OpAMD64ADDLconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&63 == 0) { break @@ -21073,7 +21034,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { if v_1.Op != OpAMD64ANDLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&63 == 63) { break @@ -21095,7 +21056,7 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { if v_1_0.Op != OpAMD64ANDLconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&63 == 63) { break @@ -21113,7 +21074,7 @@ func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool { // match: (SARQconst x [0]) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -21138,28 +21099,28 @@ func rewriteValueAMD64_OpAMD64SARW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SARW x (MOVQconst [c])) - // result: (SARWconst [min(c&31,15)] x) + // result: (SARWconst [int8(min(int64(c)&31,15))] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64SARWconst) - v.AuxInt = min(c&31, 15) + v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15))) v.AddArg(x) return true } // match: (SARW x (MOVLconst [c])) - // result: (SARWconst [min(c&31,15)] x) + // result: (SARWconst [int8(min(int64(c)&31,15))] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64SARWconst) - v.AuxInt = min(c&31, 15) + v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15))) v.AddArg(x) return true } @@ -21170,7 +21131,7 @@ func 
rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool { // match: (SARWconst x [0]) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -21421,7 +21382,7 @@ func rewriteValueAMD64_OpAMD64SETA(v *Value) bool { func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool { v_0 := v.Args[0] // match: (SETAE (TESTQ x x)) - // result: (ConstBool [1]) + // result: (ConstBool [true]) for { if v_0.Op != OpAMD64TESTQ { break @@ -21431,11 +21392,11 @@ func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool { break } v.reset(OpConstBool) - v.AuxInt = 1 + v.AuxInt = boolToAuxInt(true) return true } // match: (SETAE (TESTL x x)) - // result: (ConstBool [1]) + // result: (ConstBool [true]) for { if v_0.Op != OpAMD64TESTL { break @@ -21445,11 +21406,11 @@ func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool { break } v.reset(OpConstBool) - v.AuxInt = 1 + v.AuxInt = boolToAuxInt(true) return true } // match: (SETAE (TESTW x x)) - // result: (ConstBool [1]) + // result: (ConstBool [true]) for { if v_0.Op != OpAMD64TESTW { break @@ -21459,11 +21420,11 @@ func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool { break } v.reset(OpConstBool) - v.AuxInt = 1 + v.AuxInt = boolToAuxInt(true) return true } // match: (SETAE (TESTB x x)) - // result: (ConstBool [1]) + // result: (ConstBool [true]) for { if v_0.Op != OpAMD64TESTB { break @@ -21473,7 +21434,7 @@ func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool { break } v.reset(OpConstBool) - v.AuxInt = 1 + v.AuxInt = boolToAuxInt(true) return true } // match: (SETAE (InvertFlags x)) @@ -21548,8 +21509,8 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { // match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem) // result: (SETBEstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break @@ -21557,30 +21518,30 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { x := v_1.Args[0] mem := v_2 
v.reset(OpAMD64SETBEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SETAEstore [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SETAEstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -21708,8 +21669,8 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { // match: (SETAstore [off] {sym} ptr (InvertFlags x) mem) // result: (SETBstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break @@ -21717,30 +21678,30 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SETAstore [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SETAstore) - v.AuxInt = off1 + 
off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -21862,7 +21823,7 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { func rewriteValueAMD64_OpAMD64SETB(v *Value) bool { v_0 := v.Args[0] // match: (SETB (TESTQ x x)) - // result: (ConstBool [0]) + // result: (ConstBool [false]) for { if v_0.Op != OpAMD64TESTQ { break @@ -21872,11 +21833,11 @@ func rewriteValueAMD64_OpAMD64SETB(v *Value) bool { break } v.reset(OpConstBool) - v.AuxInt = 0 + v.AuxInt = boolToAuxInt(false) return true } // match: (SETB (TESTL x x)) - // result: (ConstBool [0]) + // result: (ConstBool [false]) for { if v_0.Op != OpAMD64TESTL { break @@ -21886,11 +21847,11 @@ func rewriteValueAMD64_OpAMD64SETB(v *Value) bool { break } v.reset(OpConstBool) - v.AuxInt = 0 + v.AuxInt = boolToAuxInt(false) return true } // match: (SETB (TESTW x x)) - // result: (ConstBool [0]) + // result: (ConstBool [false]) for { if v_0.Op != OpAMD64TESTW { break @@ -21900,11 +21861,11 @@ func rewriteValueAMD64_OpAMD64SETB(v *Value) bool { break } v.reset(OpConstBool) - v.AuxInt = 0 + v.AuxInt = boolToAuxInt(false) return true } // match: (SETB (TESTB x x)) - // result: (ConstBool [0]) + // result: (ConstBool [false]) for { if v_0.Op != OpAMD64TESTB { break @@ -21914,7 +21875,7 @@ func rewriteValueAMD64_OpAMD64SETB(v *Value) bool { break } v.reset(OpConstBool) - v.AuxInt = 0 + v.AuxInt = boolToAuxInt(false) return true } // match: (SETB (BTLconst [0] x)) @@ -22078,8 +22039,8 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { // match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem) // result: (SETAEstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break @@ -22087,30 +22048,30 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64SETAEstore) - v.AuxInt = off - 
v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SETBEstore [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SETBEstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -22238,8 +22199,8 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { // match: (SETBstore [off] {sym} ptr (InvertFlags x) mem) // result: (SETAstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break @@ -22247,30 +22208,30 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64SETAstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SETBstore [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SETBstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = 
int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -22407,7 +22368,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { } x := v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { + if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 { continue } y := v_0_1 @@ -22434,7 +22395,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { } x := v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { + if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { continue } y := v_0_1 @@ -22447,46 +22408,46 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { break } // match: (SETEQ (TESTLconst [c] x)) - // cond: isUint32PowerOfTwo(c) - // result: (SETAE (BTLconst [log2uint32(c)] x)) + // cond: isUint32PowerOfTwo(int64(c)) + // result: (SETAE (BTLconst [int8(log32(c))] x)) for { if v_0.Op != OpAMD64TESTLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUint32PowerOfTwo(c)) { + if !(isUint32PowerOfTwo(int64(c))) { break } v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = log2uint32(c) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) v0.AddArg(x) v.AddArg(v0) return true } // match: (SETEQ (TESTQconst [c] x)) - // cond: isUint64PowerOfTwo(c) - // result: (SETAE (BTQconst [log2(c)] x)) + // cond: isUint64PowerOfTwo(int64(c)) + // result: (SETAE (BTQconst [int8(log32(c))] x)) for { if v_0.Op != OpAMD64TESTQconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUint64PowerOfTwo(c)) { + if !(isUint64PowerOfTwo(int64(c))) { break } v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) v0.AddArg(x) v.AddArg(v0) return true } // match: (SETEQ (TESTQ (MOVQconst [c]) x)) // cond: isUint64PowerOfTwo(c) 
- // result: (SETAE (BTQconst [log2(c)] x)) + // result: (SETAE (BTQconst [int8(log2(c))] x)) for { if v_0.Op != OpAMD64TESTQ { break @@ -22498,14 +22459,14 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { if v_0_0.Op != OpAMD64MOVQconst { continue } - c := v_0_0.AuxInt + c := auxIntToInt64(v_0_0.AuxInt) x := v_0_1 if !(isUint64PowerOfTwo(c)) { continue } v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) + v0.AuxInt = int8ToAuxInt(int8(log2(c))) v0.AddArg(x) v.AddArg(v0) return true @@ -22515,16 +22476,16 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { // match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _))) // result: (SETNE (CMPLconst [0] s)) for { - if v_0.Op != OpAMD64CMPLconst || v_0.AuxInt != 1 { + if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 { break } s := v_0.Args[0] - if s.Op != OpAMD64ANDLconst || s.AuxInt != 1 { + if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 { break } v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v0.AddArg(s) v.AddArg(v0) return true @@ -22532,16 +22493,16 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { // match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _))) // result: (SETNE (CMPQconst [0] s)) for { - if v_0.Op != OpAMD64CMPQconst || v_0.AuxInt != 1 { + if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 { break } s := v_0.Args[0] - if s.Op != OpAMD64ANDQconst || s.AuxInt != 1 { + if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 { break } v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v0.AddArg(s) v.AddArg(v0) return true @@ -22558,11 +22519,11 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { + 
if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { continue } x := z1_0.Args[0] @@ -22572,7 +22533,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { } v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 + v0.AuxInt = int8ToAuxInt(63) v0.AddArg(x) v.AddArg(v0) return true @@ -22591,11 +22552,11 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 { continue } x := z1_0.Args[0] @@ -22605,7 +22566,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { } v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 31 + v0.AuxInt = int8ToAuxInt(31) v0.AddArg(x) v.AddArg(v0) return true @@ -22624,11 +22585,11 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { + if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { continue } x := z1_0.Args[0] @@ -22638,7 +22599,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { } v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int8ToAuxInt(0) v0.AddArg(x) v.AddArg(v0) return true @@ -22657,11 +22618,11 @@ func 
rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { + if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { continue } x := z1_0.Args[0] @@ -22671,7 +22632,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { } v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int8ToAuxInt(0) v0.AddArg(x) v.AddArg(v0) return true @@ -22690,7 +22651,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } x := z1.Args[0] @@ -22700,7 +22661,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { } v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 + v0.AuxInt = int8ToAuxInt(63) v0.AddArg(x) v.AddArg(v0) return true @@ -22719,7 +22680,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } x := z1.Args[0] @@ -22729,7 +22690,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { } v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 + v0.AuxInt = int8ToAuxInt(31) v0.AddArg(x) v.AddArg(v0) return true @@ -22808,8 +22769,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { // match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) // result: 
(SETAEstore [off] {sym} ptr (BTL x y) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTL { break @@ -22823,14 +22784,14 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { } x := v_1_0.Args[1] v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64MOVLconst || v_1_0_0.AuxInt != 1 { + if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 { continue } y := v_1_1 mem := v_2 v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) v0.AddArg2(x, y) v.AddArg3(ptr, v0, mem) @@ -22841,8 +22802,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { // match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTQ { break @@ -22856,14 +22817,14 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { } x := v_1_0.Args[1] v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64MOVQconst || v_1_0_0.AuxInt != 1 { + if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 { continue } y := v_1_1 mem := v_2 v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) v0.AddArg2(x, y) v.AddArg3(ptr, v0, mem) @@ -22872,61 +22833,61 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { break } // match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem) - // cond: isUint32PowerOfTwo(c) - // result: (SETAEstore [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem) + // cond: isUint32PowerOfTwo(int64(c)) + // result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) x := v_1.Args[0] mem := v_2 - if !(isUint32PowerOfTwo(c)) { + if !(isUint32PowerOfTwo(int64(c))) { break } v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = log2uint32(c) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem) - // cond: isUint64PowerOfTwo(c) - // result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem) + // cond: isUint64PowerOfTwo(int64(c)) + // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTQconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) x := v_1.Args[0] mem := v_2 - if !(isUint64PowerOfTwo(c)) { + if !(isUint64PowerOfTwo(int64(c))) { break } v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) // cond: isUint64PowerOfTwo(c) - // result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem) + // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log2(c))] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTQ { break @@ -22938,17 +22899,17 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { if v_1_0.Op != OpAMD64MOVQconst { continue } - c := v_1_0.AuxInt + c := auxIntToInt64(v_1_0.AuxInt) 
x := v_1_1 mem := v_2 if !(isUint64PowerOfTwo(c)) { continue } v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) + v0.AuxInt = int8ToAuxInt(int8(log2(c))) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true @@ -22958,22 +22919,22 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { // match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) // result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 - if v_1.Op != OpAMD64CMPLconst || v_1.AuxInt != 1 { + if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 { break } s := v_1.Args[0] - if s.Op != OpAMD64ANDLconst || s.AuxInt != 1 { + if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 { break } mem := v_2 v.reset(OpAMD64SETNEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v0.AddArg(s) v.AddArg3(ptr, v0, mem) return true @@ -22981,22 +22942,22 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { // match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) // result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 - if v_1.Op != OpAMD64CMPQconst || v_1.AuxInt != 1 { + if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 { break } s := v_1.Args[0] - if s.Op != OpAMD64ANDQconst || s.AuxInt != 1 { + if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 { break } mem := v_2 v.reset(OpAMD64SETNEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, 
types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v0.AddArg(s) v.AddArg3(ptr, v0, mem) return true @@ -23005,8 +22966,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { // cond: z1==z2 // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTQ { break @@ -23016,11 +22977,11 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v_1_1 := v_1.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { z1 := v_1_0 - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { continue } x := z1_0.Args[0] @@ -23030,10 +22991,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { continue } v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 + v0.AuxInt = int8ToAuxInt(63) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true @@ -23044,8 +23005,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { // cond: z1==z2 // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTL { break @@ -23055,11 +23016,11 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v_1_1 := v_1.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { z1 := v_1_0 - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRLconst || z1_0.AuxInt != 31 { + if z1_0.Op != OpAMD64SHRLconst || 
auxIntToInt8(z1_0.AuxInt) != 31 { continue } x := z1_0.Args[0] @@ -23069,10 +23030,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { continue } v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 + v0.AuxInt = int8ToAuxInt(31) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true @@ -23083,8 +23044,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { // cond: z1==z2 // result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTQ { break @@ -23094,11 +23055,11 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v_1_1 := v_1.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { z1 := v_1_0 - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { + if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { continue } x := z1_0.Args[0] @@ -23108,10 +23069,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { continue } v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int8ToAuxInt(0) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true @@ -23122,8 +23083,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { // cond: z1==z2 // result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTL { break @@ -23133,11 +23094,11 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v_1_1 := v_1.Args[1] for _i0 := 0; 
_i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { z1 := v_1_0 - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { + if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { continue } x := z1_0.Args[0] @@ -23147,10 +23108,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { continue } v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int8ToAuxInt(0) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true @@ -23161,8 +23122,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { // cond: z1==z2 // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTQ { break @@ -23172,7 +23133,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v_1_1 := v_1.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { z1 := v_1_0 - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } x := z1.Args[0] @@ -23182,10 +23143,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { continue } v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 + v0.AuxInt = int8ToAuxInt(63) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true @@ -23196,8 +23157,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { // cond: z1==z2 // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if 
v_1.Op != OpAMD64TESTL { break @@ -23207,7 +23168,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { v_1_1 := v_1.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { z1 := v_1_0 - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } x := z1.Args[0] @@ -23217,10 +23178,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { continue } v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 + v0.AuxInt = int8ToAuxInt(31) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true @@ -23230,8 +23191,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { // match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem) // result: (SETEQstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break @@ -23239,30 +23200,30 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64SETEQstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SETEQstore [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SETEQstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) 
return true } @@ -23520,8 +23481,8 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { // match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem) // result: (SETLEstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break @@ -23529,30 +23490,30 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64SETLEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SETGEstore [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SETGEstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -23680,8 +23641,8 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { // match: (SETGstore [off] {sym} ptr (InvertFlags x) mem) // result: (SETLstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break @@ -23689,30 +23650,30 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64SETLstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // 
cond: is32Bit(int64(off1)+int64(off2)) // result: (SETGstore [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SETGstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -23970,8 +23931,8 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { // match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem) // result: (SETGEstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break @@ -23979,30 +23940,30 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64SETGEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SETLEstore [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SETLEstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -24130,8 +24091,8 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { // match: (SETLstore [off] {sym} ptr 
(InvertFlags x) mem) // result: (SETGstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break @@ -24139,30 +24100,30 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64SETGstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SETLstore [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SETLstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -24323,7 +24284,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { } x := v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { + if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 { continue } y := v_0_1 @@ -24350,7 +24311,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { } x := v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { + if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { continue } y := v_0_1 @@ -24363,46 +24324,46 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { break } // match: (SETNE (TESTLconst [c] x)) - // cond: isUint32PowerOfTwo(c) - // result: (SETB (BTLconst [log2uint32(c)] x)) + // cond: isUint32PowerOfTwo(int64(c)) + // result: (SETB (BTLconst 
[int8(log32(c))] x)) for { if v_0.Op != OpAMD64TESTLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUint32PowerOfTwo(c)) { + if !(isUint32PowerOfTwo(int64(c))) { break } v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = log2uint32(c) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) v0.AddArg(x) v.AddArg(v0) return true } // match: (SETNE (TESTQconst [c] x)) - // cond: isUint64PowerOfTwo(c) - // result: (SETB (BTQconst [log2(c)] x)) + // cond: isUint64PowerOfTwo(int64(c)) + // result: (SETB (BTQconst [int8(log32(c))] x)) for { if v_0.Op != OpAMD64TESTQconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUint64PowerOfTwo(c)) { + if !(isUint64PowerOfTwo(int64(c))) { break } v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) v0.AddArg(x) v.AddArg(v0) return true } // match: (SETNE (TESTQ (MOVQconst [c]) x)) // cond: isUint64PowerOfTwo(c) - // result: (SETB (BTQconst [log2(c)] x)) + // result: (SETB (BTQconst [int8(log2(c))] x)) for { if v_0.Op != OpAMD64TESTQ { break @@ -24414,14 +24375,14 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { if v_0_0.Op != OpAMD64MOVQconst { continue } - c := v_0_0.AuxInt + c := auxIntToInt64(v_0_0.AuxInt) x := v_0_1 if !(isUint64PowerOfTwo(c)) { continue } v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) + v0.AuxInt = int8ToAuxInt(int8(log2(c))) v0.AddArg(x) v.AddArg(v0) return true @@ -24431,16 +24392,16 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { // match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _))) // result: (SETEQ (CMPLconst [0] s)) for { - if v_0.Op != OpAMD64CMPLconst || v_0.AuxInt != 1 { + if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 { break } s := v_0.Args[0] - if s.Op != OpAMD64ANDLconst || s.AuxInt != 1 { + if s.Op != 
OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 { break } v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v0.AddArg(s) v.AddArg(v0) return true @@ -24448,16 +24409,16 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { // match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _))) // result: (SETEQ (CMPQconst [0] s)) for { - if v_0.Op != OpAMD64CMPQconst || v_0.AuxInt != 1 { + if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 { break } s := v_0.Args[0] - if s.Op != OpAMD64ANDQconst || s.AuxInt != 1 { + if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 { break } v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v0.AddArg(s) v.AddArg(v0) return true @@ -24474,11 +24435,11 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { continue } x := z1_0.Args[0] @@ -24488,7 +24449,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { } v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 + v0.AuxInt = int8ToAuxInt(63) v0.AddArg(x) v.AddArg(v0) return true @@ -24507,11 +24468,11 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { + if z1_0.Op != OpAMD64SHRQconst || 
auxIntToInt8(z1_0.AuxInt) != 31 { continue } x := z1_0.Args[0] @@ -24521,7 +24482,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { } v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 31 + v0.AuxInt = int8ToAuxInt(31) v0.AddArg(x) v.AddArg(v0) return true @@ -24540,11 +24501,11 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { + if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { continue } x := z1_0.Args[0] @@ -24554,7 +24515,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { } v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int8ToAuxInt(0) v0.AddArg(x) v.AddArg(v0) return true @@ -24573,11 +24534,11 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { + if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { continue } x := z1_0.Args[0] @@ -24587,7 +24548,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { } v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int8ToAuxInt(0) v0.AddArg(x) v.AddArg(v0) return true @@ -24606,7 +24567,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + if z1.Op != 
OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } x := z1.Args[0] @@ -24616,7 +24577,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { } v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 + v0.AuxInt = int8ToAuxInt(63) v0.AddArg(x) v.AddArg(v0) return true @@ -24635,7 +24596,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } x := z1.Args[0] @@ -24645,7 +24606,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { } v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 + v0.AuxInt = int8ToAuxInt(31) v0.AddArg(x) v.AddArg(v0) return true @@ -24724,8 +24685,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { // match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) // result: (SETBstore [off] {sym} ptr (BTL x y) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTL { break @@ -24739,14 +24700,14 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { } x := v_1_0.Args[1] v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64MOVLconst || v_1_0_0.AuxInt != 1 { + if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 { continue } y := v_1_1 mem := v_2 v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) v0.AddArg2(x, y) v.AddArg3(ptr, v0, mem) @@ -24757,8 +24718,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) // result: (SETBstore [off] {sym} ptr (BTQ x y) mem) for { - off := v.AuxInt - sym := 
v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTQ { break @@ -24772,14 +24733,14 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { } x := v_1_0.Args[1] v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64MOVQconst || v_1_0_0.AuxInt != 1 { + if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 { continue } y := v_1_1 mem := v_2 v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) v0.AddArg2(x, y) v.AddArg3(ptr, v0, mem) @@ -24788,61 +24749,61 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { break } // match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem) - // cond: isUint32PowerOfTwo(c) - // result: (SETBstore [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem) + // cond: isUint32PowerOfTwo(int64(c)) + // result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) x := v_1.Args[0] mem := v_2 - if !(isUint32PowerOfTwo(c)) { + if !(isUint32PowerOfTwo(int64(c))) { break } v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = log2uint32(c) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem) - // cond: isUint64PowerOfTwo(c) - // result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem) + // cond: isUint64PowerOfTwo(int64(c)) + // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != 
OpAMD64TESTQconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) x := v_1.Args[0] mem := v_2 - if !(isUint64PowerOfTwo(c)) { + if !(isUint64PowerOfTwo(int64(c))) { break } v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) // cond: isUint64PowerOfTwo(c) - // result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem) + // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log2(c))] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTQ { break @@ -24854,17 +24815,17 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { if v_1_0.Op != OpAMD64MOVQconst { continue } - c := v_1_0.AuxInt + c := auxIntToInt64(v_1_0.AuxInt) x := v_1_1 mem := v_2 if !(isUint64PowerOfTwo(c)) { continue } v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) + v0.AuxInt = int8ToAuxInt(int8(log2(c))) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true @@ -24874,22 +24835,22 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { // match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) // result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 - if v_1.Op != OpAMD64CMPLconst || v_1.AuxInt != 1 { + if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 { break } s := v_1.Args[0] - if s.Op != OpAMD64ANDLconst || s.AuxInt != 1 { + if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 { break } mem := 
v_2 v.reset(OpAMD64SETEQstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v0.AddArg(s) v.AddArg3(ptr, v0, mem) return true @@ -24897,22 +24858,22 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { // match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) // result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 - if v_1.Op != OpAMD64CMPQconst || v_1.AuxInt != 1 { + if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 { break } s := v_1.Args[0] - if s.Op != OpAMD64ANDQconst || s.AuxInt != 1 { + if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 { break } mem := v_2 v.reset(OpAMD64SETEQstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v0.AddArg(s) v.AddArg3(ptr, v0, mem) return true @@ -24921,8 +24882,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { // cond: z1==z2 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTQ { break @@ -24932,11 +24893,11 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v_1_1 := v_1.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { z1 := v_1_0 - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { continue } x := z1_0.Args[0] @@ -24946,10 +24907,10 @@ func 
rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { continue } v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 + v0.AuxInt = int8ToAuxInt(63) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true @@ -24960,8 +24921,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { // cond: z1==z2 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTL { break @@ -24971,11 +24932,11 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v_1_1 := v_1.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { z1 := v_1_0 - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRLconst || z1_0.AuxInt != 31 { + if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 { continue } x := z1_0.Args[0] @@ -24985,10 +24946,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { continue } v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 + v0.AuxInt = int8ToAuxInt(31) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true @@ -24999,8 +24960,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { // cond: z1==z2 // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTQ { break @@ -25010,11 +24971,11 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v_1_1 := v_1.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { z1 := v_1_0 - if z1.Op != OpAMD64SHRQconst 
|| z1.AuxInt != 63 { + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { + if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { continue } x := z1_0.Args[0] @@ -25024,10 +24985,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { continue } v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int8ToAuxInt(0) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true @@ -25038,8 +24999,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { // cond: z1==z2 // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTL { break @@ -25049,11 +25010,11 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v_1_1 := v_1.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { z1 := v_1_0 - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { + if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { continue } x := z1_0.Args[0] @@ -25063,10 +25024,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { continue } v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int8ToAuxInt(0) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true @@ -25077,8 +25038,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { // cond: z1==z2 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTQ { break @@ -25088,7 +25049,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v_1_1 := v_1.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { z1 := v_1_0 - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } x := z1.Args[0] @@ -25098,10 +25059,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { continue } v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 + v0.AuxInt = int8ToAuxInt(63) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true @@ -25112,8 +25073,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { // cond: z1==z2 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64TESTL { break @@ -25123,7 +25084,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { v_1_1 := v_1.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { z1 := v_1_0 - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } x := z1.Args[0] @@ -25133,10 +25094,10 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { continue } v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 + v0.AuxInt = int8ToAuxInt(31) v0.AddArg(x) v.AddArg3(ptr, v0, mem) return true @@ -25146,8 +25107,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { // match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem) // result: (SETNEstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := 
v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break @@ -25155,30 +25116,30 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpAMD64SETNEstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SETNEstore [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SETNEstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -25302,28 +25263,28 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SHLL x (MOVQconst [c])) - // result: (SHLLconst [c&31] x) + // result: (SHLLconst [int8(c&31)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64SHLLconst) - v.AuxInt = c & 31 + v.AuxInt = int8ToAuxInt(int8(c & 31)) v.AddArg(x) return true } // match: (SHLL x (MOVLconst [c])) - // result: (SHLLconst [c&31] x) + // result: (SHLLconst [int8(c&31)] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64SHLLconst) - v.AuxInt = c & 31 + v.AuxInt = int8ToAuxInt(int8(c & 31)) v.AddArg(x) return true } @@ -25335,7 +25296,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { if v_1.Op != OpAMD64ADDQconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y 
:= v_1.Args[0] if !(c&31 == 0) { break @@ -25357,7 +25318,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { if v_1_0.Op != OpAMD64ADDQconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&31 == 0) { break @@ -25376,7 +25337,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { if v_1.Op != OpAMD64ANDQconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&31 == 31) { break @@ -25398,7 +25359,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { if v_1_0.Op != OpAMD64ANDQconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&31 == 31) { break @@ -25417,7 +25378,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { if v_1.Op != OpAMD64ADDLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&31 == 0) { break @@ -25439,7 +25400,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { if v_1_0.Op != OpAMD64ADDLconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&31 == 0) { break @@ -25458,7 +25419,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { if v_1.Op != OpAMD64ANDLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&31 == 31) { break @@ -25480,7 +25441,7 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { if v_1_0.Op != OpAMD64ANDLconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&31 == 31) { break @@ -25498,19 +25459,19 @@ func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool { // match: (SHLLconst [1] (SHRLconst [1] x)) // result: (BTRLconst [0] x) for { - if v.AuxInt != 1 || v_0.Op != OpAMD64SHRLconst || v_0.AuxInt != 1 { + if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 { break } x := v_0.Args[0] v.reset(OpAMD64BTRLconst) - v.AuxInt = 0 + v.AuxInt = int8ToAuxInt(0) v.AddArg(x) return true } // match: 
(SHLLconst x [0]) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -25536,28 +25497,28 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SHLQ x (MOVQconst [c])) - // result: (SHLQconst [c&63] x) + // result: (SHLQconst [int8(c&63)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64SHLQconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) v.AddArg(x) return true } // match: (SHLQ x (MOVLconst [c])) - // result: (SHLQconst [c&63] x) + // result: (SHLQconst [int8(c&63)] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64SHLQconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) v.AddArg(x) return true } @@ -25569,7 +25530,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { if v_1.Op != OpAMD64ADDQconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&63 == 0) { break @@ -25591,7 +25552,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { if v_1_0.Op != OpAMD64ADDQconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&63 == 0) { break @@ -25610,7 +25571,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { if v_1.Op != OpAMD64ANDQconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&63 == 63) { break @@ -25632,7 +25593,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { if v_1_0.Op != OpAMD64ANDQconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&63 == 63) { break @@ -25651,7 +25612,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { if v_1.Op != OpAMD64ADDLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&63 == 0) { break @@ -25673,7 +25634,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) 
bool { if v_1_0.Op != OpAMD64ADDLconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&63 == 0) { break @@ -25692,7 +25653,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { if v_1.Op != OpAMD64ANDLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&63 == 63) { break @@ -25714,7 +25675,7 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { if v_1_0.Op != OpAMD64ANDLconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&63 == 63) { break @@ -25732,19 +25693,19 @@ func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool { // match: (SHLQconst [1] (SHRQconst [1] x)) // result: (BTRQconst [0] x) for { - if v.AuxInt != 1 || v_0.Op != OpAMD64SHRQconst || v_0.AuxInt != 1 { + if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 { break } x := v_0.Args[0] v.reset(OpAMD64BTRQconst) - v.AuxInt = 0 + v.AuxInt = int8ToAuxInt(0) v.AddArg(x) return true } // match: (SHLQconst x [0]) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -25782,35 +25743,35 @@ func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool { v_0 := v.Args[0] // match: (SHRB x (MOVQconst [c])) // cond: c&31 < 8 - // result: (SHRBconst [c&31] x) + // result: (SHRBconst [int8(c&31)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c&31 < 8) { break } v.reset(OpAMD64SHRBconst) - v.AuxInt = c & 31 + v.AuxInt = int8ToAuxInt(int8(c & 31)) v.AddArg(x) return true } // match: (SHRB x (MOVLconst [c])) // cond: c&31 < 8 - // result: (SHRBconst [c&31] x) + // result: (SHRBconst [int8(c&31)] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) if !(c&31 < 8) { break } v.reset(OpAMD64SHRBconst) - v.AuxInt = c & 31 + v.AuxInt = int8ToAuxInt(int8(c & 31)) v.AddArg(x) return true } @@ -25821,12 
+25782,12 @@ func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool { if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c&31 >= 8) { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SHRB _ (MOVLconst [c])) @@ -25836,12 +25797,12 @@ func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool { if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) if !(c&31 >= 8) { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } return false @@ -25851,7 +25812,7 @@ func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool { // match: (SHRBconst x [0]) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -25865,28 +25826,28 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SHRL x (MOVQconst [c])) - // result: (SHRLconst [c&31] x) + // result: (SHRLconst [int8(c&31)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64SHRLconst) - v.AuxInt = c & 31 + v.AuxInt = int8ToAuxInt(int8(c & 31)) v.AddArg(x) return true } // match: (SHRL x (MOVLconst [c])) - // result: (SHRLconst [c&31] x) + // result: (SHRLconst [int8(c&31)] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64SHRLconst) - v.AuxInt = c & 31 + v.AuxInt = int8ToAuxInt(int8(c & 31)) v.AddArg(x) return true } @@ -25898,7 +25859,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { if v_1.Op != OpAMD64ADDQconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&31 == 0) { break @@ -25920,7 +25881,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { if v_1_0.Op != OpAMD64ADDQconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&31 == 0) { break @@ -25939,7 
+25900,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { if v_1.Op != OpAMD64ANDQconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&31 == 31) { break @@ -25961,7 +25922,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { if v_1_0.Op != OpAMD64ANDQconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&31 == 31) { break @@ -25980,7 +25941,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { if v_1.Op != OpAMD64ADDLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&31 == 0) { break @@ -26002,7 +25963,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { if v_1_0.Op != OpAMD64ADDLconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&31 == 0) { break @@ -26021,7 +25982,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { if v_1.Op != OpAMD64ANDLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&31 == 31) { break @@ -26043,7 +26004,7 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { if v_1_0.Op != OpAMD64ANDLconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&31 == 31) { break @@ -26061,19 +26022,19 @@ func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool { // match: (SHRLconst [1] (SHLLconst [1] x)) // result: (BTRLconst [31] x) for { - if v.AuxInt != 1 || v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 1 { + if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 { break } x := v_0.Args[0] v.reset(OpAMD64BTRLconst) - v.AuxInt = 31 + v.AuxInt = int8ToAuxInt(31) v.AddArg(x) return true } // match: (SHRLconst x [0]) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -26087,28 +26048,28 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SHRQ x (MOVQconst [c])) - // result: 
(SHRQconst [c&63] x) + // result: (SHRQconst [int8(c&63)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64SHRQconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) v.AddArg(x) return true } // match: (SHRQ x (MOVLconst [c])) - // result: (SHRQconst [c&63] x) + // result: (SHRQconst [int8(c&63)] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64SHRQconst) - v.AuxInt = c & 63 + v.AuxInt = int8ToAuxInt(int8(c & 63)) v.AddArg(x) return true } @@ -26120,7 +26081,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { if v_1.Op != OpAMD64ADDQconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&63 == 0) { break @@ -26142,7 +26103,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { if v_1_0.Op != OpAMD64ADDQconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&63 == 0) { break @@ -26161,7 +26122,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { if v_1.Op != OpAMD64ANDQconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&63 == 63) { break @@ -26183,7 +26144,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { if v_1_0.Op != OpAMD64ANDQconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&63 == 63) { break @@ -26202,7 +26163,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { if v_1.Op != OpAMD64ADDLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&63 == 0) { break @@ -26224,7 +26185,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { if v_1_0.Op != OpAMD64ADDLconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&63 == 0) { break @@ -26243,7 +26204,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { if v_1.Op != OpAMD64ANDLconst { break } - c 
:= v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) y := v_1.Args[0] if !(c&63 == 63) { break @@ -26265,7 +26226,7 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { if v_1_0.Op != OpAMD64ANDLconst { break } - c := v_1_0.AuxInt + c := auxIntToInt32(v_1_0.AuxInt) y := v_1_0.Args[0] if !(c&63 == 63) { break @@ -26283,19 +26244,19 @@ func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool { // match: (SHRQconst [1] (SHLQconst [1] x)) // result: (BTRQconst [63] x) for { - if v.AuxInt != 1 || v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 1 { + if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 { break } x := v_0.Args[0] v.reset(OpAMD64BTRQconst) - v.AuxInt = 63 + v.AuxInt = int8ToAuxInt(63) v.AddArg(x) return true } // match: (SHRQconst x [0]) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -26309,35 +26270,35 @@ func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool { v_0 := v.Args[0] // match: (SHRW x (MOVQconst [c])) // cond: c&31 < 16 - // result: (SHRWconst [c&31] x) + // result: (SHRWconst [int8(c&31)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c&31 < 16) { break } v.reset(OpAMD64SHRWconst) - v.AuxInt = c & 31 + v.AuxInt = int8ToAuxInt(int8(c & 31)) v.AddArg(x) return true } // match: (SHRW x (MOVLconst [c])) // cond: c&31 < 16 - // result: (SHRWconst [c&31] x) + // result: (SHRWconst [int8(c&31)] x) for { x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) if !(c&31 < 16) { break } v.reset(OpAMD64SHRWconst) - v.AuxInt = c & 31 + v.AuxInt = int8ToAuxInt(int8(c & 31)) v.AddArg(x) return true } @@ -26348,12 +26309,12 @@ func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool { if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c&31 >= 16) { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } 
// match: (SHRW _ (MOVLconst [c])) @@ -26363,12 +26324,12 @@ func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool { if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) if !(c&31 >= 16) { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } return false @@ -26378,7 +26339,7 @@ func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool { // match: (SHRWconst x [0]) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -26398,9 +26359,9 @@ func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool { if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64SUBLconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg(x) return true } @@ -26410,11 +26371,11 @@ func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool { if v_0.Op != OpAMD64MOVLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_1 v.reset(OpAMD64NEGL) v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type) - v0.AuxInt = c + v0.AuxInt = int32ToAuxInt(c) v0.AddArg(x) v.AddArg(v0) return true @@ -26486,24 +26447,24 @@ func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SUBLload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SUBLload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -26557,24 +26518,24 @@ func 
rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SUBLmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SUBLmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -26609,36 +26570,36 @@ func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool { b := v.Block // match: (SUBQ x (MOVQconst [c])) // cond: is32Bit(c) - // result: (SUBQconst x [c]) + // result: (SUBQconst x [int32(c)]) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { break } v.reset(OpAMD64SUBQconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } // match: (SUBQ (MOVQconst [c]) x) // cond: is32Bit(c) - // result: (NEGQ (SUBQconst x [c])) + // result: (NEGQ (SUBQconst x [int32(c)])) for { if v_0.Op != OpAMD64MOVQconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 if !(is32Bit(c)) { break } v.reset(OpAMD64NEGQ) v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type) - v0.AuxInt = c + v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -26765,24 +26726,24 @@ func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SUBQload [off1+off2] {sym} val base mem) for { - off1 := 
v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SUBQload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -26836,24 +26797,24 @@ func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SUBQmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SUBQmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -26916,24 +26877,24 @@ func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SUBSDload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SUBSDload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = 
int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -27016,24 +26977,24 @@ func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (SUBSSload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64SUBSSload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -27594,7 +27555,7 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { } y := v_0.Args[1] v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVLconst || v_0_0.AuxInt != 1 { + if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 { continue } x := v_1 @@ -27605,20 +27566,20 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { break } // match: (XORL (MOVLconst [c]) x) - // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 - // result: (BTCLconst [log2uint32(c)] x) + // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128 + // result: (BTCLconst [int8(log32(c))] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVLconst { continue } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_1 - if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) { + if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) { continue } v.reset(OpAMD64BTCLconst) - v.AuxInt = log2uint32(c) + v.AuxInt = int8ToAuxInt(int8(log32(c))) v.AddArg(x) return true } @@ -27632,9 +27593,9 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { if v_1.Op != 
OpAMD64MOVLconst { continue } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpAMD64XORLconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg(x) return true } @@ -27648,17 +27609,17 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { if v_0.Op != OpAMD64SHLLconst { continue } - c := v_0.AuxInt + c := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if v_1.Op != OpAMD64SHRLconst { continue } - d := v_1.AuxInt + d := auxIntToInt8(v_1.AuxInt) if x != v_1.Args[0] || !(d == 32-c) { continue } v.reset(OpAMD64ROLLconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -27673,17 +27634,17 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { if v_0.Op != OpAMD64SHLLconst { continue } - c := v_0.AuxInt + c := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if v_1.Op != OpAMD64SHRWconst { continue } - d := v_1.AuxInt + d := auxIntToInt8(v_1.AuxInt) if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) { continue } v.reset(OpAMD64ROLWconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -27698,17 +27659,17 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { if v_0.Op != OpAMD64SHLLconst { continue } - c := v_0.AuxInt + c := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if v_1.Op != OpAMD64SHRBconst { continue } - d := v_1.AuxInt + d := auxIntToInt8(v_1.AuxInt) if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { continue } v.reset(OpAMD64ROLBconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -27755,23 +27716,23 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { v_0 := v.Args[0] // match: (XORLconst [c] x) - // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 - // result: (BTCLconst [log2uint32(c)] x) + // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128 + // result: (BTCLconst [int8(log32(c))] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) 
{ + if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) { break } v.reset(OpAMD64BTCLconst) - v.AuxInt = log2uint32(c) + v.AuxInt = int8ToAuxInt(int8(log32(c))) v.AddArg(x) return true } // match: (XORLconst [1] (SETNE x)) // result: (SETEQ x) for { - if v.AuxInt != 1 || v_0.Op != OpAMD64SETNE { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE { break } x := v_0.Args[0] @@ -27782,7 +27743,7 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { // match: (XORLconst [1] (SETEQ x)) // result: (SETNE x) for { - if v.AuxInt != 1 || v_0.Op != OpAMD64SETEQ { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ { break } x := v_0.Args[0] @@ -27793,7 +27754,7 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { // match: (XORLconst [1] (SETL x)) // result: (SETGE x) for { - if v.AuxInt != 1 || v_0.Op != OpAMD64SETL { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL { break } x := v_0.Args[0] @@ -27804,7 +27765,7 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { // match: (XORLconst [1] (SETGE x)) // result: (SETL x) for { - if v.AuxInt != 1 || v_0.Op != OpAMD64SETGE { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE { break } x := v_0.Args[0] @@ -27815,7 +27776,7 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { // match: (XORLconst [1] (SETLE x)) // result: (SETG x) for { - if v.AuxInt != 1 || v_0.Op != OpAMD64SETLE { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE { break } x := v_0.Args[0] @@ -27826,7 +27787,7 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { // match: (XORLconst [1] (SETG x)) // result: (SETLE x) for { - if v.AuxInt != 1 || v_0.Op != OpAMD64SETG { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG { break } x := v_0.Args[0] @@ -27837,7 +27798,7 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { // match: (XORLconst [1] (SETB x)) // result: (SETAE x) for { - if v.AuxInt != 1 || v_0.Op != OpAMD64SETB { + if auxIntToInt32(v.AuxInt) != 1 || 
v_0.Op != OpAMD64SETB { break } x := v_0.Args[0] @@ -27848,7 +27809,7 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { // match: (XORLconst [1] (SETAE x)) // result: (SETB x) for { - if v.AuxInt != 1 || v_0.Op != OpAMD64SETAE { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE { break } x := v_0.Args[0] @@ -27859,7 +27820,7 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { // match: (XORLconst [1] (SETBE x)) // result: (SETA x) for { - if v.AuxInt != 1 || v_0.Op != OpAMD64SETBE { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE { break } x := v_0.Args[0] @@ -27870,7 +27831,7 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { // match: (XORLconst [1] (SETA x)) // result: (SETBE x) for { - if v.AuxInt != 1 || v_0.Op != OpAMD64SETA { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA { break } x := v_0.Args[0] @@ -27936,23 +27897,23 @@ func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64XORLconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -27987,24 +27948,24 @@ func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // 
match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (XORLload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64XORLload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -28058,24 +28019,24 @@ func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (XORLmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64XORLmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -28116,7 +28077,7 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { } y := v_0.Args[1] v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVQconst || v_0_0.AuxInt != 1 { + if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 { continue } x := v_1 @@ -28128,19 +28089,19 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { } // match: (XORQ (MOVQconst [c]) x) // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 - // result: (BTCQconst [log2(c)] x) + // result: (BTCQconst [int8(log2(c))] x) 
for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVQconst { continue } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { continue } v.reset(OpAMD64BTCQconst) - v.AuxInt = log2(c) + v.AuxInt = int8ToAuxInt(int8(log2(c))) v.AddArg(x) return true } @@ -28148,19 +28109,19 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { } // match: (XORQ x (MOVQconst [c])) // cond: is32Bit(c) - // result: (XORQconst [c] x) + // result: (XORQconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpAMD64MOVQconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { continue } v.reset(OpAMD64XORQconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -28174,17 +28135,17 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { if v_0.Op != OpAMD64SHLQconst { continue } - c := v_0.AuxInt + c := auxIntToInt8(v_0.AuxInt) x := v_0.Args[0] if v_1.Op != OpAMD64SHRQconst { continue } - d := v_1.AuxInt + d := auxIntToInt8(v_1.AuxInt) if x != v_1.Args[0] || !(d == 64-c) { continue } v.reset(OpAMD64ROLQconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(c) v.AddArg(x) return true } @@ -28231,16 +28192,16 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool { v_0 := v.Args[0] // match: (XORQconst [c] x) - // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 - // result: (BTCQconst [log2(c)] x) + // cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128 + // result: (BTCQconst [int8(log32(c))] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { + if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) { break } v.reset(OpAMD64BTCQconst) - v.AuxInt = log2(c) + v.AuxInt = int8ToAuxInt(int8(log32(c))) v.AddArg(x) return true } @@ -28304,23 +28265,23 @@ func 
rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) - // result: (XORQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) for { - valoff1 := v.AuxInt - sym := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2)) { + if !(ValAndOff(valoff1).canAdd32(off2)) { break } v.reset(OpAMD64XORQconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) v.AddArg2(base, mem) return true } @@ -28355,24 +28316,24 @@ func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (XORQload [off1+off2] {sym} val base mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64XORQload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, base, mem) return true } @@ -28426,24 +28387,24 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(off1+off2) + // cond: 
is32Bit(int64(off1)+int64(off2)) // result: (XORQmodify [off1+off2] {sym} base val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64XORQmodify) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(base, val, mem) return true } @@ -30018,12 +29979,12 @@ func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { // match: (HasCPUFeature {s}) // result: (SETNE (CMPQconst [0] (LoweredHasCPUFeature {s}))) for { - s := v.Aux + s := auxToSym(v.Aux) v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) - v1.Aux = s + v1.Aux = symToAux(s) v0.AddArg(v1) v.AddArg(v0) return true @@ -30489,10 +30450,10 @@ func rewriteValueAMD64_OpLocalAddr(v *Value) bool { // match: (LocalAddr {sym} base _) // result: (LEAQ {sym} base) for { - sym := v.Aux + sym := auxToSym(v.Aux) base := v_0 v.reset(OpAMD64LEAQ) - v.Aux = sym + v.Aux = symToAux(sym) v.AddArg(base) return true } @@ -31982,7 +31943,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool { // cond: boundsABI(kind) == 0 // result: (LoweredPanicBoundsA [kind] x y mem) for { - kind := v.AuxInt + kind := auxIntToInt64(v.AuxInt) x := v_0 y := v_1 mem := v_2 @@ -31990,7 +31951,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool { break } v.reset(OpAMD64LoweredPanicBoundsA) - v.AuxInt = kind + v.AuxInt = int64ToAuxInt(kind) v.AddArg3(x, y, mem) return true } @@ -31998,7 +31959,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool { // cond: boundsABI(kind) == 1 // result: (LoweredPanicBoundsB [kind] x y mem) for { - kind := v.AuxInt + kind := 
auxIntToInt64(v.AuxInt) x := v_0 y := v_1 mem := v_2 @@ -32006,7 +31967,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool { break } v.reset(OpAMD64LoweredPanicBoundsB) - v.AuxInt = kind + v.AuxInt = int64ToAuxInt(kind) v.AddArg3(x, y, mem) return true } @@ -32014,7 +31975,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool { // cond: boundsABI(kind) == 2 // result: (LoweredPanicBoundsC [kind] x y mem) for { - kind := v.AuxInt + kind := auxIntToInt64(v.AuxInt) x := v_0 y := v_1 mem := v_2 @@ -32022,7 +31983,7 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool { break } v.reset(OpAMD64LoweredPanicBoundsC) - v.AuxInt = kind + v.AuxInt = int64ToAuxInt(kind) v.AddArg3(x, y, mem) return true } @@ -34243,7 +34204,7 @@ func rewriteBlockAMD64(b *Block) bool { } x := v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { + if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 { continue } y := v_0_1 @@ -34267,7 +34228,7 @@ func rewriteBlockAMD64(b *Block) bool { } x := v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { + if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { continue } y := v_0_1 @@ -34279,40 +34240,40 @@ func rewriteBlockAMD64(b *Block) bool { break } // match: (EQ (TESTLconst [c] x)) - // cond: isUint32PowerOfTwo(c) - // result: (UGE (BTLconst [log2uint32(c)] x)) + // cond: isUint32PowerOfTwo(int64(c)) + // result: (UGE (BTLconst [int8(log32(c))] x)) for b.Controls[0].Op == OpAMD64TESTLconst { v_0 := b.Controls[0] - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUint32PowerOfTwo(c)) { + if !(isUint32PowerOfTwo(int64(c))) { break } v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = log2uint32(c) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) v0.AddArg(x) b.resetWithControl(BlockAMD64UGE, v0) return true } // match: (EQ (TESTQconst [c] x)) - // cond: 
isUint64PowerOfTwo(c) - // result: (UGE (BTQconst [log2(c)] x)) + // cond: isUint64PowerOfTwo(int64(c)) + // result: (UGE (BTQconst [int8(log32(c))] x)) for b.Controls[0].Op == OpAMD64TESTQconst { v_0 := b.Controls[0] - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUint64PowerOfTwo(c)) { + if !(isUint64PowerOfTwo(int64(c))) { break } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) v0.AddArg(x) b.resetWithControl(BlockAMD64UGE, v0) return true } // match: (EQ (TESTQ (MOVQconst [c]) x)) // cond: isUint64PowerOfTwo(c) - // result: (UGE (BTQconst [log2(c)] x)) + // result: (UGE (BTQconst [int8(log2(c))] x)) for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] @@ -34322,13 +34283,13 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_0.Op != OpAMD64MOVQconst { continue } - c := v_0_0.AuxInt + c := auxIntToInt64(v_0_0.AuxInt) x := v_0_1 if !(isUint64PowerOfTwo(c)) { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) + v0.AuxInt = int8ToAuxInt(int8(log2(c))) v0.AddArg(x) b.resetWithControl(BlockAMD64UGE, v0) return true @@ -34345,11 +34306,11 @@ func rewriteBlockAMD64(b *Block) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { continue } x := z1_0.Args[0] @@ -34358,7 +34319,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 + v0.AuxInt = int8ToAuxInt(63) v0.AddArg(x) b.resetWithControl(BlockAMD64UGE, v0) return true @@ -34375,11 +34336,11 @@ func rewriteBlockAMD64(b *Block) bool { v_0_1 := 
v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 { continue } x := z1_0.Args[0] @@ -34388,7 +34349,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 31 + v0.AuxInt = int8ToAuxInt(31) v0.AddArg(x) b.resetWithControl(BlockAMD64UGE, v0) return true @@ -34405,11 +34366,11 @@ func rewriteBlockAMD64(b *Block) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { + if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { continue } x := z1_0.Args[0] @@ -34418,7 +34379,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int8ToAuxInt(0) v0.AddArg(x) b.resetWithControl(BlockAMD64UGE, v0) return true @@ -34435,11 +34396,11 @@ func rewriteBlockAMD64(b *Block) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { + if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { continue } x := z1_0.Args[0] @@ -34448,7 +34409,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = 
int8ToAuxInt(0) v0.AddArg(x) b.resetWithControl(BlockAMD64UGE, v0) return true @@ -34465,7 +34426,7 @@ func rewriteBlockAMD64(b *Block) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } x := z1.Args[0] @@ -34474,7 +34435,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 + v0.AuxInt = int8ToAuxInt(63) v0.AddArg(x) b.resetWithControl(BlockAMD64UGE, v0) return true @@ -34491,7 +34452,7 @@ func rewriteBlockAMD64(b *Block) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } x := z1.Args[0] @@ -34500,7 +34461,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 + v0.AuxInt = int8ToAuxInt(31) v0.AddArg(x) b.resetWithControl(BlockAMD64UGE, v0) return true @@ -35046,7 +35007,7 @@ func rewriteBlockAMD64(b *Block) bool { } x := v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { + if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 { continue } y := v_0_1 @@ -35070,7 +35031,7 @@ func rewriteBlockAMD64(b *Block) bool { } x := v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { + if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { continue } y := v_0_1 @@ -35082,40 +35043,40 @@ func rewriteBlockAMD64(b *Block) bool { break } // match: (NE (TESTLconst [c] x)) - // cond: isUint32PowerOfTwo(c) - // result: (ULT (BTLconst [log2uint32(c)] x)) + // cond: isUint32PowerOfTwo(int64(c)) + // result: (ULT (BTLconst 
[int8(log32(c))] x)) for b.Controls[0].Op == OpAMD64TESTLconst { v_0 := b.Controls[0] - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUint32PowerOfTwo(c)) { + if !(isUint32PowerOfTwo(int64(c))) { break } v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = log2uint32(c) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) v0.AddArg(x) b.resetWithControl(BlockAMD64ULT, v0) return true } // match: (NE (TESTQconst [c] x)) - // cond: isUint64PowerOfTwo(c) - // result: (ULT (BTQconst [log2(c)] x)) + // cond: isUint64PowerOfTwo(int64(c)) + // result: (ULT (BTQconst [int8(log32(c))] x)) for b.Controls[0].Op == OpAMD64TESTQconst { v_0 := b.Controls[0] - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUint64PowerOfTwo(c)) { + if !(isUint64PowerOfTwo(int64(c))) { break } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) v0.AddArg(x) b.resetWithControl(BlockAMD64ULT, v0) return true } // match: (NE (TESTQ (MOVQconst [c]) x)) // cond: isUint64PowerOfTwo(c) - // result: (ULT (BTQconst [log2(c)] x)) + // result: (ULT (BTQconst [int8(log2(c))] x)) for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] @@ -35125,13 +35086,13 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_0.Op != OpAMD64MOVQconst { continue } - c := v_0_0.AuxInt + c := auxIntToInt64(v_0_0.AuxInt) x := v_0_1 if !(isUint64PowerOfTwo(c)) { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) + v0.AuxInt = int8ToAuxInt(int8(log2(c))) v0.AddArg(x) b.resetWithControl(BlockAMD64ULT, v0) return true @@ -35148,11 +35109,11 @@ func rewriteBlockAMD64(b *Block) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } z1_0 := z1.Args[0] - if 
z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { continue } x := z1_0.Args[0] @@ -35161,7 +35122,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 + v0.AuxInt = int8ToAuxInt(63) v0.AddArg(x) b.resetWithControl(BlockAMD64ULT, v0) return true @@ -35178,11 +35139,11 @@ func rewriteBlockAMD64(b *Block) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 { continue } x := z1_0.Args[0] @@ -35191,7 +35152,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 31 + v0.AuxInt = int8ToAuxInt(31) v0.AddArg(x) b.resetWithControl(BlockAMD64ULT, v0) return true @@ -35208,11 +35169,11 @@ func rewriteBlockAMD64(b *Block) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { + if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { continue } x := z1_0.Args[0] @@ -35221,7 +35182,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int8ToAuxInt(0) v0.AddArg(x) b.resetWithControl(BlockAMD64ULT, v0) return true @@ -35238,11 +35199,11 @@ func rewriteBlockAMD64(b *Block) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if 
z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { + if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { continue } x := z1_0.Args[0] @@ -35251,7 +35212,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int8ToAuxInt(0) v0.AddArg(x) b.resetWithControl(BlockAMD64ULT, v0) return true @@ -35268,7 +35229,7 @@ func rewriteBlockAMD64(b *Block) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { continue } x := z1.Args[0] @@ -35277,7 +35238,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 + v0.AuxInt = int8ToAuxInt(63) v0.AddArg(x) b.resetWithControl(BlockAMD64ULT, v0) return true @@ -35294,7 +35255,7 @@ func rewriteBlockAMD64(b *Block) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { z1 := v_0_0 - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { continue } x := z1.Args[0] @@ -35303,7 +35264,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 + v0.AuxInt = int8ToAuxInt(31) v0.AddArg(x) b.resetWithControl(BlockAMD64ULT, v0) return true -- cgit v1.2.3-54-g00ecf From e01a226fadcac721e26c12921ca54388c7244d03 Mon Sep 17 00:00:00 2001 From: Victor Michel Date: Sun, 30 Aug 2020 20:43:39 +0000 Subject: debug/elf: support relocations relative to sections with non-zero addresses commit 72ec930fa70c20ce69b21bf32a7916c04c2e9c2f added basic support for relocations, but assumed that the 
symbol value would be 0, likely because .debug_info always has address == 0 in the ELF section headers. CL 195679 added further support for relocations, but explicitly encoded the original assumption that section addresses would be 0. This change removes that assumption: all relocations will now be properly computed based on the target symbol value even when that symbol is a section with a non-zero address. Typically, sections that are part of a LOAD program segment have non-zero addresses. For example, .debug_ranges relocations could be relative to .text, which usually has an address > 0. Fixes #40879 Change-Id: Ib0a616bb8b05d6c96d179b03ca33a10946fc5d59 GitHub-Last-Rev: 4200de732641995f3a4958a13a5c78f65b7eae50 GitHub-Pull-Request: golang/go#41038 Reviewed-on: https://go-review.googlesource.com/c/go/+/250559 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/debug/elf/file.go | 77 ++++++++------------ src/debug/elf/file_test.go | 81 ++++++++++++++++++++- ...go-relocation-test-gcc930-ranges-no-rela-x86-64 | Bin 0 -> 5696 bytes ...-relocation-test-gcc930-ranges-with-rela-x86-64 | Bin 0 -> 7680 bytes src/debug/elf/testdata/multiple-code-sections.c | 28 +++++++ 5 files changed, 135 insertions(+), 51 deletions(-) create mode 100644 src/debug/elf/testdata/go-relocation-test-gcc930-ranges-no-rela-x86-64 create mode 100644 src/debug/elf/testdata/go-relocation-test-gcc930-ranges-with-rela-x86-64 create mode 100644 src/debug/elf/testdata/multiple-code-sections.c diff --git a/src/debug/elf/file.go b/src/debug/elf/file.go index 1e863ef78e..cd5bf8fab0 100644 --- a/src/debug/elf/file.go +++ b/src/debug/elf/file.go @@ -628,23 +628,14 @@ func (f *File) applyRelocations(dst []byte, rels []byte) error { } } -// relocSymbolTargetOK decides whether we should try to apply a +// canApplyRelocation reports whether we should try to apply a // relocation to a DWARF data section, given a pointer to the symbol -// targeted by the relocation. 
Most relocations in DWARF data tend to -// be section-relative, but some target non-section symbols (for -// example, low_PC attrs on subprogram or compilation unit DIEs that -// target function symbols), and we need to include these as well. -// Return value is a pair (X,Y) where X is a boolean indicating -// whether the relocation is needed, and Y is the symbol value in the -// case of a non-section relocation that needs to be applied. -func relocSymbolTargetOK(sym *Symbol) (bool, uint64) { - if ST_TYPE(sym.Info) == STT_SECTION { - return true, 0 - } - if sym.Section != SHN_UNDEF && sym.Section < SHN_LORESERVE { - return true, sym.Value - } - return false, 0 +// targeted by the relocation. +// Most relocations in DWARF data tend to be section-relative, but +// some target non-section symbols (for example, low_PC attrs on +// subprogram or compilation unit DIEs that target function symbols). +func canApplyRelocation(sym *Symbol) bool { + return sym.Section != SHN_UNDEF && sym.Section < SHN_LORESERVE } func (f *File) applyRelocationsAMD64(dst []byte, rels []byte) error { @@ -670,8 +661,7 @@ func (f *File) applyRelocationsAMD64(dst []byte, rels []byte) error { continue } sym := &symbols[symNo-1] - needed, val := relocSymbolTargetOK(sym) - if !needed { + if !canApplyRelocation(sym) { continue } @@ -684,13 +674,13 @@ func (f *File) applyRelocationsAMD64(dst []byte, rels []byte) error { if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } - val64 := val + uint64(rela.Addend) + val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_X86_64_32: if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { continue } - val32 := uint32(val) + uint32(rela.Addend) + val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } @@ -796,8 +786,7 @@ func (f *File) applyRelocationsARM64(dst []byte, rels []byte) error { continue } sym := &symbols[symNo-1] - needed, val := 
relocSymbolTargetOK(sym) - if !needed { + if !canApplyRelocation(sym) { continue } @@ -810,13 +799,13 @@ func (f *File) applyRelocationsARM64(dst []byte, rels []byte) error { if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } - val64 := uint64(val) + uint64(rela.Addend) + val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_AARCH64_ABS32: if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { continue } - val32 := uint32(val) + uint32(rela.Addend) + val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } @@ -847,8 +836,7 @@ func (f *File) applyRelocationsPPC(dst []byte, rels []byte) error { continue } sym := &symbols[symNo-1] - needed, val := relocSymbolTargetOK(sym) - if !needed { + if !canApplyRelocation(sym) { continue } @@ -857,7 +845,7 @@ func (f *File) applyRelocationsPPC(dst []byte, rels []byte) error { if rela.Off+4 >= uint32(len(dst)) || rela.Addend < 0 { continue } - val32 := uint32(val) + uint32(rela.Addend) + val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } @@ -888,8 +876,7 @@ func (f *File) applyRelocationsPPC64(dst []byte, rels []byte) error { continue } sym := &symbols[symNo-1] - needed, val := relocSymbolTargetOK(sym) - if !needed { + if !canApplyRelocation(sym) { continue } @@ -898,13 +885,13 @@ func (f *File) applyRelocationsPPC64(dst []byte, rels []byte) error { if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } - val64 := val + uint64(rela.Addend) + val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_PPC64_ADDR32: if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { continue } - val32 := uint32(val) + uint32(rela.Addend) + val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } @@ -980,8 +967,7 @@ func (f *File) applyRelocationsMIPS64(dst []byte, rels 
[]byte) error { continue } sym := &symbols[symNo-1] - needed, val := relocSymbolTargetOK(sym) - if !needed { + if !canApplyRelocation(sym) { continue } @@ -990,13 +976,13 @@ func (f *File) applyRelocationsMIPS64(dst []byte, rels []byte) error { if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } - val64 := val + uint64(rela.Addend) + val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_MIPS_32: if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { continue } - val32 := uint32(val) + uint32(rela.Addend) + val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } @@ -1027,8 +1013,7 @@ func (f *File) applyRelocationsRISCV64(dst []byte, rels []byte) error { continue } sym := &symbols[symNo-1] - needed, val := relocSymbolTargetOK(sym) - if !needed { + if !canApplyRelocation(sym) { continue } @@ -1037,13 +1022,13 @@ func (f *File) applyRelocationsRISCV64(dst []byte, rels []byte) error { if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } - val64 := val + uint64(rela.Addend) + val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_RISCV_32: if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { continue } - val32 := uint32(val) + uint32(rela.Addend) + val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } @@ -1074,8 +1059,7 @@ func (f *File) applyRelocationss390x(dst []byte, rels []byte) error { continue } sym := &symbols[symNo-1] - needed, val := relocSymbolTargetOK(sym) - if !needed { + if !canApplyRelocation(sym) { continue } @@ -1084,13 +1068,13 @@ func (f *File) applyRelocationss390x(dst []byte, rels []byte) error { if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } - val64 := val + uint64(rela.Addend) + val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_390_32: if rela.Off+4 
>= uint64(len(dst)) || rela.Addend < 0 { continue } - val32 := uint32(val) + uint32(rela.Addend) + val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } @@ -1121,8 +1105,7 @@ func (f *File) applyRelocationsSPARC64(dst []byte, rels []byte) error { continue } sym := &symbols[symNo-1] - needed, val := relocSymbolTargetOK(sym) - if !needed { + if !canApplyRelocation(sym) { continue } @@ -1131,13 +1114,13 @@ func (f *File) applyRelocationsSPARC64(dst []byte, rels []byte) error { if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { continue } - val64 := val + uint64(rela.Addend) + val64 := sym.Value + uint64(rela.Addend) f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) case R_SPARC_32, R_SPARC_UA32: if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { continue } - val32 := uint32(val) + uint32(rela.Addend) + val32 := uint32(sym.Value) + uint32(rela.Addend) f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) } } diff --git a/src/debug/elf/file_test.go b/src/debug/elf/file_test.go index 4da580da5a..24948e696a 100644 --- a/src/debug/elf/file_test.go +++ b/src/debug/elf/file_test.go @@ -293,6 +293,7 @@ func decompress(gz string) (io.ReaderAt, error) { type relocationTestEntry struct { entryNumber int entry *dwarf.Entry + pcRanges [][2]uint64 } type relocationTest struct { @@ -319,6 +320,7 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, }, }, + pcRanges: [][2]uint64{{0x0, 0x6}}, }, }, }, @@ -340,6 +342,7 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, }, }, + pcRanges: [][2]uint64{{0x0, 0x5}}, }, }, }, @@ -361,6 +364,7 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, }, }, + pcRanges: [][2]uint64{{0x0, 0x6}}, }, }, }, @@ -382,6 +386,7 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrStmtList, Val: int64(0), 
Class: dwarf.ClassLinePtr}, }, }, + pcRanges: [][2]uint64{{0x0, 0x24}}, }, }, }, @@ -403,6 +408,7 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, }, }, + pcRanges: [][2]uint64{{0x0, 0x28}}, }, }, }, @@ -421,9 +427,10 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrStmtList, Val: int64(0x0), Class: dwarf.ClassLinePtr}, {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: int64(48), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrHighpc, Val: int64(0x30), Class: dwarf.ClassConstant}, }, }, + pcRanges: [][2]uint64{{0x0, 0x30}}, }, }, }, @@ -445,6 +452,7 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, }, }, + pcRanges: [][2]uint64{{0x0, 0x44}}, }, }, }, @@ -466,6 +474,7 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, }, }, + pcRanges: [][2]uint64{{0x0, 0x24}}, }, }, }, @@ -483,10 +492,11 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: int64(100), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrHighpc, Val: int64(0x64), Class: dwarf.ClassConstant}, {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, }, }, + pcRanges: [][2]uint64{{0x0, 0x64}}, }, }, }, @@ -504,10 +514,11 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: int64(58), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrHighpc, Val: 
int64(0x3a), Class: dwarf.ClassConstant}, {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, }, }, + pcRanges: [][2]uint64{{0x0, 0x3a}}, }, }, }, @@ -529,6 +540,7 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, }, }, + pcRanges: [][2]uint64{{0x0, 0x2c}}, }, }, }, @@ -550,6 +562,7 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, }, }, + pcRanges: [][2]uint64{{0x0, 0x58}}, }, }, }, @@ -571,6 +584,7 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, }, }, + pcRanges: [][2]uint64{{0x0, 0x5c}}, }, }, }, @@ -588,10 +602,11 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString}, {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress}, - {Attr: dwarf.AttrHighpc, Val: int64(100), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrHighpc, Val: int64(0x64), Class: dwarf.ClassConstant}, {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, }, }, + pcRanges: [][2]uint64{{0x0, 0x64}}, }, }, }, @@ -613,6 +628,7 @@ var relocationTests = []relocationTest{ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, }, }, + pcRanges: [][2]uint64{{0x0, 0x2c}}, }, }, }, @@ -670,6 +686,56 @@ var relocationTests = []relocationTest{ }, }, }, + { + "testdata/go-relocation-test-gcc930-ranges-no-rela-x86-64", + []relocationTestEntry{ + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C17 9.3.0 -mtune=generic -march=x86-64 -g -fno-asynchronous-unwind-tables", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "multiple-code-sections.c", Class: 
dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrRanges, Val: int64(0), Class: dwarf.ClassRangeListPtr}, + {Attr: dwarf.AttrLowpc, Val: uint64(0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + }, + }, + pcRanges: [][2]uint64{ + {0x765, 0x777}, + {0x7e1, 0x7ec}, + }, + }, + }, + }, + { + "testdata/go-relocation-test-gcc930-ranges-with-rela-x86-64", + []relocationTestEntry{ + { + entry: &dwarf.Entry{ + Offset: 0xb, + Tag: dwarf.TagCompileUnit, + Children: true, + Field: []dwarf.Field{ + {Attr: dwarf.AttrProducer, Val: "GNU C17 9.3.0 -mtune=generic -march=x86-64 -g -fno-asynchronous-unwind-tables", Class: dwarf.ClassString}, + {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant}, + {Attr: dwarf.AttrName, Val: "multiple-code-sections.c", Class: dwarf.ClassString}, + {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString}, + {Attr: dwarf.AttrRanges, Val: int64(0), Class: dwarf.ClassRangeListPtr}, + {Attr: dwarf.AttrLowpc, Val: uint64(0), Class: dwarf.ClassAddress}, + {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr}, + }, + }, + pcRanges: [][2]uint64{ + {0x765, 0x777}, + {0x7e1, 0x7ec}, + }, + }, + }, + }, } func TestDWARFRelocations(t *testing.T) { @@ -705,6 +771,13 @@ func TestDWARFRelocations(t *testing.T) { if !reflect.DeepEqual(testEntry.entry, entry) { t.Errorf("entry %d mismatch: got:%#v want:%#v", testEntry.entryNumber, entry, testEntry.entry) } + pcRanges, err := dwarf.Ranges(entry) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEntry.pcRanges, pcRanges) { + t.Errorf("entry %d: PC range mismatch: got:%#v want:%#v", testEntry.entryNumber, pcRanges, testEntry.pcRanges) + } } }) } diff --git a/src/debug/elf/testdata/go-relocation-test-gcc930-ranges-no-rela-x86-64 b/src/debug/elf/testdata/go-relocation-test-gcc930-ranges-no-rela-x86-64 new file mode 100644 index 0000000000..c013f3e081 
Binary files /dev/null and b/src/debug/elf/testdata/go-relocation-test-gcc930-ranges-no-rela-x86-64 differ diff --git a/src/debug/elf/testdata/go-relocation-test-gcc930-ranges-with-rela-x86-64 b/src/debug/elf/testdata/go-relocation-test-gcc930-ranges-with-rela-x86-64 new file mode 100644 index 0000000000..51e03aa7b0 Binary files /dev/null and b/src/debug/elf/testdata/go-relocation-test-gcc930-ranges-with-rela-x86-64 differ diff --git a/src/debug/elf/testdata/multiple-code-sections.c b/src/debug/elf/testdata/multiple-code-sections.c new file mode 100644 index 0000000000..03b9d53ab9 --- /dev/null +++ b/src/debug/elf/testdata/multiple-code-sections.c @@ -0,0 +1,28 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Build with: +// gcc -g multiple-code-sections.c -Wl,--emit-relocs -Wl,--discard-none -Wl,-zmax-page-size=1 -fno-asynchronous-unwind-tables -o go-relocation-test-gcc930-ranges-with-rela-x86-64 +// gcc -g multiple-code-sections.c -Wl,-zmax-page-size=1 -fno-asynchronous-unwind-tables -o go-relocation-test-gcc930-ranges-no-rela-x86-64 +// Strip with: +// strip --only-keep-debug \ +// --remove-section=.eh_frame \ +// --remove-section=.eh_frame_hdr \ +// --remove-section=.shstrtab \ +// --remove-section=.strtab \ +// --remove-section=.symtab \ +// --remove-section=.note.gnu.build-id \ +// --remove-section=.note.ABI-tag \ +// --remove-section=.dynamic \ +// --remove-section=.gnu.hash \ +// --remove-section=.interp \ +// --remove-section=.rodata +__attribute__((section(".separate_section"))) // To get GCC to emit a DW_AT_ranges attribute for the CU. 
+int func(void) { + return 0; +} + +int main(int argc, char *argv[]) { + return 0; +} -- cgit v1.2.3-54-g00ecf From 5f5a55679c54784d07643099b55228b6f88c0bdf Mon Sep 17 00:00:00 2001 From: Paul Forgey Date: Tue, 1 Sep 2020 00:38:01 +0000 Subject: net/http: refactor ResponseWriter.ReadFrom to permit splice on Linux Rather than probe and guess if sendfile will work inside ResponseWriter.ReadFrom(src), this change fixes the underlying issue of starting to respond before src is readable We'll no longer send a status OK if a header has not yet been written and reading from src is destined to fail. This small change implicitly takes care of the need for the server to sniff the response body to determine the Content-Type. This allows splice to work on Linux when src is a socket or any non-regular file that's spliceable. The extra read of 512 bytes may raise an objection, and that's fair, but we're already swapping some syscall prep work for another and a read of 512 probably will not impact the overall performance. For shorter bodies, there's likely less setup time. A little initial slop is not too unusual in zero copy network code, and sometimes actually helps. 
Fixes #40888 Change-Id: I4a8e2ad0ace1318bae66dae5671d06ea6d4838ed GitHub-Last-Rev: 097364ea866613d103a31e2247b44f4a12077f9e GitHub-Pull-Request: golang/go#40903 Reviewed-on: https://go-review.googlesource.com/c/go/+/249238 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Emmanuel Odeke --- src/net/http/fs_test.go | 12 +++++++-- src/net/http/server.go | 70 +++++++++++++++++++++++++------------------------ 2 files changed, 46 insertions(+), 36 deletions(-) diff --git a/src/net/http/fs_test.go b/src/net/http/fs_test.go index c082ceee71..245d9ce65c 100644 --- a/src/net/http/fs_test.go +++ b/src/net/http/fs_test.go @@ -1136,6 +1136,14 @@ func TestLinuxSendfile(t *testing.T) { t.Skipf("skipping; failed to run strace: %v", err) } + filename := fmt.Sprintf("1kb-%d", os.Getpid()) + filepath := path.Join(os.TempDir(), filename) + + if err := ioutil.WriteFile(filepath, bytes.Repeat([]byte{'a'}, 1<<10), 0755); err != nil { + t.Fatal(err) + } + defer os.Remove(filepath) + var buf bytes.Buffer child := exec.Command("strace", "-f", "-q", os.Args[0], "-test.run=TestLinuxSendfileChild") child.ExtraFiles = append(child.ExtraFiles, lnf) @@ -1146,7 +1154,7 @@ func TestLinuxSendfile(t *testing.T) { t.Skipf("skipping; failed to start straced child: %v", err) } - res, err := Get(fmt.Sprintf("http://%s/", ln.Addr())) + res, err := Get(fmt.Sprintf("http://%s/%s", ln.Addr(), filename)) if err != nil { t.Fatalf("http client error: %v", err) } @@ -1192,7 +1200,7 @@ func TestLinuxSendfileChild(*testing.T) { panic(err) } mux := NewServeMux() - mux.Handle("/", FileServer(Dir("testdata"))) + mux.Handle("/", FileServer(Dir(os.TempDir()))) mux.HandleFunc("/quit", func(ResponseWriter, *Request) { os.Exit(0) }) diff --git a/src/net/http/server.go b/src/net/http/server.go index ed5de350a9..9124903b89 100644 --- a/src/net/http/server.go +++ b/src/net/http/server.go @@ -561,51 +561,53 @@ type writerOnly struct { io.Writer } -func srcIsRegularFile(src io.Reader) (isRegular bool, err 
error) { - switch v := src.(type) { - case *os.File: - fi, err := v.Stat() - if err != nil { - return false, err - } - return fi.Mode().IsRegular(), nil - case *io.LimitedReader: - return srcIsRegularFile(v.R) - default: - return - } -} - // ReadFrom is here to optimize copying from an *os.File regular file -// to a *net.TCPConn with sendfile. +// to a *net.TCPConn with sendfile, or from a supported src type such +// as a *net.TCPConn on Linux with splice. func (w *response) ReadFrom(src io.Reader) (n int64, err error) { + bufp := copyBufPool.Get().(*[]byte) + buf := *bufp + defer copyBufPool.Put(bufp) + // Our underlying w.conn.rwc is usually a *TCPConn (with its - // own ReadFrom method). If not, or if our src isn't a regular - // file, just fall back to the normal copy method. + // own ReadFrom method). If not, just fall back to the normal + // copy method. rf, ok := w.conn.rwc.(io.ReaderFrom) - regFile, err := srcIsRegularFile(src) - if err != nil { - return 0, err - } - if !ok || !regFile { - bufp := copyBufPool.Get().(*[]byte) - defer copyBufPool.Put(bufp) - return io.CopyBuffer(writerOnly{w}, src, *bufp) + if !ok { + return io.CopyBuffer(writerOnly{w}, src, buf) } // sendfile path: - if !w.wroteHeader { - w.WriteHeader(StatusOK) - } + // Do not start actually writing response until src is readable. + // If body length is <= sniffLen, sendfile/splice path will do + // little anyway. This small read also satisfies sniffing the + // body in case Content-Type is missing. + nr, er := src.Read(buf[:sniffLen]) + atEOF := errors.Is(er, io.EOF) + n += int64(nr) - if w.needsSniff() { - n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen)) - n += n0 - if err != nil { - return n, err + if nr > 0 { + // Write the small amount read normally. 
+ nw, ew := w.Write(buf[:nr]) + if ew != nil { + err = ew + } else if nr != nw { + err = io.ErrShortWrite } } + if err == nil && er != nil && !atEOF { + err = er + } + + // Do not send StatusOK in the error case where nothing has been written. + if err == nil && !w.wroteHeader { + w.WriteHeader(StatusOK) // nr == 0, no error (or EOF) + } + + if err != nil || atEOF { + return n, err + } w.w.Flush() // get rid of any previous writes w.cw.flush() // make sure Header is written; flush data to rwc -- cgit v1.2.3-54-g00ecf From 4d89b3231d590284e35bff647e597e93fdf41dae Mon Sep 17 00:00:00 2001 From: Heisenberg Date: Mon, 31 Aug 2020 19:22:36 +0800 Subject: runtime: remove remnants of signal stack workaround Updates #35979 Change-Id: Ic3a6e1b5e9d544979a3c8d909a36a55efa3b9c9d Reviewed-on: https://go-review.googlesource.com/c/go/+/251757 Reviewed-by: Keith Randall Reviewed-by: Austin Clements --- src/runtime/os_linux.go | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go index 9702920bcf..371db73502 100644 --- a/src/runtime/os_linux.go +++ b/src/runtime/os_linux.go @@ -5,7 +5,6 @@ package runtime import ( - "runtime/internal/atomic" "runtime/internal/sys" "unsafe" ) @@ -476,21 +475,7 @@ func rt_sigaction(sig uintptr, new, old *sigactiont, size uintptr) int32 func getpid() int func tgkill(tgid, tid, sig int) -// touchStackBeforeSignal stores an errno value. If non-zero, it means -// that we should touch the signal stack before sending a signal. -// This is used on systems that have a bug when the signal stack must -// be faulted in. See #35777 and #37436. -// -// This is accessed atomically as it is set and read in different threads. -// -// TODO(austin): Remove this after Go 1.15 when we remove the -// mlockGsignal workaround. -var touchStackBeforeSignal uint32 - // signalM sends a signal to mp. 
func signalM(mp *m, sig int) { - if atomic.Load(&touchStackBeforeSignal) != 0 { - atomic.Cas((*uint32)(unsafe.Pointer(mp.gsignal.stack.hi-4)), 0, 0) - } tgkill(getpid(), int(mp.procid), sig) } -- cgit v1.2.3-54-g00ecf From d7a6a44deb3a56aa3f94b75f7ab4ffa1a0fa8cef Mon Sep 17 00:00:00 2001 From: Egon Elbre Date: Tue, 25 Aug 2020 10:38:50 +0300 Subject: doc/asm: add BP is callee-save paragraph Change-Id: Id38e639c66a42acf0b1c4488cdfd0b7b6cf71c78 Reviewed-on: https://go-review.googlesource.com/c/go/+/250397 Reviewed-by: Keith Randall --- doc/asm.html | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/doc/asm.html b/doc/asm.html index dbbe8f2cd1..cc8598aeff 100644 --- a/doc/asm.html +++ b/doc/asm.html @@ -687,6 +687,13 @@ MOVQ g(CX), AX // Move g into AX. MOVQ g_m(AX), BX // Move g.m into BX. +

+Register BP is callee-save. +The assembler automatically inserts BP save/restore when frame size is larger than zero. +Using BP as a general purpose register is allowed, +however it can interfere with sampling-based profiling. +

+

ARM

-- cgit v1.2.3-54-g00ecf From ab88d97deb216cdd93712dedca3be4d7a561743e Mon Sep 17 00:00:00 2001 From: Michael Matloob Date: Mon, 31 Aug 2020 16:29:13 -0400 Subject: cmd: update vendored golang.org/x/mod This pulls in golang.org/cl/250920 which rejects Windows shortnames as path components in module.CheckImportPath (as is already done in cmd/go/internal/get's copy of CheckImportPath). This will allow us to replace the copy of CheckImportPath with the original. This also pulls in golang.org/cl/250919 which rejects + in CheckPath and CheckImportPath, and golang.org/cl/235597, which adds methods to the zip package for gorelease, but shouldn't affect cmd. This change also updates the cmd/go test case TestScript/mod_bad_filenames to reflect that golang.org/x/mod/zip error messages now include filenames for bad file names that can't be included in zip archives. Updates #29101 Change-Id: I7f654325dc33b19bc9c6f77a56546747add5a47f Reviewed-on: https://go-review.googlesource.com/c/go/+/251877 Run-TryBot: Michael Matloob TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod Reviewed-by: Bryan C. 
Mills --- src/cmd/go.mod | 2 +- src/cmd/go.sum | 4 +- src/cmd/go/testdata/script/mod_bad_filenames.txt | 6 +- src/cmd/vendor/golang.org/x/mod/module/module.go | 33 +- src/cmd/vendor/golang.org/x/mod/zip/zip.go | 621 ++++++++++++++++------- src/cmd/vendor/modules.txt | 2 +- 6 files changed, 477 insertions(+), 191 deletions(-) diff --git a/src/cmd/go.mod b/src/cmd/go.mod index 5c5c99e3cd..68ce1705e4 100644 --- a/src/cmd/go.mod +++ b/src/cmd/go.mod @@ -7,7 +7,7 @@ require ( github.com/ianlancetaylor/demangle v0.0.0-20200414190113-039b1ae3a340 // indirect golang.org/x/arch v0.0.0-20200511175325-f7c78586839d golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 - golang.org/x/mod v0.3.1-0.20200824162228-c0d644d00ab8 + golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3 // indirect golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 golang.org/x/xerrors v0.0.0-20200806184451-1a77d5e9f316 // indirect diff --git a/src/cmd/go.sum b/src/cmd/go.sum index 69cebe1b23..cb64a5d475 100644 --- a/src/cmd/go.sum +++ b/src/cmd/go.sum @@ -14,8 +14,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.1-0.20200824162228-c0d644d00ab8 h1:Qbq3laTJZip3mEOreFwHF81RGkkhIvmraRMINHNyWHE= -golang.org/x/mod v0.3.1-0.20200824162228-c0d644d00ab8/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 h1:xUIPaMhvROX9dhPvRCenIJtU78+lbEenGbgqB5hfHCQ= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= diff --git a/src/cmd/go/testdata/script/mod_bad_filenames.txt b/src/cmd/go/testdata/script/mod_bad_filenames.txt index 6e0c8bd302..eb556f4c7c 100644 --- a/src/cmd/go/testdata/script/mod_bad_filenames.txt +++ b/src/cmd/go/testdata/script/mod_bad_filenames.txt @@ -3,9 +3,9 @@ env GO111MODULE=on ! go get rsc.io/badfile1 rsc.io/badfile2 rsc.io/badfile3 rsc.io/badfile4 rsc.io/badfile5 ! stderr 'unzip.*badfile1' stderr 'unzip.*badfile2[\\/]@v[\\/]v1.0.0.zip:.*malformed file path "☺.go": invalid char ''☺''' -stderr 'unzip.*badfile3[\\/]@v[\\/]v1.0.0.zip: malformed file path "x\?y.go": invalid char ''\?''' -stderr 'unzip.*badfile4[\\/]@v[\\/]v1.0.0.zip: case-insensitive file name collision: "x/Y.go" and "x/y.go"' -stderr 'unzip.*badfile5[\\/]@v[\\/]v1.0.0.zip: case-insensitive file name collision: "x/y" and "x/Y"' +stderr 'unzip.*badfile3[\\/]@v[\\/]v1.0.0.zip: rsc.io[\\/]badfile3@v1.0.0[\\/]x\?y.go: malformed file path "x\?y.go": invalid char ''\?''' +stderr 'unzip.*badfile4[\\/]@v[\\/]v1.0.0.zip: rsc.io[\\/]badfile4@v1.0.0[\\/]x[\\/]y.go: case-insensitive file name collision: "x/Y.go" and "x/y.go"' +stderr 'unzip.*badfile5[\\/]@v[\\/]v1.0.0.zip: rsc.io[\\/]badfile5@v1.0.0[\\/]x[\\/]Y[\\/]zz[\\/]ww.go: case-insensitive file name collision: "x/y" and "x/Y"' -- go.mod -- module x diff --git a/src/cmd/vendor/golang.org/x/mod/module/module.go b/src/cmd/vendor/golang.org/x/mod/module/module.go index 3a8b080c7b..c1c5263c42 100644 --- a/src/cmd/vendor/golang.org/x/mod/module/module.go +++ b/src/cmd/vendor/golang.org/x/mod/module/module.go @@ -225,13 +225,13 @@ func firstPathOK(r rune) bool { } // pathOK reports whether r can appear in an import path element. 
-// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. +// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. // This matches what "go get" has historically recognized in import paths. // TODO(rsc): We would like to allow Unicode letters, but that requires additional // care in the safe encoding (see "escaped paths" above). func pathOK(r rune) bool { if r < utf8.RuneSelf { - return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' || + return r == '-' || r == '.' || r == '_' || r == '~' || '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' @@ -314,11 +314,13 @@ func CheckPath(path string) error { // separated by slashes (U+002F). (It must not begin with nor end in a slash.) // // A valid path element is a non-empty string made up of -// ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. +// ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. // It must not begin or end with a dot (U+002E), nor contain two dots in a row. // // The element prefix up to the first dot must not be a reserved file name -// on Windows, regardless of case (CON, com1, NuL, and so on). +// on Windows, regardless of case (CON, com1, NuL, and so on). The element +// must not have a suffix of a tilde followed by one or more ASCII digits +// (to exclude paths elements that look like Windows short-names). // // CheckImportPath may be less restrictive in the future, but see the // top-level package documentation for additional information about @@ -403,6 +405,29 @@ func checkElem(elem string, fileName bool) error { return fmt.Errorf("%q disallowed as path element component on Windows", short) } } + + if fileName { + // don't check for Windows short-names in file names. They're + // only an issue for import paths. + return nil + } + + // Reject path components that look like Windows short-names. 
+ // Those usually end in a tilde followed by one or more ASCII digits. + if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 { + suffix := short[tilde+1:] + suffixIsDigits := true + for _, r := range suffix { + if r < '0' || r > '9' { + suffixIsDigits = false + break + } + } + if suffixIsDigits { + return fmt.Errorf("trailing tilde and digits in path element") + } + } + return nil } diff --git a/src/cmd/vendor/golang.org/x/mod/zip/zip.go b/src/cmd/vendor/golang.org/x/mod/zip/zip.go index 6865895b3d..5b401ad4d8 100644 --- a/src/cmd/vendor/golang.org/x/mod/zip/zip.go +++ b/src/cmd/vendor/golang.org/x/mod/zip/zip.go @@ -48,6 +48,7 @@ package zip import ( "archive/zip" "bytes" + "errors" "fmt" "io" "io/ioutil" @@ -92,40 +93,134 @@ type File interface { Open() (io.ReadCloser, error) } -// Create builds a zip archive for module m from an abstract list of files -// and writes it to w. +// CheckedFiles reports whether a set of files satisfy the name and size +// constraints required by module zip files. The constraints are listed in the +// package documentation. // -// Create verifies the restrictions described in the package documentation -// and should not produce an archive that Unzip cannot extract. Create does not -// include files in the output archive if they don't belong in the module zip. -// In particular, Create will not include files in modules found in -// subdirectories, most files in vendor directories, or irregular files (such -// as symbolic links) in the output archive. -func Create(w io.Writer, m module.Version, files []File) (err error) { - defer func() { - if err != nil { - err = &zipError{verb: "create zip", err: err} - } - }() +// Functions that produce this report may include slightly different sets of +// files. See documentation for CheckFiles, CheckDir, and CheckZip for details. +type CheckedFiles struct { + // Valid is a list of file paths that should be included in a zip file. 
+ Valid []string + + // Omitted is a list of files that are ignored when creating a module zip + // file, along with the reason each file is ignored. + Omitted []FileError + + // Invalid is a list of files that should not be included in a module zip + // file, along with the reason each file is invalid. + Invalid []FileError + + // SizeError is non-nil if the total uncompressed size of the valid files + // exceeds the module zip size limit or if the zip file itself exceeds the + // limit. + SizeError error +} - // Check that the version is canonical, the module path is well-formed, and - // the major version suffix matches the major version. - if vers := module.CanonicalVersion(m.Version); vers != m.Version { - return fmt.Errorf("version %q is not canonical (should be %q)", m.Version, vers) +// Err returns an error if CheckedFiles does not describe a valid module zip +// file. SizeError is returned if that field is set. A FileErrorList is returned +// if there are one or more invalid files. Other errors may be returned in the +// future. +func (cf CheckedFiles) Err() error { + if cf.SizeError != nil { + return cf.SizeError } - if err := module.Check(m.Path, m.Version); err != nil { - return err + if len(cf.Invalid) > 0 { + return FileErrorList(cf.Invalid) + } + return nil +} + +type FileErrorList []FileError + +func (el FileErrorList) Error() string { + buf := &strings.Builder{} + sep := "" + for _, e := range el { + buf.WriteString(sep) + buf.WriteString(e.Error()) + sep = "\n" + } + return buf.String() +} + +type FileError struct { + Path string + Err error +} + +func (e FileError) Error() string { + return fmt.Sprintf("%s: %s", e.Path, e.Err) +} + +func (e FileError) Unwrap() error { + return e.Err +} + +var ( + // Predefined error messages for invalid files. Not exhaustive. 
+ errPathNotClean = errors.New("file path is not clean") + errPathNotRelative = errors.New("file path is not relative") + errGoModCase = errors.New("go.mod files must have lowercase names") + errGoModSize = fmt.Errorf("go.mod file too large (max size is %d bytes)", MaxGoMod) + errLICENSESize = fmt.Errorf("LICENSE file too large (max size is %d bytes)", MaxLICENSE) + + // Predefined error messages for omitted files. Not exhaustive. + errVCS = errors.New("directory is a version control repository") + errVendored = errors.New("file is in vendor directory") + errSubmoduleFile = errors.New("file is in another module") + errSubmoduleDir = errors.New("directory is in another module") + errHgArchivalTxt = errors.New("file is inserted by 'hg archive' and is always omitted") + errSymlink = errors.New("file is a symbolic link") + errNotRegular = errors.New("not a regular file") +) + +// CheckFiles reports whether a list of files satisfy the name and size +// constraints listed in the package documentation. The returned CheckedFiles +// record contains lists of valid, invalid, and omitted files. Every file in +// the given list will be included in exactly one of those lists. +// +// CheckFiles returns an error if the returned CheckedFiles does not describe +// a valid module zip file (according to CheckedFiles.Err). The returned +// CheckedFiles is still populated when an error is returned. +// +// Note that CheckFiles will not open any files, so Create may still fail when +// CheckFiles is successful due to I/O errors and reported size differences. +func CheckFiles(files []File) (CheckedFiles, error) { + cf, _, _ := checkFiles(files) + return cf, cf.Err() +} + +// checkFiles implements CheckFiles and also returns lists of valid files and +// their sizes, corresponding to cf.Valid. These lists are used in Crewate to +// avoid repeated calls to File.Lstat. 
+func checkFiles(files []File) (cf CheckedFiles, validFiles []File, validSizes []int64) { + errPaths := make(map[string]struct{}) + addError := func(path string, omitted bool, err error) { + if _, ok := errPaths[path]; ok { + return + } + errPaths[path] = struct{}{} + fe := FileError{Path: path, Err: err} + if omitted { + cf.Omitted = append(cf.Omitted, fe) + } else { + cf.Invalid = append(cf.Invalid, fe) + } } // Find directories containing go.mod files (other than the root). + // Files in these directories will be omitted. // These directories will not be included in the output zip. haveGoMod := make(map[string]bool) for _, f := range files { - dir, base := path.Split(f.Path()) + p := f.Path() + dir, base := path.Split(p) if strings.EqualFold(base, "go.mod") { info, err := f.Lstat() if err != nil { - return err + addError(p, false, err) + continue } if info.Mode().IsRegular() { haveGoMod[dir] = true @@ -146,77 +241,292 @@ func Create(w io.Writer, m module.Version, files []File) (err error) { } } - // Create the module zip file. 
- zw := zip.NewWriter(w) - prefix := fmt.Sprintf("%s@%s/", m.Path, m.Version) - - addFile := func(f File, path string, size int64) error { - rc, err := f.Open() - if err != nil { - return err - } - defer rc.Close() - w, err := zw.Create(prefix + path) - if err != nil { - return err - } - lr := &io.LimitedReader{R: rc, N: size + 1} - if _, err := io.Copy(w, lr); err != nil { - return err - } - if lr.N <= 0 { - return fmt.Errorf("file %q is larger than declared size", path) - } - return nil - } - collisions := make(collisionChecker) maxSize := int64(MaxZipFile) for _, f := range files { p := f.Path() if p != path.Clean(p) { - return fmt.Errorf("file path %s is not clean", p) + addError(p, false, errPathNotClean) + continue } if path.IsAbs(p) { - return fmt.Errorf("file path %s is not relative", p) + addError(p, false, errPathNotRelative) + continue + } + if isVendoredPackage(p) { + addError(p, true, errVendored) + continue } - if isVendoredPackage(p) || inSubmodule(p) { + if inSubmodule(p) { + addError(p, true, errSubmoduleFile) continue } if p == ".hg_archival.txt" { // Inserted by hg archive. // The go command drops this regardless of the VCS being used. + addError(p, true, errHgArchivalTxt) continue } if err := module.CheckFilePath(p); err != nil { - return err + addError(p, false, err) + continue } if strings.ToLower(p) == "go.mod" && p != "go.mod" { - return fmt.Errorf("found file named %s, want all lower-case go.mod", p) + addError(p, false, errGoModCase) + continue } info, err := f.Lstat() if err != nil { - return err + addError(p, false, err) + continue } if err := collisions.check(p, info.IsDir()); err != nil { - return err + addError(p, false, err) + continue } - if !info.Mode().IsRegular() { + if info.Mode()&os.ModeType == os.ModeSymlink { // Skip symbolic links (golang.org/issue/27093). 
+ addError(p, true, errSymlink) + continue + } + if !info.Mode().IsRegular() { + addError(p, true, errNotRegular) continue } size := info.Size() - if size < 0 || maxSize < size { - return fmt.Errorf("module source tree too large (max size is %d bytes)", MaxZipFile) + if size >= 0 && size <= maxSize { + maxSize -= size + } else if cf.SizeError == nil { + cf.SizeError = fmt.Errorf("module source tree too large (max size is %d bytes)", MaxZipFile) } - maxSize -= size if p == "go.mod" && size > MaxGoMod { - return fmt.Errorf("go.mod file too large (max size is %d bytes)", MaxGoMod) + addError(p, false, errGoModSize) + continue } if p == "LICENSE" && size > MaxLICENSE { - return fmt.Errorf("LICENSE file too large (max size is %d bytes)", MaxLICENSE) + addError(p, false, errLICENSESize) + continue + } + + cf.Valid = append(cf.Valid, p) + validFiles = append(validFiles, f) + validSizes = append(validSizes, info.Size()) + } + + return cf, validFiles, validSizes +} + +// CheckDir reports whether the files in dir satisfy the name and size +// constraints listed in the package documentation. The returned CheckedFiles +// record contains lists of valid, invalid, and omitted files. If a directory is +// omitted (for example, a nested module or vendor directory), it will appear in +// the omitted list, but its files won't be listed. +// +// CheckDir returns an error if it encounters an I/O error or if the returned +// CheckedFiles does not describe a valid module zip file (according to +// CheckedFiles.Err). The returned CheckedFiles is still populated when such +// an error is returned. +// +// Note that CheckDir will not open any files, so CreateFromDir may still fail +// when CheckDir is successful due to I/O errors. +func CheckDir(dir string) (CheckedFiles, error) { + // List files (as CreateFromDir would) and check which ones are omitted + // or invalid. 
+	files, omitted, err := listFilesInDir(dir)
+	if err != nil {
+		return CheckedFiles{}, err
+	}
+	cf, cfErr := CheckFiles(files)
+	_ = cfErr // ignore this error; we'll generate our own after rewriting paths.
+
+	// Replace all paths with file system paths.
+	// Paths returned by CheckFiles will be slash-separated paths relative to dir.
+	// That's probably not appropriate for error messages.
+	for i := range cf.Valid {
+		cf.Valid[i] = filepath.Join(dir, cf.Valid[i])
+	}
+	cf.Omitted = append(cf.Omitted, omitted...)
+	for i := range cf.Omitted {
+		cf.Omitted[i].Path = filepath.Join(dir, cf.Omitted[i].Path)
+	}
+	for i := range cf.Invalid {
+		cf.Invalid[i].Path = filepath.Join(dir, cf.Invalid[i].Path)
+	}
+	return cf, cf.Err()
+}
+
+// CheckZip reports whether the files contained in a zip file satisfy the name
+// and size constraints listed in the package documentation.
+//
+// CheckZip returns an error if the returned CheckedFiles does not describe
+// a valid module zip file (according to CheckedFiles.Err). The returned
+// CheckedFiles is still populated when an error is returned. CheckZip will
+// also return an error if the module path or version is malformed or if it
+// encounters an error reading the zip file.
+//
+// Note that CheckZip does not read individual files, so Unzip may still fail
+// when CheckZip is successful due to I/O errors.
+func CheckZip(m module.Version, zipFile string) (CheckedFiles, error) {
+	f, err := os.Open(zipFile)
+	if err != nil {
+		return CheckedFiles{}, err
+	}
+	defer f.Close()
+	_, cf, err := checkZip(m, f)
+	return cf, err
+}
+
+// checkZip implements CheckZip and also returns the *zip.Reader. This is
+// used in Unzip to avoid redundant I/O.
+func checkZip(m module.Version, f *os.File) (*zip.Reader, CheckedFiles, error) {
+	// Make sure the module path and version are valid.
+ if vers := module.CanonicalVersion(m.Version); vers != m.Version { + return nil, CheckedFiles{}, fmt.Errorf("version %q is not canonical (should be %q)", m.Version, vers) + } + if err := module.Check(m.Path, m.Version); err != nil { + return nil, CheckedFiles{}, err + } + + // Check the total file size. + info, err := f.Stat() + if err != nil { + return nil, CheckedFiles{}, err + } + zipSize := info.Size() + if zipSize > MaxZipFile { + cf := CheckedFiles{SizeError: fmt.Errorf("module zip file is too large (%d bytes; limit is %d bytes)", zipSize, MaxZipFile)} + return nil, cf, cf.Err() + } + + // Check for valid file names, collisions. + var cf CheckedFiles + addError := func(zf *zip.File, err error) { + cf.Invalid = append(cf.Invalid, FileError{Path: zf.Name, Err: err}) + } + z, err := zip.NewReader(f, zipSize) + if err != nil { + return nil, CheckedFiles{}, err + } + prefix := fmt.Sprintf("%s@%s/", m.Path, m.Version) + collisions := make(collisionChecker) + var size int64 + for _, zf := range z.File { + if !strings.HasPrefix(zf.Name, prefix) { + addError(zf, fmt.Errorf("path does not have prefix %q", prefix)) + continue } + name := zf.Name[len(prefix):] + if name == "" { + continue + } + isDir := strings.HasSuffix(name, "/") + if isDir { + name = name[:len(name)-1] + } + if path.Clean(name) != name { + addError(zf, errPathNotClean) + continue + } + if err := module.CheckFilePath(name); err != nil { + addError(zf, err) + continue + } + if err := collisions.check(name, isDir); err != nil { + addError(zf, err) + continue + } + if isDir { + continue + } + if base := path.Base(name); strings.EqualFold(base, "go.mod") { + if base != name { + addError(zf, fmt.Errorf("go.mod file not in module root directory")) + continue + } + if name != "go.mod" { + addError(zf, errGoModCase) + continue + } + } + sz := int64(zf.UncompressedSize64) + if sz >= 0 && MaxZipFile-size >= sz { + size += sz + } else if cf.SizeError == nil { + cf.SizeError = fmt.Errorf("total uncompressed size 
of module contents too large (max size is %d bytes)", MaxZipFile) + } + if name == "go.mod" && sz > MaxGoMod { + addError(zf, fmt.Errorf("go.mod file too large (max size is %d bytes)", MaxGoMod)) + continue + } + if name == "LICENSE" && sz > MaxLICENSE { + addError(zf, fmt.Errorf("LICENSE file too large (max size is %d bytes)", MaxLICENSE)) + continue + } + cf.Valid = append(cf.Valid, zf.Name) + } + return z, cf, cf.Err() +} + +// Create builds a zip archive for module m from an abstract list of files +// and writes it to w. +// +// Create verifies the restrictions described in the package documentation +// and should not produce an archive that Unzip cannot extract. Create does not +// include files in the output archive if they don't belong in the module zip. +// In particular, Create will not include files in modules found in +// subdirectories, most files in vendor directories, or irregular files (such +// as symbolic links) in the output archive. +func Create(w io.Writer, m module.Version, files []File) (err error) { + defer func() { + if err != nil { + err = &zipError{verb: "create zip", err: err} + } + }() + + // Check that the version is canonical, the module path is well-formed, and + // the major version suffix matches the major version. + if vers := module.CanonicalVersion(m.Version); vers != m.Version { + return fmt.Errorf("version %q is not canonical (should be %q)", m.Version, vers) + } + if err := module.Check(m.Path, m.Version); err != nil { + return err + } + + // Check whether files are valid, not valid, or should be omitted. + // Also check that the valid files don't exceed the maximum size. + cf, validFiles, validSizes := checkFiles(files) + if err := cf.Err(); err != nil { + return err + } + + // Create the module zip file. 
+ zw := zip.NewWriter(w) + prefix := fmt.Sprintf("%s@%s/", m.Path, m.Version) + + addFile := func(f File, path string, size int64) error { + rc, err := f.Open() + if err != nil { + return err + } + defer rc.Close() + w, err := zw.Create(prefix + path) + if err != nil { + return err + } + lr := &io.LimitedReader{R: rc, N: size + 1} + if _, err := io.Copy(w, lr); err != nil { + return err + } + if lr.N <= 0 { + return fmt.Errorf("file %q is larger than declared size", path) + } + return nil + } + + for i, f := range validFiles { + p := f.Path() + size := validSizes[i] if err := addFile(f, p, size); err != nil { return err } @@ -245,61 +555,7 @@ func CreateFromDir(w io.Writer, m module.Version, dir string) (err error) { } }() - var files []File - err = filepath.Walk(dir, func(filePath string, info os.FileInfo, err error) error { - if err != nil { - return err - } - relPath, err := filepath.Rel(dir, filePath) - if err != nil { - return err - } - slashPath := filepath.ToSlash(relPath) - - if info.IsDir() { - if filePath == dir { - // Don't skip the top-level directory. - return nil - } - - // Skip VCS directories. - // fossil repos are regular files with arbitrary names, so we don't try - // to exclude them. - switch filepath.Base(filePath) { - case ".bzr", ".git", ".hg", ".svn": - return filepath.SkipDir - } - - // Skip some subdirectories inside vendor, but maintain bug - // golang.org/issue/31562, described in isVendoredPackage. - // We would like Create and CreateFromDir to produce the same result - // for a set of files, whether expressed as a directory tree or zip. - if isVendoredPackage(slashPath) { - return filepath.SkipDir - } - - // Skip submodules (directories containing go.mod files). 
- if goModInfo, err := os.Lstat(filepath.Join(filePath, "go.mod")); err == nil && !goModInfo.IsDir() { - return filepath.SkipDir - } - return nil - } - - if info.Mode().IsRegular() { - if !isVendoredPackage(slashPath) { - files = append(files, dirFile{ - filePath: filePath, - slashPath: slashPath, - info: info, - }) - } - return nil - } - - // Not a regular file or a directory. Probably a symbolic link. - // Irregular files are ignored, so skip it. - return nil - }) + files, _, err := listFilesInDir(dir) if err != nil { return err } @@ -356,89 +612,28 @@ func Unzip(dir string, m module.Version, zipFile string) (err error) { } }() - if vers := module.CanonicalVersion(m.Version); vers != m.Version { - return fmt.Errorf("version %q is not canonical (should be %q)", m.Version, vers) - } - if err := module.Check(m.Path, m.Version); err != nil { - return err - } - // Check that the directory is empty. Don't create it yet in case there's // an error reading the zip. - files, _ := ioutil.ReadDir(dir) - if len(files) > 0 { + if files, _ := ioutil.ReadDir(dir); len(files) > 0 { return fmt.Errorf("target directory %v exists and is not empty", dir) } - // Open the zip file and ensure it's under the size limit. + // Open the zip and check that it satisfies all restrictions. f, err := os.Open(zipFile) if err != nil { return err } defer f.Close() - info, err := f.Stat() + z, cf, err := checkZip(m, f) if err != nil { return err } - zipSize := info.Size() - if zipSize > MaxZipFile { - return fmt.Errorf("module zip file is too large (%d bytes; limit is %d bytes)", zipSize, MaxZipFile) - } - - z, err := zip.NewReader(f, zipSize) - if err != nil { + if err := cf.Err(); err != nil { return err } - // Check total size, valid file names. - collisions := make(collisionChecker) + // Unzip, enforcing sizes declared in the zip file. 
prefix := fmt.Sprintf("%s@%s/", m.Path, m.Version) - var size int64 - for _, zf := range z.File { - if !strings.HasPrefix(zf.Name, prefix) { - return fmt.Errorf("unexpected file name %s", zf.Name) - } - name := zf.Name[len(prefix):] - if name == "" { - continue - } - isDir := strings.HasSuffix(name, "/") - if isDir { - name = name[:len(name)-1] - } - if path.Clean(name) != name { - return fmt.Errorf("invalid file name %s", zf.Name) - } - if err := module.CheckFilePath(name); err != nil { - return err - } - if err := collisions.check(name, isDir); err != nil { - return err - } - if isDir { - continue - } - if base := path.Base(name); strings.EqualFold(base, "go.mod") { - if base != name { - return fmt.Errorf("found go.mod file not in module root directory (%s)", zf.Name) - } else if name != "go.mod" { - return fmt.Errorf("found file named %s, want all lower-case go.mod", zf.Name) - } - } - s := int64(zf.UncompressedSize64) - if s < 0 || MaxZipFile-size < s { - return fmt.Errorf("total uncompressed size of module contents too large (max size is %d bytes)", MaxZipFile) - } - size += s - if name == "go.mod" && s > MaxGoMod { - return fmt.Errorf("go.mod file too large (max size is %d bytes)", MaxGoMod) - } - if name == "LICENSE" && s > MaxLICENSE { - return fmt.Errorf("LICENSE file too large (max size is %d bytes)", MaxLICENSE) - } - } - - // Unzip, enforcing sizes checked earlier. if err := os.MkdirAll(dir, 0777); err != nil { return err } @@ -515,6 +710,72 @@ func (cc collisionChecker) check(p string, isDir bool) error { return nil } +// listFilesInDir walks the directory tree rooted at dir and returns a list of +// files, as well as a list of directories and files that were skipped (for +// example, nested modules and symbolic links). 
+func listFilesInDir(dir string) (files []File, omitted []FileError, err error) { + err = filepath.Walk(dir, func(filePath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + relPath, err := filepath.Rel(dir, filePath) + if err != nil { + return err + } + slashPath := filepath.ToSlash(relPath) + + // Skip some subdirectories inside vendor, but maintain bug + // golang.org/issue/31562, described in isVendoredPackage. + // We would like Create and CreateFromDir to produce the same result + // for a set of files, whether expressed as a directory tree or zip. + if isVendoredPackage(slashPath) { + omitted = append(omitted, FileError{Path: slashPath, Err: errVendored}) + return nil + } + + if info.IsDir() { + if filePath == dir { + // Don't skip the top-level directory. + return nil + } + + // Skip VCS directories. + // fossil repos are regular files with arbitrary names, so we don't try + // to exclude them. + switch filepath.Base(filePath) { + case ".bzr", ".git", ".hg", ".svn": + omitted = append(omitted, FileError{Path: slashPath, Err: errVCS}) + return filepath.SkipDir + } + + // Skip submodules (directories containing go.mod files). + if goModInfo, err := os.Lstat(filepath.Join(filePath, "go.mod")); err == nil && !goModInfo.IsDir() { + omitted = append(omitted, FileError{Path: slashPath, Err: errSubmoduleDir}) + return filepath.SkipDir + } + return nil + } + + // Skip irregular files and files in vendor directories. + // Irregular files are ignored. They're typically symbolic links. 
+	if !info.Mode().IsRegular() {
+		omitted = append(omitted, FileError{Path: slashPath, Err: errNotRegular})
+		return nil
+	}
+
+	files = append(files, dirFile{
+		filePath:  filePath,
+		slashPath: slashPath,
+		info:      info,
+	})
+	return nil
+	})
+	if err != nil {
+		return nil, nil, err
+	}
+	return files, omitted, nil
+}
+
 type zipError struct {
 	verb, path string
 	err        error
diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt
index ab2f81a66b..c0c008e038 100644
--- a/src/cmd/vendor/modules.txt
+++ b/src/cmd/vendor/modules.txt
@@ -29,7 +29,7 @@ golang.org/x/arch/x86/x86asm
 golang.org/x/crypto/ed25519
 golang.org/x/crypto/ed25519/internal/edwards25519
 golang.org/x/crypto/ssh/terminal
-# golang.org/x/mod v0.3.1-0.20200824162228-c0d644d00ab8
+# golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449
 ## explicit
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
-- 
cgit v1.2.3-54-g00ecf


From 829ca10f9205ee57158062de823121624deb8988 Mon Sep 17 00:00:00 2001
From: Michael Matloob
Date: Mon, 31 Aug 2020 16:12:35 -0400
Subject: cmd/go/internal/get: disallow non-ASCII unicode letters from import
 paths

The copy of CheckImportPath in path.go and the regular expression for
github repos in vcsPaths together allow import paths containing
non-ASCII unicode letters. These import paths all come from github
repos with non-ASCII unicode letters in their directory paths. This
mainly shows up in GOPATH mode, but could also show up in Module mode
when getting a module in GOPROXY=direct mode.

We expect there to not be any significantly affected users of this
change-- an investigation of github repos that would produce import
paths that would comply with the copy of CheckImportPath that's being
removed, but not modload.CheckImportPaths only surfaced a handful of
cases, all of which seemed to be small test or demonstration repos.
But this CL is being submitted early in the cycle so that it can be
backed out if need be.
Updates #29101 Change-Id: I719df4af5b318e1330e90d8a0bffe5bb8d816f4f Reviewed-on: https://go-review.googlesource.com/c/go/+/251878 Run-TryBot: Michael Matloob TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. Mills Reviewed-by: Jay Conrod --- src/cmd/go/internal/get/get.go | 4 +- src/cmd/go/internal/get/path.go | 192 ---------------------------------------- src/cmd/go/internal/get/vcs.go | 2 +- 3 files changed, 4 insertions(+), 194 deletions(-) delete mode 100644 src/cmd/go/internal/get/path.go diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go index e5bacadaa3..d1f032a167 100644 --- a/src/cmd/go/internal/get/get.go +++ b/src/cmd/go/internal/get/get.go @@ -20,6 +20,8 @@ import ( "cmd/go/internal/str" "cmd/go/internal/web" "cmd/go/internal/work" + + "golang.org/x/mod/module" ) var CmdGet = &base.Command{ @@ -427,7 +429,7 @@ func downloadPackage(p *load.Package) error { } importPrefix = importPrefix[:slash] } - if err := CheckImportPath(importPrefix); err != nil { + if err := module.CheckImportPath(importPrefix); err != nil { return fmt.Errorf("%s: invalid import path: %v", p.ImportPath, err) } diff --git a/src/cmd/go/internal/get/path.go b/src/cmd/go/internal/get/path.go deleted file mode 100644 index ce2e0cdd70..0000000000 --- a/src/cmd/go/internal/get/path.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package get - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -// The following functions are copied verbatim from golang.org/x/mod/module/module.go, -// with a change to additionally reject Windows short-names, -// and one to accept arbitrary letters (golang.org/issue/29101). -// -// TODO(bcmills): After the call site for this function is backported, -// consolidate this back down to a single copy. 
-// -// NOTE: DO NOT MERGE THESE UNTIL WE DECIDE ABOUT ARBITRARY LETTERS IN MODULE MODE. - -// CheckImportPath checks that an import path is valid. -func CheckImportPath(path string) error { - if err := checkPath(path, false); err != nil { - return fmt.Errorf("malformed import path %q: %v", path, err) - } - return nil -} - -// checkPath checks that a general path is valid. -// It returns an error describing why but not mentioning path. -// Because these checks apply to both module paths and import paths, -// the caller is expected to add the "malformed ___ path %q: " prefix. -// fileName indicates whether the final element of the path is a file name -// (as opposed to a directory name). -func checkPath(path string, fileName bool) error { - if !utf8.ValidString(path) { - return fmt.Errorf("invalid UTF-8") - } - if path == "" { - return fmt.Errorf("empty string") - } - if path[0] == '-' { - return fmt.Errorf("leading dash") - } - if strings.Contains(path, "//") { - return fmt.Errorf("double slash") - } - if path[len(path)-1] == '/' { - return fmt.Errorf("trailing slash") - } - elemStart := 0 - for i, r := range path { - if r == '/' { - if err := checkElem(path[elemStart:i], fileName); err != nil { - return err - } - elemStart = i + 1 - } - } - if err := checkElem(path[elemStart:], fileName); err != nil { - return err - } - return nil -} - -// checkElem checks whether an individual path element is valid. -// fileName indicates whether the element is a file name (not a directory name). -func checkElem(elem string, fileName bool) error { - if elem == "" { - return fmt.Errorf("empty path element") - } - if strings.Count(elem, ".") == len(elem) { - return fmt.Errorf("invalid path element %q", elem) - } - if elem[0] == '.' && !fileName { - return fmt.Errorf("leading dot in path element") - } - if elem[len(elem)-1] == '.' 
{ - return fmt.Errorf("trailing dot in path element") - } - - charOK := pathOK - if fileName { - charOK = fileNameOK - } - for _, r := range elem { - if !charOK(r) { - return fmt.Errorf("invalid char %q", r) - } - } - - // Windows disallows a bunch of path elements, sadly. - // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file - short := elem - if i := strings.Index(short, "."); i >= 0 { - short = short[:i] - } - for _, bad := range badWindowsNames { - if strings.EqualFold(bad, short) { - return fmt.Errorf("disallowed path element %q", elem) - } - } - - // Reject path components that look like Windows short-names. - // Those usually end in a tilde followed by one or more ASCII digits. - if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 { - suffix := short[tilde+1:] - suffixIsDigits := true - for _, r := range suffix { - if r < '0' || r > '9' { - suffixIsDigits = false - break - } - } - if suffixIsDigits { - return fmt.Errorf("trailing tilde and digits in path element") - } - } - - return nil -} - -// pathOK reports whether r can appear in an import path element. -// -// NOTE: This function DIVERGES from module mode pathOK by accepting Unicode letters. -func pathOK(r rune) bool { - if r < utf8.RuneSelf { - return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' || - '0' <= r && r <= '9' || - 'A' <= r && r <= 'Z' || - 'a' <= r && r <= 'z' - } - return unicode.IsLetter(r) -} - -// fileNameOK reports whether r can appear in a file name. -// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. -// If we expand the set of allowed characters here, we have to -// work harder at detecting potential case-folding and normalization collisions. -// See note about "safe encoding" below. -func fileNameOK(r rune) bool { - if r < utf8.RuneSelf { - // Entire set of ASCII punctuation, from which we remove characters: - // ! " # $ % & ' ( ) * + , - . / : ; < = > ? 
@ [ \ ] ^ _ ` { | } ~ - // We disallow some shell special characters: " ' * < > ? ` | - // (Note that some of those are disallowed by the Windows file system as well.) - // We also disallow path separators / : and \ (fileNameOK is only called on path element characters). - // We allow spaces (U+0020) in file names. - const allowed = "!#$%&()+,-.=@[]^_{}~ " - if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' { - return true - } - for i := 0; i < len(allowed); i++ { - if rune(allowed[i]) == r { - return true - } - } - return false - } - // It may be OK to add more ASCII punctuation here, but only carefully. - // For example Windows disallows < > \, and macOS disallows :, so we must not allow those. - return unicode.IsLetter(r) -} - -// badWindowsNames are the reserved file path elements on Windows. -// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file -var badWindowsNames = []string{ - "CON", - "PRN", - "AUX", - "NUL", - "COM1", - "COM2", - "COM3", - "COM4", - "COM5", - "COM6", - "COM7", - "COM8", - "COM9", - "LPT1", - "LPT2", - "LPT3", - "LPT4", - "LPT5", - "LPT6", - "LPT7", - "LPT8", - "LPT9", -} diff --git a/src/cmd/go/internal/get/vcs.go b/src/cmd/go/internal/get/vcs.go index fd37fcb76f..24c32935d0 100644 --- a/src/cmd/go/internal/get/vcs.go +++ b/src/cmd/go/internal/get/vcs.go @@ -1027,7 +1027,7 @@ var vcsPaths = []*vcsPath{ // Github { prefix: "github.com/", - regexp: lazyregexp.New(`^(?Pgithub\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/[\p{L}0-9_.\-]+)*$`), + regexp: lazyregexp.New(`^(?Pgithub\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`), vcs: "git", repo: "https://{root}", check: noVCSSuffix, -- cgit v1.2.3-54-g00ecf From ac55d58fca4312fe4f84fa3a4761800803bc25e0 Mon Sep 17 00:00:00 2001 From: witchard Date: Sun, 30 Aug 2020 18:15:03 +0000 Subject: cmd/go/internal/get: add GOINSECURE support Adds support for the GOINSECURE environment variable to GOPATH mode. Updates #37519. 
Change-Id: Ibe3f52b7f30b1395edb000998905ee93abe6cada GitHub-Last-Rev: e298c0009eb5eba537bb00185a8778d2aab696ba GitHub-Pull-Request: golang/go#38628 Reviewed-on: https://go-review.googlesource.com/c/go/+/229758 Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. Mills --- src/cmd/go/alldocs.go | 5 ++++- src/cmd/go/internal/get/get.go | 16 ++++++++------ src/cmd/go/testdata/script/get_insecure_env.txt | 29 +++++++++++++++++++++++++ 3 files changed, 42 insertions(+), 8 deletions(-) create mode 100644 src/cmd/go/testdata/script/get_insecure_env.txt diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index 98861c8a0d..8ad4f66d09 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -2172,7 +2172,10 @@ // before resolving dependencies or building the code. // // The -insecure flag permits fetching from repositories and resolving -// custom domains using insecure schemes such as HTTP. Use with caution. +// custom domains using insecure schemes such as HTTP. Use with caution. The +// GOINSECURE environment variable is usually a better alternative, since it +// provides control over which modules may be retrieved using an insecure scheme. +// See 'go help environment' for details. // // The -t flag instructs get to also download the packages required to build // the tests for the specified packages. diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go index d1f032a167..d0be3fe1e7 100644 --- a/src/cmd/go/internal/get/get.go +++ b/src/cmd/go/internal/get/get.go @@ -43,7 +43,10 @@ The -fix flag instructs get to run the fix tool on the downloaded packages before resolving dependencies or building the code. The -insecure flag permits fetching from repositories and resolving -custom domains using insecure schemes such as HTTP. Use with caution. +custom domains using insecure schemes such as HTTP. Use with caution. 
The +GOINSECURE environment variable is usually a better alternative, since it +provides control over which modules may be retrieved using an insecure scheme. +See 'go help environment' for details. The -t flag instructs get to also download the packages required to build the tests for the specified packages. @@ -411,11 +414,6 @@ func downloadPackage(p *load.Package) error { blindRepo bool // set if the repo has unusual configuration ) - security := web.SecureOnly - if Insecure { - security = web.Insecure - } - // p can be either a real package, or a pseudo-package whose “import path” is // actually a wildcard pattern. // Trim the path at the element containing the first wildcard, @@ -432,6 +430,10 @@ func downloadPackage(p *load.Package) error { if err := module.CheckImportPath(importPrefix); err != nil { return fmt.Errorf("%s: invalid import path: %v", p.ImportPath, err) } + security := web.SecureOnly + if Insecure || module.MatchPrefixPatterns(cfg.GOINSECURE, importPrefix) { + security = web.Insecure + } if p.Internal.Build.SrcRoot != "" { // Directory exists. Look for checkout along path to src. @@ -475,7 +477,7 @@ func downloadPackage(p *load.Package) error { } vcs, repo, rootPath = rr.vcs, rr.Repo, rr.Root } - if !blindRepo && !vcs.isSecure(repo) && !Insecure { + if !blindRepo && !vcs.isSecure(repo) && security != web.Insecure { return fmt.Errorf("cannot download, %v uses insecure protocol", repo) } diff --git a/src/cmd/go/testdata/script/get_insecure_env.txt b/src/cmd/go/testdata/script/get_insecure_env.txt new file mode 100644 index 0000000000..8d88427c31 --- /dev/null +++ b/src/cmd/go/testdata/script/get_insecure_env.txt @@ -0,0 +1,29 @@ +[!net] skip +[!exec:git] skip + +# GOPATH: Set up +env GO111MODULE=off + +# GOPATH: Try go get -d of HTTP-only repo (should fail). +! go get -d insecure.go-get-issue-15410.appspot.com/pkg/p + +# GOPATH: Try again with invalid GOINSECURE (should fail). +env GOINSECURE=insecure.go-get-issue-15410.appspot.com/pkg/q +! 
go get -d insecure.go-get-issue-15410.appspot.com/pkg/p + +# GOPATH: Try with correct GOINSECURE (should succeed). +env GOINSECURE=insecure.go-get-issue-15410.appspot.com/pkg/p +go get -d insecure.go-get-issue-15410.appspot.com/pkg/p + +# GOPATH: Try updating without GOINSECURE (should fail). +env GOINSECURE= +! go get -d -u -f insecure.go-get-issue-15410.appspot.com/pkg/p + +# GOPATH: Try updating with GOINSECURE glob (should succeed). +env GOINSECURE=*.go-get-*.appspot.com +go get -d -u -f insecure.go-get-issue-15410.appspot.com/pkg/p + +# GOPATH: Try updating with GOINSECURE base URL (should succeed). +env GOINSECURE=insecure.go-get-issue-15410.appspot.com +go get -d -u -f insecure.go-get-issue-15410.appspot.com/pkg/p + -- cgit v1.2.3-54-g00ecf From afa150c2ea1b121c7727c12ab3615fcc173d0d15 Mon Sep 17 00:00:00 2001 From: Changkun Ou Date: Mon, 31 Aug 2020 20:54:17 +0200 Subject: testing: fail Example tests that invoke runtime.Goexit Previously, if an example test invoked runtime.Goexit, it would pass yet hang until a timeout, while regular tests that invoke runtime.Goexit do fail. This change removes that inconsistent behavior and makes such example tests fail, and panic with an indication of having invoked runtime.Goexit. Fixes #41084 Change-Id: I0ffa152204f2b1580f4d5d6961ba1ce6b13fc022 Reviewed-on: https://go-review.googlesource.com/c/go/+/251857 Reviewed-by: Emmanuel Odeke Reviewed-by: Bryan C. Mills Run-TryBot: Bryan C. 
Mills TryBot-Result: Gobot Gobot --- src/cmd/go/testdata/script/test_example_goexit.txt | 25 ++++++++++++++++++++++ src/testing/example.go | 11 +++++++--- src/testing/run_example.go | 4 +++- src/testing/run_example_js.go | 4 +++- 4 files changed, 39 insertions(+), 5 deletions(-) create mode 100644 src/cmd/go/testdata/script/test_example_goexit.txt diff --git a/src/cmd/go/testdata/script/test_example_goexit.txt b/src/cmd/go/testdata/script/test_example_goexit.txt new file mode 100644 index 0000000000..59219e3366 --- /dev/null +++ b/src/cmd/go/testdata/script/test_example_goexit.txt @@ -0,0 +1,25 @@ +# For issue golang.org/issue/41084 +[short] skip + +! go test -v examplegoexit +stdout '(?s)--- PASS.*--- FAIL.*' +stdout 'panic: test executed panic\(nil\) or runtime\.Goexit' + +-- examplegoexit/example_test.go -- +package main + +import ( + "fmt" + "runtime" +) + +func ExamplePass() { + fmt.Println("pass") + // Output: + // pass +} + +func ExampleGoexit() { + runtime.Goexit() + // Output: +} diff --git a/src/testing/example.go b/src/testing/example.go index adc91d5faf..0217c5d242 100644 --- a/src/testing/example.go +++ b/src/testing/example.go @@ -62,9 +62,10 @@ func sortLines(output string) string { // If stdout doesn't match the expected output or if recovered is non-nil, it'll print the cause of failure to stdout. // If the test is chatty/verbose, it'll print a success message to stdout. // If recovered is non-nil, it'll panic with that value. 
-func (eg *InternalExample) processRunResult(stdout string, timeSpent time.Duration, recovered interface{}) (passed bool) { +// If the test panicked with nil, or invoked runtime.Goexit, it'll be +// made to fail and panic with errNilPanicOrGoexit +func (eg *InternalExample) processRunResult(stdout string, timeSpent time.Duration, finished bool, recovered interface{}) (passed bool) { passed = true - dstr := fmtDuration(timeSpent) var fail string got := strings.TrimSpace(stdout) @@ -78,16 +79,20 @@ func (eg *InternalExample) processRunResult(stdout string, timeSpent time.Durati fail = fmt.Sprintf("got:\n%s\nwant:\n%s\n", got, want) } } - if fail != "" || recovered != nil { + if fail != "" || !finished || recovered != nil { fmt.Printf("--- FAIL: %s (%s)\n%s", eg.Name, dstr, fail) passed = false } else if *chatty { fmt.Printf("--- PASS: %s (%s)\n", eg.Name, dstr) } + if recovered != nil { // Propagate the previously recovered result, by panicking. panic(recovered) } + if !finished && recovered == nil { + panic(errNilPanicOrGoexit) + } return } diff --git a/src/testing/run_example.go b/src/testing/run_example.go index 10bde49e5b..4dc83f7d32 100644 --- a/src/testing/run_example.go +++ b/src/testing/run_example.go @@ -43,6 +43,7 @@ func runExample(eg InternalExample) (ok bool) { outC <- buf.String() }() + finished := false start := time.Now() // Clean up in a deferred call so we can recover if the example panics. @@ -55,10 +56,11 @@ func runExample(eg InternalExample) (ok bool) { out := <-outC err := recover() - ok = eg.processRunResult(out, timeSpent, err) + ok = eg.processRunResult(out, timeSpent, finished, err) }() // Run example. 
eg.F() + finished = true return } diff --git a/src/testing/run_example_js.go b/src/testing/run_example_js.go index 472e0c57fa..1d4164b61f 100644 --- a/src/testing/run_example_js.go +++ b/src/testing/run_example_js.go @@ -26,6 +26,7 @@ func runExample(eg InternalExample) (ok bool) { stdout := os.Stdout f := createTempFile(eg.Name) os.Stdout = f + finished := false start := time.Now() // Clean up in a deferred call so we can recover if the example panics. @@ -50,11 +51,12 @@ func runExample(eg InternalExample) (ok bool) { } err := recover() - ok = eg.processRunResult(out, timeSpent, err) + ok = eg.processRunResult(out, timeSpent, finished, err) }() // Run example. eg.F() + finished = true return } -- cgit v1.2.3-54-g00ecf From 971203cad3c4a5cdfd196a7ad5ce76b550d2ff9f Mon Sep 17 00:00:00 2001 From: Michael Matloob Date: Tue, 1 Sep 2020 15:41:36 -0400 Subject: cmd/go: remove TestScript/get_unicode and an internal get test case That test tested that import paths with non-ASCII unicode paths were allowed by the Go command. Remove this test case because golang.org/cl/251878 removes that support. Also rewrite a test case in TestRepoRootForImportPath in the test for cmd/go/internal/get to reflect that unicode directory names are now disallowed. Updates #29101 Change-Id: I669e220facd04fc82ccd05dd08e8f1ff4d48b1fd Reviewed-on: https://go-review.googlesource.com/c/go/+/252297 Run-TryBot: Michael Matloob TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. 
Mills --- src/cmd/go/internal/get/vcs_test.go | 7 ++---- src/cmd/go/testdata/script/get_unicode.txt | 40 ------------------------------ 2 files changed, 2 insertions(+), 45 deletions(-) delete mode 100644 src/cmd/go/testdata/script/get_unicode.txt diff --git a/src/cmd/go/internal/get/vcs_test.go b/src/cmd/go/internal/get/vcs_test.go index 91800baa83..195bc231eb 100644 --- a/src/cmd/go/internal/get/vcs_test.go +++ b/src/cmd/go/internal/get/vcs_test.go @@ -32,13 +32,10 @@ func TestRepoRootForImportPath(t *testing.T) { Repo: "https://github.com/golang/groupcache", }, }, - // Unicode letters in directories (issue 18660). + // Unicode letters in directories are not valid. { "github.com/user/unicode/испытание", - &RepoRoot{ - vcs: vcsGit, - Repo: "https://github.com/user/unicode", - }, + nil, }, // IBM DevOps Services tests { diff --git a/src/cmd/go/testdata/script/get_unicode.txt b/src/cmd/go/testdata/script/get_unicode.txt deleted file mode 100644 index d3b82bdf25..0000000000 --- a/src/cmd/go/testdata/script/get_unicode.txt +++ /dev/null @@ -1,40 +0,0 @@ -env GO111MODULE=off - -[!exec:git] skip -[short] skip - -# Construct a repository that imports a non-ASCII path. -cd $WORK/_origin/example.com/unicode -exec git init -exec git config user.name 'Nameless Gopher' -exec git config user.email 'nobody@golang.org' -exec git add unicode.go -exec git commit -m 'add unicode.go' - -# Clone the repo into GOPATH so that 'go get -u' can find it. -mkdir $GOPATH/src/example.com/unicode -cd $GOPATH/src/example.com/unicode -exec git clone $WORK/_origin/example.com/unicode . - -# Construct the imported repository. -cd $WORK/_origin/example.com/испытание -exec git init -exec git config user.name 'Nameless Gopher' -exec git config user.email 'nobody@golang.org' -exec git add испытание.go -exec git commit -m 'add испытание.go' - -# Clone that repo into GOPATH too. 
-mkdir $GOPATH/src/example.com/испытание -cd $GOPATH/src/example.com/испытание -exec git clone $WORK/_origin/example.com/испытание . - -# Upgrading the importer should pull from the non-ASCII repo. -cd $GOPATH -go get -u example.com/unicode - --- $WORK/_origin/example.com/unicode/unicode.go -- -package unicode -import _ "example.com/испытание" --- $WORK/_origin/example.com/испытание/испытание.go -- -package испытание -- cgit v1.2.3-54-g00ecf From 6fc329bb7fbb78315e2f53895a9fc6cbed63c1d7 Mon Sep 17 00:00:00 2001 From: Marco Date: Sun, 24 May 2020 14:58:51 +0200 Subject: net/http/cgi: don't pass nil Body to the child handler For server requests, the http.Request Body should not be nil. Fixes #39190 Change-Id: I32de7b6c0f6ca55008fea9fd86089cda0a2dea62 Reviewed-on: https://go-review.googlesource.com/c/go/+/235137 Reviewed-by: Bryan C. Mills Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot --- src/net/http/cgi/child.go | 3 +++ src/net/http/cgi/integration_test.go | 21 +++++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/src/net/http/cgi/child.go b/src/net/http/cgi/child.go index 9474175f17..d7d813e68a 100644 --- a/src/net/http/cgi/child.go +++ b/src/net/http/cgi/child.go @@ -146,6 +146,9 @@ func Serve(handler http.Handler) error { if err != nil { return err } + if req.Body == nil { + req.Body = http.NoBody + } if handler == nil { handler = http.DefaultServeMux } diff --git a/src/net/http/cgi/integration_test.go b/src/net/http/cgi/integration_test.go index 32d59c09a3..eaa090f6fe 100644 --- a/src/net/http/cgi/integration_test.go +++ b/src/net/http/cgi/integration_test.go @@ -152,6 +152,23 @@ func TestChildOnlyHeaders(t *testing.T) { } } +// Test that a child handler does not receive a nil Request Body. 
+// golang.org/issue/39190 +func TestNilRequestBody(t *testing.T) { + testenv.MustHaveExec(t) + + h := &Handler{ + Path: os.Args[0], + Root: "/test.go", + Args: []string{"-test.run=TestBeChildCGIProcess"}, + } + expectedMap := map[string]string{ + "nil-request-body": "false", + } + _ = runCgiTest(t, h, "POST /test.go?nil-request-body=1 HTTP/1.0\nHost: example.com\n\n", expectedMap) + _ = runCgiTest(t, h, "POST /test.go?nil-request-body=1 HTTP/1.0\nHost: example.com\nContent-Length: 0\n\n", expectedMap) +} + // golang.org/issue/7198 func Test500WithNoHeaders(t *testing.T) { want500Test(t, "/immediate-disconnect") } func Test500WithNoContentType(t *testing.T) { want500Test(t, "/no-content-type") } @@ -198,6 +215,10 @@ func TestBeChildCGIProcess(t *testing.T) { os.Exit(0) } Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + if req.FormValue("nil-request-body") == "1" { + fmt.Fprintf(rw, "nil-request-body=%v\n", req.Body == nil) + return + } rw.Header().Set("X-Test-Header", "X-Test-Value") req.ParseForm() if req.FormValue("no-body") == "1" { -- cgit v1.2.3-54-g00ecf From b0369225ebfdc355c61abbbb2663316388c60895 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Mon, 31 Aug 2020 23:15:05 -0400 Subject: cmd/go: simplify and document lazy-loading test cases I decided to add package and module diagrams to the test cases to make them easier to follow. While adding those diagrams, I noticed some strong similarities among a couple of the graphs, so I consolidated those cases (and deleted the redundant tests). For #36460 Change-Id: Id6cd04fc871379b83851c2d1af89ea9296a0f3e5 Reviewed-on: https://go-review.googlesource.com/c/go/+/251997 Run-TryBot: Bryan C. 
Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod Reviewed-by: Michael Matloob --- src/cmd/go/testdata/script/mod_all.txt | 72 +++++++++- .../go/testdata/script/mod_lazy_import_allmod.txt | 19 ++- .../testdata/script/mod_lazy_import_test_dep.txt | 159 --------------------- src/cmd/go/testdata/script/mod_lazy_new_import.txt | 28 +++- src/cmd/go/testdata/script/mod_lazy_test_all.txt | 125 ---------------- .../go/testdata/script/mod_lazy_test_horizon.txt | 20 ++- .../testdata/script/mod_lazy_test_of_test_dep.txt | 77 ++++++---- 7 files changed, 177 insertions(+), 323 deletions(-) delete mode 100644 src/cmd/go/testdata/script/mod_lazy_import_test_dep.txt delete mode 100644 src/cmd/go/testdata/script/mod_lazy_test_all.txt diff --git a/src/cmd/go/testdata/script/mod_all.txt b/src/cmd/go/testdata/script/mod_all.txt index a219913094..9f4b0a4e4d 100644 --- a/src/cmd/go/testdata/script/mod_all.txt +++ b/src/cmd/go/testdata/script/mod_all.txt @@ -1,6 +1,38 @@ # This test illustrates the relationship between the 'all' pattern and # the dependencies of the main module. +# The package import graph used in this test looks like: +# +# main --------- a --------- b +# | | +# | a_test ---- c +# | | +# | c_test ---- d +# | +# main_test ---- t --------- u +# | +# t_test ---- w +# | +# w_test ---- x +# +# main/testonly_test ---- q --------- r +# | +# q_test ---- s +# +# And the module dependency graph looks like: +# +# main --- a.1 ---- b.1 +# \ \ \ +# \ \ c.1 -- d.1 +# \ \ +# \ t.1 ---- u.1 +# \ \ +# \ w.1 -- x.1 +# \ +# q.1 ---- r.1 +# \ +# s.1 + env PKGFMT='{{if .Module}}{{.ImportPath}}{{end}}' env MODFMT='{{.Path}}' @@ -41,10 +73,11 @@ stdout '^example.com/u$' # variants of those packages. go list -f $PKGFMT all -stdout -count=11 '^.' +stdout -count=13 '^.' 
stdout '^example.com/a$' stdout '^example.com/b$' stdout '^example.com/c$' +stdout '^example.com/d$' stdout '^example.com/main$' stdout '^example.com/main/testonly$' stdout '^example.com/q$' @@ -53,20 +86,22 @@ stdout '^example.com/s$' stdout '^example.com/t$' stdout '^example.com/u$' stdout '^example.com/w$' +stdout '^example.com/x$' # 'go list -test all' is equivalent to 'go list -test $(go list all)' # and both should include tests for every package in 'all'. -go list -test -f $PKGFMT example.com/a example.com/b example.com/c example.com/main example.com/main/testonly example.com/q example.com/r example.com/s example.com/t example.com/u example.com/w +go list -test -f $PKGFMT example.com/a example.com/b example.com/c example.com/d example.com/main example.com/main/testonly example.com/q example.com/r example.com/s example.com/t example.com/u example.com/w example.com/x cp stdout list-test-explicit.txt go list -test -f $PKGFMT all cmp stdout list-test-explicit.txt -stdout -count=34 '^.' +stdout -count=36 '^.' stdout '^example.com/a$' stdout '^example.com/b$' stdout '^example.com/c$' +stdout '^example.com/d$' stdout '^example.com/main$' stdout '^example.com/main/testonly$' stdout '^example.com/q$' @@ -75,6 +110,7 @@ stdout '^example.com/s$' stdout '^example.com/t$' stdout '^example.com/u$' stdout '^example.com/w$' +stdout '^example.com/x$' stdout '^example.com/a.test$' stdout '^example.com/a_test \[example.com/a.test\]$' stdout '^example.com/b.test$' @@ -103,10 +139,11 @@ stdout '^example.com/w_test \[example.com/w.test\]$' # 'go list -m all' covers the packages in 'go list -test -deps all'. go list -m -f $MODFMT all -stdout -count=10 '^.' +stdout -count=12 '^.' 
stdout '^example.com/a$' stdout '^example.com/b$' stdout '^example.com/c$' +stdout '^example.com/d$' stdout '^example.com/main$' stdout '^example.com/q$' stdout '^example.com/r$' @@ -114,6 +151,7 @@ stdout '^example.com/s$' stdout '^example.com/t$' stdout '^example.com/u$' stdout '^example.com/w$' +stdout '^example.com/x$' # 'go mod vendor' copies in only the packages transitively imported by the main @@ -176,12 +214,14 @@ replace ( example.com/a v0.1.0 => ./a example.com/b v0.1.0 => ./b example.com/c v0.1.0 => ./c + example.com/d v0.1.0 => ./d example.com/q v0.1.0 => ./q example.com/r v0.1.0 => ./r example.com/s v0.1.0 => ./s example.com/t v0.1.0 => ./t example.com/u v0.1.0 => ./u example.com/w v0.1.0 => ./w + example.com/x v0.1.0 => ./x ) -- main.go -- package main @@ -207,11 +247,11 @@ require ( example.com/c v0.1.0 ) -- a/a.go -- -package x +package a import _ "example.com/b" -- a/a_test.go -- -package x_test +package a_test import _ "example.com/c" -- b/go.mod -- @@ -226,10 +266,20 @@ package b_test module example.com/c go 1.15 + +require example.com/d v0.1.0 -- c/c.go -- package c -- c/c_test.go -- package c_test + +import _ "example.com/d" +-- d/go.mod -- +module example.com/d + +go 1.15 +-- d/d.go -- +package d -- q/go.mod -- module example.com/q @@ -290,7 +340,17 @@ package u_test module example.com/w go 1.15 + +require example.com/x v0.1.0 -- w/w.go -- package w -- w/w_test.go -- package w_test + +import _ "example.com/x" +-- x/go.mod -- +module example.com/x + +go 1.15 +-- x/x.go -- +package x diff --git a/src/cmd/go/testdata/script/mod_lazy_import_allmod.txt b/src/cmd/go/testdata/script/mod_lazy_import_allmod.txt index aade00d602..4ad8cbf8ee 100644 --- a/src/cmd/go/testdata/script/mod_lazy_import_allmod.txt +++ b/src/cmd/go/testdata/script/mod_lazy_import_allmod.txt @@ -1,4 +1,4 @@ -# This file demonstrates dependency resolution when the main module imports a +# This test demonstrates dependency resolution when the main module imports a # new package 
from a previously-test-only dependency. # # When lazy loading is active, the loader will not load dependencies of any @@ -6,6 +6,23 @@ # the main module is changed to import a package from such a module, the # dependencies of that module will need to be reloaded. +# The import graph used in this test looks like: +# +# m ---- a +# \ | +# \ a_test ---- b/x +# \ +# --------------b/y (new) ---- c +# +# Where b/x and b/y are disjoint packages, but both contained in module b. +# +# The module dependency graph initially looks like: +# +# m ---- a.1 ---- b.1 ---- c.1 +# +# This configuration is similar to that used in mod_lazy_new_import, +# but the new import is from what is initially a test-only dependency. + # Control case: in Go 1.14, the original go.mod is tidy, # and the dependency on c is eagerly loaded. diff --git a/src/cmd/go/testdata/script/mod_lazy_import_test_dep.txt b/src/cmd/go/testdata/script/mod_lazy_import_test_dep.txt deleted file mode 100644 index b7e3e6cb08..0000000000 --- a/src/cmd/go/testdata/script/mod_lazy_import_test_dep.txt +++ /dev/null @@ -1,159 +0,0 @@ -# This file demonstrates the go.mod changes needed to ensure reproducibility -# when running 'go test' on a sequence of packages for which each package in the -# sequence is a test-only dependency of the previous package, as a user might do -# if they encounter a test failure while fixing a bug found in one of their -# dependencies. - -cp go.mod go.mod.old -cp lazy.go lazy.go.old -go mod tidy -cmp go.mod go.mod.old - -# Before adding a new import, the go.mod file should -# enumerate modules for all packages already imported. -go list -m all -stdout '^example.com/d v0.1.0' # not v0.2.0 as would be resolved by 'latest' -cp stdout list.old -cmp go.mod go.mod.old - -# Following the chain of dependencies by listing test dependencies -# or running tests should not change the go.mod file. -go list -test -deps example.com/a -stdout '^example.com/a' -stdout '^example.com/b' -! 
stdout '^example.com/c' -[!short] go test -c example.com/a -cmp go.mod go.mod.old - -go list -test -deps example.com/b -stdout '^example.com/b' -stdout '^example.com/c' -! stdout '^example.com/d' -[!short] go test -c example.com/b -cmp go.mod go.mod.old - -go list -test -deps example.com/c -stdout '^example.com/c' -stdout '^example.com/d' -[!short] go test -c example.com/c -cmp go.mod go.mod.old - -# When we add a new import of a package already imported by a test of a test of -# a dependency, and that dependency is already tidy, its transitive dependencies -# should already be present. -cp lazy.go.new lazy.go -go list all -go list -m all -cmp stdout list.old -cmp go.mod go.mod.new # Indirect dependency promoted to direct. - -# TODO(#36460): - -cp lazy.go.old lazy.go -cp go.mod.old go.mod -go mod edit -go=1.16 - -# If we reach d by running successive tests, we should end up with exactly the -# version required by c, with an update to the go.mod file as soon as we load a -# dependency not found in the deepening scan. - -# However, if we skip directly to adding a new import of d, the dependency is -# too far away for a deepening scan to find, which is fine because the package -# whose test imported it wasn't even in "all". It should resolve from the latest -# version of its module. 
- --- go.mod -- -module example.com/lazy - -go 1.14 - -require example.com/a v0.1.0 - -replace ( - example.com/a v0.1.0 => ./a - example.com/b v0.1.0 => ./b - example.com/c v0.1.0 => ./c - example.com/d v0.1.0 => ./d1 - example.com/d v0.2.0 => ./d2 -) --- go.mod.new -- -module example.com/lazy - -go 1.14 - -require ( - example.com/a v0.1.0 - example.com/d v0.1.0 -) - -replace ( - example.com/a v0.1.0 => ./a - example.com/b v0.1.0 => ./b - example.com/c v0.1.0 => ./c - example.com/d v0.1.0 => ./d1 - example.com/d v0.2.0 => ./d2 -) --- lazy.go -- -package lazy - -import ( - _ "example.com/a" -) - -func main() {} --- lazy.go.new -- -package lazy - -import ( - _ "example.com/a" - "example.com/d" -) - -func main() { - println(d.Version) -} --- a/go.mod -- -module example.com/a - -go 1.14 - -require example.com/b v0.1.0 --- a/a.go -- -package a -import _ "example.com/b" --- b/go.mod -- -module example.com/b - -go 1.16 - -require example.com/c v0.1.0 --- b/b.go -- -package b --- b/b_test.go -- -package b -import _ "example.com/c" --- c/go.mod -- -module example.com/c - -go 1.16 - -require example.com/d v0.1.0 --- c/c.go -- -package c --- c/c_test.go -- -package c -import _ "example.com/d" --- d1/go.mod -- -module example.com/d - -go 1.16 --- d1/d.go -- -package d -const Version = "v0.1.0" --- d2/go.mod -- -module example.com/d - -go 1.16 --- d2/d.go -- -package d -const Version = "v0.2.0" diff --git a/src/cmd/go/testdata/script/mod_lazy_new_import.txt b/src/cmd/go/testdata/script/mod_lazy_new_import.txt index 76b915afaa..02935bf236 100644 --- a/src/cmd/go/testdata/script/mod_lazy_new_import.txt +++ b/src/cmd/go/testdata/script/mod_lazy_new_import.txt @@ -1,3 +1,21 @@ +# This test illustrates the use of a deepening scan to resolve transitive +# imports of imports of new packages from within existing dependencies. 
+ +# The package import graph used in this test looks like: +# +# lazy ---- a/x ---- b +# \ +# ---- a/y ---- c +# +# Where a/x and x/y are disjoint packages, but both contained in module a. +# +# The module dependency graph initially looks like: +# +# lazy ---- a.1 ---- b.1 +# \ +# c.1 + + cp go.mod go.mod.old cp lazy.go lazy.go.old go mod tidy @@ -30,7 +48,7 @@ go mod edit -go=1.16 -- go.mod -- module example.com/lazy -go 1.14 +go 1.15 require example.com/a v0.1.0 @@ -56,7 +74,7 @@ import ( -- a/go.mod -- module example.com/a -go 1.14 +go 1.15 require ( example.com/b v0.1.0 @@ -71,19 +89,19 @@ import _ "example.com/c" -- b/go.mod -- module example.com/b -go 1.14 +go 1.15 -- b/b.go -- package b -- c1/go.mod -- module example.com/c -go 1.14 +go 1.15 -- c1/c.go -- package c -- c2/go.mod -- module example.com/c -go 1.14 +go 1.15 -- c2/c.go -- package c This file should not be used, so this syntax error should be ignored. diff --git a/src/cmd/go/testdata/script/mod_lazy_test_all.txt b/src/cmd/go/testdata/script/mod_lazy_test_all.txt deleted file mode 100644 index 4ce9fb167b..0000000000 --- a/src/cmd/go/testdata/script/mod_lazy_test_all.txt +++ /dev/null @@ -1,125 +0,0 @@ -cp go.mod go.mod.old -go mod tidy -cmp go.mod go.mod.old - -# 'go list -m all' includes modules that cover the test dependencies of -# the packages imported by the main module. - -go list -m all -stdout 'example.com/b v0.1.0' -stdout 'example.com/c v0.1.0' -cmp go.mod go.mod.old - -# 'go test' (or equivalent) of any package in 'all' should use its existing -# dependencies without updating the go.mod file. - -go list all # Control case: example.com/b really is in 'all'. -stdout '^example.com/b$' -cmp go.mod go.mod.old # Already tidy, so dependencies shouldn't change. - -go list -test -deps example.com/b -stdout '^example.com/b$' -stdout '^example.com/c$' -! stdout '^example.com/d$' - -[!short] go test -c example.com/b - -cmp go.mod go.mod.old # Should have resolved the above without modifying go.mod. 
- - -# TODO(#36460): - -# 'go list -m all' should include modules that cover the test dependencies of -# the packages imported by the main module, found via a deepening scan. - -# 'go test' of any package in 'all' should use its existing dependencies without -# updating the go.mod file. This requires that we consider _dependencies of_ the -# explicit dependencies of the main module, and that we not record those -# dependencies explicitly after loading them. - - --- go.mod -- -module example.com/lazy - -go 1.14 - -require example.com/a v0.1.0 - -replace ( - example.com/a v0.1.0 => ./a - example.com/b v0.1.0 => ./b1 - example.com/b v0.2.0 => ./b2 - example.com/c v0.1.0 => ./c - example.com/d v0.1.0 => ./d -) --- lazy.go -- -package lazy - -import ( - _ "example.com/a/x" -) --- a/go.mod -- -module example.com/a - -go 1.14 - -require example.com/b v0.1.0 --- a/x/x.go -- -package x --- a/x/x_test.go -- -package x - -import ( - "testing" - - _ "example.com/b" -) - -func TestUsingB(t *testing.T) { - // … -} --- b1/go.mod -- -module example.com/b - -go 1.14 - -require example.com/c v0.1.0 --- b1/b.go -- -package b --- b1/b_test.go -- -package b - -import _ "example.com/c" --- b2/go.mod -- -module example.com/b - -go 1.14 - -require example.com/c v0.1.0 --- b2/b.go -- -package b --- b2/b_test.go -- -package b - -import _ "example.com/c" - -This file should not be used, so this syntax error should be ignored. --- c/go.mod -- -module example.com/c - -go 1.14 - -require example.com/d v0.1.0 --- c/c.go -- -package c --- c/c_test.go -- -package c -import _ "example.com/d" -This file should not be used, so this syntax error should be ignored. --- d/go.mod -- -module example.com/d - -go 1.14 --- d/d.go -- -package d -This file should not be used, so this syntax error should be ignored. 
diff --git a/src/cmd/go/testdata/script/mod_lazy_test_horizon.txt b/src/cmd/go/testdata/script/mod_lazy_test_horizon.txt index 29fc0aaa74..9cdfad79f6 100644 --- a/src/cmd/go/testdata/script/mod_lazy_test_horizon.txt +++ b/src/cmd/go/testdata/script/mod_lazy_test_horizon.txt @@ -1,7 +1,23 @@ # This file demonstrates the effect of lazy loading on the selected # versions of test dependencies. -# Control case: in Go 1.14, the version of c imported by 'go test x' is the +# The package import graph used in this test looks like: +# +# m ---- a +# \ | +# \ a_test ---- b +# \ | +# x b_test +# | \ +# x_test -------------- c +# +# And the module dependency graph looks like: +# +# m -- a.1 -- b.1 -- c.2 +# \ +# x.1 ------------ c.1 + +# Control case: in Go 1.15, the version of c imported by 'go test x' is the # version required by module b, even though b_test is not relevant to the main # module. (The main module imports a, and a_test imports b, but all of the # packages and tests in the main module can be built without b.) @@ -33,7 +49,7 @@ import ( -- go.mod -- module m -go 1.14 +go 1.15 require ( a v0.1.0 diff --git a/src/cmd/go/testdata/script/mod_lazy_test_of_test_dep.txt b/src/cmd/go/testdata/script/mod_lazy_test_of_test_dep.txt index bbb0772303..ca6c55040e 100644 --- a/src/cmd/go/testdata/script/mod_lazy_test_of_test_dep.txt +++ b/src/cmd/go/testdata/script/mod_lazy_test_of_test_dep.txt @@ -1,8 +1,27 @@ +# This file demonstrates the effect of lazy loading on the reproducibility of +# tests (and tests of test dependencies) outside the main module. +# +# It is similar to the cases in mod_all.txt and mod_lazy_test_horizon.txt, but +# focuses on the effect of "go test" on specific packages instead of the "all" +# pattern. 
+ +# The package import graph used in this test looks like: +# +# lazy ---- a +# | +# a_test ---- b +# | +# b_test ---- c +# +# And the non-lazy module dependency graph looks like: +# +# lazy ---- a.1 ---- b.1 ---- c.1 + cp go.mod go.mod.old go mod tidy cmp go.mod go.mod.old -# In Go 1.14 mode, 'go list -m all' includes modules needed by the +# In Go 1.15 mode, 'go list -m all' includes modules needed by the # transitive closure of tests of dependencies of tests of dependencies of …. go list -m all @@ -13,36 +32,44 @@ cmp go.mod go.mod.old # 'go test' (or equivalent) of any such dependency, no matter how remote, does # not update the go.mod file. -go list all -stdout example.com/a/x -stdout example.com/b # Test dependency of example.com/a/x. -stdout example.com/c # Test dependency of example.com/b. - -go list -test -deps all +go list -test -deps example.com/a stdout example.com/b -stdout example.com/c -cmp go.mod go.mod.old +! stdout example.com/c -[!short] go test example.com/a/x +[!short] go test -c example.com/a [!short] cmp go.mod go.mod.old -[!short] go test example.com/b +go list -test -deps example.com/b +stdout example.com/c + +[!short] go test -c example.com/b [!short] cmp go.mod go.mod.old # TODO(#36460): -# After changing to 'go 1.15` uniformly, 'go list -m all' should prune out +# After changing to 'go 1.16` uniformly, 'go list -m all' should prune out # example.com/c, because it is not imported by any package (or test of a package) # transitively imported by the main module. -# example.com/a/x is transitively imported, -# and example.com/b is needed in order to run 'go test example.com/a/x', +# +# example.com/a is imported, +# and example.com/b is needed in order to run 'go test example.com/a', # but example.com/c is not needed because we don't expect the user to need to run # 'go test example.com/b'. 
+# If we skip directly to adding a new import of c, the dependency is too far +# away for a deepening scan to find, which is fine because the package whose +# test imported it wasn't even it "all". It should resolve from the latest +# version of its module. + +# However, if we reach c by running successive tests starting from the main +# module, we should end up with exactly the version require by c, with an update +# to the go.mod file as soon as we test a test dependency that is not itself in +# "all". + -- go.mod -- module example.com/lazy -go 1.14 +go 1.15 require example.com/a v0.1.0 @@ -57,18 +84,18 @@ replace ( package lazy import ( - _ "example.com/a/x" + _ "example.com/a" ) -- a/go.mod -- module example.com/a -go 1.14 +go 1.15 require example.com/b v0.1.0 --- a/x/x.go -- -package x --- a/x/x_test.go -- -package x +-- a/a.go -- +package a +-- a/a_test.go -- +package a import ( "testing" @@ -82,7 +109,7 @@ func TestUsingB(t *testing.T) { -- b1/go.mod -- module example.com/b -go 1.14 +go 1.15 require example.com/c v0.1.0 -- b1/b.go -- @@ -94,7 +121,7 @@ import _ "example.com/c" -- b2/go.mod -- module example.com/b -go 1.14 +go 1.15 require example.com/c v0.1.0 -- b2/b.go -- @@ -106,13 +133,13 @@ This file should not be used, so this syntax error should be ignored. -- c1/go.mod -- module example.com/c -go 1.14 +go 1.15 -- c1/c.go -- package c -- c2/go.mod -- module example.com/c -go 1.14 +go 1.15 -- c2/c.go -- package c This file should not be used, so this syntax error should be ignored. -- cgit v1.2.3-54-g00ecf From 717266b4c2bd178dcbd49f31048f216d8799e697 Mon Sep 17 00:00:00 2001 From: Daniel Cormier Date: Tue, 1 Sep 2020 21:13:01 +0000 Subject: net/smtp: adds support for the SMTPUTF8 extension If the SMTP server supports the SMTPUTF8 extension, the SMTPUTF8 parameter is added to the MAIL FROM command by the (*Client).Mail method. 
Fixes #19860 Change-Id: I3287faf114ee514e5faa815a6bbc1bf04cf60b0f GitHub-Last-Rev: d6338bb802da7537223f1ec6eda960606febefb8 GitHub-Pull-Request: golang/go#40627 Reviewed-on: https://go-review.googlesource.com/c/go/+/247257 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/net/smtp/smtp.go | 6 +- src/net/smtp/smtp_test.go | 213 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 218 insertions(+), 1 deletion(-) diff --git a/src/net/smtp/smtp.go b/src/net/smtp/smtp.go index e4e12ae5ee..1a6864a0f2 100644 --- a/src/net/smtp/smtp.go +++ b/src/net/smtp/smtp.go @@ -241,7 +241,8 @@ func (c *Client) Auth(a Auth) error { // Mail issues a MAIL command to the server using the provided email address. // If the server supports the 8BITMIME extension, Mail adds the BODY=8BITMIME -// parameter. +// parameter. If the server supports the SMTPUTF8 extension, Mail adds the +// SMTPUTF8 parameter. // This initiates a mail transaction and is followed by one or more Rcpt calls. func (c *Client) Mail(from string) error { if err := validateLine(from); err != nil { @@ -255,6 +256,9 @@ func (c *Client) Mail(from string) error { if _, ok := c.ext["8BITMIME"]; ok { cmdStr += " BODY=8BITMIME" } + if _, ok := c.ext["SMTPUTF8"]; ok { + cmdStr += " SMTPUTF8" + } } _, _, err := c.cmd(250, cmdStr, from) return err diff --git a/src/net/smtp/smtp_test.go b/src/net/smtp/smtp_test.go index cfda0790e9..55219372d2 100644 --- a/src/net/smtp/smtp_test.go +++ b/src/net/smtp/smtp_test.go @@ -288,6 +288,219 @@ Goodbye. 
QUIT ` +func TestExtensions(t *testing.T) { + fake := func(server string) (c *Client, bcmdbuf *bufio.Writer, cmdbuf *strings.Builder) { + server = strings.Join(strings.Split(server, "\n"), "\r\n") + + cmdbuf = &strings.Builder{} + bcmdbuf = bufio.NewWriter(cmdbuf) + var fake faker + fake.ReadWriter = bufio.NewReadWriter(bufio.NewReader(strings.NewReader(server)), bcmdbuf) + c = &Client{Text: textproto.NewConn(fake), localName: "localhost"} + + return c, bcmdbuf, cmdbuf + } + + t.Run("helo", func(t *testing.T) { + const ( + basicServer = `250 mx.google.com at your service +250 Sender OK +221 Goodbye +` + + basicClient = `HELO localhost +MAIL FROM: +QUIT +` + ) + + c, bcmdbuf, cmdbuf := fake(basicServer) + + if err := c.helo(); err != nil { + t.Fatalf("HELO failed: %s", err) + } + c.didHello = true + if err := c.Mail("user@gmail.com"); err != nil { + t.Fatalf("MAIL FROM failed: %s", err) + } + if err := c.Quit(); err != nil { + t.Fatalf("QUIT failed: %s", err) + } + + bcmdbuf.Flush() + actualcmds := cmdbuf.String() + client := strings.Join(strings.Split(basicClient, "\n"), "\r\n") + if client != actualcmds { + t.Fatalf("Got:\n%s\nExpected:\n%s", actualcmds, client) + } + }) + + t.Run("ehlo", func(t *testing.T) { + const ( + basicServer = `250-mx.google.com at your service +250 SIZE 35651584 +250 Sender OK +221 Goodbye +` + + basicClient = `EHLO localhost +MAIL FROM: +QUIT +` + ) + + c, bcmdbuf, cmdbuf := fake(basicServer) + + if err := c.Hello("localhost"); err != nil { + t.Fatalf("EHLO failed: %s", err) + } + if ok, _ := c.Extension("8BITMIME"); ok { + t.Fatalf("Shouldn't support 8BITMIME") + } + if ok, _ := c.Extension("SMTPUTF8"); ok { + t.Fatalf("Shouldn't support SMTPUTF8") + } + if err := c.Mail("user@gmail.com"); err != nil { + t.Fatalf("MAIL FROM failed: %s", err) + } + if err := c.Quit(); err != nil { + t.Fatalf("QUIT failed: %s", err) + } + + bcmdbuf.Flush() + actualcmds := cmdbuf.String() + client := strings.Join(strings.Split(basicClient, "\n"), "\r\n") + 
if client != actualcmds { + t.Fatalf("Got:\n%s\nExpected:\n%s", actualcmds, client) + } + }) + + t.Run("ehlo 8bitmime", func(t *testing.T) { + const ( + basicServer = `250-mx.google.com at your service +250-SIZE 35651584 +250 8BITMIME +250 Sender OK +221 Goodbye +` + + basicClient = `EHLO localhost +MAIL FROM: BODY=8BITMIME +QUIT +` + ) + + c, bcmdbuf, cmdbuf := fake(basicServer) + + if err := c.Hello("localhost"); err != nil { + t.Fatalf("EHLO failed: %s", err) + } + if ok, _ := c.Extension("8BITMIME"); !ok { + t.Fatalf("Should support 8BITMIME") + } + if ok, _ := c.Extension("SMTPUTF8"); ok { + t.Fatalf("Shouldn't support SMTPUTF8") + } + if err := c.Mail("user@gmail.com"); err != nil { + t.Fatalf("MAIL FROM failed: %s", err) + } + if err := c.Quit(); err != nil { + t.Fatalf("QUIT failed: %s", err) + } + + bcmdbuf.Flush() + actualcmds := cmdbuf.String() + client := strings.Join(strings.Split(basicClient, "\n"), "\r\n") + if client != actualcmds { + t.Fatalf("Got:\n%s\nExpected:\n%s", actualcmds, client) + } + }) + + t.Run("ehlo smtputf8", func(t *testing.T) { + const ( + basicServer = `250-mx.google.com at your service +250-SIZE 35651584 +250 SMTPUTF8 +250 Sender OK +221 Goodbye +` + + basicClient = `EHLO localhost +MAIL FROM: SMTPUTF8 +QUIT +` + ) + + c, bcmdbuf, cmdbuf := fake(basicServer) + + if err := c.Hello("localhost"); err != nil { + t.Fatalf("EHLO failed: %s", err) + } + if ok, _ := c.Extension("8BITMIME"); ok { + t.Fatalf("Shouldn't support 8BITMIME") + } + if ok, _ := c.Extension("SMTPUTF8"); !ok { + t.Fatalf("Should support SMTPUTF8") + } + if err := c.Mail("user+📧@gmail.com"); err != nil { + t.Fatalf("MAIL FROM failed: %s", err) + } + if err := c.Quit(); err != nil { + t.Fatalf("QUIT failed: %s", err) + } + + bcmdbuf.Flush() + actualcmds := cmdbuf.String() + client := strings.Join(strings.Split(basicClient, "\n"), "\r\n") + if client != actualcmds { + t.Fatalf("Got:\n%s\nExpected:\n%s", actualcmds, client) + } + }) + + t.Run("ehlo 8bitmime smtputf8", 
func(t *testing.T) { + const ( + basicServer = `250-mx.google.com at your service +250-SIZE 35651584 +250-8BITMIME +250 SMTPUTF8 +250 Sender OK +221 Goodbye + ` + + basicClient = `EHLO localhost +MAIL FROM: BODY=8BITMIME SMTPUTF8 +QUIT +` + ) + + c, bcmdbuf, cmdbuf := fake(basicServer) + + if err := c.Hello("localhost"); err != nil { + t.Fatalf("EHLO failed: %s", err) + } + c.didHello = true + if ok, _ := c.Extension("8BITMIME"); !ok { + t.Fatalf("Should support 8BITMIME") + } + if ok, _ := c.Extension("SMTPUTF8"); !ok { + t.Fatalf("Should support SMTPUTF8") + } + if err := c.Mail("user+📧@gmail.com"); err != nil { + t.Fatalf("MAIL FROM failed: %s", err) + } + if err := c.Quit(); err != nil { + t.Fatalf("QUIT failed: %s", err) + } + + bcmdbuf.Flush() + actualcmds := cmdbuf.String() + client := strings.Join(strings.Split(basicClient, "\n"), "\r\n") + if client != actualcmds { + t.Fatalf("Got:\n%s\nExpected:\n%s", actualcmds, client) + } + }) +} + func TestNewClient(t *testing.T) { server := strings.Join(strings.Split(newClientServer, "\n"), "\r\n") client := strings.Join(strings.Split(newClientClient, "\n"), "\r\n") -- cgit v1.2.3-54-g00ecf From 786120b55db5c3567d8c353fc399e9339c7406dd Mon Sep 17 00:00:00 2001 From: KJ Tsanaktsidis Date: Tue, 1 Sep 2020 10:00:23 +1000 Subject: cmd/cgo: document #include <> search path behaviour cgo effectively prepends -I${SRCDIR} to the header include path of all preambles it processes, so when an #include <> matches a header file both in the source directory and also another include directory, the local copy will be used in preference. This behaviour is surprising but unfortunately also longstanding and relied upon by packages in the wild, so the best we can do is to document it. 
Fixes #41059 Change-Id: If6d2818294b2bd94ea0fe5fd6ce77e54b3e167a6 Reviewed-on: https://go-review.googlesource.com/c/go/+/251758 Reviewed-by: Ian Lance Taylor --- misc/cgo/test/test.go | 1 + src/cmd/cgo/doc.go | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/misc/cgo/test/test.go b/misc/cgo/test/test.go index 35bc3a1447..05fa52b381 100644 --- a/misc/cgo/test/test.go +++ b/misc/cgo/test/test.go @@ -319,6 +319,7 @@ typedef enum { // issue 4339 // We've historically permitted #include <>, so test it here. Issue 29333. +// Also see issue 41059. #include <issue4339.h> // issue 4417 diff --git a/src/cmd/cgo/doc.go b/src/cmd/cgo/doc.go index ca18c45d9d..b3f371b08c 100644 --- a/src/cmd/cgo/doc.go +++ b/src/cmd/cgo/doc.go @@ -112,6 +112,13 @@ The default C and C++ compilers may be changed by the CC and CXX environment variables, respectively; those environment variables may include command line options. +The cgo tool will always invoke the C compiler with the source file's +directory in the include path; i.e. -I${SRCDIR} is always implied. This +means that if a header file foo/bar.h exists both in the source +directory and also in the system include directory (or some other place +specified by a -I flag), then "#include <foo/bar.h>" will always find the +local version in preference to any other version. + The cgo tool is enabled by default for native builds on systems where it is expected to work. It is disabled by default when cross-compiling. You can control this by setting the CGO_ENABLED -- cgit v1.2.3-54-g00ecf From b246c0e12fd41caf45a0f81eaa4f8fe249fbbc01 Mon Sep 17 00:00:00 2001 From: chainhelen Date: Fri, 21 Aug 2020 16:44:52 +0000 Subject: runtime: fix panic if newstack at runtime.acquireLockRank Process may crash because acquireLockRank and releaseLockRank may be called in nosplit context. With optimizations and inlining disabled, these functions won't get inlined or have their morestack calls eliminated. 
Nosplit is not strictly required for lockWithRank, unlockWithRank and lockWithRankMayAcquire, just keep consistency with lockrank_on.go here. Fixes #40843 Change-Id: I5824119f98a1da66d767cdb9a60dffe768f13c81 GitHub-Last-Rev: 38fd3ccf6ea03b670c7561c060ccdbccc42fff40 GitHub-Pull-Request: golang/go#40844 Reviewed-on: https://go-review.googlesource.com/c/go/+/248878 Reviewed-by: Dan Scales Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot --- src/runtime/lockrank_off.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/runtime/lockrank_off.go b/src/runtime/lockrank_off.go index 425ca8dd93..32378a9627 100644 --- a/src/runtime/lockrank_off.go +++ b/src/runtime/lockrank_off.go @@ -18,19 +18,29 @@ func getLockRank(l *mutex) lockRank { return 0 } +// The following functions may be called in nosplit context. +// Nosplit is not strictly required for lockWithRank, unlockWithRank +// and lockWithRankMayAcquire, but these nosplit annotations must +// be kept consistent with the equivalent functions in lockrank_on.go. + +//go:nosplit func lockWithRank(l *mutex, rank lockRank) { lock2(l) } +//go:nosplit func acquireLockRank(rank lockRank) { } +//go:nosplit func unlockWithRank(l *mutex) { unlock2(l) } +//go:nosplit func releaseLockRank(rank lockRank) { } +//go:nosplit func lockWithRankMayAcquire(l *mutex, rank lockRank) { } -- cgit v1.2.3-54-g00ecf From 7432bee7b372efbbd09b16c4e3176b69fbb6878a Mon Sep 17 00:00:00 2001 From: Tzu-Chiao Yeh Date: Wed, 2 Sep 2020 10:53:39 +0800 Subject: net/http/fcgi: fix race in child.serve connection read Guards the connection read with a mutex, because typeStdin asynchronously and concurrently writes to the underlying conn. 
Fixes #41167 Change-Id: Ia2610f4fde0bd4b108c54164095ea293980b0301 Reviewed-on: https://go-review.googlesource.com/c/go/+/252417 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Emmanuel Odeke --- src/net/http/fcgi/child.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/net/http/fcgi/child.go b/src/net/http/fcgi/child.go index 30a6b2ce2d..0e91042543 100644 --- a/src/net/http/fcgi/child.go +++ b/src/net/http/fcgi/child.go @@ -155,9 +155,12 @@ func (c *child) serve() { defer c.cleanUp() var rec record for { + c.conn.mutex.Lock() if err := rec.read(c.conn.rwc); err != nil { + c.conn.mutex.Unlock() return } + c.conn.mutex.Unlock() if err := c.handleRecord(&rec); err != nil { return } -- cgit v1.2.3-54-g00ecf From e6583dc95375c4e266bffab6f8888e8e557b6355 Mon Sep 17 00:00:00 2001 From: Heisenberg Date: Mon, 3 Aug 2020 15:33:47 +0800 Subject: runtime: add file copyright header declaration Some files have no copyright notice. The copyright time is the earliest modification record of the file. Change-Id: I5698bae16b6b73543e074415877a03348f792951 Reviewed-on: https://go-review.googlesource.com/c/go/+/246378 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Emmanuel Odeke --- src/runtime/defs_linux_arm.go | 4 ++++ src/runtime/defs_linux_mips64x.go | 4 ++++ src/runtime/defs_openbsd_arm64.go | 4 ++++ src/runtime/defs_plan9_386.go | 4 ++++ src/runtime/defs_plan9_amd64.go | 4 ++++ src/runtime/rt0_linux_ppc64.s | 4 ++++ src/runtime/rt0_linux_ppc64le.s | 4 ++++ src/runtime/trace/annotation.go | 4 ++++ src/runtime/trace/annotation_test.go | 4 ++++ 9 files changed, 36 insertions(+) diff --git a/src/runtime/defs_linux_arm.go b/src/runtime/defs_linux_arm.go index ea29fd9d98..5bc0916f8b 100644 --- a/src/runtime/defs_linux_arm.go +++ b/src/runtime/defs_linux_arm.go @@ -1,3 +1,7 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ package runtime // Constants diff --git a/src/runtime/defs_linux_mips64x.go b/src/runtime/defs_linux_mips64x.go index 0fb53d5737..1fb423b198 100644 --- a/src/runtime/defs_linux_mips64x.go +++ b/src/runtime/defs_linux_mips64x.go @@ -1,3 +1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + // +build mips64 mips64le // +build linux diff --git a/src/runtime/defs_openbsd_arm64.go b/src/runtime/defs_openbsd_arm64.go index 8b8d5cddf2..628f4bc5a5 100644 --- a/src/runtime/defs_openbsd_arm64.go +++ b/src/runtime/defs_openbsd_arm64.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package runtime import "unsafe" diff --git a/src/runtime/defs_plan9_386.go b/src/runtime/defs_plan9_386.go index 220169d280..49129b3c3f 100644 --- a/src/runtime/defs_plan9_386.go +++ b/src/runtime/defs_plan9_386.go @@ -1,3 +1,7 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package runtime const _PAGESIZE = 0x1000 diff --git a/src/runtime/defs_plan9_amd64.go b/src/runtime/defs_plan9_amd64.go index 29a2643c3a..0099563034 100644 --- a/src/runtime/defs_plan9_amd64.go +++ b/src/runtime/defs_plan9_amd64.go @@ -1,3 +1,7 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package runtime const _PAGESIZE = 0x1000 diff --git a/src/runtime/rt0_linux_ppc64.s b/src/runtime/rt0_linux_ppc64.s index 1265b15853..897d61052a 100644 --- a/src/runtime/rt0_linux_ppc64.s +++ b/src/runtime/rt0_linux_ppc64.s @@ -1,3 +1,7 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + #include "textflag.h" // actually a function descriptor for _main<>(SB) diff --git a/src/runtime/rt0_linux_ppc64le.s b/src/runtime/rt0_linux_ppc64le.s index 54ea9d58f7..4f7c6e6c99 100644 --- a/src/runtime/rt0_linux_ppc64le.s +++ b/src/runtime/rt0_linux_ppc64le.s @@ -1,3 +1,7 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + #include "go_asm.h" #include "textflag.h" diff --git a/src/runtime/trace/annotation.go b/src/runtime/trace/annotation.go index 82cb232dba..6e18bfb755 100644 --- a/src/runtime/trace/annotation.go +++ b/src/runtime/trace/annotation.go @@ -1,3 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package trace import ( diff --git a/src/runtime/trace/annotation_test.go b/src/runtime/trace/annotation_test.go index 71abbfcfa6..31fccef206 100644 --- a/src/runtime/trace/annotation_test.go +++ b/src/runtime/trace/annotation_test.go @@ -1,3 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package trace_test import ( -- cgit v1.2.3-54-g00ecf From 51c0bdc6d15dcd7f753c25896039ab41ac787ebb Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Tue, 18 Aug 2020 22:47:12 -0400 Subject: testing: flush test summaries to stdout atomically when streaming output MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While debugging #40771, I realized that the chatty printer should only ever print to a single io.Writer (normally os.Stdout). 
The other Writer implementations in the chain write to local buffers, but if we wrote a test's output to a local buffer, then we did *not* write it to stdout and we should not store it as the most recently logged test. Because the chatty printer should only ever print to one place, it shouldn't receive an io.Writer as an argument — rather, it shouldn't be used at all for destinations other than the main output stream. On the other hand, when we flush the output buffer to stdout in the top-level flushToParent call, it is important that we not allow some other test's output to intrude between the test summary header and the remainder of the test's output. cmd/test2json doesn't know how to parse such an intrusion, and it's confusing to humans too. No test because I couldn't reproduce the user-reported error without modifying the testing package. (This behavior seems to be very sensitive to output size and/or goroutine scheduling.) Fixes #40771 Updates #38458 Change-Id: Ic19bf1d535672b096ba1c8583a3b74aab6d6d766 Reviewed-on: https://go-review.googlesource.com/c/go/+/249026 Run-TryBot: Bryan C. 
Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod Reviewed-by: Ian Lance Taylor --- src/testing/benchmark.go | 18 ++++--- src/testing/sub_test.go | 19 +++++-- src/testing/testing.go | 138 ++++++++++++++++++++++------------------------- 3 files changed, 88 insertions(+), 87 deletions(-) diff --git a/src/testing/benchmark.go b/src/testing/benchmark.go index 52766005bf..e9687bf26d 100644 --- a/src/testing/benchmark.go +++ b/src/testing/benchmark.go @@ -242,7 +242,7 @@ func (b *B) run1() bool { if b.skipped { tag = "SKIP" } - if b.chatty && (len(b.output) > 0 || b.finished) { + if b.chatty != nil && (len(b.output) > 0 || b.finished) { b.trimOutput() fmt.Fprintf(b.w, "--- %s: %s\n%s", tag, b.name, b.output) } @@ -523,10 +523,9 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e } main := &B{ common: common{ - name: "Main", - w: os.Stdout, - chatty: *chatty, - bench: true, + name: "Main", + w: os.Stdout, + bench: true, }, importPath: importPath, benchFunc: func(b *B) { @@ -537,6 +536,9 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e benchTime: benchTime, context: ctx, } + if Verbose() { + main.chatty = newChattyPrinter(main.w) + } main.runN(1) return !main.failed } @@ -549,7 +551,7 @@ func (ctx *benchContext) processBench(b *B) { benchName := benchmarkName(b.name, procs) // If it's chatty, we've already printed this information. - if !b.chatty { + if b.chatty == nil { fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName) } // Recompute the running time for all but the first iteration. 
@@ -576,7 +578,7 @@ func (ctx *benchContext) processBench(b *B) { continue } results := r.String() - if b.chatty { + if b.chatty != nil { fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName) } if *benchmarkMemory || b.showAllocResult { @@ -639,7 +641,7 @@ func (b *B) Run(name string, f func(b *B)) bool { atomic.StoreInt32(&sub.hasSub, 1) } - if b.chatty { + if b.chatty != nil { labelsOnce.Do(func() { fmt.Printf("goos: %s\n", runtime.GOOS) fmt.Printf("goarch: %s\n", runtime.GOARCH) diff --git a/src/testing/sub_test.go b/src/testing/sub_test.go index 51fc0ccc39..5b226f85ad 100644 --- a/src/testing/sub_test.go +++ b/src/testing/sub_test.go @@ -483,10 +483,12 @@ func TestTRun(t *T) { signal: make(chan bool), name: "Test", w: buf, - chatty: tc.chatty, }, context: ctx, } + if tc.chatty { + root.chatty = newChattyPrinter(root.w) + } ok := root.Run(tc.desc, tc.f) ctx.release() @@ -665,11 +667,13 @@ func TestBRun(t *T) { signal: make(chan bool), name: "root", w: buf, - chatty: tc.chatty, }, benchFunc: func(b *B) { ok = b.Run("test", tc.f) }, // Use Run to catch failure. 
benchTime: benchTimeFlag{d: 1 * time.Microsecond}, } + if tc.chatty { + root.chatty = newChattyPrinter(root.w) + } root.runN(1) if ok != !tc.failed { t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, !tc.failed) @@ -741,9 +745,13 @@ func TestParallelSub(t *T) { } } -type funcWriter func([]byte) (int, error) +type funcWriter struct { + write func([]byte) (int, error) +} -func (fw funcWriter) Write(b []byte) (int, error) { return fw(b) } +func (fw *funcWriter) Write(b []byte) (int, error) { + return fw.write(b) +} func TestRacyOutput(t *T) { var runs int32 // The number of running Writes @@ -761,9 +769,10 @@ func TestRacyOutput(t *T) { var wg sync.WaitGroup root := &T{ - common: common{w: funcWriter(raceDetector), chatty: true}, + common: common{w: &funcWriter{raceDetector}}, context: newTestContext(1, newMatcher(regexp.MatchString, "", "")), } + root.chatty = newChattyPrinter(root.w) root.Run("", func(t *T) { for i := 0; i < 100; i++ { wg.Add(1) diff --git a/src/testing/testing.go b/src/testing/testing.go index f4f0060523..a64206f349 100644 --- a/src/testing/testing.go +++ b/src/testing/testing.go @@ -327,7 +327,6 @@ var ( cpuListStr *string parallel *int testlog *string - printer *testPrinter haveExamples bool // are there examples? @@ -337,55 +336,45 @@ var ( numFailed uint32 // number of test failures ) -type testPrinter struct { - chatty bool - +type chattyPrinter struct { + w io.Writer lastNameMu sync.Mutex // guards lastName lastName string // last printed test name in chatty mode } -func newTestPrinter(chatty bool) *testPrinter { - return &testPrinter{ - chatty: chatty, - } +func newChattyPrinter(w io.Writer) *chattyPrinter { + return &chattyPrinter{w: w} } -func (p *testPrinter) Print(testName, out string) { - p.Fprint(os.Stdout, testName, out) +// Updatef prints a message about the status of the named test to w. +// +// The formatted message must include the test name itself. 
+func (p *chattyPrinter) Updatef(testName, format string, args ...interface{}) { + p.lastNameMu.Lock() + defer p.lastNameMu.Unlock() + + // Since the message already implies an association with a specific new test, + // we don't need to check what the old test name was or log an extra CONT line + // for it. (We're updating it anyway, and the current message already includes + // the test name.) + p.lastName = testName + fmt.Fprintf(p.w, format, args...) } -func (p *testPrinter) Fprint(w io.Writer, testName, out string) { +// Printf prints a message, generated by the named test, that does not +// necessarily mention that tests's name itself. +func (p *chattyPrinter) Printf(testName, format string, args ...interface{}) { p.lastNameMu.Lock() defer p.lastNameMu.Unlock() - if !p.chatty || - strings.HasPrefix(out, "--- PASS: ") || - strings.HasPrefix(out, "--- FAIL: ") || - strings.HasPrefix(out, "--- SKIP: ") || - strings.HasPrefix(out, "=== RUN ") || - strings.HasPrefix(out, "=== CONT ") || - strings.HasPrefix(out, "=== PAUSE ") { - // If we're buffering test output (!p.chatty), we don't really care which - // test is emitting which line so long as they are serialized. - // - // If the message already implies an association with a specific new test, - // we don't need to check what the old test name was or log an extra CONT - // line for it. (We're updating it anyway, and the current message already - // includes the test name.) - p.lastName = testName - fmt.Fprint(w, out) - return - } - if p.lastName == "" { p.lastName = testName } else if p.lastName != testName { - // Always printed as-is, with 0 decoration or indentation. So, we skip - // printing to w. - fmt.Printf("=== CONT %s\n", testName) + fmt.Fprintf(p.w, "=== CONT %s\n", testName) p.lastName = testName } - fmt.Fprint(w, out) + + fmt.Fprintf(p.w, format, args...) 
} // The maximum number of stack frames to go through when skipping helper functions for @@ -407,12 +396,12 @@ type common struct { cleanupName string // Name of the cleanup function. cleanupPc []uintptr // The stack trace at the point where Cleanup was called. - chatty bool // A copy of the chatty flag. - bench bool // Whether the current test is a benchmark. - finished bool // Test function has completed. - hasSub int32 // Written atomically. - raceErrors int // Number of races detected during test. - runner string // Function name of tRunner running the test. + chatty *chattyPrinter // A copy of chattyPrinter, if the chatty flag is set. + bench bool // Whether the current test is a benchmark. + finished bool // Test function has completed. + hasSub int32 // Written atomically. + raceErrors int // Number of races detected during test. + runner string // Function name of tRunner running the test. parent *common level int // Nesting depth of test or benchmark. @@ -574,12 +563,31 @@ func (c *common) flushToParent(testName, format string, args ...interface{}) { p.mu.Lock() defer p.mu.Unlock() - printer.Fprint(p.w, testName, fmt.Sprintf(format, args...)) - c.mu.Lock() defer c.mu.Unlock() - io.Copy(p.w, bytes.NewReader(c.output)) - c.output = c.output[:0] + + if len(c.output) > 0 { + format += "%s" + args = append(args[:len(args):len(args)], c.output) + c.output = c.output[:0] // but why? + } + + if c.chatty != nil && p.w == c.chatty.w { + // We're flushing to the actual output, so track that this output is + // associated with a specific test (and, specifically, that the next output + // is *not* associated with that test). + // + // Moreover, if c.output is non-empty it is important that this write be + // atomic with respect to the output of other tests, so that we don't end up + // with confusing '=== CONT' lines in the middle of our '--- PASS' block. + // Neither humans nor cmd/test2json can parse those easily. + // (See https://golang.org/issue/40771.) 
+ c.chatty.Updatef(testName, format, args...) + } else { + // We're flushing to the output buffer of the parent test, which will + // itself follow a test-name header when it is finally flushed to stdout. + fmt.Fprintf(p.w, format, args...) + } } type indenter struct { @@ -748,13 +756,13 @@ func (c *common) logDepth(s string, depth int) { } panic("Log in goroutine after " + c.name + " has completed") } else { - if c.chatty { + if c.chatty != nil { if c.bench { // Benchmarks don't print === CONT, so we should skip the test // printer and just print straight to stdout. fmt.Print(c.decorate(s, depth+1)) } else { - printer.Print(c.name, c.decorate(s, depth+1)) + c.chatty.Printf(c.name, "%s", c.decorate(s, depth+1)) } return @@ -1003,34 +1011,22 @@ func (t *T) Parallel() { t.parent.sub = append(t.parent.sub, t) t.raceErrors += race.Errors() - if t.chatty { - // Print directly to root's io.Writer so there is no delay. - root := t.parent - for ; root.parent != nil; root = root.parent { - } - root.mu.Lock() + if t.chatty != nil { // Unfortunately, even though PAUSE indicates that the named test is *no // longer* running, cmd/test2json interprets it as changing the active test // for the purpose of log parsing. We could fix cmd/test2json, but that // won't fix existing deployments of third-party tools that already shell // out to older builds of cmd/test2json — so merely fixing cmd/test2json // isn't enough for now. - printer.Fprint(root.w, t.name, fmt.Sprintf("=== PAUSE %s\n", t.name)) - root.mu.Unlock() + t.chatty.Updatef(t.name, "=== PAUSE %s\n", t.name) } t.signal <- true // Release calling test. <-t.parent.barrier // Wait for the parent test to complete. t.context.waitParallel() - if t.chatty { - // Print directly to root's io.Writer so there is no delay. 
- root := t.parent - for ; root.parent != nil; root = root.parent { - } - root.mu.Lock() - printer.Fprint(root.w, t.name, fmt.Sprintf("=== CONT %s\n", t.name)) - root.mu.Unlock() + if t.chatty != nil { + t.chatty.Updatef(t.name, "=== CONT %s\n", t.name) } t.start = time.Now() @@ -1181,14 +1177,8 @@ func (t *T) Run(name string, f func(t *T)) bool { } t.w = indenter{&t.common} - if t.chatty { - // Print directly to root's io.Writer so there is no delay. - root := t.parent - for ; root.parent != nil; root = root.parent { - } - root.mu.Lock() - printer.Fprint(root.w, t.name, fmt.Sprintf("=== RUN %s\n", t.name)) - root.mu.Unlock() + if t.chatty != nil { + t.chatty.Updatef(t.name, "=== RUN %s\n", t.name) } // Instead of reducing the running count of this test before calling the // tRunner and increasing it afterwards, we rely on tRunner keeping the @@ -1355,8 +1345,6 @@ func (m *M) Run() (code int) { flag.Parse() } - printer = newTestPrinter(Verbose()) - if *parallel < 1 { fmt.Fprintln(os.Stderr, "testing: -parallel can only be given a positive integer") flag.Usage() @@ -1401,7 +1389,7 @@ func (t *T) report() { format := "--- %s: %s (%s)\n" if t.Failed() { t.flushToParent(t.name, format, "FAIL", t.name, dstr) - } else if t.chatty { + } else if t.chatty != nil { if t.Skipped() { t.flushToParent(t.name, format, "SKIP", t.name, dstr) } else { @@ -1462,10 +1450,12 @@ func runTests(matchString func(pat, str string) (bool, error), tests []InternalT signal: make(chan bool), barrier: make(chan bool), w: os.Stdout, - chatty: *chatty, }, context: ctx, } + if Verbose() { + t.chatty = newChattyPrinter(t.w) + } tRunner(t, func(t *T) { for _, test := range tests { t.Run(test.Name, test.F) -- cgit v1.2.3-54-g00ecf From be9ed03f1aa5f348aa836c4ffe1904d8e37a629a Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 2 Sep 2020 22:10:15 +0700 Subject: cmd/compile/internal/gc: remove unparen CL 197120 removed the last use of it. 
Change-Id: I5fe4f57a47acc712208d831e72cd79205a534c28 Reviewed-on: https://go-review.googlesource.com/c/go/+/252697 Reviewed-by: Matthew Dempsky Run-TryBot: Cuong Manh Le TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/noder.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 590c1a16de..5dce533e4b 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -1641,10 +1641,3 @@ func mkname(sym *types.Sym) *Node { } return n } - -func unparen(x *Node) *Node { - for x.Op == OPAREN { - x = x.Left - } - return x -} -- cgit v1.2.3-54-g00ecf From ef20f76b8bc4e082d5f81fd818890d707751475b Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Wed, 2 Sep 2020 01:08:02 -0700 Subject: net/http: reject negative suffix-length Range:bytes=--N with 416 status code Fixes the file server to reject requests of the form: "Range": "bytes=--N" where "-N" is a negative suffix-length as designated by the grammar in RFC 7233 Section 2.1, "Byte-Ranges", which specifies that suffix-length MUST be of the form 1*DIGIT aka a non-negative digit. Thus requests such as: "Range": "bytes=--2" will be rejected with a "416 Range Not Satisfiable" response. Fixes #40940 Change-Id: I3e89f8326c14af30d8bdb126998a50e02ba002d9 Reviewed-on: https://go-review.googlesource.com/c/go/+/252497 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. Mills --- doc/go1.16.html | 6 +++++ src/net/http/fs.go | 10 +++++++-- src/net/http/fs_test.go | 58 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 2 deletions(-) diff --git a/doc/go1.16.html b/doc/go1.16.html index 7738cbdada..8dd806e9f2 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -167,3 +167,9 @@ Do not send CLs removing the interior tags from such phrases. 
handler serves a 404 instead of its previous behavior of invoking the underlying handler with a mismatched Path/RawPath pair.

+ +

+ The net/http package now rejects HTTP range requests + of the form "Range": "bytes=--N" where "-N" is a negative suffix length, for + example "Range": "bytes=--2". It now replies with a 416 "Range Not Satisfiable" response. +

diff --git a/src/net/http/fs.go b/src/net/http/fs.go index 922706ada1..d718fffba0 100644 --- a/src/net/http/fs.go +++ b/src/net/http/fs.go @@ -771,9 +771,15 @@ func parseRange(s string, size int64) ([]httpRange, error) { var r httpRange if start == "" { // If no start is specified, end specifies the - // range start relative to the end of the file. + // range start relative to the end of the file, + // and we are dealing with <suffix-length> + // which has to be a non-negative integer as per + // RFC 7233 Section 2.1 "Byte-Ranges". + if end == "" || end[0] == '-' { + return nil, errors.New("invalid range") + } i, err := strconv.ParseInt(end, 10, 64) - if err != nil { + if i < 0 || err != nil { return nil, errors.New("invalid range") } if i > size { diff --git a/src/net/http/fs_test.go b/src/net/http/fs_test.go index 245d9ce65c..4ac73b728f 100644 --- a/src/net/http/fs_test.go +++ b/src/net/http/fs_test.go @@ -1316,3 +1316,61 @@ func Test_scanETag(t *testing.T) { } } } + +// Issue 40940: Ensure that we only accept non-negative suffix-lengths +// in "Range": "bytes=-N", and should reject "bytes=--2". 
+func TestServeFileRejectsInvalidSuffixLengths_h1(t *testing.T) { + testServeFileRejectsInvalidSuffixLengths(t, h1Mode) +} +func TestServeFileRejectsInvalidSuffixLengths_h2(t *testing.T) { + testServeFileRejectsInvalidSuffixLengths(t, h2Mode) +} + +func testServeFileRejectsInvalidSuffixLengths(t *testing.T, h2 bool) { + defer afterTest(t) + cst := httptest.NewUnstartedServer(FileServer(Dir("testdata"))) + cst.EnableHTTP2 = h2 + cst.StartTLS() + defer cst.Close() + + tests := []struct { + r string + wantCode int + wantBody string + }{ + {"bytes=--6", 416, "invalid range\n"}, + {"bytes=--0", 416, "invalid range\n"}, + {"bytes=---0", 416, "invalid range\n"}, + {"bytes=-6", 206, "hello\n"}, + {"bytes=6-", 206, "html says hello\n"}, + {"bytes=-6-", 416, "invalid range\n"}, + {"bytes=-0", 206, ""}, + {"bytes=", 200, "index.html says hello\n"}, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.r, func(t *testing.T) { + req, err := NewRequest("GET", cst.URL+"/index.html", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("Range", tt.r) + res, err := cst.Client().Do(req) + if err != nil { + t.Fatal(err) + } + if g, w := res.StatusCode, tt.wantCode; g != w { + t.Errorf("StatusCode mismatch: got %d want %d", g, w) + } + slurp, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + t.Fatal(err) + } + if g, w := string(slurp), tt.wantBody; g != w { + t.Fatalf("Content mismatch:\nGot: %q\nWant: %q", g, w) + } + }) + } +} -- cgit v1.2.3-54-g00ecf From 0e48c674f5f8e906c03d1ea0055eb74959d149cc Mon Sep 17 00:00:00 2001 From: Dan Kortschak Date: Thu, 3 Sep 2020 08:27:34 +0930 Subject: cmd/go: add -Wl,-Bsymbolic-functions to cgo flags whitelist Closes #41199 Change-Id: Iab69358e8c39e6d2b2797c7ce750df63aa7e96b0 Reviewed-on: https://go-review.googlesource.com/c/go/+/252698 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/cmd/go/internal/work/security.go | 1 + 1 file changed, 1 insertion(+) diff --git 
a/src/cmd/go/internal/work/security.go b/src/cmd/go/internal/work/security.go index 3ee68ac1b4..d2a2697f0f 100644 --- a/src/cmd/go/internal/work/security.go +++ b/src/cmd/go/internal/work/security.go @@ -177,6 +177,7 @@ var validLinkerFlags = []*lazyregexp.Regexp{ re(`-Wl,-Bdynamic`), re(`-Wl,-berok`), re(`-Wl,-Bstatic`), + re(`-Wl,-Bsymbolic-functions`), re(`-WL,-O([^@,\-][^,]*)?`), re(`-Wl,-d[ny]`), re(`-Wl,--disable-new-dtags`), -- cgit v1.2.3-54-g00ecf From ace37d35f18675ac935dc2c6b534dda32ebe84e5 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 2 Sep 2020 12:45:07 +0700 Subject: cmd/compile: do not push functions literal node to Func.Dcl They are skipped while processing Func.Dcl anyway. This CL does not pass toolstash-check, because it reduces the length of Func.Dcl length, while that length is used to generate autotmp variables name. Change-Id: I408183e62ce6c34e5f04c89814ebb9570957e37b Reviewed-on: https://go-review.googlesource.com/c/go/+/252418 Run-TryBot: Cuong Manh Le TryBot-Result: Gobot Gobot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/dcl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 4f6fddd089..6dc6f4db70 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -90,7 +90,7 @@ func declare(n *Node, ctxt Class) { lineno = n.Pos Fatalf("automatic outside function") } - if Curfn != nil { + if Curfn != nil && ctxt != PFUNC { Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) } if n.Op == OTYPE { -- cgit v1.2.3-54-g00ecf From a4171d85d7328ef1ff317d0838fef3b6e623bbc3 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 19 Aug 2020 16:05:02 +0700 Subject: cmd/compile: do not declare func nodes The primary responsibility of declare() to associate a symbol (Sym) with a declaration (Node), so "oldname" will work. Function literals are anonymous, so their symbols does not need to be declared. 
Passes toolstash-check. Change-Id: I739b1054e3953e85fbd74a99148b9cfd7e5a57eb Reviewed-on: https://go-review.googlesource.com/c/go/+/249078 Run-TryBot: Cuong Manh Le TryBot-Result: Gobot Gobot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/alg.go | 3 +-- src/cmd/compile/internal/gc/closure.go | 14 +------------- src/cmd/compile/internal/gc/dcl.go | 14 +++++++++----- src/cmd/compile/internal/gc/init.go | 1 - src/cmd/compile/internal/gc/subr.go | 4 +--- src/cmd/compile/internal/gc/walk.go | 3 +-- 6 files changed, 13 insertions(+), 26 deletions(-) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 2b63700569..c9d71ea00b 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -429,8 +429,7 @@ func hashfor(t *types.Type) *Node { } n := newname(sym) - n.SetClass(PFUNC) - n.Sym.SetFunc(true) + setNodeNameFunc(n) n.Type = functype(nil, []*Node{ anonfield(types.NewPtr(t)), anonfield(types.Types[TUINTPTR]), diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 23e48939b4..250be38e5b 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -107,18 +107,7 @@ func typecheckclosure(clo *Node, top int) { } xfunc.Func.Nname.Sym = closurename(Curfn) - disableExport(xfunc.Func.Nname.Sym) - if xfunc.Func.Nname.Sym.Def != nil { - // The only case we can reach here is when the outer function was redeclared. - // In that case, don't bother to redeclare the closure. Otherwise, we will get - // a spurious error message, see #17758. While we are here, double check that - // we already reported other error. - if nsavederrors+nerrors == 0 { - Fatalf("unexpected symbol collision %v", xfunc.Func.Nname.Sym) - } - } else { - declare(xfunc.Func.Nname, PFUNC) - } + setNodeNameFunc(xfunc.Func.Nname) xfunc = typecheck(xfunc, ctxStmt) // Type check the body now, but only if we're inside a function. 
@@ -473,7 +462,6 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node { tfn.List.Set(structargs(t0.Params(), true)) tfn.Rlist.Set(structargs(t0.Results(), false)) - disableExport(sym) xfunc := dclfunc(sym, tfn) xfunc.Func.SetDupok(true) xfunc.Func.SetNeedctxt(true) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 6dc6f4db70..69eb13f607 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -985,10 +985,14 @@ func makefuncsym(s *types.Sym) { } } -// disableExport prevents sym from being included in package export -// data. To be effectual, it must be called before declare. -func disableExport(sym *types.Sym) { - sym.SetOnExportList(true) +// setNodeNameFunc marks a node as a function. +func setNodeNameFunc(n *Node) { + if n.Op != ONAME || n.Class() != Pxxx { + Fatalf("expected ONAME/Pxxx node, got %v", n) + } + + n.SetClass(PFUNC) + n.Sym.SetFunc(true) } func dclfunc(sym *types.Sym, tfn *Node) *Node { @@ -1000,7 +1004,7 @@ func dclfunc(sym *types.Sym, tfn *Node) *Node { fn.Func.Nname = newfuncnamel(lineno, sym) fn.Func.Nname.Name.Defn = fn fn.Func.Nname.Name.Param.Ntype = tfn - declare(fn.Func.Nname, PFUNC) + setNodeNameFunc(fn.Func.Nname) funchdr(fn) fn.Func.Nname.Name.Param.Ntype = typecheck(fn.Func.Nname.Name.Param.Ntype, ctxType) return fn diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index 03e475e85a..94cbcf9846 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -45,7 +45,6 @@ func fninit(n []*Node) { if len(nf) > 0 { lineno = nf[0].Pos // prolog/epilog gets line number of first init stmt initializers := lookup("init") - disableExport(initializers) fn := dclfunc(initializers, nod(OTFUNC, nil, nil)) for _, dcl := range dummyInitFn.Func.Dcl { dcl.Name.Curfn = fn diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 9c6cd24eb7..8fa3fca50f 100644 
--- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1543,7 +1543,6 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { tfn.List.Set(structargs(method.Type.Params(), true)) tfn.Rlist.Set(structargs(method.Type.Results(), false)) - disableExport(newnam) fn := dclfunc(newnam, tfn) fn.Func.SetDupok(true) @@ -1631,8 +1630,7 @@ func hashmem(t *types.Type) *Node { sym := Runtimepkg.Lookup("memhash") n := newname(sym) - n.SetClass(PFUNC) - n.Sym.SetFunc(true) + setNodeNameFunc(n) n.Type = functype(nil, []*Node{ anonfield(types.NewPtr(t)), anonfield(types.Types[TUINTPTR]), diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 90ecb50d6a..0158af8700 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -3166,8 +3166,7 @@ func eqfor(t *types.Type) (n *Node, needsize bool) { case ASPECIAL: sym := typesymprefix(".eq", t) n := newname(sym) - n.SetClass(PFUNC) - n.Sym.SetFunc(true) + setNodeNameFunc(n) n.Type = functype(nil, []*Node{ anonfield(types.NewPtr(t)), anonfield(types.NewPtr(t)), -- cgit v1.2.3-54-g00ecf From 6c76edeb1b67a5751dff215aaa712572d87a4ce8 Mon Sep 17 00:00:00 2001 From: Nigel Tao Date: Wed, 2 Sep 2020 21:49:30 +1000 Subject: image/gif: have BenchmarkEncodeRealisticRGBA convert to RGBA Change-Id: I98f5d987b92a29dcff06ae23b92f293cc7d6c02f Reviewed-on: https://go-review.googlesource.com/c/go/+/252597 Reviewed-by: David Symonds --- src/image/gif/writer_test.go | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/src/image/gif/writer_test.go b/src/image/gif/writer_test.go index 5d1b2c439e..1e622b3674 100644 --- a/src/image/gif/writer_test.go +++ b/src/image/gif/writer_test.go @@ -658,27 +658,27 @@ func TestEncodeWrappedImage(t *testing.T) { } func BenchmarkEncodeRandomPaletted(b *testing.B) { - img := image.NewPaletted(image.Rect(0, 0, 640, 480), palette.Plan9) + paletted := 
image.NewPaletted(image.Rect(0, 0, 640, 480), palette.Plan9) rnd := rand.New(rand.NewSource(123)) - for i := range img.Pix { - img.Pix[i] = uint8(rnd.Intn(256)) + for i := range paletted.Pix { + paletted.Pix[i] = uint8(rnd.Intn(256)) } b.SetBytes(640 * 480 * 1) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - Encode(ioutil.Discard, img, nil) + Encode(ioutil.Discard, paletted, nil) } } func BenchmarkEncodeRandomRGBA(b *testing.B) { - img := image.NewRGBA(image.Rect(0, 0, 640, 480)) - bo := img.Bounds() + rgba := image.NewRGBA(image.Rect(0, 0, 640, 480)) + bo := rgba.Bounds() rnd := rand.New(rand.NewSource(123)) for y := bo.Min.Y; y < bo.Max.Y; y++ { for x := bo.Min.X; x < bo.Max.X; x++ { - img.SetRGBA(x, y, color.RGBA{ + rgba.SetRGBA(x, y, color.RGBA{ uint8(rnd.Intn(256)), uint8(rnd.Intn(256)), uint8(rnd.Intn(256)), @@ -691,24 +691,24 @@ func BenchmarkEncodeRandomRGBA(b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - Encode(ioutil.Discard, img, nil) + Encode(ioutil.Discard, rgba, nil) } } func BenchmarkEncodeRealisticPaletted(b *testing.B) { - rgba, err := readImg("../testdata/video-001.png") + img, err := readImg("../testdata/video-001.png") if err != nil { b.Fatalf("readImg: %v", err) } - bo := rgba.Bounds() - img := image.NewPaletted(bo, palette.Plan9) - draw.Draw(img, bo, rgba, bo.Min, draw.Src) + bo := img.Bounds() + paletted := image.NewPaletted(bo, palette.Plan9) + draw.Draw(paletted, bo, img, bo.Min, draw.Src) b.SetBytes(int64(bo.Dx() * bo.Dy() * 1)) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - Encode(ioutil.Discard, img, nil) + Encode(ioutil.Discard, paletted, nil) } } @@ -718,11 +718,17 @@ func BenchmarkEncodeRealisticRGBA(b *testing.B) { b.Fatalf("readImg: %v", err) } bo := img.Bounds() + // Converting img to rgba is redundant for video-001.png, which is already + // in the RGBA format, but for those copy/pasting this benchmark (but + // changing the source image), the conversion ensures that we're 
still + // benchmarking encoding an RGBA image. + rgba := image.NewRGBA(bo) + draw.Draw(rgba, bo, img, bo.Min, draw.Src) b.SetBytes(int64(bo.Dx() * bo.Dy() * 4)) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - Encode(ioutil.Discard, img, nil) + Encode(ioutil.Discard, rgba, nil) } } -- cgit v1.2.3-54-g00ecf From 2b8b06ebbf0198d3c7a9b4d839bc05d9b13ecbe7 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Tue, 1 Sep 2020 11:16:33 -0400 Subject: cmd/internal/objabi: add regabi GOEXPERIMENT This is the "feature flag" for the register calling convention work (though since this work is expected to extend over a few releases, it's not version-prefixed). This will let us develop the register calling convention on the main branch while maintaining an easy toggle between the old and new ABIs. Updates #40724. Change-Id: I129c8d87d34e6fa0910b6fa43efb35b706021637 Reviewed-on: https://go-review.googlesource.com/c/go/+/252257 Reviewed-by: Cherry Zhang Run-TryBot: Austin Clements TryBot-Result: Gobot Gobot --- src/cmd/internal/objabi/util.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/cmd/internal/objabi/util.go b/src/cmd/internal/objabi/util.go index 6c5a9ba441..d2d6fdbda8 100644 --- a/src/cmd/internal/objabi/util.go +++ b/src/cmd/internal/objabi/util.go @@ -131,6 +131,11 @@ func init() { addexp(f) } } + + // regabi is only supported on amd64. + if GOARCH != "amd64" { + Regabi_enabled = 0 + } } // Note: must agree with runtime.framepointer_enabled. @@ -161,6 +166,7 @@ var ( Fieldtrack_enabled int Preemptibleloops_enabled int Staticlockranking_enabled int + Regabi_enabled int ) // Toolchain experiments. 
@@ -174,6 +180,7 @@ var exper = []struct { {"fieldtrack", &Fieldtrack_enabled}, {"preemptibleloops", &Preemptibleloops_enabled}, {"staticlockranking", &Staticlockranking_enabled}, + {"regabi", &Regabi_enabled}, } var defaultExpstring = Expstring() -- cgit v1.2.3-54-g00ecf From a538b59fd2428ba4d13f296d7483febf2fc05f97 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Tue, 1 Sep 2020 11:21:50 -0400 Subject: cmd/go: define an asm macro for GOEXPERIMENT=regabi This defines a macro for the regabi GOEXPERIMENT when assembling runtime assembly code. In general, assembly code will be shielded from the calling convention change, but there is a small amount of runtime assembly that is going to have to change. By defining a macro, we can easily make the small necessary changes. The other option is to use build tags, but that would require duplicating nontrivial amounts of unaffected code, leading to potential divergence issues. (And unlike Go code, assembly code can't depend on the compiler optimizing away branches on a feature constant.) We consider the macro preferable, especially since this is expected to be temporary as we transition to the new calling convention. Updates #40724. Change-Id: I73984065123968337ec10b47bb12c4a1cbc07dc5 Reviewed-on: https://go-review.googlesource.com/c/go/+/252258 Run-TryBot: Austin Clements TryBot-Result: Gobot Gobot Reviewed-by: Cherry Zhang --- src/cmd/go/internal/work/gc.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go index f1d08e0268..6031897f88 100644 --- a/src/cmd/go/internal/work/gc.go +++ b/src/cmd/go/internal/work/gc.go @@ -259,6 +259,15 @@ func asmArgs(a *Action, p *load.Package) []interface{} { } } } + if p.ImportPath == "runtime" && objabi.Regabi_enabled != 0 { + // In order to make it easier to port runtime assembly + // to the register ABI, we introduce a macro + // indicating the experiment is enabled. 
+ // + // TODO(austin): Remove this once we commit to the + // register ABI (#40724). + args = append(args, "-D=GOEXPERIMENT_REGABI=1") + } if cfg.Goarch == "mips" || cfg.Goarch == "mipsle" { // Define GOMIPS_value from cfg.GOMIPS. -- cgit v1.2.3-54-g00ecf From ae658cb19a265f3f4694cd4aec508b4565bda6aa Mon Sep 17 00:00:00 2001 From: fanzha02 Date: Thu, 27 Aug 2020 17:34:59 +0800 Subject: cmd/compile: store the comparison pseudo-ops of arm64 conditional instructions in AuxInt The current implementation stores the comparison pseudo-ops of arm64 conditional instructions (CSEL/CSEL0) in Aux, this patch modifies it and stores it in AuxInt, which can avoid the allocation. Change-Id: I0b69e51f63acd84c6878c6a59ccf6417501a8cfc Reviewed-on: https://go-review.googlesource.com/c/go/+/252517 Run-TryBot: fannie zhang TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/arm64/ssa.go | 2 +- src/cmd/compile/internal/ssa/check.go | 6 +- src/cmd/compile/internal/ssa/gen/ARM64.rules | 162 ++++++------- src/cmd/compile/internal/ssa/gen/ARM64Ops.go | 4 +- src/cmd/compile/internal/ssa/gen/rulegen.go | 7 +- src/cmd/compile/internal/ssa/rewrite.go | 15 +- src/cmd/compile/internal/ssa/rewriteARM64.go | 332 +++++++++++++-------------- src/cmd/compile/internal/ssa/value.go | 2 +- 8 files changed, 265 insertions(+), 265 deletions(-) diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index b6bb81a847..1d6ea6b9d8 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -816,7 +816,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg - p.From.Reg = condBits[v.Aux.(ssa.Op)] + p.From.Reg = condBits[ssa.Op(v.AuxInt)] p.Reg = v.Args[0].Reg() p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r1}) p.To.Type = obj.TYPE_REG diff --git a/src/cmd/compile/internal/ssa/check.go 
b/src/cmd/compile/internal/ssa/check.go index 98e1b79334..828f645b39 100644 --- a/src/cmd/compile/internal/ssa/check.go +++ b/src/cmd/compile/internal/ssa/check.go @@ -171,10 +171,10 @@ func checkFunc(f *Func) { canHaveAuxInt = true canHaveAux = true case auxCCop: - if _, ok := v.Aux.(Op); !ok { - f.Fatalf("bad type %T for CCop in %v", v.Aux, v) + if opcodeTable[Op(v.AuxInt)].name == "OpInvalid" { + f.Fatalf("value %v has an AuxInt value that is a valid opcode", v) } - canHaveAux = true + canHaveAuxInt = true case auxS390XCCMask: if _, ok := v.Aux.(s390x.CCMask); !ok { f.Fatalf("bad type %T for S390XCCMask in %v", v.Aux, v) diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules index c29e7f7edf..311067e87a 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules @@ -132,65 +132,65 @@ // we compare to 64 to ensure Go semantics for large shifts // Rules about rotates with non-const shift are based on the following rules, // if the following rules change, please also modify the rules based on them. 
-(Lsh64x64 x y) => (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) -(Lsh64x32 x y) => (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Lsh64x16 x y) => (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Lsh64x8 x y) => (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) - -(Lsh32x64 x y) => (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) -(Lsh32x32 x y) => (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Lsh32x16 x y) => (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Lsh32x8 x y) => (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) - -(Lsh16x64 x y) => (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) -(Lsh16x32 x y) => (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Lsh16x16 x y) => (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Lsh16x8 x y) => (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) - -(Lsh8x64 x y) => (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) -(Lsh8x32 x y) => (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Lsh8x16 x y) => (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Lsh8x8 x y) => (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) - -(Rsh64Ux64 x y) => (CSEL {OpARM64LessThanU} (SRL x y) (Const64 [0]) (CMPconst [64] y)) -(Rsh64Ux32 x y) => (CSEL {OpARM64LessThanU} (SRL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Rsh64Ux16 x y) => 
(CSEL {OpARM64LessThanU} (SRL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Rsh64Ux8 x y) => (CSEL {OpARM64LessThanU} (SRL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) - -(Rsh32Ux64 x y) => (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] y)) -(Rsh32Ux32 x y) => (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Rsh32Ux16 x y) => (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Rsh32Ux8 x y) => (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) - -(Rsh16Ux64 x y) => (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] y)) -(Rsh16Ux32 x y) => (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Rsh16Ux16 x y) => (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Rsh16Ux8 x y) => (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) - -(Rsh8Ux64 x y) => (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] y)) -(Rsh8Ux32 x y) => (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Rsh8Ux16 x y) => (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Rsh8Ux8 x y) => (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) - -(Rsh64x64 x y) => (SRA x (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) -(Rsh64x32 x y) => (SRA x (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) -(Rsh64x16 x y) => (SRA x (CSEL 
{OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) -(Rsh64x8 x y) => (SRA x (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) - -(Rsh32x64 x y) => (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) -(Rsh32x32 x y) => (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) -(Rsh32x16 x y) => (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) -(Rsh32x8 x y) => (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) - -(Rsh16x64 x y) => (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) -(Rsh16x32 x y) => (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) -(Rsh16x16 x y) => (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) -(Rsh16x8 x y) => (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) - -(Rsh8x64 x y) => (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) -(Rsh8x32 x y) => (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) -(Rsh8x16 x y) => (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) -(Rsh8x8 x y) => (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) +(Lsh64x64 x y) => (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) +(Lsh64x32 x y) => (CSEL [OpARM64LessThanU] (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Lsh64x16 x y) => (CSEL [OpARM64LessThanU] (SLL x 
(ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Lsh64x8 x y) => (CSEL [OpARM64LessThanU] (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + +(Lsh32x64 x y) => (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) +(Lsh32x32 x y) => (CSEL [OpARM64LessThanU] (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Lsh32x16 x y) => (CSEL [OpARM64LessThanU] (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Lsh32x8 x y) => (CSEL [OpARM64LessThanU] (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + +(Lsh16x64 x y) => (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) +(Lsh16x32 x y) => (CSEL [OpARM64LessThanU] (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Lsh16x16 x y) => (CSEL [OpARM64LessThanU] (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Lsh16x8 x y) => (CSEL [OpARM64LessThanU] (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + +(Lsh8x64 x y) => (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) +(Lsh8x32 x y) => (CSEL [OpARM64LessThanU] (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Lsh8x16 x y) => (CSEL [OpARM64LessThanU] (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Lsh8x8 x y) => (CSEL [OpARM64LessThanU] (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + +(Rsh64Ux64 x y) => (CSEL [OpARM64LessThanU] (SRL x y) (Const64 [0]) (CMPconst [64] y)) +(Rsh64Ux32 x y) => (CSEL [OpARM64LessThanU] (SRL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Rsh64Ux16 x y) => (CSEL [OpARM64LessThanU] (SRL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Rsh64Ux8 x y) => (CSEL [OpARM64LessThanU] (SRL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + +(Rsh32Ux64 x y) => (CSEL 
[OpARM64LessThanU] (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] y)) +(Rsh32Ux32 x y) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Rsh32Ux16 x y) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Rsh32Ux8 x y) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + +(Rsh16Ux64 x y) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] y)) +(Rsh16Ux32 x y) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Rsh16Ux16 x y) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Rsh16Ux8 x y) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + +(Rsh8Ux64 x y) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] y)) +(Rsh8Ux32 x y) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Rsh8Ux16 x y) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Rsh8Ux8 x y) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + +(Rsh64x64 x y) => (SRA x (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) +(Rsh64x32 x y) => (SRA x (CSEL [OpARM64LessThanU] (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) +(Rsh64x16 x y) => (SRA x (CSEL [OpARM64LessThanU] (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) +(Rsh64x8 x y) => (SRA x (CSEL [OpARM64LessThanU] (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + +(Rsh32x64 x y) => (SRA (SignExt32to64 x) (CSEL 
[OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) +(Rsh32x32 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) +(Rsh32x16 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) +(Rsh32x8 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + +(Rsh16x64 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) +(Rsh16x32 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) +(Rsh16x16 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) +(Rsh16x8 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + +(Rsh8x64 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) +(Rsh8x32 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) +(Rsh8x16 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) +(Rsh8x8 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) // constants (Const(64|32|16|8) [val]) => (MOVDconst [int64(val)]) @@ -315,8 +315,8 @@ (FCMPD (FMOVDconst [0]) x) => (InvertFlags (FCMPD0 x)) // CSEL needs a flag-generating argument. Synthesize a CMPW if necessary. 
-(CondSelect x y boolval) && flagArg(boolval) != nil => (CSEL {boolval.Op} x y flagArg(boolval)) -(CondSelect x y boolval) && flagArg(boolval) == nil => (CSEL {OpARM64NotEqual} x y (CMPWconst [0] boolval)) +(CondSelect x y boolval) && flagArg(boolval) != nil => (CSEL [boolval.Op] x y flagArg(boolval)) +(CondSelect x y boolval) && flagArg(boolval) == nil => (CSEL [OpARM64NotEqual] x y (CMPWconst [0] boolval)) (OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVDaddr [int32(off)] ptr) (OffPtr [off] ptr) => (ADDconst [off] ptr) @@ -1324,8 +1324,8 @@ (XOR x (MVN y)) -> (EON x y) (OR x (MVN y)) -> (ORN x y) (MVN (XOR x y)) -> (EON x y) -(CSEL {cc} x (MOVDconst [0]) flag) -> (CSEL0 {cc} x flag) -(CSEL {cc} (MOVDconst [0]) y flag) -> (CSEL0 {arm64Negate(cc.(Op))} y flag) +(CSEL [cc] x (MOVDconst [0]) flag) => (CSEL0 [cc] x flag) +(CSEL [cc] (MOVDconst [0]) y flag) => (CSEL0 [arm64Negate(cc)] y flag) (SUB x (SUB y z)) -> (SUB (ADD x z) y) (SUB (SUB x y) z) -> (SUB x (ADD y z)) @@ -1481,8 +1481,8 @@ (GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no) // absorb InvertFlags into CSEL(0) -(CSEL {cc} x y (InvertFlags cmp)) => (CSEL {arm64Invert(cc)} x y cmp) -(CSEL0 {cc} x (InvertFlags cmp)) => (CSEL0 {arm64Invert(cc)} x cmp) +(CSEL [cc] x y (InvertFlags cmp)) => (CSEL [arm64Invert(cc)] x y cmp) +(CSEL0 [cc] x (InvertFlags cmp)) => (CSEL0 [arm64Invert(cc)] x cmp) // absorb flag constants into boolean values (Equal (FlagConstant [fc])) => (MOVDconst [b2i(fc.eq())]) @@ -1517,20 +1517,20 @@ (MOVBUreg x) && x.Type.IsBoolean() => (MOVDreg x) // absorb flag constants into conditional instructions -(CSEL {cc} x _ flag) && ccARM64Eval(cc, flag) > 0 => x -(CSEL {cc} _ y flag) && ccARM64Eval(cc, flag) < 0 => y -(CSEL0 {cc} x flag) && ccARM64Eval(cc, flag) > 0 => x -(CSEL0 {cc} _ flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0]) +(CSEL [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x +(CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y +(CSEL0 [cc] x flag) && ccARM64Eval(cc, flag) 
> 0 => x +(CSEL0 [cc] _ flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0]) // absorb flags back into boolean CSEL -(CSEL {cc} x y (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil => - (CSEL {boolval.Op} x y flagArg(boolval)) -(CSEL {cc} x y (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil => - (CSEL {arm64Negate(boolval.Op)} x y flagArg(boolval)) -(CSEL0 {cc} x (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil => - (CSEL0 {boolval.Op} x flagArg(boolval)) -(CSEL0 {cc} x (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil => - (CSEL0 {arm64Negate(boolval.Op)} x flagArg(boolval)) +(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil => + (CSEL [boolval.Op] x y flagArg(boolval)) +(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil => + (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval)) +(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil => + (CSEL0 [boolval.Op] x flagArg(boolval)) +(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil => + (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval)) // absorb shifts into ops (NEG x:(SLLconst [c] y)) && clobberIfDead(x) => (NEGshiftLL [c] y) @@ -1691,11 +1691,11 @@ // "|" can also be "^" or "+". // As arm64 does not have a ROL instruction, so ROL(x, y) is replaced by ROR(x, -y). 
((ADD|OR|XOR) (SLL x (ANDconst [63] y)) - (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) + (CSEL0 [cc] (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) && cc == OpARM64LessThanU => (ROR x (NEG y)) ((ADD|OR|XOR) (SRL x (ANDconst [63] y)) - (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) + (CSEL0 [cc] (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) && cc == OpARM64LessThanU => (ROR x y) @@ -1705,11 +1705,11 @@ // "|" can also be "^" or "+". // As arm64 does not have a ROLW instruction, so ROLW(x, y) is replaced by RORW(x, -y). ((ADD|OR|XOR) (SLL x (ANDconst [31] y)) - (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) + (CSEL0 [cc] (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) && cc == OpARM64LessThanU => (RORW x (NEG y)) ((ADD|OR|XOR) (SRL (MOVWUreg x) (ANDconst [31] y)) - (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) + (CSEL0 [cc] (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) && cc == OpARM64LessThanU => (RORW x y) diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go index 2424e67e20..e9af261a6a 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go @@ -467,8 +467,8 @@ func init() { // conditional instructions; auxint is // one of the arm64 comparison pseudo-ops (LessThan, LessThanU, etc.) - {name: "CSEL", argLength: 3, reg: gp2flags1, asm: "CSEL", aux: "CCop"}, // aux(flags) ? arg0 : arg1 - {name: "CSEL0", argLength: 2, reg: gp1flags1, asm: "CSEL", aux: "CCop"}, // aux(flags) ? arg0 : 0 + {name: "CSEL", argLength: 3, reg: gp2flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? 
arg0 : arg1 + {name: "CSEL0", argLength: 2, reg: gp1flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : 0 // function calls {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go index e520503ab1..9e2e112cd7 100644 --- a/src/cmd/compile/internal/ssa/gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/gen/rulegen.go @@ -1423,7 +1423,8 @@ func parseValue(val string, arch arch, loc string) (op opData, oparch, typ, auxi func opHasAuxInt(op opData) bool { switch op.aux { - case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant": + case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", + "SymOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant", "CCop": return true } return false @@ -1431,7 +1432,7 @@ func opHasAuxInt(op opData) bool { func opHasAux(op opData) bool { switch op.aux { - case "String", "Sym", "SymOff", "SymValAndOff", "Typ", "TypSize", "CCop", + case "String", "Sym", "SymOff", "SymValAndOff", "Typ", "TypSize", "S390XCCMask", "S390XRotateParams": return true } @@ -1784,8 +1785,6 @@ func (op opData) auxType() string { return "s390x.CCMask" case "S390XRotateParams": return "s390x.RotateParams" - case "CCop": - return "CCop" default: return "invalid" } diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 3df9ad24f9..09f94ef53e 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -651,6 +651,10 @@ func auxIntToFlagConstant(x int64) flagConstant { return flagConstant(x) } +func auxIntToOp(cc int64) Op { + return Op(cc) +} + func boolToAuxInt(b bool) int64 { if b { return 1 
@@ -694,6 +698,10 @@ func flagConstantToAuxInt(x flagConstant) int64 { return int64(x) } +func opToAuxInt(o Op) int64 { + return int64(o) +} + func auxToString(i interface{}) string { return i.(string) } @@ -727,13 +735,6 @@ func s390xCCMaskToAux(c s390x.CCMask) interface{} { func s390xRotateParamsToAux(r s390x.RotateParams) interface{} { return r } -func cCopToAux(o Op) interface{} { - return o -} - -func auxToCCop(cc interface{}) Op { - return cc.(Op) -} // uaddOvf reports whether unsigned a+b would overflow. func uaddOvf(a, b int64) bool { diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 453578aa9a..0fb86b6bdd 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -1285,7 +1285,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { } break } - // match: (ADD (SLL x (ANDconst [63] y)) (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) + // match: (ADD (SLL x (ANDconst [63] y)) (CSEL0 [cc] (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) // cond: cc == OpARM64LessThanU // result: (ROR x (NEG y)) for { @@ -1307,7 +1307,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { continue } - cc := auxToCCop(v_1.Aux) + cc := auxIntToOp(v_1.AuxInt) _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 { @@ -1355,7 +1355,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { } break } - // match: (ADD (SRL x (ANDconst [63] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) + // match: (ADD (SRL x (ANDconst [63] y)) (CSEL0 [cc] (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) // cond: cc == OpARM64LessThanU // 
result: (ROR x y) for { @@ -1377,7 +1377,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { continue } - cc := auxToCCop(v_1.Aux) + cc := auxIntToOp(v_1.AuxInt) _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpARM64SLL { @@ -1423,7 +1423,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { } break } - // match: (ADD (SLL x (ANDconst [31] y)) (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) + // match: (ADD (SLL x (ANDconst [31] y)) (CSEL0 [cc] (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) // cond: cc == OpARM64LessThanU // result: (RORW x (NEG y)) for { @@ -1445,7 +1445,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { continue } - cc := auxToCCop(v_1.Aux) + cc := auxIntToOp(v_1.AuxInt) _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 { @@ -1494,7 +1494,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { } break } - // match: (ADD (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) + // match: (ADD (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 [cc] (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) // cond: cc == OpARM64LessThanU // result: (RORW x y) for { @@ -1520,7 +1520,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { continue } - cc := auxToCCop(v_1.Aux) + cc := auxIntToOp(v_1.AuxInt) _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpARM64SLL { @@ -3178,38 +3178,38 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CSEL {cc} x (MOVDconst [0]) flag) - 
// result: (CSEL0 {cc} x flag) + // match: (CSEL [cc] x (MOVDconst [0]) flag) + // result: (CSEL0 [cc] x flag) for { - cc := v.Aux + cc := auxIntToOp(v.AuxInt) x := v_0 - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { break } flag := v_2 v.reset(OpARM64CSEL0) - v.Aux = cc + v.AuxInt = opToAuxInt(cc) v.AddArg2(x, flag) return true } - // match: (CSEL {cc} (MOVDconst [0]) y flag) - // result: (CSEL0 {arm64Negate(cc.(Op))} y flag) + // match: (CSEL [cc] (MOVDconst [0]) y flag) + // result: (CSEL0 [arm64Negate(cc)] y flag) for { - cc := v.Aux - if v_0.Op != OpARM64MOVDconst || v_0.AuxInt != 0 { + cc := auxIntToOp(v.AuxInt) + if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 { break } y := v_1 flag := v_2 v.reset(OpARM64CSEL0) - v.Aux = arm64Negate(cc.(Op)) + v.AuxInt = opToAuxInt(arm64Negate(cc)) v.AddArg2(y, flag) return true } - // match: (CSEL {cc} x y (InvertFlags cmp)) - // result: (CSEL {arm64Invert(cc)} x y cmp) + // match: (CSEL [cc] x y (InvertFlags cmp)) + // result: (CSEL [arm64Invert(cc)] x y cmp) for { - cc := auxToCCop(v.Aux) + cc := auxIntToOp(v.AuxInt) x := v_0 y := v_1 if v_2.Op != OpARM64InvertFlags { @@ -3217,15 +3217,15 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { } cmp := v_2.Args[0] v.reset(OpARM64CSEL) - v.Aux = cCopToAux(arm64Invert(cc)) + v.AuxInt = opToAuxInt(arm64Invert(cc)) v.AddArg3(x, y, cmp) return true } - // match: (CSEL {cc} x _ flag) + // match: (CSEL [cc] x _ flag) // cond: ccARM64Eval(cc, flag) > 0 // result: x for { - cc := auxToCCop(v.Aux) + cc := auxIntToOp(v.AuxInt) x := v_0 flag := v_2 if !(ccARM64Eval(cc, flag) > 0) { @@ -3234,11 +3234,11 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { v.copyOf(x) return true } - // match: (CSEL {cc} _ y flag) + // match: (CSEL [cc] _ y flag) // cond: ccARM64Eval(cc, flag) < 0 // result: y for { - cc := auxToCCop(v.Aux) + cc := auxIntToOp(v.AuxInt) y := v_1 flag := v_2 if !(ccARM64Eval(cc, 
flag) < 0) { @@ -3247,11 +3247,11 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { v.copyOf(y) return true } - // match: (CSEL {cc} x y (CMPWconst [0] boolval)) + // match: (CSEL [cc] x y (CMPWconst [0] boolval)) // cond: cc == OpARM64NotEqual && flagArg(boolval) != nil - // result: (CSEL {boolval.Op} x y flagArg(boolval)) + // result: (CSEL [boolval.Op] x y flagArg(boolval)) for { - cc := auxToCCop(v.Aux) + cc := auxIntToOp(v.AuxInt) x := v_0 y := v_1 if v_2.Op != OpARM64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 { @@ -3262,15 +3262,15 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { break } v.reset(OpARM64CSEL) - v.Aux = cCopToAux(boolval.Op) + v.AuxInt = opToAuxInt(boolval.Op) v.AddArg3(x, y, flagArg(boolval)) return true } - // match: (CSEL {cc} x y (CMPWconst [0] boolval)) + // match: (CSEL [cc] x y (CMPWconst [0] boolval)) // cond: cc == OpARM64Equal && flagArg(boolval) != nil - // result: (CSEL {arm64Negate(boolval.Op)} x y flagArg(boolval)) + // result: (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval)) for { - cc := auxToCCop(v.Aux) + cc := auxIntToOp(v.AuxInt) x := v_0 y := v_1 if v_2.Op != OpARM64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 { @@ -3281,7 +3281,7 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { break } v.reset(OpARM64CSEL) - v.Aux = cCopToAux(arm64Negate(boolval.Op)) + v.AuxInt = opToAuxInt(arm64Negate(boolval.Op)) v.AddArg3(x, y, flagArg(boolval)) return true } @@ -3290,25 +3290,25 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CSEL0 {cc} x (InvertFlags cmp)) - // result: (CSEL0 {arm64Invert(cc)} x cmp) + // match: (CSEL0 [cc] x (InvertFlags cmp)) + // result: (CSEL0 [arm64Invert(cc)] x cmp) for { - cc := auxToCCop(v.Aux) + cc := auxIntToOp(v.AuxInt) x := v_0 if v_1.Op != OpARM64InvertFlags { break } cmp := v_1.Args[0] v.reset(OpARM64CSEL0) - v.Aux = cCopToAux(arm64Invert(cc)) + v.AuxInt = 
opToAuxInt(arm64Invert(cc)) v.AddArg2(x, cmp) return true } - // match: (CSEL0 {cc} x flag) + // match: (CSEL0 [cc] x flag) // cond: ccARM64Eval(cc, flag) > 0 // result: x for { - cc := auxToCCop(v.Aux) + cc := auxIntToOp(v.AuxInt) x := v_0 flag := v_1 if !(ccARM64Eval(cc, flag) > 0) { @@ -3317,11 +3317,11 @@ func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { v.copyOf(x) return true } - // match: (CSEL0 {cc} _ flag) + // match: (CSEL0 [cc] _ flag) // cond: ccARM64Eval(cc, flag) < 0 // result: (MOVDconst [0]) for { - cc := auxToCCop(v.Aux) + cc := auxIntToOp(v.AuxInt) flag := v_1 if !(ccARM64Eval(cc, flag) < 0) { break @@ -3330,11 +3330,11 @@ func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { v.AuxInt = int64ToAuxInt(0) return true } - // match: (CSEL0 {cc} x (CMPWconst [0] boolval)) + // match: (CSEL0 [cc] x (CMPWconst [0] boolval)) // cond: cc == OpARM64NotEqual && flagArg(boolval) != nil - // result: (CSEL0 {boolval.Op} x flagArg(boolval)) + // result: (CSEL0 [boolval.Op] x flagArg(boolval)) for { - cc := auxToCCop(v.Aux) + cc := auxIntToOp(v.AuxInt) x := v_0 if v_1.Op != OpARM64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 { break @@ -3344,15 +3344,15 @@ func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { break } v.reset(OpARM64CSEL0) - v.Aux = cCopToAux(boolval.Op) + v.AuxInt = opToAuxInt(boolval.Op) v.AddArg2(x, flagArg(boolval)) return true } - // match: (CSEL0 {cc} x (CMPWconst [0] boolval)) + // match: (CSEL0 [cc] x (CMPWconst [0] boolval)) // cond: cc == OpARM64Equal && flagArg(boolval) != nil - // result: (CSEL0 {arm64Negate(boolval.Op)} x flagArg(boolval)) + // result: (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval)) for { - cc := auxToCCop(v.Aux) + cc := auxIntToOp(v.AuxInt) x := v_0 if v_1.Op != OpARM64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 { break @@ -3362,7 +3362,7 @@ func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { break } v.reset(OpARM64CSEL0) - v.Aux = cCopToAux(arm64Negate(boolval.Op)) + v.AuxInt = 
opToAuxInt(arm64Negate(boolval.Op)) v.AddArg2(x, flagArg(boolval)) return true } @@ -15043,7 +15043,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } break } - // match: (OR (SLL x (ANDconst [63] y)) (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) + // match: (OR (SLL x (ANDconst [63] y)) (CSEL0 [cc] (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) // cond: cc == OpARM64LessThanU // result: (ROR x (NEG y)) for { @@ -15065,7 +15065,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { continue } - cc := auxToCCop(v_1.Aux) + cc := auxIntToOp(v_1.AuxInt) _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 { @@ -15113,7 +15113,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } break } - // match: (OR (SRL x (ANDconst [63] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) + // match: (OR (SRL x (ANDconst [63] y)) (CSEL0 [cc] (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) // cond: cc == OpARM64LessThanU // result: (ROR x y) for { @@ -15135,7 +15135,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { continue } - cc := auxToCCop(v_1.Aux) + cc := auxIntToOp(v_1.AuxInt) _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpARM64SLL { @@ -15181,7 +15181,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } break } - // match: (OR (SLL x (ANDconst [31] y)) (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) + // match: (OR (SLL x (ANDconst [31] y)) (CSEL0 [cc] (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] 
y))))) // cond: cc == OpARM64LessThanU // result: (RORW x (NEG y)) for { @@ -15203,7 +15203,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { continue } - cc := auxToCCop(v_1.Aux) + cc := auxIntToOp(v_1.AuxInt) _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 { @@ -15252,7 +15252,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } break } - // match: (OR (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) + // match: (OR (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 [cc] (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) // cond: cc == OpARM64LessThanU // result: (RORW x y) for { @@ -15278,7 +15278,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { continue } - cc := auxToCCop(v_1.Aux) + cc := auxIntToOp(v_1.AuxInt) _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpARM64SLL { @@ -20713,7 +20713,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { } break } - // match: (XOR (SLL x (ANDconst [63] y)) (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) + // match: (XOR (SLL x (ANDconst [63] y)) (CSEL0 [cc] (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) // cond: cc == OpARM64LessThanU // result: (ROR x (NEG y)) for { @@ -20735,7 +20735,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { continue } - cc := auxToCCop(v_1.Aux) + cc := auxIntToOp(v_1.AuxInt) _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 { @@ -20783,7 +20783,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { } break } - // match: (XOR (SRL x 
(ANDconst [63] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) + // match: (XOR (SRL x (ANDconst [63] y)) (CSEL0 [cc] (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) // cond: cc == OpARM64LessThanU // result: (ROR x y) for { @@ -20805,7 +20805,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { continue } - cc := auxToCCop(v_1.Aux) + cc := auxIntToOp(v_1.AuxInt) _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpARM64SLL { @@ -20851,7 +20851,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { } break } - // match: (XOR (SLL x (ANDconst [31] y)) (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) + // match: (XOR (SLL x (ANDconst [31] y)) (CSEL0 [cc] (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) // cond: cc == OpARM64LessThanU // result: (RORW x (NEG y)) for { @@ -20873,7 +20873,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { continue } - cc := auxToCCop(v_1.Aux) + cc := auxIntToOp(v_1.AuxInt) _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 { @@ -20922,7 +20922,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { } break } - // match: (XOR (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) + // match: (XOR (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 [cc] (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) // cond: cc == OpARM64LessThanU // result: (RORW x y) for { @@ -20948,7 +20948,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { if v_1.Op != 
OpARM64CSEL0 || v_1.Type != typ.UInt32 { continue } - cc := auxToCCop(v_1.Aux) + cc := auxIntToOp(v_1.AuxInt) _ = v_1.Args[1] v_1_0 := v_1.Args[0] if v_1_0.Op != OpARM64SLL { @@ -21471,7 +21471,7 @@ func rewriteValueARM64_OpCondSelect(v *Value) bool { b := v.Block // match: (CondSelect x y boolval) // cond: flagArg(boolval) != nil - // result: (CSEL {boolval.Op} x y flagArg(boolval)) + // result: (CSEL [boolval.Op] x y flagArg(boolval)) for { x := v_0 y := v_1 @@ -21480,13 +21480,13 @@ func rewriteValueARM64_OpCondSelect(v *Value) bool { break } v.reset(OpARM64CSEL) - v.Aux = cCopToAux(boolval.Op) + v.AuxInt = opToAuxInt(boolval.Op) v.AddArg3(x, y, flagArg(boolval)) return true } // match: (CondSelect x y boolval) // cond: flagArg(boolval) == nil - // result: (CSEL {OpARM64NotEqual} x y (CMPWconst [0] boolval)) + // result: (CSEL [OpARM64NotEqual] x y (CMPWconst [0] boolval)) for { x := v_0 y := v_1 @@ -21495,7 +21495,7 @@ func rewriteValueARM64_OpCondSelect(v *Value) bool { break } v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64NotEqual) + v.AuxInt = opToAuxInt(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(0) v0.AddArg(boolval) @@ -22734,13 +22734,13 @@ func rewriteValueARM64_OpLsh16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x16 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSEL [OpARM64LessThanU] (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) @@ -22760,13 +22760,13 @@ func rewriteValueARM64_OpLsh16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x32 x y) - // result: (CSEL {OpARM64LessThanU} 
(SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSEL [OpARM64LessThanU] (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) @@ -22785,13 +22785,13 @@ func rewriteValueARM64_OpLsh16x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Lsh16x64 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) @@ -22809,13 +22809,13 @@ func rewriteValueARM64_OpLsh16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x8 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSEL [OpARM64LessThanU] (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) @@ -22835,13 +22835,13 @@ func rewriteValueARM64_OpLsh32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x16 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSEL [OpARM64LessThanU] (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux 
= cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) @@ -22861,13 +22861,13 @@ func rewriteValueARM64_OpLsh32x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x32 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSEL [OpARM64LessThanU] (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) @@ -22886,13 +22886,13 @@ func rewriteValueARM64_OpLsh32x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Lsh32x64 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) @@ -22910,13 +22910,13 @@ func rewriteValueARM64_OpLsh32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x8 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSEL [OpARM64LessThanU] (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) @@ -22936,13 +22936,13 @@ func rewriteValueARM64_OpLsh64x16(v *Value) bool 
{ b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x16 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSEL [OpARM64LessThanU] (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) @@ -22962,13 +22962,13 @@ func rewriteValueARM64_OpLsh64x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x32 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSEL [OpARM64LessThanU] (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) @@ -22987,13 +22987,13 @@ func rewriteValueARM64_OpLsh64x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Lsh64x64 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) @@ -23011,13 +23011,13 @@ func rewriteValueARM64_OpLsh64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x8 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSEL [OpARM64LessThanU] (SLL x (ZeroExt8to64 y)) 
(Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) @@ -23037,13 +23037,13 @@ func rewriteValueARM64_OpLsh8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x16 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSEL [OpARM64LessThanU] (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) @@ -23063,13 +23063,13 @@ func rewriteValueARM64_OpLsh8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x32 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSEL [OpARM64LessThanU] (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) @@ -23088,13 +23088,13 @@ func rewriteValueARM64_OpLsh8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Lsh8x64 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v0.AddArg2(x, y) 
v1 := b.NewValue0(v.Pos, OpConst64, t) @@ -23112,13 +23112,13 @@ func rewriteValueARM64_OpLsh8x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x8 x y) - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSEL [OpARM64LessThanU] (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SLL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) @@ -23932,13 +23932,13 @@ func rewriteValueARM64_OpRsh16Ux16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux16 x y) - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) @@ -23960,13 +23960,13 @@ func rewriteValueARM64_OpRsh16Ux32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux32 x y) - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) @@ -23988,13 +23988,13 @@ func rewriteValueARM64_OpRsh16Ux64(v 
*Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux64 x y) - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) @@ -24014,13 +24014,13 @@ func rewriteValueARM64_OpRsh16Ux8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux8 x y) - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) @@ -24042,7 +24042,7 @@ func rewriteValueARM64_OpRsh16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x16 x y) - // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { x := v_0 y := v_1 @@ -24050,7 +24050,7 @@ func rewriteValueARM64_OpRsh16x16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = cCopToAux(OpARM64LessThanU) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) @@ -24069,7 +24069,7 @@ func 
rewriteValueARM64_OpRsh16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x32 x y) - // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { x := v_0 y := v_1 @@ -24077,7 +24077,7 @@ func rewriteValueARM64_OpRsh16x32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = cCopToAux(OpARM64LessThanU) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) @@ -24096,7 +24096,7 @@ func rewriteValueARM64_OpRsh16x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x64 x y) - // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) + // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) for { x := v_0 y := v_1 @@ -24104,7 +24104,7 @@ func rewriteValueARM64_OpRsh16x64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = cCopToAux(OpARM64LessThanU) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -24121,7 +24121,7 @@ func rewriteValueARM64_OpRsh16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x8 x y) - // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { x := v_0 y := v_1 @@ -24129,7 +24129,7 @@ func 
rewriteValueARM64_OpRsh16x8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = cCopToAux(OpARM64LessThanU) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) @@ -24148,13 +24148,13 @@ func rewriteValueARM64_OpRsh32Ux16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux16 x y) - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) @@ -24176,13 +24176,13 @@ func rewriteValueARM64_OpRsh32Ux32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux32 x y) - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) @@ -24204,13 +24204,13 @@ func rewriteValueARM64_OpRsh32Ux64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux64 x y) - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type 
x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) @@ -24230,13 +24230,13 @@ func rewriteValueARM64_OpRsh32Ux8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux8 x y) - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) @@ -24258,7 +24258,7 @@ func rewriteValueARM64_OpRsh32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x16 x y) - // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { x := v_0 y := v_1 @@ -24266,7 +24266,7 @@ func rewriteValueARM64_OpRsh32x16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = cCopToAux(OpARM64LessThanU) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) @@ -24285,7 +24285,7 @@ func rewriteValueARM64_OpRsh32x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x32 x y) - // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + // result: (SRA (SignExt32to64 x) (CSEL 
[OpARM64LessThanU] (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { x := v_0 y := v_1 @@ -24293,7 +24293,7 @@ func rewriteValueARM64_OpRsh32x32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = cCopToAux(OpARM64LessThanU) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) @@ -24312,7 +24312,7 @@ func rewriteValueARM64_OpRsh32x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x64 x y) - // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) + // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) for { x := v_0 y := v_1 @@ -24320,7 +24320,7 @@ func rewriteValueARM64_OpRsh32x64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = cCopToAux(OpARM64LessThanU) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -24337,7 +24337,7 @@ func rewriteValueARM64_OpRsh32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x8 x y) - // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { x := v_0 y := v_1 @@ -24345,7 +24345,7 @@ func rewriteValueARM64_OpRsh32x8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = cCopToAux(OpARM64LessThanU) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) 
v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) @@ -24364,13 +24364,13 @@ func rewriteValueARM64_OpRsh64Ux16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux16 x y) - // result: (CSEL {OpARM64LessThanU} (SRL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSEL [OpARM64LessThanU] (SRL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) @@ -24390,13 +24390,13 @@ func rewriteValueARM64_OpRsh64Ux32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux32 x y) - // result: (CSEL {OpARM64LessThanU} (SRL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSEL [OpARM64LessThanU] (SRL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) @@ -24415,13 +24415,13 @@ func rewriteValueARM64_OpRsh64Ux64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Rsh64Ux64 x y) - // result: (CSEL {OpARM64LessThanU} (SRL x y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSEL [OpARM64LessThanU] (SRL x y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) @@ -24439,13 +24439,13 @@ func rewriteValueARM64_OpRsh64Ux8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux8 x y) - // result: (CSEL {OpARM64LessThanU} 
(SRL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSEL [OpARM64LessThanU] (SRL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) @@ -24465,13 +24465,13 @@ func rewriteValueARM64_OpRsh64x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x16 x y) - // result: (SRA x (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + // result: (SRA x (CSEL [OpARM64LessThanU] (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { x := v_0 y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v0.Aux = cCopToAux(OpARM64LessThanU) + v0.AuxInt = opToAuxInt(OpARM64LessThanU) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) @@ -24490,13 +24490,13 @@ func rewriteValueARM64_OpRsh64x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x32 x y) - // result: (SRA x (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + // result: (SRA x (CSEL [OpARM64LessThanU] (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { x := v_0 y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v0.Aux = cCopToAux(OpARM64LessThanU) + v0.AuxInt = opToAuxInt(OpARM64LessThanU) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) @@ -24514,13 +24514,13 @@ func rewriteValueARM64_OpRsh64x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Rsh64x64 x y) - // result: (SRA x (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) + // result: (SRA x (CSEL [OpARM64LessThanU] y 
(Const64 [63]) (CMPconst [64] y))) for { x := v_0 y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v0.Aux = cCopToAux(OpARM64LessThanU) + v0.AuxInt = opToAuxInt(OpARM64LessThanU) v1 := b.NewValue0(v.Pos, OpConst64, y.Type) v1.AuxInt = int64ToAuxInt(63) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -24537,13 +24537,13 @@ func rewriteValueARM64_OpRsh64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x8 x y) - // result: (SRA x (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + // result: (SRA x (CSEL [OpARM64LessThanU] (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { x := v_0 y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v0.Aux = cCopToAux(OpARM64LessThanU) + v0.AuxInt = opToAuxInt(OpARM64LessThanU) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) @@ -24562,13 +24562,13 @@ func rewriteValueARM64_OpRsh8Ux16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux16 x y) - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) @@ -24590,13 +24590,13 @@ func rewriteValueARM64_OpRsh8Ux32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux32 x y) - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 [0]) 
(CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) @@ -24618,13 +24618,13 @@ func rewriteValueARM64_OpRsh8Ux64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux64 x y) - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) @@ -24644,13 +24644,13 @@ func rewriteValueARM64_OpRsh8Ux8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux8 x y) - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type x := v_0 y := v_1 v.reset(OpARM64CSEL) - v.Aux = cCopToAux(OpARM64LessThanU) + v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) @@ -24672,7 +24672,7 @@ func rewriteValueARM64_OpRsh8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x16 x y) - // result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { x := v_0 y := v_1 @@ -24680,7 +24680,7 @@ func rewriteValueARM64_OpRsh8x16(v *Value) bool { v0 := 
b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = cCopToAux(OpARM64LessThanU) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) @@ -24699,7 +24699,7 @@ func rewriteValueARM64_OpRsh8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x32 x y) - // result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { x := v_0 y := v_1 @@ -24707,7 +24707,7 @@ func rewriteValueARM64_OpRsh8x32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = cCopToAux(OpARM64LessThanU) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) @@ -24726,7 +24726,7 @@ func rewriteValueARM64_OpRsh8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x64 x y) - // result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) + // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) for { x := v_0 y := v_1 @@ -24734,7 +24734,7 @@ func rewriteValueARM64_OpRsh8x64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = cCopToAux(OpARM64LessThanU) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -24751,7 +24751,7 @@ func rewriteValueARM64_OpRsh8x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x8 x y) - // 
result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { x := v_0 y := v_1 @@ -24759,7 +24759,7 @@ func rewriteValueARM64_OpRsh8x8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = cCopToAux(OpARM64LessThanU) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpConst64, y.Type) diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index 7fc33772d3..090745def7 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -210,7 +210,7 @@ func (v *Value) auxString() string { } return s + fmt.Sprintf(" [%s]", v.AuxValAndOff()) case auxCCop: - return fmt.Sprintf(" {%s}", v.Aux.(Op)) + return fmt.Sprintf(" {%s}", Op(v.AuxInt)) case auxS390XCCMask, auxS390XRotateParams: return fmt.Sprintf(" {%v}", v.Aux) case auxFlagConstant: -- cgit v1.2.3-54-g00ecf From e61d17d3b9ccf4c3e8ac87add9d74da7afa76488 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 3 Sep 2020 12:26:10 -0400 Subject: cmd/link: MACHOPLT is executable The PLT stubs are executable. Put it together with executable sections, instead of read-only data sections. 
Change-Id: I3818414aa0b87c6968c6c7eccce19b0db7c43193 Reviewed-on: https://go-review.googlesource.com/c/go/+/253018 Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot Reviewed-by: Than McIntosh --- src/cmd/link/internal/ld/data.go | 2 +- src/cmd/link/internal/sym/symkind.go | 2 +- src/cmd/link/internal/sym/symkind_string.go | 24 ++++++++++++------------ 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index dc7096ea8c..85ce7d8fb7 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -1787,6 +1787,7 @@ func (state *dodataState) allocateDataSections(ctxt *Link) { Errorf(nil, "dodata found an sym.STEXT symbol: %s", culprit) } state.allocateSingleSymSections(&Segtext, sym.SELFRXSECT, sym.SRODATA, 04) + state.allocateSingleSymSections(&Segtext, sym.SMACHOPLT, sym.SRODATA, 05) /* read-only data */ sect = state.allocateNamedDataSection(segro, ".rodata", sym.ReadOnly, 04) @@ -1810,7 +1811,6 @@ func (state *dodataState) allocateDataSections(ctxt *Link) { /* read-only ELF, Mach-O sections */ state.allocateSingleSymSections(segro, sym.SELFROSECT, sym.SRODATA, 04) - state.allocateSingleSymSections(segro, sym.SMACHOPLT, sym.SRODATA, 04) // There is some data that are conceptually read-only but are written to by // relocations. On GNU systems, we can arrange for the dynamic linker to diff --git a/src/cmd/link/internal/sym/symkind.go b/src/cmd/link/internal/sym/symkind.go index 3e47d9a8e4..c176d5e208 100644 --- a/src/cmd/link/internal/sym/symkind.go +++ b/src/cmd/link/internal/sym/symkind.go @@ -41,6 +41,7 @@ const ( Sxxx SymKind = iota STEXT SELFRXSECT + SMACHOPLT // Read-only sections. STYPE @@ -52,7 +53,6 @@ const ( SFUNCTAB SELFROSECT - SMACHOPLT // Read-only sections with relocations. 
// diff --git a/src/cmd/link/internal/sym/symkind_string.go b/src/cmd/link/internal/sym/symkind_string.go index 47b2406e28..34cb314bd5 100644 --- a/src/cmd/link/internal/sym/symkind_string.go +++ b/src/cmd/link/internal/sym/symkind_string.go @@ -1,4 +1,4 @@ -// Code generated by "stringer -type=SymKindstringer -type=SymKind"; DO NOT EDIT. +// Code generated by "stringer -type=SymKind"; DO NOT EDIT. package sym @@ -11,15 +11,15 @@ func _() { _ = x[Sxxx-0] _ = x[STEXT-1] _ = x[SELFRXSECT-2] - _ = x[STYPE-3] - _ = x[SSTRING-4] - _ = x[SGOSTRING-5] - _ = x[SGOFUNC-6] - _ = x[SGCBITS-7] - _ = x[SRODATA-8] - _ = x[SFUNCTAB-9] - _ = x[SELFROSECT-10] - _ = x[SMACHOPLT-11] + _ = x[SMACHOPLT-3] + _ = x[STYPE-4] + _ = x[SSTRING-5] + _ = x[SGOSTRING-6] + _ = x[SGOFUNC-7] + _ = x[SGCBITS-8] + _ = x[SRODATA-9] + _ = x[SFUNCTAB-10] + _ = x[SELFROSECT-11] _ = x[STYPERELRO-12] _ = x[SSTRINGRELRO-13] _ = x[SGOSTRINGRELRO-14] @@ -68,9 +68,9 @@ func _() { _ = x[SABIALIAS-57] } -const _SymKind_name = "SxxxSTEXTSELFRXSECTSTYPESSTRINGSGOSTRINGSGOFUNCSGCBITSSRODATASFUNCTABSELFROSECTSMACHOPLTSTYPERELROSSTRINGRELROSGOSTRINGRELROSGOFUNCRELROSGCBITSRELROSRODATARELROSFUNCTABRELROSTYPELINKSITABLINKSSYMTABSPCLNTABSFirstWritableSBUILDINFOSELFSECTSMACHOSMACHOGOTSWINDOWSSELFGOTSNOPTRDATASINITARRSDATASXCOFFTOCSBSSSNOPTRBSSSLIBFUZZER_EXTRA_COUNTERSTLSBSSSXREFSMACHOSYMSTRSMACHOSYMTABSMACHOINDIRECTPLTSMACHOINDIRECTGOTSFILEPATHSDYNIMPORTSHOSTOBJSUNDEFEXTSDWARFSECTSDWARFCUINFOSDWARFCONSTSDWARFFCNSDWARFABSFCNSDWARFTYPESDWARFVARSDWARFRANGESDWARFLOCSDWARFLINESSABIALIAS" +const _SymKind_name = 
"SxxxSTEXTSELFRXSECTSMACHOPLTSTYPESSTRINGSGOSTRINGSGOFUNCSGCBITSSRODATASFUNCTABSELFROSECTSTYPERELROSSTRINGRELROSGOSTRINGRELROSGOFUNCRELROSGCBITSRELROSRODATARELROSFUNCTABRELROSTYPELINKSITABLINKSSYMTABSPCLNTABSFirstWritableSBUILDINFOSELFSECTSMACHOSMACHOGOTSWINDOWSSELFGOTSNOPTRDATASINITARRSDATASXCOFFTOCSBSSSNOPTRBSSSLIBFUZZER_EXTRA_COUNTERSTLSBSSSXREFSMACHOSYMSTRSMACHOSYMTABSMACHOINDIRECTPLTSMACHOINDIRECTGOTSFILEPATHSDYNIMPORTSHOSTOBJSUNDEFEXTSDWARFSECTSDWARFCUINFOSDWARFCONSTSDWARFFCNSDWARFABSFCNSDWARFTYPESDWARFVARSDWARFRANGESDWARFLOCSDWARFLINESSABIALIAS" -var _SymKind_index = [...]uint16{0, 4, 9, 19, 24, 31, 40, 47, 54, 61, 69, 79, 88, 98, 110, 124, 136, 148, 160, 173, 182, 191, 198, 206, 220, 230, 238, 244, 253, 261, 268, 278, 286, 291, 300, 304, 313, 337, 344, 349, 361, 373, 390, 407, 416, 426, 434, 443, 453, 465, 476, 485, 497, 507, 516, 527, 536, 547, 556} +var _SymKind_index = [...]uint16{0, 4, 9, 19, 28, 33, 40, 49, 56, 63, 70, 78, 88, 98, 110, 124, 136, 148, 160, 173, 182, 191, 198, 206, 220, 230, 238, 244, 253, 261, 268, 278, 286, 291, 300, 304, 313, 337, 344, 349, 361, 373, 390, 407, 416, 426, 434, 443, 453, 465, 476, 485, 497, 507, 516, 527, 536, 547, 556} func (i SymKind) String() string { if i >= SymKind(len(_SymKind_index)-1) { -- cgit v1.2.3-54-g00ecf From 612b1194475a23760ec502b48a93fea7237f3ae6 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Wed, 2 Sep 2020 21:04:12 -0400 Subject: cmd/link: pass darwin/amd64-specific flags only on AMD64 The linker assumed macOS is AMD64 (and 386 in the past). It passes darwin/amd64-specific flags to the external linker when building for macOS. They don't work for ARM64-based macOS. So only pass them on AMD64. Disable DWARF combining for macOS ARM64 for now. The generated binary doesn't run. (TODO: fix.) For macOS ARM64 port. External linking now works. 
Change-Id: Iab53bc48f4fadd9b91de8898b4b450ea442667a2 Reviewed-on: https://go-review.googlesource.com/c/go/+/253019 Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot Reviewed-by: Than McIntosh --- src/cmd/link/internal/ld/lib.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index d6ee437bca..702c902142 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -1240,7 +1240,8 @@ func (ctxt *Link) hostlink() { switch ctxt.HeadType { case objabi.Hdarwin: - if machoPlatform == PLATFORM_MACOS { + if machoPlatform == PLATFORM_MACOS && ctxt.IsAMD64() { + // Leave room for DWARF combining. // -headerpad is incompatible with -fembed-bitcode. argv = append(argv, "-Wl,-headerpad,1144") } @@ -1280,7 +1281,7 @@ func (ctxt *Link) hostlink() { switch ctxt.BuildMode { case BuildModeExe: if ctxt.HeadType == objabi.Hdarwin { - if machoPlatform == PLATFORM_MACOS { + if machoPlatform == PLATFORM_MACOS && ctxt.IsAMD64() { argv = append(argv, "-Wl,-no_pie") argv = append(argv, "-Wl,-pagezero_size,4000000") } @@ -1517,7 +1518,7 @@ func (ctxt *Link) hostlink() { // does not work, the resulting programs will not run. See // issue #17847. To avoid this problem pass -no-pie to the // toolchain if it is supported. - if ctxt.BuildMode == BuildModeExe && !ctxt.linkShared { + if ctxt.BuildMode == BuildModeExe && !ctxt.linkShared && !(ctxt.IsDarwin() && ctxt.IsARM64()) { // GCC uses -no-pie, clang uses -nopie. for _, nopie := range []string{"-no-pie", "-nopie"} { if linkerFlagSupported(argv[0], altLinker, nopie) { @@ -1607,7 +1608,7 @@ func (ctxt *Link) hostlink() { Exitf("%s: parsing Mach-O header failed: %v", os.Args[0], err) } // Only macOS supports unmapped segments such as our __DWARF segment. 
- if machoPlatform == PLATFORM_MACOS { + if machoPlatform == PLATFORM_MACOS && ctxt.IsAMD64() { if err := machoCombineDwarf(ctxt, exef, exem, dsym, combinedOutput); err != nil { Exitf("%s: combining dwarf failed: %v", os.Args[0], err) } -- cgit v1.2.3-54-g00ecf From 93810ac1f4574e1e2a79ea156781bafaf8b8ebe0 Mon Sep 17 00:00:00 2001 From: Benjamin Barenblat Date: Thu, 3 Sep 2020 16:36:38 -0400 Subject: runtime: opportunistically rotate map key seed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When clearing a map, reinitialize the hash seed with random data. This makes it more difficult for attackers to trigger pathological performance via repeated hash collisions. The extra reinitialization causes no statistically significant slowdown: name old time/op new time/op delta GoMapClear/Reflexive/1-12 18.3ns ± 0% 20.0ns ± 0% ~ (p=1.000 n=1+1) GoMapClear/Reflexive/10-12 18.2ns ± 0% 19.8ns ± 0% ~ (p=1.000 n=1+1) GoMapClear/Reflexive/100-12 44.6ns ± 0% 46.1ns ± 0% ~ (p=1.000 n=1+1) GoMapClear/Reflexive/1000-12 592ns ± 0% 592ns ± 0% ~ (all samples are equal) GoMapClear/Reflexive/10000-12 3.88µs ± 0% 3.88µs ± 0% ~ (p=1.000 n=1+1) GoMapClear/NonReflexive/1-12 62.7ns ± 0% 63.9ns ± 0% ~ (p=1.000 n=1+1) GoMapClear/NonReflexive/10-12 75.0ns ± 0% 76.1ns ± 0% ~ (p=1.000 n=1+1) GoMapClear/NonReflexive/100-12 203ns ± 0% 206ns ± 0% ~ (p=1.000 n=1+1) GoMapClear/NonReflexive/1000-12 2.33µs ± 0% 2.33µs ± 0% ~ (all samples are equal) GoMapClear/NonReflexive/10000-12 18.1µs ± 0% 18.1µs ± 0% ~ (p=1.000 n=1+1) Fixes #25237 Change-Id: I629a79dd7c562ba18bd94159673c3b9b653da643 Reviewed-on: https://go-review.googlesource.com/c/go/+/253020 Reviewed-by: Keith Randall --- src/runtime/map.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/runtime/map.go b/src/runtime/map.go index 399c1b071f..22a0241f56 100644 --- a/src/runtime/map.go +++ b/src/runtime/map.go @@ -993,6 +993,10 @@ func mapclear(t *maptype, h *hmap) { h.noverflow = 0 h.count = 0 + // 
Reset the hash seed to make it more difficult for attackers to + // repeatedly trigger hash collisions. See issue 25237. + h.hash0 = fastrand() + // Keep the mapextra allocation but clear any extra information. if h.extra != nil { *h.extra = mapextra{} -- cgit v1.2.3-54-g00ecf From 721819e7bad078ac350bdebebb28793e13081e82 Mon Sep 17 00:00:00 2001 From: Changkun Ou Date: Fri, 28 Aug 2020 12:13:37 +0200 Subject: testing: make TempDir idempotent for both Cleanup and Benchmark Ensures that calling TempDir() in either of Cleanup or Benchmark doesn't cause test failures which were previously caused by the created directory having been deleted after the first run, yet we prevented the recreation of the directory due to our selection of concurrency primitive sync.Once. This change recreates the temporary directory if it doesn't exist, regardless of how many times Cleanup and Benchmark are invoked. Fixes #41062 Change-Id: I925d9f7207d7c369a193d1e17da7a59a586244a7 Reviewed-on: https://go-review.googlesource.com/c/go/+/251297 Reviewed-by: Emmanuel Odeke Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot --- src/testing/testing.go | 26 ++++++++++++++++++++------ src/testing/testing_test.go | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 6 deletions(-) diff --git a/src/testing/testing.go b/src/testing/testing.go index a64206f349..66f296234a 100644 --- a/src/testing/testing.go +++ b/src/testing/testing.go @@ -413,10 +413,10 @@ type common struct { signal chan bool // To signal a test is done. sub []*T // Queue of subtests to be run in parallel. - tempDirOnce sync.Once - tempDir string - tempDirErr error - tempDirSeq int32 + tempDirMu sync.Mutex + tempDir string + tempDirErr error + tempDirSeq int32 } // Short reports whether the -test.short flag is set. 
@@ -903,7 +903,19 @@ var tempDirReplacer struct { func (c *common) TempDir() string { // Use a single parent directory for all the temporary directories // created by a test, each numbered sequentially. - c.tempDirOnce.Do(func() { + c.tempDirMu.Lock() + var nonExistent bool + if c.tempDir == "" { // Usually the case with js/wasm + nonExistent = true + } else { + _, err := os.Stat(c.tempDir) + nonExistent = os.IsNotExist(err) + if err != nil && !nonExistent { + c.Fatalf("TempDir: %v", err) + } + } + + if nonExistent { c.Helper() // ioutil.TempDir doesn't like path separators in its pattern, @@ -921,7 +933,9 @@ func (c *common) TempDir() string { } }) } - }) + } + c.tempDirMu.Unlock() + if c.tempDirErr != nil { c.Fatalf("TempDir: %v", c.tempDirErr) } diff --git a/src/testing/testing_test.go b/src/testing/testing_test.go index dbef7066e0..d665a334e4 100644 --- a/src/testing/testing_test.go +++ b/src/testing/testing_test.go @@ -19,6 +19,38 @@ func TestMain(m *testing.M) { os.Exit(m.Run()) } +func TestTempDirInCleanup(t *testing.T) { + var dir string + + t.Run("test", func(t *testing.T) { + t.Cleanup(func() { + dir = t.TempDir() + }) + _ = t.TempDir() + }) + + fi, err := os.Stat(dir) + if fi != nil { + t.Fatalf("Directory %q from user Cleanup still exists", dir) + } + if !os.IsNotExist(err) { + t.Fatalf("Unexpected error: %v", err) + } +} + +func TestTempDirInBenchmark(t *testing.T) { + testing.Benchmark(func(b *testing.B) { + if !b.Run("test", func(b *testing.B) { + // Add a loop so that the test won't fail. See issue 38677. 
+ for i := 0; i < b.N; i++ { + _ = b.TempDir() + } + }) { + t.Fatal("Sub test failure in a benchmark") + } + }) +} + func TestTempDir(t *testing.T) { testTempDir(t) t.Run("InSubtest", testTempDir) -- cgit v1.2.3-54-g00ecf From bf833ead6250290dce039ffeee88f20a086b5dbe Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 28 Aug 2020 14:32:15 -0400 Subject: cmd/compile: ensure that ssa.Func constant cache is consistent It was not necessarily consistent before, we were just lucky. Change-Id: I3a92dc724e0af7b4d810a6a0b7b1d58844eb8f87 Reviewed-on: https://go-review.googlesource.com/c/go/+/251440 Run-TryBot: David Chase TryBot-Result: Gobot Gobot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/ssa/func.go | 59 ++++++++++++++++++++++++------- src/cmd/compile/internal/ssa/softfloat.go | 1 + src/cmd/compile/internal/ssa/value.go | 9 +++++ 3 files changed, 56 insertions(+), 13 deletions(-) diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 6718b778e1..32df0c06f3 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -257,6 +257,49 @@ func (f *Func) LogStat(key string, args ...interface{}) { f.Warnl(f.Entry.Pos, "\t%s\t%s%s\t%s", n, key, value, f.Name) } +// unCacheLine removes v from f's constant cache "line" for aux, +// resets v.InCache when it is found (and removed), +// and returns whether v was found in that line. +func (f *Func) unCacheLine(v *Value, aux int64) bool { + vv := f.constants[aux] + for i, cv := range vv { + if v == cv { + vv[i] = vv[len(vv)-1] + vv[len(vv)-1] = nil + f.constants[aux] = vv[0 : len(vv)-1] + v.InCache = false + return true + } + } + return false +} + +// unCache removes v from f's constant cache. 
+func (f *Func) unCache(v *Value) { + if v.InCache { + aux := v.AuxInt + if f.unCacheLine(v, aux) { + return + } + if aux == 0 { + switch v.Op { + case OpConstNil: + aux = constNilMagic + case OpConstSlice: + aux = constSliceMagic + case OpConstString: + aux = constEmptyStringMagic + case OpConstInterface: + aux = constInterfaceMagic + } + if aux != 0 && f.unCacheLine(v, aux) { + return + } + } + f.Fatalf("unCached value %s not found in cache, auxInt=0x%x, adjusted aux=0x%x", v.LongString(), v.AuxInt, aux) + } +} + // freeValue frees a value. It must no longer be referenced or have any args. func (f *Func) freeValue(v *Value) { if v.Block == nil { @@ -270,19 +313,8 @@ func (f *Func) freeValue(v *Value) { } // Clear everything but ID (which we reuse). id := v.ID - - // Values with zero arguments and OpOffPtr values might be cached, so remove them there. - nArgs := opcodeTable[v.Op].argLen - if nArgs == 0 || v.Op == OpOffPtr { - vv := f.constants[v.AuxInt] - for i, cv := range vv { - if v == cv { - vv[i] = vv[len(vv)-1] - vv[len(vv)-1] = nil - f.constants[v.AuxInt] = vv[0 : len(vv)-1] - break - } - } + if v.InCache { + f.unCache(v) } *v = Value{} v.ID = id @@ -548,6 +580,7 @@ func (f *Func) constVal(op Op, t *types.Type, c int64, setAuxInt bool) *Value { v = f.Entry.NewValue0(src.NoXPos, op, t) } f.constants[c] = append(vv, v) + v.InCache = true return v } diff --git a/src/cmd/compile/internal/ssa/softfloat.go b/src/cmd/compile/internal/ssa/softfloat.go index 8db4334fef..a8a8f83629 100644 --- a/src/cmd/compile/internal/ssa/softfloat.go +++ b/src/cmd/compile/internal/ssa/softfloat.go @@ -18,6 +18,7 @@ func softfloat(f *Func) { for _, b := range f.Blocks { for _, v := range b.Values { if v.Type.IsFloat() { + f.unCache(v) switch v.Op { case OpPhi, OpLoad, OpArg: if v.Type.Size() == 4 { diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index 090745def7..6692df7921 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ 
b/src/cmd/compile/internal/ssa/value.go @@ -54,6 +54,9 @@ type Value struct { // nor a slot on Go stack, and the generation of this value is delayed to its use time. OnWasmStack bool + // Is this value in the per-function constant cache? If so, remove from cache before changing it or recycling it. + InCache bool + // Storage for the first three args argstorage [3]*Value } @@ -332,6 +335,9 @@ func (v *Value) resetArgs() { // of cmd/compile by almost 10%, and slows it down. //go:noinline func (v *Value) reset(op Op) { + if v.InCache { + v.Block.Func.unCache(v) + } v.Op = op v.resetArgs() v.AuxInt = 0 @@ -342,6 +348,9 @@ func (v *Value) reset(op Op) { // It modifies v to be (Copy a). //go:noinline func (v *Value) copyOf(a *Value) { + if v.InCache { + v.Block.Func.unCache(v) + } v.Op = OpCopy v.resetArgs() v.AddArg(a) -- cgit v1.2.3-54-g00ecf From 62fe10bf4e62c97af3bb8eb2ef72d9224a8752ba Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Sat, 5 Sep 2020 15:01:19 +0300 Subject: src/go.mod, net/http: update bundled and latest golang.org/x/net Updates x/net/http2 to git rev 62affa334b73ec65ed44a326519ac12c421905e3 x/net/http2: reject HTTP/2 Content-Length headers containing a sign https://go-review.googlesource.com/c/net/+/236098/ (fixes #39017) also updates the vendored version of golang.org/x/net by running go get golang.org/x/net@62affa334b73ec65ed44a326519ac12c421905e3 go mod tidy go mod vendor go generate -run bundle net/http Change-Id: I7ecfdb7644574c44c3616e3b47664eefd4c926f3 Reviewed-on: https://go-review.googlesource.com/c/go/+/253238 Reviewed-by: Emmanuel Odeke Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot --- src/go.mod | 2 +- src/go.sum | 4 ++-- src/net/http/h2_bundle.go | 15 +++++++++------ src/vendor/modules.txt | 2 +- 4 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/go.mod b/src/go.mod index c75f74b916..0d5892f178 100644 --- a/src/go.mod +++ b/src/go.mod @@ -4,7 +4,7 @@ go 1.15 require ( golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9 - golang.org/x/net v0.0.0-20200822124328-c89045814202 + golang.org/x/net v0.0.0-20200904194848-62affa334b73 golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3 // indirect golang.org/x/text v0.3.3-0.20200430171850-afb9336c4530 // indirect ) diff --git a/src/go.sum b/src/go.sum index dc9641be1a..52907d313f 100644 --- a/src/go.sum +++ b/src/go.sum @@ -2,8 +2,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go index 463e7e8ce9..458e0b7646 100644 --- a/src/net/http/h2_bundle.go +++ b/src/net/http/h2_bundle.go @@ -5591,7 +5591,11 @@ func (sc *http2serverConn) newWriterAndRequest(st *http2stream, f *http2MetaHead } if bodyOpen { if vv, ok := rp.header["Content-Length"]; ok { - req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { + 
req.ContentLength = int64(cl) + } else { + req.ContentLength = 0 + } } else { req.ContentLength = -1 } @@ -5974,9 +5978,8 @@ func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) { var ctype, clen string if clen = rws.snapHeader.Get("Content-Length"); clen != "" { rws.snapHeader.Del("Content-Length") - clen64, err := strconv.ParseInt(clen, 10, 64) - if err == nil && clen64 >= 0 { - rws.sentContentLen = clen64 + if cl, err := strconv.ParseUint(clen, 10, 63); err == nil { + rws.sentContentLen = int64(cl) } else { clen = "" } @@ -8505,8 +8508,8 @@ func (rl *http2clientConnReadLoop) handleResponse(cs *http2clientStream, f *http if !streamEnded || isHead { res.ContentLength = -1 if clens := res.Header["Content-Length"]; len(clens) == 1 { - if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { - res.ContentLength = clen64 + if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil { + res.ContentLength = int64(cl) } else { // TODO: care? unlike http/1, it won't mess up our framing, so it's // more safe smuggling-wise to ignore. diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index d1e4f28e21..faf95b871e 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -8,7 +8,7 @@ golang.org/x/crypto/curve25519 golang.org/x/crypto/hkdf golang.org/x/crypto/internal/subtle golang.org/x/crypto/poly1305 -# golang.org/x/net v0.0.0-20200822124328-c89045814202 +# golang.org/x/net v0.0.0-20200904194848-62affa334b73 ## explicit golang.org/x/net/dns/dnsmessage golang.org/x/net/http/httpguts -- cgit v1.2.3-54-g00ecf From c489330987eca992cee0bb018a6fdb7ff5401704 Mon Sep 17 00:00:00 2001 From: Ainar Garipov Date: Wed, 26 Aug 2020 23:55:09 +0300 Subject: doc/go1.16: reformat the minor changes section as a definition list Change the section to use
,
, and
tags to match previous documents. Change-Id: Ide0bea698a84ed6b61b364ef9e2f3801ebb8d4d6 Reviewed-on: https://go-review.googlesource.com/c/go/+/250897 Reviewed-by: Ian Lance Taylor --- doc/go1.16.html | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/doc/go1.16.html b/doc/go1.16.html index 8dd806e9f2..0ffaecc5a9 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -154,22 +154,26 @@ Do not send CLs removing the interior tags from such phrases. TODO

-

- In the net/http package, the - behavior of StripPrefix - has been changed to strip the prefix from the request URL's - RawPath field in addition to its Path field. - In past releases, only the Path field was trimmed, and so if the - request URL contained any escaped characters the URL would be modified to - have mismatched Path and RawPath fields. - In Go 1.16, StripPrefix trims both fields. - If there are escaped characters in the prefix part of the request URL the - handler serves a 404 instead of its previous behavior of invoking the - underlying handler with a mismatched Path/RawPath pair. -

- -

- The net/http package now rejects HTTP range requests - of the form "Range": "bytes=--N" where "-N" is a negative suffix length, for - example "Range": "bytes=--2". It now replies with a 416 "Range Not Satisfiable" response. -

+
net/http
+
+

+ In the net/http package, the + behavior of StripPrefix + has been changed to strip the prefix from the request URL's + RawPath field in addition to its Path field. + In past releases, only the Path field was trimmed, and so if the + request URL contained any escaped characters the URL would be modified to + have mismatched Path and RawPath fields. + In Go 1.16, StripPrefix trims both fields. + If there are escaped characters in the prefix part of the request URL the + handler serves a 404 instead of its previous behavior of invoking the + underlying handler with a mismatched Path/RawPath pair. +

+ +

+ The net/http package now rejects HTTP range requests + of the form "Range": "bytes=--N" where "-N" is a negative suffix length, for + example "Range": "bytes=--2". It now replies with a 416 "Range Not Satisfiable" response. +

+
+
-- cgit v1.2.3-54-g00ecf From b60ec4cc4b230f4d0787acf82057947b8bf80cea Mon Sep 17 00:00:00 2001 From: Ainar Garipov Date: Sat, 5 Sep 2020 14:24:28 +0300 Subject: mime: add examples for FormatMediaType and ParseMediaType Change-Id: Ic129c58784ad1f0b8b90fc9d33e52bee61bdf0eb Reviewed-on: https://go-review.googlesource.com/c/go/+/253237 Reviewed-by: Emmanuel Odeke Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot --- src/mime/example_test.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/src/mime/example_test.go b/src/mime/example_test.go index c7d13cdcdb..85795976f0 100644 --- a/src/mime/example_test.go +++ b/src/mime/example_test.go @@ -96,3 +96,29 @@ func ExampleWordDecoder_DecodeHeader() { // ¡Hola, señor! // HELLO WORLD! } + +func ExampleFormatMediaType() { + mediatype := "text/html" + params := map[string]string{ + "charset": "utf-8", + } + + result := mime.FormatMediaType(mediatype, params) + + fmt.Println("result:", result) + // Output: + // result: text/html; charset=utf-8 +} + +func ExampleParseMediaType() { + mediatype, params, err := mime.ParseMediaType("text/html; charset=utf-8") + if err != nil { + panic(err) + } + + fmt.Println("type:", mediatype) + fmt.Println("charset:", params["charset"]) + // Output: + // type: text/html + // charset: utf-8 +} -- cgit v1.2.3-54-g00ecf From 617f2c3e35cdc8483b950aa3ef18d92965d63197 Mon Sep 17 00:00:00 2001 From: Michael Fraenkel Date: Sat, 27 Jun 2020 13:31:34 -0600 Subject: net/http: mark http/2 connections active On Server.Shutdown, all idle connections are closed. A caveat for new connections is that they are marked idle after 5 seconds. Previously new HTTP/2 connections were marked New, and after 5 seconds, they would then become idle. With this change, we now mark HTTP/2 connections as Active to allow the proper shutdown sequence to occur. 
Fixes #36946 Fixes #39776 Change-Id: I31efbf64b9a2850ca544da797f86d7e1b3378e8b Reviewed-on: https://go-review.googlesource.com/c/go/+/240278 Reviewed-by: Emmanuel Odeke Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot --- src/net/http/export_test.go | 11 +++++++++++ src/net/http/serve_test.go | 18 ++++++++++++++++-- src/net/http/server.go | 24 ++++++++++++++++++------ 3 files changed, 45 insertions(+), 8 deletions(-) diff --git a/src/net/http/export_test.go b/src/net/http/export_test.go index 657ff9dba4..67a74ae19f 100644 --- a/src/net/http/export_test.go +++ b/src/net/http/export_test.go @@ -274,6 +274,17 @@ func (s *Server) ExportAllConnsIdle() bool { return true } +func (s *Server) ExportAllConnsByState() map[ConnState]int { + states := map[ConnState]int{} + s.mu.Lock() + defer s.mu.Unlock() + for c := range s.activeConn { + st, _ := c.getState() + states[st] += 1 + } + return states +} + func (r *Request) WithT(t *testing.T) *Request { return r.WithContext(context.WithValue(r.Context(), tLogKey{}, t.Logf)) } diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go index 635bf5dfc9..6d3317fb0c 100644 --- a/src/net/http/serve_test.go +++ b/src/net/http/serve_test.go @@ -5537,16 +5537,23 @@ func TestServerSetKeepAlivesEnabledClosesConns(t *testing.T) { } } -func TestServerShutdown_h1(t *testing.T) { testServerShutdown(t, h1Mode) } -func TestServerShutdown_h2(t *testing.T) { testServerShutdown(t, h2Mode) } +func TestServerShutdown_h1(t *testing.T) { + testServerShutdown(t, h1Mode) +} +func TestServerShutdown_h2(t *testing.T) { + testServerShutdown(t, h2Mode) +} func testServerShutdown(t *testing.T, h2 bool) { setParallel(t) defer afterTest(t) var doShutdown func() // set later + var doStateCount func() var shutdownRes = make(chan error, 1) + var statesRes = make(chan map[ConnState]int, 1) var gotOnShutdown = make(chan struct{}, 1) handler := HandlerFunc(func(w ResponseWriter, r *Request) { + doStateCount() go doShutdown() // Shutdown is graceful, 
so it should not interrupt // this in-flight response. Add a tiny sleep here to @@ -5563,6 +5570,9 @@ func testServerShutdown(t *testing.T, h2 bool) { doShutdown = func() { shutdownRes <- cst.ts.Config.Shutdown(context.Background()) } + doStateCount = func() { + statesRes <- cst.ts.Config.ExportAllConnsByState() + } get(t, cst.c, cst.ts.URL) // calls t.Fail on failure if err := <-shutdownRes; err != nil { @@ -5574,6 +5584,10 @@ func testServerShutdown(t *testing.T, h2 bool) { t.Errorf("onShutdown callback not called, RegisterOnShutdown broken?") } + if states := <-statesRes; states[StateActive] != 1 { + t.Errorf("connection in wrong state, %v", states) + } + res, err := cst.c.Get(cst.ts.URL) if err == nil { res.Body.Close() diff --git a/src/net/http/server.go b/src/net/http/server.go index 9124903b89..25fab288f2 100644 --- a/src/net/http/server.go +++ b/src/net/http/server.go @@ -324,7 +324,7 @@ func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) { return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err) } } - c.setState(rwc, StateHijacked) + c.setState(rwc, StateHijacked, runHooks) return } @@ -1739,7 +1739,12 @@ func validNextProto(proto string) bool { return true } -func (c *conn) setState(nc net.Conn, state ConnState) { +const ( + runHooks = true + skipHooks = false +) + +func (c *conn) setState(nc net.Conn, state ConnState, runHook bool) { srv := c.server switch state { case StateNew: @@ -1752,6 +1757,9 @@ func (c *conn) setState(nc net.Conn, state ConnState) { } packedState := uint64(time.Now().Unix()<<8) | uint64(state) atomic.StoreUint64(&c.curState.atomic, packedState) + if !runHook { + return + } if hook := srv.ConnState; hook != nil { hook(nc, state) } @@ -1805,7 +1813,7 @@ func (c *conn) serve(ctx context.Context) { } if !c.hijacked() { c.close() - c.setState(c.rwc, StateClosed) + c.setState(c.rwc, StateClosed, runHooks) } }() @@ -1833,6 +1841,10 @@ func (c *conn) serve(ctx context.Context) { if 
proto := c.tlsState.NegotiatedProtocol; validNextProto(proto) { if fn := c.server.TLSNextProto[proto]; fn != nil { h := initALPNRequest{ctx, tlsConn, serverHandler{c.server}} + // Mark freshly created HTTP/2 as active and prevent any server state hooks + // from being run on these connections. This prevents closeIdleConns from + // closing such connections. See issue https://golang.org/issue/39776. + c.setState(c.rwc, StateActive, skipHooks) fn(c.server, tlsConn, h) } return @@ -1853,7 +1865,7 @@ func (c *conn) serve(ctx context.Context) { w, err := c.readRequest(ctx) if c.r.remain != c.server.initialReadLimitSize() { // If we read any bytes off the wire, we're active. - c.setState(c.rwc, StateActive) + c.setState(c.rwc, StateActive, runHooks) } if err != nil { const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n" @@ -1936,7 +1948,7 @@ func (c *conn) serve(ctx context.Context) { } return } - c.setState(c.rwc, StateIdle) + c.setState(c.rwc, StateIdle, runHooks) c.curReq.Store((*response)(nil)) if !w.conn.server.doKeepAlives() { @@ -2971,7 +2983,7 @@ func (srv *Server) Serve(l net.Listener) error { } tempDelay = 0 c := srv.newConn(rw) - c.setState(c.rwc, StateNew) // before Serve can return + c.setState(c.rwc, StateNew, runHooks) // before Serve can return go c.serve(connCtx) } } -- cgit v1.2.3-54-g00ecf From 5cc030aa1996762e48ce446001078ce6447f105e Mon Sep 17 00:00:00 2001 From: Alberto Donizetti Date: Mon, 27 Jul 2020 14:42:06 +0200 Subject: go/ast: note that in BasicLit CHARs and STRINGs are quoted MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Value field of ast.BasicLit is a string field holding the literal string. For CHARs and STRINGs, the BasicLit.Value literal includes quotes, so to use the value in practise one will often need to Unquote it. 
Since this is a common gotcha (I've been bitten by this a few times), document it, and suggest the use of the strconv.Unquote functions. Fixes #39590 Change-Id: Ie3e13f5a2a71bb1b59e03bc5b3a16d8e2e7c01d4 Reviewed-on: https://go-review.googlesource.com/c/go/+/244960 Reviewed-by: Daniel Martí --- src/go/ast/ast.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/go/ast/ast.go b/src/go/ast/ast.go index 81c64589d0..1061f1d3ce 100644 --- a/src/go/ast/ast.go +++ b/src/go/ast/ast.go @@ -285,6 +285,12 @@ type ( } // A BasicLit node represents a literal of basic type. + // + // Note that for the CHAR and STRING kinds, the literal is stored + // with its quotes. For example, for a double-quoted STRING, the + // first and the last rune in the Value field will be ". The + // Unquote and UnquoteChar functions in the strconv package can be + // used to unquote STRING and CHAR values, respectively. BasicLit struct { ValuePos token.Pos // literal position Kind token.Token // token.INT, token.FLOAT, token.IMAG, token.CHAR, or token.STRING -- cgit v1.2.3-54-g00ecf From 1a119edd8b4364a8c8342194648166ca5851f061 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Fri, 4 Sep 2020 17:47:44 +0700 Subject: runtime: rotate map key seed on clearing up maps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Same thing as CL 253020 did for map clear idiom. 
name old time/op new time/op delta MapDelete/Int32/100-12 30.0ns ± 1% 30.7ns ± 3% ~ (p=0.400 n=3+3) MapDelete/Int32/1000-12 26.6ns ± 2% 28.1ns ± 3% ~ (p=0.100 n=3+3) MapDelete/Int32/10000-12 28.6ns ± 1% 31.9ns ± 1% ~ (p=0.100 n=3+3) MapDelete/Int64/100-12 30.2ns ± 0% 32.1ns ± 3% ~ (p=0.100 n=3+3) MapDelete/Int64/1000-12 26.5ns ± 1% 27.5ns ± 3% ~ (p=0.100 n=3+3) MapDelete/Int64/10000-12 29.6ns ± 1% 29.3ns ± 1% ~ (p=0.300 n=3+3) MapDelete/Str/100-12 19.5ns ± 3% 19.6ns ± 2% ~ (p=0.800 n=3+3) MapDelete/Str/1000-12 31.6ns ± 1% 31.4ns ± 1% ~ (p=0.500 n=3+3) MapDelete/Str/10000-12 37.8ns ± 1% 37.1ns ± 1% ~ (p=0.100 n=3+3) MapDelete/Pointer/100-12 15.9ns ± 1% 16.8ns ± 9% ~ (p=0.200 n=3+3) MapDelete/Pointer/1000-12 26.9ns ± 1% 26.2ns ± 2% ~ (p=0.200 n=3+3) MapDelete/Pointer/10000-12 30.6ns ± 1% 30.7ns ± 4% ~ (p=0.700 n=3+3) Fixes #25237 Change-Id: I353cf44a2f6158549f0ef563d867f0844fec7095 Reviewed-on: https://go-review.googlesource.com/c/go/+/252940 Run-TryBot: Cuong Manh Le TryBot-Result: Gobot Gobot Reviewed-by: Emmanuel Odeke Reviewed-by: Keith Randall --- src/runtime/map.go | 5 +++++ src/runtime/map_fast32.go | 5 +++++ src/runtime/map_fast64.go | 5 +++++ src/runtime/map_faststr.go | 5 +++++ 4 files changed, 20 insertions(+) diff --git a/src/runtime/map.go b/src/runtime/map.go index 22a0241f56..8be1d3991d 100644 --- a/src/runtime/map.go +++ b/src/runtime/map.go @@ -780,6 +780,11 @@ search: } notLast: h.count-- + // Reset the hash seed to make it more difficult for attackers to + // repeatedly trigger hash collisions. See issue 25237. + if h.count == 0 { + h.hash0 = fastrand() + } break search } } diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go index d035ed0386..d80f5eac78 100644 --- a/src/runtime/map_fast32.go +++ b/src/runtime/map_fast32.go @@ -344,6 +344,11 @@ search: } notLast: h.count-- + // Reset the hash seed to make it more difficult for attackers to + // repeatedly trigger hash collisions. See issue 25237. 
+ if h.count == 0 { + h.hash0 = fastrand() + } break search } } diff --git a/src/runtime/map_fast64.go b/src/runtime/map_fast64.go index f1f3927598..3bc84bbdd3 100644 --- a/src/runtime/map_fast64.go +++ b/src/runtime/map_fast64.go @@ -346,6 +346,11 @@ search: } notLast: h.count-- + // Reset the hash seed to make it more difficult for attackers to + // repeatedly trigger hash collisions. See issue 25237. + if h.count == 0 { + h.hash0 = fastrand() + } break search } } diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go index 069cda6554..108c502394 100644 --- a/src/runtime/map_faststr.go +++ b/src/runtime/map_faststr.go @@ -369,6 +369,11 @@ search: } notLast: h.count-- + // Reset the hash seed to make it more difficult for attackers to + // repeatedly trigger hash collisions. See issue 25237. + if h.count == 0 { + h.hash0 = fastrand() + } break search } } -- cgit v1.2.3-54-g00ecf From 0e19aaabc052f858beed7d174a55d4110c8d66a0 Mon Sep 17 00:00:00 2001 From: fanzha02 Date: Thu, 20 Aug 2020 18:39:46 +0800 Subject: cmd/asm: fix the error of checking the post-index offset of VLD[1-4]R instructions of arm64 The post-index offset of VLD[1-4]R instructions is decided by the "size" field not "Q" field, the current assembler uses "Q" fileld to check the correctness of post-index offset which is not correct. This patch fixes it. 
Fixes #40725 Change-Id: If1cde7f21c6b3ee0e491649eb567700bd1475c84 Reviewed-on: https://go-review.googlesource.com/c/go/+/249757 Reviewed-by: Cherry Zhang --- src/cmd/asm/internal/asm/testdata/arm64.s | 28 +++++++++++++++----------- src/cmd/asm/internal/asm/testdata/arm64error.s | 1 + src/cmd/internal/obj/arm64/asm7.go | 13 ++++++++++-- 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s index 5a6db05074..f0c716a2b5 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64.s +++ b/src/cmd/asm/internal/asm/testdata/arm64.s @@ -359,18 +359,22 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 VLD4 (R15), [V10.H4, V11.H4, V12.H4, V13.H4] // ea05400c VLD4.P 32(R24), [V31.B8, V0.B8, V1.B8, V2.B8] // 1f03df0c VLD4.P (R13)(R9), [V14.S2, V15.S2, V16.S2, V17.S2] // VLD4.P (R13)(R9*1), [V14.S2,V15.S2,V16.S2,V17.S2] // ae09c90c - VLD1R (R0), [V0.B16] // 00c0404d - VLD1R.P 16(R0), [V0.B16] // 00c0df4d - VLD1R.P (R15)(R1), [V15.H4] // VLD1R.P (R15)(R1*1), [V15.H4] // efc5c10d - VLD2R (R15), [V15.H4, V16.H4] // efc5600d - VLD2R.P 32(R0), [V0.D2, V1.D2] // 00ccff4d - VLD2R.P (R0)(R5), [V31.D1, V0.D1] // VLD2R.P (R0)(R5*1), [V31.D1, V0.D1] // 1fcce50d - VLD3R (RSP), [V31.S2, V0.S2, V1.S2] // ffeb400d - VLD3R.P 24(R15), [V15.H4, V16.H4, V17.H4] // efe5df0d - VLD3R.P (R15)(R6), [V15.H8, V16.H8, V17.H8] // VLD3R.P (R15)(R6*1), [V15.H8, V16.H8, V17.H8] // efe5c64d - VLD4R (R0), [V0.B8, V1.B8, V2.B8, V3.B8] // 00e0600d - VLD4R.P 64(RSP), [V31.S4, V0.S4, V1.S4, V2.S4] // ffebff4d - VLD4R.P (R15)(R9), [V15.H4, V16.H4, V17.H4, V18.H4] // VLD4R.P (R15)(R9*1), [V15.H4, V16.H4, V17.H4, V18.H4] // efe5e90d + VLD1R (R1), [V9.B8] // 29c0400d + VLD1R.P (R1), [V9.B8] // 29c0df0d + VLD1R.P 1(R1), [V2.B8] // 22c0df0d + VLD1R.P 2(R1), [V2.H4] // 22c4df0d + VLD1R (R0), [V0.B16] // 00c0404d + VLD1R.P (R0), [V0.B16] // 00c0df4d + VLD1R.P (R15)(R1), [V15.H4] // VLD1R.P (R15)(R1*1), [V15.H4] // efc5c10d + VLD2R (R15), [V15.H4, 
V16.H4] // efc5600d + VLD2R.P 16(R0), [V0.D2, V1.D2] // 00ccff4d + VLD2R.P (R0)(R5), [V31.D1, V0.D1] // VLD2R.P (R0)(R5*1), [V31.D1, V0.D1] // 1fcce50d + VLD3R (RSP), [V31.S2, V0.S2, V1.S2] // ffeb400d + VLD3R.P 6(R15), [V15.H4, V16.H4, V17.H4] // efe5df0d + VLD3R.P (R15)(R6), [V15.H8, V16.H8, V17.H8] // VLD3R.P (R15)(R6*1), [V15.H8, V16.H8, V17.H8] // efe5c64d + VLD4R (R0), [V0.B8, V1.B8, V2.B8, V3.B8] // 00e0600d + VLD4R.P 16(RSP), [V31.S4, V0.S4, V1.S4, V2.S4] // ffebff4d + VLD4R.P (R15)(R9), [V15.H4, V16.H4, V17.H4, V18.H4] // VLD4R.P (R15)(R9*1), [V15.H4, V16.H4, V17.H4, V18.H4] // efe5e90d VST1.P [V24.S2], 8(R2) // 58789f0c VST1 [V29.S2, V30.S2], (R29) // bdab000c VST1 [V14.H4, V15.H4, V16.H4], (R27) // 6e67000c diff --git a/src/cmd/asm/internal/asm/testdata/arm64error.s b/src/cmd/asm/internal/asm/testdata/arm64error.s index 0661a474b4..9f377817a9 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64error.s +++ b/src/cmd/asm/internal/asm/testdata/arm64error.s @@ -339,4 +339,5 @@ TEXT errors(SB),$0 MRS ICV_EOIR1_EL1, R3 // ERROR "system register is not readable" MRS PMSWINC_EL0, R3 // ERROR "system register is not readable" MRS OSLAR_EL1, R3 // ERROR "system register is not readable" + VLD3R.P 24(R15), [V15.H4,V16.H4,V17.H4] // ERROR "invalid post-increment offset" RET diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 65f7898332..0b90e31392 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -2898,6 +2898,7 @@ func (c *ctxt7) checkoffset(p *obj.Prog, as obj.As) { } opcode := (list >> 12) & 15 q := (list >> 30) & 1 + size := (list >> 10) & 3 if offset == 0 { return } @@ -2913,8 +2914,16 @@ func (c *ctxt7) checkoffset(p *obj.Prog, as obj.As) { default: c.ctxt.Diag("invalid register numbers in ARM64 register list: %v", p) } - if !(q == 0 && offset == n*8) && !(q == 1 && offset == n*16) { - c.ctxt.Diag("invalid post-increment offset: %v", p) + + switch as { + case AVLD1R, AVLD2R, 
AVLD3R, AVLD4R: + if offset != n*(1< Date: Mon, 7 Sep 2020 12:14:30 +0700 Subject: cmd/compile: handle ODDD in exprformat Fixes #41247 Change-Id: Iaa9502cc610e2cc64be5dfd91ba3187f86f87cbd Reviewed-on: https://go-review.googlesource.com/c/go/+/252942 Run-TryBot: Cuong Manh Le TryBot-Result: Gobot Gobot Reviewed-by: Rob Pike --- src/cmd/compile/internal/gc/fmt.go | 3 ++- test/fixedbugs/issue41247.go | 11 +++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 test/fixedbugs/issue41247.go diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index d6cc9fa4cf..866cd0a714 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -1616,7 +1616,8 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { } n1.exprfmt(s, nprec, mode) } - + case ODDD: + mode.Fprintf(s, "...") default: mode.Fprintf(s, "", n.Op) } diff --git a/test/fixedbugs/issue41247.go b/test/fixedbugs/issue41247.go new file mode 100644 index 0000000000..2df919c9e6 --- /dev/null +++ b/test/fixedbugs/issue41247.go @@ -0,0 +1,11 @@ +// errorcheck + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +func f() [2]int { + return [...]int{2: 0} // ERROR "cannot use \[\.\.\.\]int literal \(type \[3\]int\)" +} -- cgit v1.2.3-54-g00ecf From f98f3b0c306634e850975c5972ceb8519404edae Mon Sep 17 00:00:00 2001 From: Martin Möhrmann Date: Mon, 7 Sep 2020 20:39:48 +0200 Subject: runtime: remove outdated comment in mkduff.go about usage of STOSQ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change-Id: I71966cc5def4615d64876165872e5e7f2956b270 Reviewed-on: https://go-review.googlesource.com/c/go/+/253397 Run-TryBot: Martin Möhrmann Reviewed-by: Keith Randall TryBot-Result: Gobot Gobot --- src/runtime/mkduff.go | 1 - 1 file changed, 1 deletion(-) diff --git a/src/runtime/mkduff.go b/src/runtime/mkduff.go index 6c7a4cf8dc..8859ed68cc 100644 --- a/src/runtime/mkduff.go +++ b/src/runtime/mkduff.go @@ -83,7 +83,6 @@ func copyAMD64(w io.Writer) { // // This is equivalent to a sequence of MOVSQ but // for some reason that is 3.5x slower than this code. - // The STOSQ in duffzero seem fine, though. 
fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT, $0-0") for i := 0; i < 64; i++ { fmt.Fprintln(w, "\tMOVUPS\t(SI), X0") -- cgit v1.2.3-54-g00ecf From 565ad134c994b7a831e1bea336fdc64f0aeefc85 Mon Sep 17 00:00:00 2001 From: chainhelen Date: Tue, 8 Sep 2020 03:36:19 +0000 Subject: runtime: make PCDATA_RegMapUnsafe more clear and remove magic number Change-Id: Ibf3ee755c3fbec03a9396840dc92ce148c49d9f7 GitHub-Last-Rev: 945d8aaa136003dc381c6aa48bff9ea7ca2c6991 GitHub-Pull-Request: golang/go#41262 Reviewed-on: https://go-review.googlesource.com/c/go/+/253377 Reviewed-by: Austin Clements Run-TryBot: Austin Clements TryBot-Result: Gobot Gobot --- src/cmd/internal/objabi/funcdata.go | 2 +- src/runtime/debugcall.go | 2 +- src/runtime/preempt.go | 2 +- src/runtime/symtab.go | 3 +++ 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/cmd/internal/objabi/funcdata.go b/src/cmd/internal/objabi/funcdata.go index d5bacb5900..c9480bf2f0 100644 --- a/src/cmd/internal/objabi/funcdata.go +++ b/src/cmd/internal/objabi/funcdata.go @@ -35,7 +35,7 @@ const ( // PCDATA_RegMapIndex values. // // Only if !go115ReduceLiveness. - PCDATA_RegMapUnsafe = -2 // Unsafe for async preemption + PCDATA_RegMapUnsafe = PCDATA_UnsafePointUnsafe // Unsafe for async preemption // PCDATA_UnsafePoint values. PCDATA_UnsafePointSafe = -1 // Safe for async preemption diff --git a/src/runtime/debugcall.go b/src/runtime/debugcall.go index 6c285ec829..b5480c73ae 100644 --- a/src/runtime/debugcall.go +++ b/src/runtime/debugcall.go @@ -87,7 +87,7 @@ func debugCallCheck(pc uintptr) string { pcdata = 0 // in prologue } stkmap := (*stackmap)(funcdata(f, _FUNCDATA_RegPointerMaps)) - if pcdata == -2 || stkmap == nil { + if pcdata == _PCDATA_RegMapUnsafe || stkmap == nil { // Not at a safe point. 
ret = debugCallUnsafePoint return diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go index 761856576a..17ef2c90d3 100644 --- a/src/runtime/preempt.go +++ b/src/runtime/preempt.go @@ -406,7 +406,7 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) { var startpc uintptr if !go115ReduceLiveness { smi := pcdatavalue(f, _PCDATA_RegMapIndex, pc, nil) - if smi == -2 { + if smi == _PCDATA_RegMapUnsafe { // Unsafe-point marked by compiler. This includes // atomic sequences (e.g., write barrier) and nosplit // functions (except at calls). diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index ddb5ea82b4..fa8d17035e 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -284,6 +284,9 @@ const ( ) const ( + // Only if !go115ReduceLiveness. + _PCDATA_RegMapUnsafe = _PCDATA_UnsafePointUnsafe // Unsafe for async preemption + // PCDATA_UnsafePoint values. _PCDATA_UnsafePointSafe = -1 // Safe for async preemption _PCDATA_UnsafePointUnsafe = -2 // Unsafe for async preemption -- cgit v1.2.3-54-g00ecf From e8f918535e6c9fc7b85a9dadc9e8035a2e1d1fa4 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Wed, 26 Aug 2020 18:19:03 -0400 Subject: cmd/link: strip STAB (symbolic debugging) symbols on darwin On darwin, with external linking, the system linker produces STAB (symbolic debugging) symbols in the binary's symbol table. These include paths of the intermediate object files, like /go.o, which changes from run to run, making the build non-reproducible. Since we run dsymutil to produce debug info and combine them back into the binary, we don't need those STAB symbols anymore. Strip them after running dsymutil. If DWARF is not enabled, we don't run dsymutil. We can pass "-Wl,-S" to let the system linker not generate those symbols. While here, also make it more consistent about DWARF combining. Currently we only do DWARF combining on macOS/AMD64, when DWARF is enabled. On ARM64, we run dsymutil, but then throw the result away. 
This CL changes it to not run dsymutil (and strip) on ARM64. TODO: add a test. We don't do it here as it fails on some (non-darwin) platforms. Fixes #40979. Change-Id: If770f7828cdb858857d6079e0585bf067f8f7a92 Reviewed-on: https://go-review.googlesource.com/c/go/+/250944 Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor Reviewed-by: Than McIntosh --- src/cmd/link/internal/ld/lib.go | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 702c902142..54ac109b20 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -1238,6 +1238,10 @@ func (ctxt *Link) hostlink() { } } + // On darwin, whether to combine DWARF into executable. + // Only macOS supports unmapped segments such as our __DWARF segment. + combineDwarf := ctxt.IsDarwin() && !*FlagS && !*FlagW && !debug_s && machoPlatform == PLATFORM_MACOS && ctxt.IsAMD64() + switch ctxt.HeadType { case objabi.Hdarwin: if machoPlatform == PLATFORM_MACOS && ctxt.IsAMD64() { @@ -1248,6 +1252,9 @@ func (ctxt *Link) hostlink() { if ctxt.DynlinkingGo() && !ctxt.Arch.InFamily(sys.ARM, sys.ARM64) { argv = append(argv, "-Wl,-flat_namespace") } + if !combineDwarf { + argv = append(argv, "-Wl,-S") // suppress STAB (symbolic debugging) symbols + } case objabi.Hopenbsd: argv = append(argv, "-Wl,-nopie") case objabi.Hwindows: @@ -1587,11 +1594,16 @@ func (ctxt *Link) hostlink() { ctxt.Logf("%s", out) } - if !*FlagS && !*FlagW && !debug_s && ctxt.HeadType == objabi.Hdarwin { + if combineDwarf { dsym := filepath.Join(*flagTmpdir, "go.dwarf") if out, err := exec.Command("dsymutil", "-f", *flagOutfile, "-o", dsym).CombinedOutput(); err != nil { Exitf("%s: running dsymutil failed: %v\n%s", os.Args[0], err, out) } + // Remove STAB (symbolic debugging) symbols after we are done with them (by dsymutil). 
+ // They contain temporary file paths and make the build not reproducible. + if out, err := exec.Command("strip", "-S", *flagOutfile).CombinedOutput(); err != nil { + Exitf("%s: running strip failed: %v\n%s", os.Args[0], err, out) + } // Skip combining if `dsymutil` didn't generate a file. See #11994. if _, err := os.Stat(dsym); os.IsNotExist(err) { return @@ -1607,15 +1619,12 @@ func (ctxt *Link) hostlink() { if err != nil { Exitf("%s: parsing Mach-O header failed: %v", os.Args[0], err) } - // Only macOS supports unmapped segments such as our __DWARF segment. - if machoPlatform == PLATFORM_MACOS && ctxt.IsAMD64() { - if err := machoCombineDwarf(ctxt, exef, exem, dsym, combinedOutput); err != nil { - Exitf("%s: combining dwarf failed: %v", os.Args[0], err) - } - os.Remove(*flagOutfile) - if err := os.Rename(combinedOutput, *flagOutfile); err != nil { - Exitf("%s: %v", os.Args[0], err) - } + if err := machoCombineDwarf(ctxt, exef, exem, dsym, combinedOutput); err != nil { + Exitf("%s: combining dwarf failed: %v", os.Args[0], err) + } + os.Remove(*flagOutfile) + if err := os.Rename(combinedOutput, *flagOutfile); err != nil { + Exitf("%s: %v", os.Args[0], err) } } } -- cgit v1.2.3-54-g00ecf From ae3680b30b81162da3ae0ea248eea502e8ca2195 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Fri, 4 Sep 2020 13:01:08 -0400 Subject: cmd/link: rewrite some code without using reflection In Mach-O DWARF combining, some code was written using reflection, so it could support both 32-bit and 64-bit Mach-O files without duplicating code. We no longer support 32-bit darwin platforms now. 32-bit support can go. Rewrite it with direct field access, for 64-bit only. 
Change-Id: If1338c3cd37cecf603f4df0c6eb0c890eaebfe5f Reviewed-on: https://go-review.googlesource.com/c/go/+/253557 Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot Reviewed-by: Than McIntosh --- src/cmd/link/internal/ld/macho_combine_dwarf.go | 95 ++++++++++--------------- 1 file changed, 38 insertions(+), 57 deletions(-) diff --git a/src/cmd/link/internal/ld/macho_combine_dwarf.go b/src/cmd/link/internal/ld/macho_combine_dwarf.go index 9d9f916b8e..e43aeb1eb7 100644 --- a/src/cmd/link/internal/ld/macho_combine_dwarf.go +++ b/src/cmd/link/internal/ld/macho_combine_dwarf.go @@ -217,9 +217,9 @@ func machoCombineDwarf(ctxt *Link, exef *os.File, exem *macho.File, dsym, outexe linkoffset := uint64(linkstart) - linkseg.Offset switch cmd.Cmd { case macho.LoadCmdSegment64: - err = machoUpdateSegment(reader, linkseg, linkoffset, &macho.Segment64{}, &macho.Section64{}) + err = machoUpdateSegment(reader, linkseg, linkoffset) case macho.LoadCmdSegment: - err = machoUpdateSegment(reader, linkseg, linkoffset, &macho.Segment32{}, &macho.Section32{}) + panic("unexpected 32-bit segment") case LC_DYLD_INFO, LC_DYLD_INFO_ONLY: err = machoUpdateLoadCommand(reader, linkseg, linkoffset, &dyldInfoCmd{}, "RebaseOff", "BindOff", "WeakBindOff", "LazyBindOff", "ExportOff") case macho.LoadCmdSymtab: @@ -313,70 +313,56 @@ func machoCompressSection(sectBytes []byte) (compressed bool, contents []byte, e // machoUpdateSegment updates the load command for a moved segment. // Only the linkedit segment should move, and it should have 0 sections. -// seg should be a macho.Segment32 or macho.Segment64 as appropriate. -// sect should be a macho.Section32 or macho.Section64 as appropriate. 
-func machoUpdateSegment(r loadCmdReader, linkseg *macho.Segment, linkoffset uint64, seg, sect interface{}) error { - if err := r.ReadAt(0, seg); err != nil { +func machoUpdateSegment(r loadCmdReader, linkseg *macho.Segment, linkoffset uint64) error { + var seg macho.Segment64 + if err := r.ReadAt(0, &seg); err != nil { return err } - segValue := reflect.ValueOf(seg) - offset := reflect.Indirect(segValue).FieldByName("Offset") // Only the linkedit segment moved, anything before that is fine. - if offset.Uint() < linkseg.Offset { + if seg.Offset < linkseg.Offset { return nil } - offset.SetUint(offset.Uint() + linkoffset) - if err := r.WriteAt(0, seg); err != nil { + seg.Offset += linkoffset + if err := r.WriteAt(0, &seg); err != nil { return err } // There shouldn't be any sections, but just to make sure... - return machoUpdateSections(r, segValue, reflect.ValueOf(sect), linkoffset, nil) + return machoUpdateSections(r, &seg, linkoffset, nil) } -func machoUpdateSections(r loadCmdReader, seg, sect reflect.Value, deltaOffset uint64, compressedSects []*macho.Section) error { - iseg := reflect.Indirect(seg) - nsect := iseg.FieldByName("Nsect").Uint() +func machoUpdateSections(r loadCmdReader, seg *macho.Segment64, deltaOffset uint64, compressedSects []*macho.Section) error { + nsect := seg.Nsect if nsect == 0 { return nil } - sectOffset := int64(iseg.Type().Size()) - - isect := reflect.Indirect(sect) - offsetField := isect.FieldByName("Offset") - reloffField := isect.FieldByName("Reloff") - addrField := isect.FieldByName("Addr") - nameField := isect.FieldByName("Name") - sizeField := isect.FieldByName("Size") - sectSize := int64(isect.Type().Size()) - for i := uint64(0); i < nsect; i++ { - if err := r.ReadAt(sectOffset, sect.Interface()); err != nil { + sectOffset := int64(unsafe.Sizeof(*seg)) + + var sect macho.Section64 + sectSize := int64(unsafe.Sizeof(sect)) + for i := uint32(0); i < nsect; i++ { + if err := r.ReadAt(sectOffset, §); err != nil { return err } if 
compressedSects != nil { cSect := compressedSects[i] - var name [16]byte - copy(name[:], []byte(cSect.Name)) - nameField.Set(reflect.ValueOf(name)) - sizeField.SetUint(cSect.Size) + copy(sect.Name[:], cSect.Name) + sect.Size = cSect.Size if cSect.Offset != 0 { - offsetField.SetUint(uint64(cSect.Offset) + deltaOffset) + sect.Offset = cSect.Offset + uint32(deltaOffset) } if cSect.Addr != 0 { - addrField.SetUint(cSect.Addr) + sect.Addr = cSect.Addr } } else { - if offsetField.Uint() != 0 { - offsetField.SetUint(offsetField.Uint() + deltaOffset) - } - if reloffField.Uint() != 0 { - reloffField.SetUint(reloffField.Uint() + deltaOffset) + if sect.Offset != 0 { + sect.Offset += uint32(deltaOffset) } - if addrField.Uint() != 0 { - addrField.SetUint(addrField.Uint()) + if sect.Reloff != 0 { + sect.Reloff += uint32(deltaOffset) } } - if err := r.WriteAt(sectOffset, sect.Interface()); err != nil { + if err := r.WriteAt(sectOffset, §); err != nil { return err } sectOffset += sectSize @@ -386,32 +372,27 @@ func machoUpdateSections(r loadCmdReader, seg, sect reflect.Value, deltaOffset u // machoUpdateDwarfHeader updates the DWARF segment load command. 
func machoUpdateDwarfHeader(r *loadCmdReader, compressedSects []*macho.Section, dwarfsize uint64, dwarfstart int64, realdwarf *macho.Segment) error { - var seg, sect interface{} cmd, err := r.Next() if err != nil { return err } - if cmd.Cmd == macho.LoadCmdSegment64 { - seg = new(macho.Segment64) - sect = new(macho.Section64) - } else { - seg = new(macho.Segment32) - sect = new(macho.Section32) + if cmd.Cmd != macho.LoadCmdSegment64 { + panic("not a Segment64") } - if err := r.ReadAt(0, seg); err != nil { + var seg macho.Segment64 + if err := r.ReadAt(0, &seg); err != nil { return err } - segv := reflect.ValueOf(seg).Elem() - segv.FieldByName("Offset").SetUint(uint64(dwarfstart)) + seg.Offset = uint64(dwarfstart) if compressedSects != nil { var segSize uint64 for _, newSect := range compressedSects { segSize += newSect.Size } - segv.FieldByName("Filesz").SetUint(segSize) + seg.Filesz = segSize } else { - segv.FieldByName("Filesz").SetUint(dwarfsize) + seg.Filesz = dwarfsize } // We want the DWARF segment to be considered non-loadable, so @@ -424,14 +405,14 @@ func machoUpdateDwarfHeader(r *loadCmdReader, compressedSects []*macho.Section, // in ImageLoaderMachO.cpp (various versions can be found online, see // https://opensource.apple.com/source/dyld/dyld-519.2.2/src/ImageLoaderMachO.cpp.auto.html // as one example). 
- segv.FieldByName("Addr").SetUint(0) - segv.FieldByName("Memsz").SetUint(0) - segv.FieldByName("Prot").SetUint(0) + seg.Addr = 0 + seg.Memsz = 0 + seg.Prot = 0 - if err := r.WriteAt(0, seg); err != nil { + if err := r.WriteAt(0, &seg); err != nil { return err } - return machoUpdateSections(*r, segv, reflect.ValueOf(sect), uint64(dwarfstart)-realdwarf.Offset, compressedSects) + return machoUpdateSections(*r, &seg, uint64(dwarfstart)-realdwarf.Offset, compressedSects) } func machoUpdateLoadCommand(r loadCmdReader, linkseg *macho.Segment, linkoffset uint64, cmd interface{}, fields ...string) error { -- cgit v1.2.3-54-g00ecf From a52a5d8a43ac23c34231e92da3aba61cd8cf7d97 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 3 Sep 2020 17:51:01 -0400 Subject: cmd/link: mark ELF PLT sections executable Change-Id: Ie0316a06c30485f783c2175590d7e9fc4fa3e0cd Reviewed-on: https://go-review.googlesource.com/c/go/+/253021 Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot Reviewed-by: Than McIntosh --- src/cmd/link/internal/ld/data.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index 85ce7d8fb7..8324a98a26 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -1786,7 +1786,7 @@ func (state *dodataState) allocateDataSections(ctxt *Link) { culprit := ldr.SymName(state.data[sym.STEXT][0]) Errorf(nil, "dodata found an sym.STEXT symbol: %s", culprit) } - state.allocateSingleSymSections(&Segtext, sym.SELFRXSECT, sym.SRODATA, 04) + state.allocateSingleSymSections(&Segtext, sym.SELFRXSECT, sym.SRODATA, 05) state.allocateSingleSymSections(&Segtext, sym.SMACHOPLT, sym.SRODATA, 05) /* read-only data */ -- cgit v1.2.3-54-g00ecf From 1e6ad65b43ee392676a69f769b1942edd8af0e86 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Fri, 4 Sep 2020 14:35:57 -0400 Subject: cmd/link: enable DWARF combining on macOS ARM64 It appears the machoCalcStart function is meant to align 
the segment, but it doesn't. Replace it with an actual alignment calculation. Also, use the alignment from the configuration, instead of hardcode. With this fix we could enable DWARF combining on macOS ARM64. Change-Id: I19ec771b77d752b83a54c53b6ee65af78a31b8ae Reviewed-on: https://go-review.googlesource.com/c/go/+/253558 Reviewed-by: Than McIntosh --- src/cmd/link/internal/arm64/obj.go | 2 +- src/cmd/link/internal/ld/lib.go | 4 ++-- src/cmd/link/internal/ld/macho_combine_dwarf.go | 17 ++--------------- 3 files changed, 5 insertions(+), 18 deletions(-) diff --git a/src/cmd/link/internal/arm64/obj.go b/src/cmd/link/internal/arm64/obj.go index 37b72b6c37..a980cfee52 100644 --- a/src/cmd/link/internal/arm64/obj.go +++ b/src/cmd/link/internal/arm64/obj.go @@ -105,7 +105,7 @@ func archinit(ctxt *ld.Link) { *ld.FlagTextAddr = 4096 + int64(ld.HEADR) } if *ld.FlagRound == -1 { - *ld.FlagRound = 4096 + *ld.FlagRound = 16384 // 16K page alignment } } } diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 54ac109b20..4295b2a660 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -1240,11 +1240,11 @@ func (ctxt *Link) hostlink() { // On darwin, whether to combine DWARF into executable. // Only macOS supports unmapped segments such as our __DWARF segment. - combineDwarf := ctxt.IsDarwin() && !*FlagS && !*FlagW && !debug_s && machoPlatform == PLATFORM_MACOS && ctxt.IsAMD64() + combineDwarf := ctxt.IsDarwin() && !*FlagS && !*FlagW && !debug_s && machoPlatform == PLATFORM_MACOS switch ctxt.HeadType { case objabi.Hdarwin: - if machoPlatform == PLATFORM_MACOS && ctxt.IsAMD64() { + if combineDwarf { // Leave room for DWARF combining. // -headerpad is incompatible with -fembed-bitcode. 
argv = append(argv, "-Wl,-headerpad,1144") diff --git a/src/cmd/link/internal/ld/macho_combine_dwarf.go b/src/cmd/link/internal/ld/macho_combine_dwarf.go index e43aeb1eb7..77ee8a4d62 100644 --- a/src/cmd/link/internal/ld/macho_combine_dwarf.go +++ b/src/cmd/link/internal/ld/macho_combine_dwarf.go @@ -16,10 +16,6 @@ import ( "unsafe" ) -const ( - pageAlign = 12 // 4096 = 1 << 12 -) - type loadCmd struct { Cmd macho.LoadCmd Len uint32 @@ -138,7 +134,7 @@ func machoCombineDwarf(ctxt *Link, exef *os.File, exem *macho.File, dsym, outexe // Now copy the dwarf data into the output. // Kernel requires all loaded segments to be page-aligned in the file, // even though we mark this one as being 0 bytes of virtual address space. - dwarfstart := machoCalcStart(realdwarf.Offset, linkseg.Offset, pageAlign) + dwarfstart := Rnd(int64(linkseg.Offset), int64(*FlagRound)) if _, err := outf.Seek(dwarfstart, 0); err != nil { return err } @@ -166,7 +162,7 @@ func machoCombineDwarf(ctxt *Link, exef *os.File, exem *macho.File, dsym, outexe if _, err := exef.Seek(int64(linkseg.Offset), 0); err != nil { return err } - linkstart := machoCalcStart(linkseg.Offset, uint64(dwarfstart)+dwarfsize, pageAlign) + linkstart := Rnd(dwarfstart+int64(dwarfsize), int64(*FlagRound)) if _, err := outf.Seek(linkstart, 0); err != nil { return err } @@ -432,12 +428,3 @@ func machoUpdateLoadCommand(r loadCmdReader, linkseg *macho.Segment, linkoffset } return nil } - -func machoCalcStart(origAddr, newAddr uint64, alignExp uint32) int64 { - align := uint64(1 << alignExp) - origMod, newMod := origAddr%align, newAddr%align - if origMod == newMod { - return int64(newAddr) - } - return int64(newAddr + align + origMod - newMod) -} -- cgit v1.2.3-54-g00ecf From bdb480fd623e58d0d1d0689a3755367379ea57bc Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Tue, 8 Sep 2020 15:28:43 +0700 Subject: cmd/compile: fix mishandling of unsafe-uintptr arguments in go/defer Currently, the statement: go g(uintptr(f())) gets rewritten 
into: tmp := f() newproc(8, g, uintptr(tmp)) runtime.KeepAlive(tmp) which doesn't guarantee that tmp is still alive by time the g call is scheduled to run. This CL fixes the issue, by wrapping g call in a closure: go func(p unsafe.Pointer) { g(uintptr(p)) }(f()) then this will be rewritten into: tmp := f() go func(p unsafe.Pointer) { g(uintptr(p)) runtime.KeepAlive(p) }(tmp) runtime.KeepAlive(tmp) // superfluous, but harmless So the unsafe.Pointer p will be kept alive at the time g call runs. Updates #24491 Change-Id: Ic10821251cbb1b0073daec92b82a866c6ebaf567 Reviewed-on: https://go-review.googlesource.com/c/go/+/253457 Run-TryBot: Cuong Manh Le Reviewed-by: Matthew Dempsky TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/order.go | 1 + src/cmd/compile/internal/gc/syntax.go | 41 +++++++++++++++++--------- src/cmd/compile/internal/gc/walk.go | 54 ++++++++++++++++++++++++++++------- test/fixedbugs/issue24491.go | 45 +++++++++++++++++++++++++++++ 4 files changed, 117 insertions(+), 24 deletions(-) create mode 100644 test/fixedbugs/issue24491.go diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index aa91160e5c..412f073a8d 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -502,6 +502,7 @@ func (o *Order) call(n *Node) { x := o.copyExpr(arg.Left, arg.Left.Type, false) x.Name.SetKeepalive(true) arg.Left = x + n.SetNeedsWrapper(true) } } diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index 47e5e59156..5580f789c5 100644 --- a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -141,19 +141,20 @@ const ( nodeInitorder, _ // tracks state during init1; two bits _, _ // second nodeInitorder bit _, nodeHasBreak - _, nodeNoInline // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only - _, nodeImplicit // implicit OADDR or ODEREF; ++/-- 
statement represented as OASOP; or ANDNOT lowered to OAND - _, nodeIsDDD // is the argument variadic - _, nodeDiag // already printed error about this - _, nodeColas // OAS resulting from := - _, nodeNonNil // guaranteed to be non-nil - _, nodeTransient // storage can be reused immediately after this statement - _, nodeBounded // bounds check unnecessary - _, nodeHasCall // expression contains a function call - _, nodeLikely // if statement condition likely - _, nodeHasVal // node.E contains a Val - _, nodeHasOpt // node.E contains an Opt - _, nodeEmbedded // ODCLFIELD embedded type + _, nodeNoInline // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only + _, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP; or ANDNOT lowered to OAND + _, nodeIsDDD // is the argument variadic + _, nodeDiag // already printed error about this + _, nodeColas // OAS resulting from := + _, nodeNonNil // guaranteed to be non-nil + _, nodeTransient // storage can be reused immediately after this statement + _, nodeBounded // bounds check unnecessary + _, nodeHasCall // expression contains a function call + _, nodeLikely // if statement condition likely + _, nodeHasVal // node.E contains a Val + _, nodeHasOpt // node.E contains an Opt + _, nodeEmbedded // ODCLFIELD embedded type + _, nodeNeedsWrapper // OCALLxxx node that needs to be wrapped ) func (n *Node) Class() Class { return Class(n.flags.get3(nodeClass)) } @@ -286,6 +287,20 @@ func (n *Node) SetIota(x int64) { n.Xoffset = x } +func (n *Node) NeedsWrapper() bool { + return n.flags&nodeNeedsWrapper != 0 +} + +// SetNeedsWrapper indicates that OCALLxxx node needs to be wrapped by a closure. 
+func (n *Node) SetNeedsWrapper(b bool) { + switch n.Op { + case OCALLFUNC, OCALLMETH, OCALLINTER: + default: + Fatalf("Node.SetNeedsWrapper %v", n.Op) + } + n.flags.set(nodeNeedsWrapper, b) +} + // mayBeShared reports whether n may occur in multiple places in the AST. // Extra care must be taken when mutating such a node. func (n *Node) mayBeShared() bool { diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 0158af8700..ab7f857031 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -232,7 +232,11 @@ func walkstmt(n *Node) *Node { n.Left = copyany(n.Left, &n.Ninit, true) default: - n.Left = walkexpr(n.Left, &n.Ninit) + if n.Left.NeedsWrapper() { + n.Left = wrapCall(n.Left, &n.Ninit) + } else { + n.Left = walkexpr(n.Left, &n.Ninit) + } } case OFOR, OFORUNTIL: @@ -3857,6 +3861,14 @@ func candiscard(n *Node) bool { // builtin(a1, a2, a3) // }(x, y, z) // for print, println, and delete. +// +// Rewrite +// go f(x, y, uintptr(unsafe.Pointer(z))) +// into +// go func(a1, a2, a3) { +// builtin(a1, a2, uintptr(a3)) +// }(x, y, unsafe.Pointer(z)) +// for function contains unsafe-uintptr arguments. var wrapCall_prgen int @@ -3868,9 +3880,17 @@ func wrapCall(n *Node, init *Nodes) *Node { init.AppendNodes(&n.Ninit) } + isBuiltinCall := n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER + // origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion. 
+ origArgs := make([]*Node, n.List.Len()) t := nod(OTFUNC, nil, nil) for i, arg := range n.List.Slice() { s := lookupN("a", i) + if !isBuiltinCall && arg.Op == OCONVNOP && arg.Type.Etype == TUINTPTR && arg.Left.Type.Etype == TUNSAFEPTR { + origArgs[i] = arg + arg = arg.Left + n.List.SetIndex(i, arg) + } t.List.Append(symfield(s, arg.Type)) } @@ -3878,10 +3898,22 @@ func wrapCall(n *Node, init *Nodes) *Node { sym := lookupN("wrap·", wrapCall_prgen) fn := dclfunc(sym, t) - a := nod(n.Op, nil, nil) - a.List.Set(paramNnames(t.Type)) - a = typecheck(a, ctxStmt) - fn.Nbody.Set1(a) + args := paramNnames(t.Type) + for i, origArg := range origArgs { + if origArg == nil { + continue + } + arg := nod(origArg.Op, args[i], nil) + arg.Type = origArg.Type + args[i] = arg + } + call := nod(n.Op, nil, nil) + if !isBuiltinCall { + call.Op = OCALL + call.Left = n.Left + } + call.List.Set(args) + fn.Nbody.Set1(call) funcbody() @@ -3889,12 +3921,12 @@ func wrapCall(n *Node, init *Nodes) *Node { typecheckslice(fn.Nbody.Slice(), ctxStmt) xtop = append(xtop, fn) - a = nod(OCALL, nil, nil) - a.Left = fn.Func.Nname - a.List.Set(n.List.Slice()) - a = typecheck(a, ctxStmt) - a = walkexpr(a, init) - return a + call = nod(OCALL, nil, nil) + call.Left = fn.Func.Nname + call.List.Set(n.List.Slice()) + call = typecheck(call, ctxStmt) + call = walkexpr(call, init) + return call } // substArgTypes substitutes the given list of types for diff --git a/test/fixedbugs/issue24491.go b/test/fixedbugs/issue24491.go new file mode 100644 index 0000000000..4703368793 --- /dev/null +++ b/test/fixedbugs/issue24491.go @@ -0,0 +1,45 @@ +// run + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test makes sure unsafe-uintptr arguments are handled correctly. 
+ +package main + +import ( + "runtime" + "unsafe" +) + +var done = make(chan bool, 1) + +func setup() unsafe.Pointer { + s := "ok" + runtime.SetFinalizer(&s, func(p *string) { *p = "FAIL" }) + return unsafe.Pointer(&s) +} + +//go:noinline +//go:uintptrescapes +func test(s string, p uintptr) { + runtime.GC() + if *(*string)(unsafe.Pointer(p)) != "ok" { + panic(s + " return unexpected result") + } + done <- true +} + +func main() { + test("normal", uintptr(setup())) + <-done + + go test("go", uintptr(setup())) + <-done + + func() { + defer test("defer", uintptr(setup())) + }() + <-done +} -- cgit v1.2.3-54-g00ecf From 9cf88333e8255155be4e136c572883bb5ad546bd Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 9 Sep 2020 12:06:18 +0700 Subject: cmd/compile: consistently use Type.IsUnsafePtr() Passes toolstash-check. Change-Id: Iaeae7cc20e26af733642c7c8c7ca0a059e5b07b2 Reviewed-on: https://go-review.googlesource.com/c/go/+/253657 Run-TryBot: Cuong Manh Le TryBot-Result: Gobot Gobot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/escape.go | 6 +++--- src/cmd/compile/internal/gc/ssa.go | 2 +- src/cmd/compile/internal/gc/subr.go | 4 ++-- src/cmd/compile/internal/gc/walk.go | 10 +++++----- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index d5cca4a38b..dc469e276c 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -485,7 +485,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { e.discard(max) case OCONV, OCONVNOP: - if checkPtr(e.curfn, 2) && n.Type.Etype == TUNSAFEPTR && n.Left.Type.IsPtr() { + if checkPtr(e.curfn, 2) && n.Type.IsUnsafePtr() && n.Left.Type.IsPtr() { // When -d=checkptr=2 is enabled, treat // conversions to unsafe.Pointer as an // escaping operation. 
This allows better @@ -493,7 +493,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { // easily detect object boundaries on the heap // than the stack. e.assignHeap(n.Left, "conversion to unsafe.Pointer", n) - } else if n.Type.Etype == TUNSAFEPTR && n.Left.Type.Etype == TUINTPTR { + } else if n.Type.IsUnsafePtr() && n.Left.Type.Etype == TUINTPTR { e.unsafeValue(k, n.Left) } else { e.expr(k, n.Left) @@ -625,7 +625,7 @@ func (e *Escape) unsafeValue(k EscHole, n *Node) { switch n.Op { case OCONV, OCONVNOP: - if n.Left.Type.Etype == TUNSAFEPTR { + if n.Left.Type.IsUnsafePtr() { e.expr(k, n.Left) } else { e.discard(n.Left) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 52083d999e..89644cd3f2 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -2113,7 +2113,7 @@ func (s *state) expr(n *Node) *ssa.Value { } // unsafe.Pointer <--> *T - if to.Etype == TUNSAFEPTR && from.IsPtrShaped() || from.Etype == TUNSAFEPTR && to.IsPtrShaped() { + if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() { return v } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 8fa3fca50f..6d0a40c287 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -781,12 +781,12 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op { } // 8. src is a pointer or uintptr and dst is unsafe.Pointer. - if (src.IsPtr() || src.Etype == TUINTPTR) && dst.Etype == TUNSAFEPTR { + if (src.IsPtr() || src.Etype == TUINTPTR) && dst.IsUnsafePtr() { return OCONVNOP } // 9. src is unsafe.Pointer and dst is a pointer or uintptr. 
- if src.Etype == TUNSAFEPTR && (dst.IsPtr() || dst.Etype == TUINTPTR) { + if src.IsUnsafePtr() && (dst.IsPtr() || dst.Etype == TUINTPTR) { return OCONVNOP } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index ab7f857031..a9fefb3ddd 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -958,11 +958,11 @@ opswitch: case OCONV, OCONVNOP: n.Left = walkexpr(n.Left, init) if n.Op == OCONVNOP && checkPtr(Curfn, 1) { - if n.Type.IsPtr() && n.Left.Type.Etype == TUNSAFEPTR { // unsafe.Pointer to *T + if n.Type.IsPtr() && n.Left.Type.IsUnsafePtr() { // unsafe.Pointer to *T n = walkCheckPtrAlignment(n, init, nil) break } - if n.Type.Etype == TUNSAFEPTR && n.Left.Type.Etype == TUINTPTR { // uintptr to unsafe.Pointer + if n.Type.IsUnsafePtr() && n.Left.Type.Etype == TUINTPTR { // uintptr to unsafe.Pointer n = walkCheckPtrArithmetic(n, init) break } @@ -1127,7 +1127,7 @@ opswitch: n.List.SetSecond(walkexpr(n.List.Second(), init)) case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR: - checkSlice := checkPtr(Curfn, 1) && n.Op == OSLICE3ARR && n.Left.Op == OCONVNOP && n.Left.Left.Type.Etype == TUNSAFEPTR + checkSlice := checkPtr(Curfn, 1) && n.Op == OSLICE3ARR && n.Left.Op == OCONVNOP && n.Left.Left.Type.IsUnsafePtr() if checkSlice { n.Left.Left = walkexpr(n.Left.Left, init) } else { @@ -3886,7 +3886,7 @@ func wrapCall(n *Node, init *Nodes) *Node { t := nod(OTFUNC, nil, nil) for i, arg := range n.List.Slice() { s := lookupN("a", i) - if !isBuiltinCall && arg.Op == OCONVNOP && arg.Type.Etype == TUINTPTR && arg.Left.Type.Etype == TUNSAFEPTR { + if !isBuiltinCall && arg.Op == OCONVNOP && arg.Type.Etype == TUINTPTR && arg.Left.Type.IsUnsafePtr() { origArgs[i] = arg arg = arg.Left n.List.SetIndex(i, arg) @@ -4041,7 +4041,7 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node { walk(n.Left) } case OCONVNOP: - if n.Left.Type.Etype == TUNSAFEPTR { + if n.Left.Type.IsUnsafePtr() { n.Left = 
cheapexpr(n.Left, init) originals = append(originals, convnop(n.Left, types.Types[TUNSAFEPTR])) } -- cgit v1.2.3-54-g00ecf From 518369601ca2499cea68af86451f17d2856895f8 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 9 Sep 2020 12:09:26 +0700 Subject: cmd/compile: add Type.IsUintptr() to detect type is an uintptr Passes toolstash-check. Change-Id: I7051d45eafbfd4dea73a3d4b5ea6cff39d76cbc1 Reviewed-on: https://go-review.googlesource.com/c/go/+/253658 Run-TryBot: Cuong Manh Le TryBot-Result: Gobot Gobot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/esc.go | 6 +++--- src/cmd/compile/internal/gc/escape.go | 2 +- src/cmd/compile/internal/gc/subr.go | 4 ++-- src/cmd/compile/internal/gc/walk.go | 4 ++-- src/cmd/compile/internal/types/type.go | 5 +++++ 5 files changed, 13 insertions(+), 8 deletions(-) diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index 4b843aba35..375331d1f5 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -377,7 +377,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string { // This really doesn't have much to do with escape analysis per se, // but we are reusing the ability to annotate an individual function // argument and pass those annotations along to importing code. - if f.Type.Etype == TUINTPTR { + if f.Type.IsUintptr() { if Debug['m'] != 0 { Warnl(f.Pos, "assuming %v is unsafe uintptr", name()) } @@ -407,13 +407,13 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string { } if fn.Func.Pragma&UintptrEscapes != 0 { - if f.Type.Etype == TUINTPTR { + if f.Type.IsUintptr() { if Debug['m'] != 0 { Warnl(f.Pos, "marking %v as escaping uintptr", name()) } return uintptrEscapesTag } - if f.IsDDD() && f.Type.Elem().Etype == TUINTPTR { + if f.IsDDD() && f.Type.Elem().IsUintptr() { // final argument is ...uintptr. 
if Debug['m'] != 0 { Warnl(f.Pos, "marking %v as escaping ...uintptr", name()) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index dc469e276c..75da439bb7 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -493,7 +493,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { // easily detect object boundaries on the heap // than the stack. e.assignHeap(n.Left, "conversion to unsafe.Pointer", n) - } else if n.Type.IsUnsafePtr() && n.Left.Type.Etype == TUINTPTR { + } else if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() { e.unsafeValue(k, n.Left) } else { e.expr(k, n.Left) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 6d0a40c287..d3ba53ff0c 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -781,12 +781,12 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op { } // 8. src is a pointer or uintptr and dst is unsafe.Pointer. - if (src.IsPtr() || src.Etype == TUINTPTR) && dst.IsUnsafePtr() { + if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() { return OCONVNOP } // 9. src is unsafe.Pointer and dst is a pointer or uintptr. 
- if src.IsUnsafePtr() && (dst.IsPtr() || dst.Etype == TUINTPTR) { + if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) { return OCONVNOP } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index a9fefb3ddd..361de7e0f3 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -962,7 +962,7 @@ opswitch: n = walkCheckPtrAlignment(n, init, nil) break } - if n.Type.IsUnsafePtr() && n.Left.Type.Etype == TUINTPTR { // uintptr to unsafe.Pointer + if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() { // uintptr to unsafe.Pointer n = walkCheckPtrArithmetic(n, init) break } @@ -3886,7 +3886,7 @@ func wrapCall(n *Node, init *Nodes) *Node { t := nod(OTFUNC, nil, nil) for i, arg := range n.List.Slice() { s := lookupN("a", i) - if !isBuiltinCall && arg.Op == OCONVNOP && arg.Type.Etype == TUINTPTR && arg.Left.Type.IsUnsafePtr() { + if !isBuiltinCall && arg.Op == OCONVNOP && arg.Type.IsUintptr() && arg.Left.Type.IsUnsafePtr() { origArgs[i] = arg arg = arg.Left n.List.SetIndex(i, arg) diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index e4b3d885d9..a777a5fd90 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -1230,6 +1230,11 @@ func (t *Type) IsUnsafePtr() bool { return t.Etype == TUNSAFEPTR } +// IsUintptr reports whether t is an uintptr. +func (t *Type) IsUintptr() bool { + return t.Etype == TUINTPTR +} + // IsPtrShaped reports whether t is represented by a single machine pointer. // In addition to regular Go pointer types, this includes map, channel, and // function types and unsafe.Pointer. 
It does not include array or struct types -- cgit v1.2.3-54-g00ecf From bdad4285709d1c5e04458268880775087be63027 Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Fri, 28 Aug 2020 15:05:44 -0400 Subject: cmd/go: make 'go get' preserve sums for content of new requirements This preserves zip sums when 'go get' is run on a module that does not have a package in the root directory. The zip must be fetched to determine whether the package should be loaded, so we already load and verify the sum. Note that 'go mod tidy' may still remove these sums, since they aren't needed to load packages. Fixes #41103 Change-Id: I78f10a25f0392461fdc98518a7c92a38ee3233c3 Reviewed-on: https://go-review.googlesource.com/c/go/+/251880 Run-TryBot: Jay Conrod TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. Mills --- src/cmd/go/internal/modload/init.go | 38 +++++++++++++++++------ src/cmd/go/testdata/script/mod_get_sum_noroot.txt | 11 +++++++ 2 files changed, 39 insertions(+), 10 deletions(-) create mode 100644 src/cmd/go/testdata/script/mod_get_sum_noroot.txt diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 6f93b88eab..8e8fb9e6a1 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -862,14 +862,11 @@ func WriteGoMod() { } } - // Always update go.sum, even if we didn't change go.mod: we may have - // downloaded modules that we didn't have before. - modfetch.WriteGoSum(keepSums()) - if !dirty && cfg.CmdName != "mod tidy" { // The go.mod file has the same semantic content that it had before // (but not necessarily the same exact bytes). - // Ignore any intervening edits. + // Don't write go.mod, but write go.sum in case we added or trimmed sums. + modfetch.WriteGoSum(keepSums(true)) return } @@ -880,6 +877,9 @@ func WriteGoMod() { defer func() { // At this point we have determined to make the go.mod file on disk equal to new. 
index = indexModFile(new, modFile, false) + + // Update go.sum after releasing the side lock and refreshing the index. + modfetch.WriteGoSum(keepSums(true)) }() // Make a best-effort attempt to acquire the side lock, only to exclude @@ -920,7 +920,10 @@ func WriteGoMod() { // the last load function like ImportPaths, LoadALL, etc.). It also contains // entries for go.mod files needed for MVS (the version of these entries // ends with "/go.mod"). -func keepSums() map[module.Version]bool { +// +// If addDirect is true, the set also includes sums for modules directly +// required by go.mod, as represented by the index, with replacements applied. +func keepSums(addDirect bool) map[module.Version]bool { // Walk the module graph and keep sums needed by MVS. modkey := func(m module.Version) module.Version { return module.Version{Path: m.Path, Version: m.Version + "/go.mod"} @@ -932,9 +935,6 @@ func keepSums() map[module.Version]bool { walk = func(m module.Version) { // If we build using a replacement module, keep the sum for the replacement, // since that's the code we'll actually use during a build. - // - // TODO(golang.org/issue/29182): Perhaps we should keep both sums, and the - // sums for both sets of transitive requirements. r := Replacement(m) if r.Path == "" { keep[modkey(m)] = true @@ -964,9 +964,27 @@ func keepSums() map[module.Version]bool { } } + // Add entries for modules directly required by go.mod. + if addDirect { + for m := range index.require { + var kept module.Version + if r := Replacement(m); r.Path != "" { + kept = r + } else { + kept = m + } + keep[kept] = true + keep[module.Version{Path: kept.Path, Version: kept.Version + "/go.mod"}] = true + } + } + return keep } func TrimGoSum() { - modfetch.TrimGoSum(keepSums()) + // Don't retain sums for direct requirements in go.mod. When TrimGoSum is + // called, go.mod has not been updated, and it may contain requirements on + // modules deleted from the build list. 
+ addDirect := false + modfetch.TrimGoSum(keepSums(addDirect)) } diff --git a/src/cmd/go/testdata/script/mod_get_sum_noroot.txt b/src/cmd/go/testdata/script/mod_get_sum_noroot.txt new file mode 100644 index 0000000000..0d9a840e77 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_get_sum_noroot.txt @@ -0,0 +1,11 @@ +# When 'go get' is invoked on a module without a package in the root directory, +# it should add sums for the module's go.mod file and its content to go.sum. +# Verifies golang.org/issue/41103. +go mod init m +go get rsc.io/QUOTE +grep '^rsc.io/QUOTE v1.5.2/go.mod ' go.sum +grep '^rsc.io/QUOTE v1.5.2 ' go.sum + +# Double-check rsc.io/QUOTE does not have a root package. +! go list -mod=readonly rsc.io/QUOTE +stderr '^cannot find module providing package rsc.io/QUOTE: import lookup disabled by -mod=readonly$' -- cgit v1.2.3-54-g00ecf From 83ed734df03fd11d71f06bc02906a723afaf3936 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Tue, 11 Aug 2020 19:57:50 -0400 Subject: cmd/link: pre-resolve package reference Pre-resolve package index references, so it doesn't need to do a map lookup in every cross-package symbol reference resolution. It increases the memory usage very slightly (O(# imported packages)). 
Change-Id: Ia76c97ac51f1c2c2d5ea7ae34853850ec69ef0a8 Reviewed-on: https://go-review.googlesource.com/c/go/+/253604 Run-TryBot: Cherry Zhang Reviewed-by: Than McIntosh TryBot-Result: Gobot Gobot --- src/cmd/link/internal/loader/loader.go | 36 +++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index 8fd10b0848..43a0352e0b 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -93,11 +93,12 @@ type oReader struct { version int // version of static symbol flags uint32 // read from object file pkgprefix string - syms []Sym // Sym's global index, indexed by local index - ndef int // cache goobj.Reader.NSym() - nhashed64def int // cache goobj.Reader.NHashed64Def() - nhasheddef int // cache goobj.Reader.NHashedDef() - objidx uint32 // index of this reader in the objs slice + syms []Sym // Sym's global index, indexed by local index + pkg []uint32 // indices of referenced package by PkgIdx (index into loader.objs array) + ndef int // cache goobj.Reader.NSym() + nhashed64def int // cache goobj.Reader.NHashed64Def() + nhasheddef int // cache goobj.Reader.NHashedDef() + objidx uint32 // index of this reader in the objs slice } // Total number of defined symbols (package symbols, hashed symbols, and @@ -219,7 +220,7 @@ type Loader struct { deferReturnTramp map[Sym]bool // whether the symbol is a trampoline of a deferreturn call - objByPkg map[string]*oReader // map package path to its Go object reader + objByPkg map[string]uint32 // map package path to the index of its Go object reader anonVersion int // most recently assigned ext static sym pseudo-version @@ -331,7 +332,7 @@ func NewLoader(flags uint32, elfsetstring elfsetstringFunc, reporter *ErrorRepor objSyms: make([]objSym, 1, 100000), // reserve index 0 for nil symbol extReader: extReader, symsByName: [2]map[string]Sym{make(map[string]Sym, 80000), 
make(map[string]Sym, 50000)}, // preallocate ~2MB for ABI0 and ~1MB for ABI1 symbols - objByPkg: make(map[string]*oReader), + objByPkg: make(map[string]uint32), outer: make(map[Sym]Sym), sub: make(map[Sym]Sym), dynimplib: make(map[Sym]string), @@ -370,7 +371,7 @@ func (l *Loader) addObj(pkg string, r *oReader) Sym { } pkg = objabi.PathToPrefix(pkg) // the object file contains escaped package path if _, ok := l.objByPkg[pkg]; !ok { - l.objByPkg[pkg] = r + l.objByPkg[pkg] = r.objidx } i := Sym(len(l.objSyms)) l.start[r] = i @@ -635,12 +636,7 @@ func (l *Loader) resolve(r *oReader, s goobj.SymRef) Sym { case goobj.PkgIdxSelf: rr = r default: - pkg := r.Pkg(int(p)) - var ok bool - rr, ok = l.objByPkg[pkg] - if !ok { - log.Fatalf("reference of nonexisted package %s, from %v", pkg, r.unit.Lib) - } + rr = l.objs[r.pkg[p]].r } return l.toGlobal(rr, s.SymIdx) } @@ -2195,6 +2191,18 @@ func loadObjRefs(l *Loader, r *oReader, arch *sys.Arch) { } } + // referenced packages + npkg := r.NPkg() + r.pkg = make([]uint32, npkg) + for i := 1; i < npkg; i++ { // PkgIdx 0 is a dummy invalid package + pkg := r.Pkg(i) + objidx, ok := l.objByPkg[pkg] + if !ok { + log.Fatalf("reference of nonexisted package %s, from %v", pkg, r.unit.Lib) + } + r.pkg[i] = objidx + } + // load flags of package refs for i, n := 0, r.NRefFlags(); i < n; i++ { rf := r.RefFlags(i) -- cgit v1.2.3-54-g00ecf From 9ef3ee339634d1a349e7b9bb4cae32aacc326f61 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 9 Sep 2020 23:47:41 +0700 Subject: cmd/link: remove unnecessary type conversion for nitablinks It's already an uint64. 
Change-Id: Ic4cdb957aa4f9245c1ea3f946bcb740f116dd04b Reviewed-on: https://go-review.googlesource.com/c/go/+/253679 Run-TryBot: Cuong Manh Le Reviewed-by: Than McIntosh TryBot-Result: Gobot Gobot --- src/cmd/link/internal/ld/symtab.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index bc880955b8..56363cdaae 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -681,8 +681,8 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { itablinkSym := ldr.Lookup("runtime.itablink", 0) nitablinks := uint64(ldr.SymSize(itablinkSym)) / uint64(ctxt.Arch.PtrSize) moduledata.AddAddr(ctxt.Arch, itablinkSym) - moduledata.AddUint(ctxt.Arch, uint64(nitablinks)) - moduledata.AddUint(ctxt.Arch, uint64(nitablinks)) + moduledata.AddUint(ctxt.Arch, nitablinks) + moduledata.AddUint(ctxt.Arch, nitablinks) // The ptab slice if ptab := ldr.Lookup("go.plugin.tabs", 0); ptab != 0 && ldr.AttrReachable(ptab) { ldr.SetAttrLocal(ptab, true) -- cgit v1.2.3-54-g00ecf From 34835df04891a1d54394888b763af88f9476101d Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Wed, 9 Sep 2020 16:52:18 +0000 Subject: runtime: fix ReadMemStatsSlow's and CheckScavengedBits' chunk iteration Both ReadMemStatsSlow and CheckScavengedBits iterate over the page allocator's chunks but don't actually check if they exist. During the development process the chunks index became sparse, so now this was a possibility. If the runtime tests' heap is sparse we might end up segfaulting in either one of these functions, though this will generally be very rare. The pattern here to return nil for a nonexistent chunk is also useful elsewhere, so this change introduces tryChunkOf which won't throw, but might return nil. It also updates the documentation of chunkOf. Fixes #41296. 
Change-Id: Id5ae0ca3234480de1724fdf2e3677eeedcf76fa0 Reviewed-on: https://go-review.googlesource.com/c/go/+/253777 Run-TryBot: Michael Knyszek Reviewed-by: Keith Randall TryBot-Result: Gobot Gobot --- src/runtime/export_test.go | 17 ++++++++++------- src/runtime/mpagealloc.go | 13 +++++++++++++ 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 3307000c51..929bb35db6 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -358,7 +358,11 @@ func ReadMemStatsSlow() (base, slow MemStats) { } for i := mheap_.pages.start; i < mheap_.pages.end; i++ { - pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages) + chunk := mheap_.pages.tryChunkOf(i) + if chunk == nil { + continue + } + pg := chunk.scavenged.popcntRange(0, pallocChunkPages) slow.HeapReleased += uint64(pg) * pageSize } for _, p := range allp { @@ -756,11 +760,7 @@ func (p *PageAlloc) InUse() []AddrRange { // Returns nil if the PallocData's L2 is missing. func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData { ci := chunkIdx(i) - l2 := (*pageAlloc)(p).chunks[ci.l1()] - if l2 == nil { - return nil - } - return (*PallocData)(&l2[ci.l2()]) + return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci)) } // AddrRange represents a range over addresses. @@ -900,7 +900,10 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) { lock(&mheap_.lock) chunkLoop: for i := mheap_.pages.start; i < mheap_.pages.end; i++ { - chunk := mheap_.pages.chunkOf(i) + chunk := mheap_.pages.tryChunkOf(i) + if chunk == nil { + continue + } for j := 0; j < pallocChunkPages/64; j++ { // Run over each 64-bit bitmap section and ensure // scavenged is being cleared properly on allocation. 
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go index 8b3c62c375..c90a6378bd 100644 --- a/src/runtime/mpagealloc.go +++ b/src/runtime/mpagealloc.go @@ -326,7 +326,20 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) { s.scav.scavLWM = maxSearchAddr } +// tryChunkOf returns the bitmap data for the given chunk. +// +// Returns nil if the chunk data has not been mapped. +func (s *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData { + l2 := s.chunks[ci.l1()] + if l2 == nil { + return nil + } + return &l2[ci.l2()] +} + // chunkOf returns the chunk at the given chunk index. +// +// The chunk index must be valid or this method may throw. func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData { return &s.chunks[ci.l1()][ci.l2()] } -- cgit v1.2.3-54-g00ecf From 2556eb76c8e752907600274d323a52e97418d14f Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 9 Sep 2020 10:40:11 -0700 Subject: runtime: ignore SIGPROF if profiling disable for thread This avoids a deadlock on prof.signalLock between setcpuprofilerate and cpuprof.add if a SIGPROF is delivered to the thread between the call to setThreadCPUProfiler and acquiring prof.signalLock. Fixes #41014 Change-Id: Ie825e8594f93a19fb1a6320ed640f4e631553596 Reviewed-on: https://go-review.googlesource.com/c/go/+/253758 Run-TryBot: Ian Lance Taylor Reviewed-by: Bryan C. Mills TryBot-Result: Gobot Gobot --- src/runtime/proc.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 341d52aea8..739745aa26 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -3928,6 +3928,13 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { return } + // If mp.profilehz is 0, then profiling is not enabled for this thread. + // We must check this to avoid a deadlock between setcpuprofilerate + // and the call to cpuprof.add, below. 
+ if mp != nil && mp.profilehz == 0 { + return + } + // On mips{,le}, 64bit atomics are emulated with spinlocks, in // runtime/internal/atomic. If SIGPROF arrives while the program is inside // the critical section, it creates a deadlock (when writing the sample). -- cgit v1.2.3-54-g00ecf From b96d32bd92087470f85cfab99e289e609a593d03 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Wed, 1 Jul 2020 00:45:34 -0400 Subject: cmd/go/internal/modload: track which packages are in 'all' during loading If the user requests the 'all' pattern in addition to explicit roots outside of 'all', we should not load the transitive dependencies of those explicit roots as if they were *in* 'all'. Without the '-test' flag, we should not load test dependencies of any package outside of 'all'. Even *with* the '-test' flag, we should only load test dependencies of the requested roots, not test dependencies of other packages that happen to be imported by those roots. More precise tracking of membership in 'all' will be important when we implement lazy loading, because membership in 'all' determines which module dependencies we will record in the main module's go.mod file. This change also reduces reliance on global state, factors out the loading process into several smaller functions, and sets us up to reuse the 'go mod vendor' version of the 'all' pattern for lazy loading. For #36460 Fixes #40799 Change-Id: I5ca21c86a860daee1316f732cea131a331d8ddf9 Reviewed-on: https://go-review.googlesource.com/c/go/+/240505 Run-TryBot: Bryan C. 
Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod Reviewed-by: Michael Matloob --- src/cmd/go/internal/modload/load.go | 727 +++++++++++++++++++++--------- src/cmd/go/testdata/script/mod_notall.txt | 4 +- 2 files changed, 525 insertions(+), 206 deletions(-) diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 2a37f1d874..64ef60230e 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -4,6 +4,95 @@ package modload +// This file contains the module-mode package loader, as well as some accessory +// functions pertaining to the package import graph. +// +// There are several exported entry points into package loading (such as +// ImportPathsQuiet and LoadALL), but they are all implemented in terms of +// loadFromRoots, which itself manipulates an instance of the loader struct. +// +// Although most of the loading state is maintained in the loader struct, +// one key piece - the build list - is a global, so that it can be modified +// separate from the loading operation, such as during "go get" +// upgrades/downgrades or in "go mod" operations. +// TODO(#40775): It might be nice to make the loader take and return +// a buildList rather than hard-coding use of the global. +// +// Loading is an iterative process. On each iteration, we try to load the +// requested packages and their transitive imports, then try to resolve modules +// for any imported packages that are still missing. +// +// The first step of each iteration identifies a set of “root” packages. +// Normally the root packages are exactly those matching the named pattern +// arguments. However, for the "all" meta-pattern and related functions +// (LoadALL, LoadVendor), the final set of packages is computed from the package +// import graph, and therefore cannot be an initial input to loading that graph. 
+// Instead, the root packages for the "all" pattern are those contained in the +// main module, and allPatternIsRoot parameter to the loader instructs it to +// dynamically expand those roots to the full "all" pattern as loading +// progresses. +// +// The pkgInAll flag on each loadPkg instance tracks whether that +// package is known to match the "all" meta-pattern. +// A package matches the "all" pattern if: +// - it is in the main module, or +// - it is imported by any test in the main module, or +// - it is imported by another package in "all", or +// - the main module specifies a go version ≤ 1.15, and the package is imported +// by a *test of* another package in "all". +// +// When we implement lazy loading, we will record the modules providing packages +// in "all" even when we are only loading individual packages, so we set the +// pkgInAll flag regardless of the whether the "all" pattern is a root. +// (This is necessary to maintain the “import invariant” described in +// https://golang.org/design/36460-lazy-module-loading.) +// +// Because "go mod vendor" prunes out the tests of vendored packages, the +// behavior of the "all" pattern with -mod=vendor in Go 1.11–1.15 is the same +// as the "all" pattern (regardless of the -mod flag) in 1.16+. +// The allClosesOverTests parameter to the loader indicates whether the "all" +// pattern should close over tests (as in Go 1.11–1.15) or stop at only those +// packages transitively imported by the packages and tests in the main module +// ("all" in Go 1.16+ and "go mod vendor" in Go 1.11+). +// +// Note that it is possible for a loaded package NOT to be in "all" even when we +// are loading the "all" pattern. For example, packages that are transitive +// dependencies of other roots named on the command line must be loaded, but are +// not in "all". (The mod_notall test illustrates this behavior.) 
+// Similarly, if the LoadTests flag is set but the "all" pattern does not close +// over test dependencies, then when we load the test of a package that is in +// "all" but outside the main module, the dependencies of that test will not +// necessarily themselves be in "all". That configuration does not arise in Go +// 1.11–1.15, but it will be possible with lazy loading in Go 1.16+. +// +// Loading proceeds from the roots, using a parallel work-queue with a limit on +// the amount of active work (to avoid saturating disks, CPU cores, and/or +// network connections). Each package is added to the queue the first time it is +// imported by another package. When we have finished identifying the imports of +// a package, we add the test for that package if it is needed. A test may be +// needed if: +// - the package matches a root pattern and tests of the roots were requested, or +// - the package is in the main module and the "all" pattern is requested +// (because the "all" pattern includes the dependencies of tests in the main +// module), or +// - the package is in "all" and the definition of "all" we are using includes +// dependencies of tests (as is the case in Go ≤1.15). +// +// After all available packages have been loaded, we examine the results to +// identify any requested or imported packages that are still missing, and if +// so, which modules we could add to the module graph in order to make the +// missing packages available. We add those to the module graph and iterate, +// until either all packages resolve successfully or we cannot identify any +// module that would resolve any remaining missing package. +// +// If the main module is “tidy” (that is, if "go mod tidy" is a no-op for it) +// and all requested packages are in "all", then loading completes in a single +// iteration. 
+// TODO(bcmills): We should also be able to load in a single iteration if the +// requested packages all come from modules that are themselves tidy, regardless +// of whether those packages are in "all". Today, that requires two iterations +// if those packages are not found in existing dependencies of the main module. + import ( "bytes" "context" @@ -14,8 +103,12 @@ import ( "path" pathpkg "path" "path/filepath" + "reflect" + "runtime" "sort" "strings" + "sync" + "sync/atomic" "cmd/go/internal/base" "cmd/go/internal/cfg" @@ -43,10 +136,6 @@ var buildList []module.Version // loaded is the most recently-used package loader. // It holds details about individual packages. -// -// Note that loaded.buildList is only valid during a load operation; -// afterward, it is copied back into the global buildList, -// which should be used instead. var loaded *loader // ImportPaths returns the set of packages matching the args (patterns), @@ -63,7 +152,18 @@ func ImportPaths(ctx context.Context, patterns []string) []*search.Match { // packages. The build tags should typically be imports.Tags() or // imports.AnyTags(); a nil map has no special meaning. func ImportPathsQuiet(ctx context.Context, patterns []string, tags map[string]bool) []*search.Match { - updateMatches := func(matches []*search.Match, iterating bool) { + InitMod(ctx) + + allPatternIsRoot := false + var matches []*search.Match + for _, pattern := range search.CleanPatterns(patterns) { + matches = append(matches, search.NewMatch(pattern)) + if pattern == "all" { + allPatternIsRoot = true + } + } + + updateMatches := func(ld *loader) { for _, m := range matches { switch { case m.IsLocal(): @@ -90,7 +190,7 @@ func ImportPathsQuiet(ctx context.Context, patterns []string, tags map[string]bo // indicates that. 
ModRoot() - if !iterating { + if ld != nil { m.AddError(err) } continue @@ -103,19 +203,18 @@ func ImportPathsQuiet(ctx context.Context, patterns []string, tags map[string]bo case strings.Contains(m.Pattern(), "..."): m.Errs = m.Errs[:0] - matchPackages(ctx, m, loaded.tags, includeStd, buildList) + matchPackages(ctx, m, tags, includeStd, buildList) case m.Pattern() == "all": - loaded.testAll = true - if iterating { - // Enumerate the packages in the main module. - // We'll load the dependencies as we find them. + if ld == nil { + // The initial roots are the packages in the main module. + // loadFromRoots will expand that to "all". m.Errs = m.Errs[:0] - matchPackages(ctx, m, loaded.tags, omitStd, []module.Version{Target}) + matchPackages(ctx, m, tags, omitStd, []module.Version{Target}) } else { // Starting with the packages in the main module, // enumerate the full list of "all". - m.Pkgs = loaded.computePatternAll(m.Pkgs) + m.Pkgs = ld.computePatternAll() } case m.Pattern() == "std" || m.Pattern() == "cmd": @@ -129,25 +228,22 @@ func ImportPathsQuiet(ctx context.Context, patterns []string, tags map[string]bo } } - InitMod(ctx) - - var matches []*search.Match - for _, pattern := range search.CleanPatterns(patterns) { - matches = append(matches, search.NewMatch(pattern)) - } + loaded = loadFromRoots(loaderParams{ + tags: tags, + allPatternIsRoot: allPatternIsRoot, + allClosesOverTests: true, // until lazy loading in Go 1.16+ - loaded = newLoader(tags) - loaded.load(func() []string { - var roots []string - updateMatches(matches, true) - for _, m := range matches { - roots = append(roots, m.Pkgs...) - } - return roots + listRoots: func() (roots []string) { + updateMatches(nil) + for _, m := range matches { + roots = append(roots, m.Pkgs...) + } + return roots + }, }) // One last pass to finalize wildcards. 
- updateMatches(matches, false) + updateMatches(loaded) checkMultiplePaths() WriteGoMod() @@ -347,12 +443,14 @@ func ImportFromFiles(ctx context.Context, gofiles []string) { base.Fatalf("go: %v", err) } - loaded = newLoader(tags) - loaded.load(func() []string { - var roots []string - roots = append(roots, imports...) - roots = append(roots, testImports...) - return roots + loaded = loadFromRoots(loaderParams{ + tags: tags, + listRoots: func() (roots []string) { + roots = append(roots, imports...) + roots = append(roots, testImports...) + return roots + }, + allClosesOverTests: true, // until lazy loading. }) WriteGoMod() } @@ -397,9 +495,14 @@ func LoadBuildList(ctx context.Context) []module.Version { return buildList } +// ReloadBuildList resets the state of loaded packages, then loads and returns +// the build list set in SetBuildList. func ReloadBuildList() []module.Version { - loaded = newLoader(imports.Tags()) - loaded.load(func() []string { return nil }) + loaded = loadFromRoots(loaderParams{ + tags: imports.Tags(), + listRoots: func() []string { return nil }, + allClosesOverTests: true, // until lazy loading, but doesn't matter because the root list is empty. + }) return buildList } @@ -410,6 +513,7 @@ func ReloadBuildList() []module.Version { // This set is useful for deciding whether a particular import is needed // anywhere in a module. func LoadALL(ctx context.Context) []string { + InitMod(ctx) return loadAll(ctx, true) } @@ -418,20 +522,18 @@ func LoadALL(ctx context.Context) []string { // ignored completely. // This set is useful for identifying the which packages to include in a vendor directory. 
func LoadVendor(ctx context.Context) []string { + InitMod(ctx) return loadAll(ctx, false) } -func loadAll(ctx context.Context, testAll bool) []string { - InitMod(ctx) - - loaded = newLoader(imports.AnyTags()) - loaded.isALL = true - loaded.testAll = testAll - if !testAll { - loaded.testRoots = true - } - all := TargetPackages(ctx, "...") - loaded.load(func() []string { return all.Pkgs }) +func loadAll(ctx context.Context, closeOverTests bool) []string { + inTarget := TargetPackages(ctx, "...") + loaded = loadFromRoots(loaderParams{ + tags: imports.AnyTags(), + listRoots: func() []string { return inTarget.Pkgs }, + allPatternIsRoot: true, + allClosesOverTests: closeOverTests, + }) checkMultiplePaths() WriteGoMod() @@ -443,7 +545,7 @@ func loadAll(ctx context.Context, testAll bool) []string { } paths = append(paths, pkg.path) } - for _, err := range all.Errs { + for _, err := range inTarget.Errs { base.Errorf("%v", err) } base.ExitIfErrors() @@ -604,75 +706,157 @@ func Lookup(parentPath string, parentIsStd bool, path string) (dir, realPath str // the required packages for a particular build, // checking that the packages are available in the module set, // and updating the module set if needed. -// Loading is an iterative process: try to load all the needed packages, -// but if imports are missing, try to resolve those imports, and repeat. -// -// Although most of the loading state is maintained in the loader struct, -// one key piece - the build list - is a global, so that it can be modified -// separate from the loading operation, such as during "go get" -// upgrades/downgrades or in "go mod" operations. -// TODO(rsc): It might be nice to make the loader take and return -// a buildList rather than hard-coding use of the global. 
type loader struct { - tags map[string]bool // tags for scanDir - testRoots bool // include tests for roots - isALL bool // created with LoadALL - testAll bool // include tests for all packages - forceStdVendor bool // if true, load standard-library dependencies from the vendor subtree + loaderParams + + forceStdVendor bool // if true, load standard-library dependencies from the vendor subtree + + work *par.Queue // reset on each iteration roots []*loadPkg - pkgs []*loadPkg - work *par.Work // current work queue - pkgCache *par.Cache // map from string to *loadPkg + pkgCache *par.Cache // package path (string) → *loadPkg + pkgs []*loadPkg // transitive closure of loaded packages and tests; populated in buildStacks // computed at end of iterations direct map[string]bool // imported directly by main module } +type loaderParams struct { + tags map[string]bool // tags for scanDir + listRoots func() []string + allPatternIsRoot bool // Is the "all" pattern an additional root? + allClosesOverTests bool // Does the "all" pattern include the transitive closure of tests of packages in "all"? +} + // LoadTests controls whether the loaders load tests of the root packages. var LoadTests bool -func newLoader(tags map[string]bool) *loader { - ld := new(loader) - ld.tags = tags - ld.testRoots = LoadTests - - // Inside the "std" and "cmd" modules, we prefer to use the vendor directory - // unless the command explicitly changes the module graph. - if !targetInGorootSrc || (cfg.CmdName != "get" && !strings.HasPrefix(cfg.CmdName, "mod ")) { - ld.forceStdVendor = true +func (ld *loader) reset() { + select { + case <-ld.work.Idle(): + default: + panic("loader.reset when not idle") } - return ld -} - -func (ld *loader) reset() { ld.roots = nil - ld.pkgs = nil - ld.work = new(par.Work) ld.pkgCache = new(par.Cache) + ld.pkgs = nil } // A loadPkg records information about a single loaded package. 
type loadPkg struct { - path string // import path + // Populated at construction time: + path string // import path + testOf *loadPkg + + // Populated at construction time and updated by (*loader).applyPkgFlags: + flags atomicLoadPkgFlags + + // Populated by (*loader).load: mod module.Version // module providing package dir string // directory containing source code - imports []*loadPkg // packages imported by this one err error // error loading package - stack *loadPkg // package importing this one in minimal import stack for this pkg - test *loadPkg // package with test imports, if we need test - testOf *loadPkg - testImports []string // test-only imports, saved for use by pkg.test. + imports []*loadPkg // packages imported by this one + testImports []string // test-only imports, saved for use by pkg.test. + inStd bool + + // Populated by (*loader).pkgTest: + testOnce sync.Once + test *loadPkg + + // Populated by postprocessing in (*loader).buildStacks: + stack *loadPkg // package importing this one in minimal import stack for this pkg +} + +// loadPkgFlags is a set of flags tracking metadata about a package. +type loadPkgFlags int8 + +const ( + // pkgInAll indicates that the package is in the "all" package pattern, + // regardless of whether we are loading the "all" package pattern. + // + // When the pkgInAll flag and pkgImportsLoaded flags are both set, the caller + // who set the last of those flags must propagate the pkgInAll marking to all + // of the imports of the marked package. + // + // A test is marked with pkgInAll if that test would promote the packages it + // imports to be in "all" (such as when the test is itself within the main + // module, or when ld.allClosesOverTests is true). + pkgInAll loadPkgFlags = 1 << iota + + // pkgIsRoot indicates that the package matches one of the root package + // patterns requested by the caller. 
+ // + // If LoadTests is set, then when pkgIsRoot and pkgImportsLoaded are both set, + // the caller who set the last of those flags must populate a test for the + // package (in the pkg.test field). + // + // If the "all" pattern is included as a root, then non-test packages in "all" + // are also roots (and must be marked pkgIsRoot). + pkgIsRoot + + // pkgImportsLoaded indicates that the imports and testImports fields of a + // loadPkg have been populated. + pkgImportsLoaded +) + +// has reports whether all of the flags in cond are set in f. +func (f loadPkgFlags) has(cond loadPkgFlags) bool { + return f&cond == cond +} + +// An atomicLoadPkgFlags stores a loadPkgFlags for which individual flags can be +// added atomically. +type atomicLoadPkgFlags struct { + bits int32 +} + +// update sets the given flags in af (in addition to any flags already set). +// +// update returns the previous flag state so that the caller may determine which +// flags were newly-set. +func (af *atomicLoadPkgFlags) update(flags loadPkgFlags) (old loadPkgFlags) { + for { + old := atomic.LoadInt32(&af.bits) + new := old | int32(flags) + if new == old || atomic.CompareAndSwapInt32(&af.bits, old, new) { + return loadPkgFlags(old) + } + } +} + +// has reports whether all of the flags in cond are set in af. +func (af *atomicLoadPkgFlags) has(cond loadPkgFlags) bool { + return loadPkgFlags(atomic.LoadInt32(&af.bits))&cond == cond +} + +// isTest reports whether pkg is a test of another package. +func (pkg *loadPkg) isTest() bool { + return pkg.testOf != nil } var errMissing = errors.New("cannot find package") -// load attempts to load the build graph needed to process a set of root packages. -// The set of root packages is defined by the addRoots function, -// which must call add(path) with the import path of each root package. 
-func (ld *loader) load(roots func() []string) { +// loadFromRoots attempts to load the build graph needed to process a set of +// root packages and their dependencies. +// +// The set of root packages is returned by the params.listRoots function, and +// expanded to the full set of packages by tracing imports (and possibly tests) +// as needed. +func loadFromRoots(params loaderParams) *loader { + ld := &loader{ + loaderParams: params, + work: par.NewQueue(runtime.GOMAXPROCS(0)), + } + + // Inside the "std" and "cmd" modules, we prefer to use the vendor directory + // unless the command explicitly changes the module graph. + // TODO(bcmills): Is this still needed now that we have automatic vendoring? + if !targetInGorootSrc || (cfg.CmdName != "get" && !strings.HasPrefix(cfg.CmdName, "mod ")) { + ld.forceStdVendor = true + } + var err error reqs := Reqs() buildList, err = mvs.BuildList(Target, reqs) @@ -680,47 +864,34 @@ func (ld *loader) load(roots func() []string) { base.Fatalf("go: %v", err) } - added := make(map[string]bool) + addedModuleFor := make(map[string]bool) for { ld.reset() - if roots != nil { - // Note: the returned roots can change on each iteration, - // since the expansion of package patterns depends on the - // build list we're using. - for _, path := range roots() { - ld.work.Add(ld.pkg(path, true)) + + // Load the root packages and their imports. + // Note: the returned roots can change on each iteration, + // since the expansion of package patterns depends on the + // build list we're using. + inRoots := map[*loadPkg]bool{} + for _, path := range ld.listRoots() { + root := ld.pkg(path, pkgIsRoot) + if !inRoots[root] { + ld.roots = append(ld.roots, root) + inRoots[root] = true } } - ld.work.Do(10, ld.doPkg) + + // ld.pkg adds imported packages to the work queue and calls applyPkgFlags, + // which adds tests (and test dependencies) as needed. 
+ // + // When all of the work in the queue has completed, we'll know that the + // transitive closure of dependencies has been loaded. + <-ld.work.Idle() + ld.buildStacks() - numAdded := 0 - haveMod := make(map[module.Version]bool) - for _, m := range buildList { - haveMod[m] = true - } - modAddedBy := make(map[module.Version]*loadPkg) - for _, pkg := range ld.pkgs { - if err, ok := pkg.err.(*ImportMissingError); ok && err.Module.Path != "" { - if err.newMissingVersion != "" { - base.Fatalf("go: %s: package provided by %s at latest version %s but not at required version %s", pkg.stackText(), err.Module.Path, err.Module.Version, err.newMissingVersion) - } - fmt.Fprintf(os.Stderr, "go: found %s in %s %s\n", pkg.path, err.Module.Path, err.Module.Version) - if added[pkg.path] { - base.Fatalf("go: %s: looping trying to add package", pkg.stackText()) - } - added[pkg.path] = true - numAdded++ - if !haveMod[err.Module] { - haveMod[err.Module] = true - modAddedBy[err.Module] = pkg - buildList = append(buildList, err.Module) - } - continue - } - // Leave other errors for Import or load.Packages to report. - } - base.ExitIfErrors() - if numAdded == 0 { + + modAddedBy := resolveMissingImports(addedModuleFor, ld.pkgs) + if len(modAddedBy) == 0 { break } @@ -753,92 +924,257 @@ func (ld *loader) load(roots func() []string) { } } - // Mix in direct markings (really, lack of indirect markings) - // from go.mod, unless we scanned the whole module - // and can therefore be sure we know better than go.mod. - if !ld.isALL && modFile != nil { + // If we didn't scan all of the imports from the main module, or didn't use + // imports.AnyTags, then we didn't necessarily load every package that + // contributes “direct” imports — so we can't safely mark existing + // dependencies as indirect-only. + // Conservatively mark those dependencies as direct. 
+ if modFile != nil && (!ld.allPatternIsRoot || !reflect.DeepEqual(ld.tags, imports.AnyTags())) { for _, r := range modFile.Require { if !r.Indirect { ld.direct[r.Mod.Path] = true } } } + + return ld } -// pkg returns the *loadPkg for path, creating and queuing it if needed. -// If the package should be tested, its test is created but not queued -// (the test is queued after processing pkg). -// If isRoot is true, the pkg is being queued as one of the roots of the work graph. -func (ld *loader) pkg(path string, isRoot bool) *loadPkg { - return ld.pkgCache.Do(path, func() interface{} { - pkg := &loadPkg{ - path: path, +// resolveMissingImports adds module dependencies to the global build list +// in order to resolve missing packages from pkgs. +// +// The newly-resolved packages are added to the addedModuleFor map, and +// resolveMissingImports returns a map from each newly-added module version to +// the first package for which that module was added. +func resolveMissingImports(addedModuleFor map[string]bool, pkgs []*loadPkg) (modAddedBy map[module.Version]*loadPkg) { + haveMod := make(map[module.Version]bool) + for _, m := range buildList { + haveMod[m] = true + } + + modAddedBy = make(map[module.Version]*loadPkg) + for _, pkg := range pkgs { + if pkg.isTest() { + // If we are missing a test, we are also missing its non-test version, and + // we should only add the missing import once. 
+ continue } - if ld.testRoots && isRoot || ld.testAll { - test := &loadPkg{ - path: path, - testOf: pkg, + if err, ok := pkg.err.(*ImportMissingError); ok && err.Module.Path != "" { + if err.newMissingVersion != "" { + base.Fatalf("go: %s: package provided by %s at latest version %s but not at required version %s", pkg.stackText(), err.Module.Path, err.Module.Version, err.newMissingVersion) } - pkg.test = test + fmt.Fprintf(os.Stderr, "go: found %s in %s %s\n", pkg.path, err.Module.Path, err.Module.Version) + if addedModuleFor[pkg.path] { + base.Fatalf("go: %s: looping trying to add package", pkg.stackText()) + } + addedModuleFor[pkg.path] = true + if !haveMod[err.Module] { + haveMod[err.Module] = true + modAddedBy[err.Module] = pkg + buildList = append(buildList, err.Module) + } + continue } - if isRoot { - ld.roots = append(ld.roots, pkg) + // Leave other errors for Import or load.Packages to report. + } + base.ExitIfErrors() + + return modAddedBy +} + +// pkg locates the *loadPkg for path, creating and queuing it for loading if +// needed, and updates its state to reflect the given flags. +// +// The imports of the returned *loadPkg will be loaded asynchronously in the +// ld.work queue, and its test (if requested) will also be populated once +// imports have been resolved. When ld.work goes idle, all transitive imports of +// the requested package (and its test, if requested) will have been loaded. +func (ld *loader) pkg(path string, flags loadPkgFlags) *loadPkg { + if flags.has(pkgImportsLoaded) { + panic("internal error: (*loader).pkg called with pkgImportsLoaded flag set") + } + + pkg := ld.pkgCache.Do(path, func() interface{} { + pkg := &loadPkg{ + path: path, } - ld.work.Add(pkg) + ld.applyPkgFlags(pkg, flags) + + ld.work.Add(func() { ld.load(pkg) }) return pkg }).(*loadPkg) + + ld.applyPkgFlags(pkg, flags) + return pkg } -// doPkg processes a package on the work queue. -func (ld *loader) doPkg(item interface{}) { - // TODO: what about replacements? 
- pkg := item.(*loadPkg) - var imports []string - if pkg.testOf != nil { - pkg.dir = pkg.testOf.dir - pkg.mod = pkg.testOf.mod - imports = pkg.testOf.testImports - } else { - if strings.Contains(pkg.path, "@") { - // Leave for error during load. - return - } - if build.IsLocalImport(pkg.path) || filepath.IsAbs(pkg.path) { - // Leave for error during load. - // (Module mode does not allow local imports.) - return - } +// applyPkgFlags updates pkg.flags to set the given flags and propagate the +// (transitive) effects of those flags, possibly loading or enqueueing further +// packages as a result. +func (ld *loader) applyPkgFlags(pkg *loadPkg, flags loadPkgFlags) { + if flags == 0 { + return + } - // TODO(matloob): Handle TODO context. This needs to be threaded through Do. - pkg.mod, pkg.dir, pkg.err = Import(context.TODO(), pkg.path) - if pkg.dir == "" { - return + if flags.has(pkgInAll) && ld.allPatternIsRoot && !pkg.isTest() { + // This package matches a root pattern by virtue of being in "all". + flags |= pkgIsRoot + } + + old := pkg.flags.update(flags) + new := old | flags + if new == old || !new.has(pkgImportsLoaded) { + // We either didn't change the state of pkg, or we don't know anything about + // its dependencies yet. Either way, we can't usefully load its test or + // update its dependencies. + return + } + + if !pkg.isTest() { + // Check whether we should add (or update the flags for) a test for pkg. + // ld.pkgTest is idempotent and extra invocations are inexpensive, + // so it's ok if we call it more than is strictly necessary. + wantTest := false + switch { + case ld.allPatternIsRoot && pkg.mod == Target: + // We are loading the "all" pattern, which includes packages imported by + // tests in the main module. This package is in the main module, so we + // need to identify the imports of its test even if LoadTests is not set. + // + // (We will filter out the extra tests explicitly in computePatternAll.) 
+ wantTest = true + + case ld.allPatternIsRoot && ld.allClosesOverTests && new.has(pkgInAll): + // This variant of the "all" pattern includes imports of tests of every + // package that is itself in "all", and pkg is in "all", so its test is + // also in "all" (as above). + wantTest = true + + case LoadTests && new.has(pkgIsRoot): + // LoadTest explicitly requests tests of “the root packages”. + wantTest = true } - var testImports []string - var err error - imports, testImports, err = scanDir(pkg.dir, ld.tags) - if err != nil { - pkg.err = err - return + + if wantTest { + var testFlags loadPkgFlags + if pkg.mod == Target || (ld.allClosesOverTests && new.has(pkgInAll)) { + // Tests of packages in the main module are in "all", in the sense that + // they cause the packages they import to also be in "all". So are tests + // of packages in "all" if "all" closes over test dependencies. + testFlags |= pkgInAll + } + ld.pkgTest(pkg, testFlags) } - if pkg.test != nil { - pkg.testImports = testImports + } + + if new.has(pkgInAll) && !old.has(pkgInAll|pkgImportsLoaded) { + // We have just marked pkg with pkgInAll, or we have just loaded its + // imports, or both. Now is the time to propagate pkgInAll to the imports. + for _, dep := range pkg.imports { + ld.applyPkgFlags(dep, pkgInAll) } } +} + +// load loads an individual package. +func (ld *loader) load(pkg *loadPkg) { + if strings.Contains(pkg.path, "@") { + // Leave for error during load. + return + } + if build.IsLocalImport(pkg.path) || filepath.IsAbs(pkg.path) { + // Leave for error during load. + // (Module mode does not allow local imports.) + return + } + + pkg.mod, pkg.dir, pkg.err = Import(context.TODO(), pkg.path) + if pkg.dir == "" { + return + } + if pkg.mod == Target { + // Go ahead and mark pkg as in "all". This provides the invariant that a + // package that is *only* imported by other packages in "all" is always + // marked as such before loading its imports. 
+ // + // We don't actually rely on that invariant at the moment, but it may + // improve efficiency somewhat and makes the behavior a bit easier to reason + // about (by reducing churn on the flag bits of dependencies), and costs + // essentially nothing (these atomic flag ops are essentially free compared + // to scanning source code for imports). + ld.applyPkgFlags(pkg, pkgInAll) + } + + imports, testImports, err := scanDir(pkg.dir, ld.tags) + if err != nil { + pkg.err = err + return + } + + pkg.inStd = (search.IsStandardImportPath(pkg.path) && search.InDir(pkg.dir, cfg.GOROOTsrc) != "") - inStd := (search.IsStandardImportPath(pkg.path) && search.InDir(pkg.dir, cfg.GOROOTsrc) != "") + pkg.imports = make([]*loadPkg, 0, len(imports)) + var importFlags loadPkgFlags + if pkg.flags.has(pkgInAll) { + importFlags = pkgInAll + } for _, path := range imports { - if inStd { + if pkg.inStd { + // Imports from packages in "std" should resolve using GOROOT/src/vendor + // even when "std" is not the main module. path = ld.stdVendor(pkg.path, path) } - pkg.imports = append(pkg.imports, ld.pkg(path, false)) + pkg.imports = append(pkg.imports, ld.pkg(path, importFlags)) } + pkg.testImports = testImports - // Now that pkg.dir, pkg.mod, pkg.testImports are set, we can queue pkg.test. - // TODO: All that's left is creating new imports. Why not just do it now? - if pkg.test != nil { - ld.work.Add(pkg.test) + ld.applyPkgFlags(pkg, pkgImportsLoaded) +} + +// pkgTest locates the test of pkg, creating it if needed, and updates its state +// to reflect the given flags. +// +// pkgTest requires that the imports of pkg have already been loaded (flagged +// with pkgImportsLoaded). 
+func (ld *loader) pkgTest(pkg *loadPkg, testFlags loadPkgFlags) *loadPkg { + if pkg.isTest() { + panic("pkgTest called on a test package") + } + + createdTest := false + pkg.testOnce.Do(func() { + pkg.test = &loadPkg{ + path: pkg.path, + testOf: pkg, + mod: pkg.mod, + dir: pkg.dir, + err: pkg.err, + inStd: pkg.inStd, + } + ld.applyPkgFlags(pkg.test, testFlags) + createdTest = true + }) + + test := pkg.test + if createdTest { + test.imports = make([]*loadPkg, 0, len(pkg.testImports)) + var importFlags loadPkgFlags + if test.flags.has(pkgInAll) { + importFlags = pkgInAll + } + for _, path := range pkg.testImports { + if pkg.inStd { + path = ld.stdVendor(test.path, path) + } + test.imports = append(test.imports, ld.pkg(path, importFlags)) + } + pkg.testImports = nil + ld.applyPkgFlags(test, pkgImportsLoaded) + } else { + ld.applyPkgFlags(test, testFlags) } + + return test } // stdVendor returns the canonical import path for the package with the given @@ -868,30 +1204,13 @@ func (ld *loader) stdVendor(parentPath, path string) string { // computePatternAll returns the list of packages matching pattern "all", // starting with a list of the import paths for the packages in the main module. 
-func (ld *loader) computePatternAll(paths []string) []string { - seen := make(map[*loadPkg]bool) - var all []string - var walk func(*loadPkg) - walk = func(pkg *loadPkg) { - if seen[pkg] { - return - } - seen[pkg] = true - if pkg.testOf == nil { +func (ld *loader) computePatternAll() (all []string) { + for _, pkg := range ld.pkgs { + if pkg.flags.has(pkgInAll) && !pkg.isTest() { all = append(all, pkg.path) } - for _, p := range pkg.imports { - walk(p) - } - if p := pkg.test; p != nil { - walk(p) - } - } - for _, path := range paths { - walk(ld.pkg(path, false)) } sort.Strings(all) - return all } diff --git a/src/cmd/go/testdata/script/mod_notall.txt b/src/cmd/go/testdata/script/mod_notall.txt index 72a02485a4..29ca6066fa 100644 --- a/src/cmd/go/testdata/script/mod_notall.txt +++ b/src/cmd/go/testdata/script/mod_notall.txt @@ -18,7 +18,7 @@ stdout '^x/otherdep$' ! stdout '^x/fromotherroottest$' ! stdout '^y/fromotherdeptest$' -# TODO(#40799): cmp go.mod go.mod.orig +cmp go.mod go.mod.orig # With -deps -test, test dependencies of other roots should be included, # but test dependencies of non-roots should not. @@ -33,7 +33,7 @@ stdout '^x/otherdep$' stdout '^x/fromotherroottest$' ! stdout '^y/fromotherdeptest$' -# TODO(#40799): cmp go.mod go.mod.orig +cmp go.mod go.mod.orig -- m.go -- package m -- cgit v1.2.3-54-g00ecf From 26d27f96fec733fe09751b49b47282c9109fb8ad Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Thu, 27 Aug 2020 16:34:59 -0400 Subject: cmd/go/internal/modload: remove (*loader).forceStdVendor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit forceStdVendor was a special-case mechanism to allow Go contributors to use vendored dependencies by default when working in GOROOT/src. As of Go 1.14,¹ the 'go' command uses vendored dependencies by default within all modules, so the 'std' and 'cmd' modules no longer need to be special cases, and we can remove this special-case code. 
¹ https://golang.org/doc/go1.14#vendor Updates #33848 Updates #30241 Change-Id: Ib2fb5841c253113b17fa86a086ce85a22ac3d121 Reviewed-on: https://go-review.googlesource.com/c/go/+/251159 Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod Reviewed-by: Michael Matloob --- src/cmd/go/internal/modload/load.go | 25 +++++------ src/cmd/go/testdata/script/mod_list_std.txt | 64 ++++++++++++++++++--------- src/cmd/go/testdata/script/mod_std_vendor.txt | 6 +-- 3 files changed, 57 insertions(+), 38 deletions(-) diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 64ef60230e..8a3af534a5 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -709,8 +709,6 @@ func Lookup(parentPath string, parentIsStd bool, path string) (dir, realPath str type loader struct { loaderParams - forceStdVendor bool // if true, load standard-library dependencies from the vendor subtree - work *par.Queue // reset on each iteration @@ -850,13 +848,6 @@ func loadFromRoots(params loaderParams) *loader { work: par.NewQueue(runtime.GOMAXPROCS(0)), } - // Inside the "std" and "cmd" modules, we prefer to use the vendor directory - // unless the command explicitly changes the module graph. - // TODO(bcmills): Is this still needed now that we have automatic vendoring? - if !targetInGorootSrc || (cfg.CmdName != "get" && !strings.HasPrefix(cfg.CmdName, "mod ")) { - ld.forceStdVendor = true - } - var err error reqs := Reqs() buildList, err = mvs.BuildList(Target, reqs) @@ -1120,8 +1111,8 @@ func (ld *loader) load(pkg *loadPkg) { } for _, path := range imports { if pkg.inStd { - // Imports from packages in "std" should resolve using GOROOT/src/vendor - // even when "std" is not the main module. + // Imports from packages in "std" and "cmd" should resolve using + // GOROOT/src/vendor even when "std" is not the main module. 
path = ld.stdVendor(pkg.path, path) } pkg.imports = append(pkg.imports, ld.pkg(path, importFlags)) @@ -1185,13 +1176,21 @@ func (ld *loader) stdVendor(parentPath, path string) string { } if str.HasPathPrefix(parentPath, "cmd") { - if ld.forceStdVendor || Target.Path != "cmd" { + if Target.Path != "cmd" { vendorPath := pathpkg.Join("cmd", "vendor", path) if _, err := os.Stat(filepath.Join(cfg.GOROOTsrc, filepath.FromSlash(vendorPath))); err == nil { return vendorPath } } - } else if ld.forceStdVendor || Target.Path != "std" { + } else if Target.Path != "std" || str.HasPathPrefix(parentPath, "vendor") { + // If we are outside of the 'std' module, resolve imports from within 'std' + // to the vendor directory. + // + // Do the same for importers beginning with the prefix 'vendor/' even if we + // are *inside* of the 'std' module: the 'vendor/' packages that resolve + // globally from GOROOT/src/vendor (and are listed as part of 'go list std') + // are distinct from the real module dependencies, and cannot import internal + // packages from the real module. vendorPath := pathpkg.Join("vendor", path) if _, err := os.Stat(filepath.Join(cfg.GOROOTsrc, filepath.FromSlash(vendorPath))); err == nil { return vendorPath diff --git a/src/cmd/go/testdata/script/mod_list_std.txt b/src/cmd/go/testdata/script/mod_list_std.txt index 76a3b00d1c..baf7908ab9 100644 --- a/src/cmd/go/testdata/script/mod_list_std.txt +++ b/src/cmd/go/testdata/script/mod_list_std.txt @@ -6,8 +6,13 @@ env GOPROXY=off # Outside of GOROOT, our vendored packages should be reported as part of the standard library. go list -f '{{if .Standard}}{{.ImportPath}}{{end}}' std cmd -stdout ^vendor/golang.org/x/net/http2/hpack +stdout ^vendor/golang\.org/x/net/http2/hpack stdout ^cmd/vendor/golang\.org/x/arch/x86/x86asm +! stdout ^golang\.org/x/ + +# The dependencies of those packages should also be vendored. +go list -deps vendor/golang.org/x/crypto/chacha20 +stdout ^vendor/golang\.org/x/crypto/internal/subtle # cmd/... 
should match the same packages it used to match in GOPATH mode. go list cmd/... @@ -23,40 +28,57 @@ stdout ^bytes$ ! stdout ^builtin$ ! stdout ^cmd/ ! stdout ^vendor/ +! stdout ^golang\.org/x/ + +# Vendored dependencies should appear with their 'vendor/' paths in std (they're +# in GOROOT/src, but not in the 'std' module following the usual module-boundary +# rules). -# Within the std module, listing ./... should omit the 'std' prefix: -# the package paths should be the same via ./... or the 'std' meta-pattern. -# TODO(golang.org/issue/30241): Make that work. -# Today, they are listed in 'std' but not './...'. cd $GOROOT/src -go list ./... -! stdout ^vendor/golang.org/x # TODO: should be included, or should be omitted from 'std'. -cp stdout $WORK/listdot.txt go list std -stdout ^vendor/golang.org/x # TODO: remove vendor/ prefix -# TODO: cmp stdout $WORK/listdot.txt +stdout ^vendor/golang.org/x/net/http2/hpack +! stdout ^golang\.org/x + +# The dependencies of packages with an explicit 'vendor/' prefix should +# still themselves resolve to vendored packages. +go list -deps vendor/golang.org/x/crypto/chacha20 +stdout ^vendor/golang.org/x/crypto/internal/subtle +! stdout ^golang\.org/x + +# Within the std module, the dependencies of the non-vendored packages within +# std should appear to come from modules, but they should be loaded from the +# vendor directory (just like ordinary vendored module dependencies). go list all -stdout ^vendor/golang.org/x # TODO: remove vendor/ prefix. +stdout ^golang.org/x/ ! stdout ^std/ +! stdout ^cmd/ +! stdout ^vendor/ +go list -deps -f '{{if not .Standard}}{{.ImportPath}}{{end}}' std +! stdout ^vendor/golang.org/x/net/http2/hpack +stdout ^golang.org/x/net/http2/hpack -# Within the std module, the vendored dependencies of std should appear -# to come from the actual modules. -# TODO(golang.org/issue/30241): Make that work. -# Today, they still have the vendor/ prefix. -go list std -stdout ^vendor/golang.org/x/net/http2/hpack # TODO -! 
stdout ^golang.org/x/net/http2/hpack # TODO +go list -f '{{.Dir}}' golang.org/x/net/http2/hpack +stdout $GOROOT[/\\]src[/\\]vendor -go list -deps -f '{{if not .Standard}}{{.ImportPath}}{{end}}' std -# ! stdout ^vendor/golang.org/x/net/http2/hpack # TODO -! stdout ^golang.org/x/net/http2/hpack # TODO +# Within the std module, the packages within the module should omit the 'std/' +# prefix (they retain their own identities), but should respect normal module +# boundaries (vendored packages are not included in the module, even though they +# are included in the 'std' pattern). + +go list ./... +stdout ^bytes$ +! stdout ^builtin$ +! stdout ^cmd/ +! stdout ^vendor/ +! stdout ^golang\.org/x/ # Within std, the vendored dependencies of cmd should still appear to be part of cmd. + go list -f '{{if .Standard}}{{.ImportPath}}{{end}}' cmd stdout ^cmd/vendor/golang\.org/x/arch/x86/x86asm diff --git a/src/cmd/go/testdata/script/mod_std_vendor.txt b/src/cmd/go/testdata/script/mod_std_vendor.txt index 5986cff594..fb954d74ed 100644 --- a/src/cmd/go/testdata/script/mod_std_vendor.txt +++ b/src/cmd/go/testdata/script/mod_std_vendor.txt @@ -37,12 +37,10 @@ stderr 'use of vendored package' # When run within the 'std' module, 'go list -test' should report vendored # transitive dependencies at their original module paths. -# TODO(golang.org/issue/30241): Make that work. -# Today, they're standard packages as long as they exist. cd $GOROOT/src go list -test -f '{{range .Deps}}{{.}}{{"\n"}}{{end}}' net/http -stdout ^vendor/golang.org/x/net/http2/hpack # TODO: remove vendor/ prefix -! stdout ^golang.org/x/net/http2/hpack +stdout ^golang.org/x/net/http2/hpack +! stdout ^vendor/golang.org/x/net/http2/hpack -- go.mod -- module m -- cgit v1.2.3-54-g00ecf From 015a5a5c5c4b4ce4dce55601032b8e2f5fbcca9a Mon Sep 17 00:00:00 2001 From: "Bryan C. 
Mills" Date: Fri, 28 Aug 2020 18:19:22 -0400 Subject: cmd/go/internal/modload: rework import resolution modload.Import previously performed two otherwise-separable tasks: 1. Identify which module in the build list contains the requested package. 2. If no such module exists, search available modules to try to find the missing package. This change splits those two tasks into two separate unexported functions, and reports import-resolution errors by attaching them to the package rather than emitting them directly to stderr. That allows 'list' to report the errors, but 'list -e' to ignore them. With the two tasks now separate, it will be easier to avoid the overhead of resolving missing packages during lazy loading if we discover that some existing dependency needs to be promoted to the top level (potentially altering the main module's selected versions, and thus supplying packages that were previously missing). For #36460 Updates #26909 Change-Id: I32bd853b266d7cd231d1f45f92b0650d95c4bcbd Reviewed-on: https://go-review.googlesource.com/c/go/+/251445 Run-TryBot: Bryan C. 
Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod Reviewed-by: Michael Matloob --- src/cmd/go/internal/list/list.go | 21 ++++++- src/cmd/go/internal/modload/import.go | 71 ++++++++++++++-------- src/cmd/go/internal/modload/import_test.go | 44 +++++++++++--- src/cmd/go/internal/modload/load.go | 59 ++++++++++-------- src/cmd/go/testdata/script/list_bad_import.txt | 18 +++--- src/cmd/go/testdata/script/list_test_err.txt | 3 + src/cmd/go/testdata/script/mod_list_bad_import.txt | 18 +++--- .../testdata/script/mod_missingpkg_prerelease.txt | 4 +- 8 files changed, 158 insertions(+), 80 deletions(-) diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go index 6d81c1cad1..65003dc883 100644 --- a/src/cmd/go/internal/list/list.go +++ b/src/cmd/go/internal/list/list.go @@ -545,7 +545,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { // Note that -deps is applied after -test, // so that you only get descriptions of tests for the things named // explicitly on the command line, not for all dependencies. - pkgs = load.PackageList(pkgs) + pkgs = loadPackageList(pkgs) } // Do we need to run a build to gather information? @@ -580,7 +580,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { if *listTest { all := pkgs if !*listDeps { - all = load.PackageList(pkgs) + all = loadPackageList(pkgs) } // Update import paths to distinguish the real package p // from p recompiled for q.test. @@ -697,6 +697,23 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { } } +// loadPackageList is like load.PackageList, but prints error messages and exits +// with nonzero status if listE is not set and any package in the expanded list +// has errors. 
+func loadPackageList(roots []*load.Package) []*load.Package { + pkgs := load.PackageList(roots) + + if !*listE { + for _, pkg := range pkgs { + if pkg.Error != nil { + base.Errorf("%v", pkg.Error) + } + } + } + + return pkgs +} + // TrackingWriter tracks the last byte written on every write so // we can avoid printing a newline if one was already written or // if there is no output at all. diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go index 6459e716b7..e04d66c5b1 100644 --- a/src/cmd/go/internal/modload/import.go +++ b/src/cmd/go/internal/modload/import.go @@ -26,6 +26,8 @@ import ( "golang.org/x/mod/semver" ) +var errImportMissing = errors.New("import missing") + type ImportMissingError struct { Path string Module module.Version @@ -48,6 +50,11 @@ func (e *ImportMissingError) Error() string { } return "cannot find module providing package " + e.Path } + + if e.newMissingVersion != "" { + return fmt.Sprintf("package %s provided by %s at latest version %s but not at required version %s", e.Path, e.Module.Path, e.Module.Version, e.newMissingVersion) + } + return fmt.Sprintf("missing module for import: %s@%s provides %s", e.Module.Path, e.Module.Version, e.Path) } @@ -100,18 +107,20 @@ func (e *AmbiguousImportError) Error() string { var _ load.ImportPathError = &AmbiguousImportError{} -// Import finds the module and directory in the build list -// containing the package with the given import path. -// The answer must be unique: Import returns an error -// if multiple modules attempt to provide the same package. -// Import can return a module with an empty m.Path, for packages in the standard library. -// Import can return an empty directory string, for fake packages like "C" and "unsafe". +// importFromBuildList finds the module and directory in the build list +// containing the package with the given import path. 
The answer must be unique: +// importFromBuildList returns an error if multiple modules attempt to provide +// the same package. +// +// importFromBuildList can return a module with an empty m.Path, for packages in +// the standard library. +// +// importFromBuildList can return an empty directory string, for fake packages +// like "C" and "unsafe". // // If the package cannot be found in the current build list, -// Import returns an ImportMissingError as the error. -// If Import can identify a module that could be added to supply the package, -// the ImportMissingError records that module. -func Import(ctx context.Context, path string) (m module.Version, dir string, err error) { +// importFromBuildList returns errImportMissing as the error. +func importFromBuildList(ctx context.Context, path string) (m module.Version, dir string, err error) { if strings.Contains(path, "@") { return module.Version{}, "", fmt.Errorf("import path should not have @version") } @@ -190,8 +199,14 @@ func Import(ctx context.Context, path string) (m module.Version, dir string, err return module.Version{}, "", &AmbiguousImportError{importPath: path, Dirs: dirs, Modules: mods} } - // Look up module containing the package, for addition to the build list. - // Goal is to determine the module, download it to dir, and return m, dir, ErrMissing. + return module.Version{}, "", errImportMissing +} + +// queryImport attempts to locate a module that can be added to the current +// build list to provide the package with the given import path. 
+func queryImport(ctx context.Context, path string) (module.Version, error) { + pathIsStd := search.IsStandardImportPath(path) + if cfg.BuildMod == "readonly" { var queryErr error if !pathIsStd { @@ -201,10 +216,10 @@ func Import(ctx context.Context, path string) (m module.Version, dir string, err queryErr = fmt.Errorf("import lookup disabled by -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason) } } - return module.Version{}, "", &ImportMissingError{Path: path, QueryErr: queryErr} + return module.Version{}, &ImportMissingError{Path: path, QueryErr: queryErr} } if modRoot == "" && !allowMissingModuleImports { - return module.Version{}, "", &ImportMissingError{ + return module.Version{}, &ImportMissingError{ Path: path, QueryErr: errors.New("working directory is not part of a module"), } @@ -226,7 +241,7 @@ func Import(ctx context.Context, path string) (m module.Version, dir string, err } } - mods = make([]module.Version, 0, len(latest)) + mods := make([]module.Version, 0, len(latest)) for p, v := range latest { // If the replacement didn't specify a version, synthesize a // pseudo-version with an appropriate major version and a timestamp below @@ -252,19 +267,19 @@ func Import(ctx context.Context, path string) (m module.Version, dir string, err root, isLocal, err := fetch(ctx, m) if err != nil { // Report fetch error as above. - return module.Version{}, "", err + return module.Version{}, err } if _, ok, err := dirInModule(path, m.Path, root, isLocal); err != nil { - return m, "", err + return m, err } else if ok { - return m, "", &ImportMissingError{Path: path, Module: m} + return m, nil } } if len(mods) > 0 && module.CheckPath(path) != nil { // The package path is not valid to fetch remotely, // so it can only exist if in a replaced module, // and we know from the above loop that it is not. 
- return module.Version{}, "", &PackageNotInModuleError{ + return module.Version{}, &PackageNotInModuleError{ Mod: mods[0], Query: "latest", Pattern: path, @@ -281,7 +296,7 @@ func Import(ctx context.Context, path string) (m module.Version, dir string, err // QueryPackage cannot possibly find a module containing this package. // // Instead of trying QueryPackage, report an ImportMissingError immediately. - return module.Version{}, "", &ImportMissingError{Path: path} + return module.Version{}, &ImportMissingError{Path: path} } fmt.Fprintf(os.Stderr, "go: finding module for package %s\n", path) @@ -291,12 +306,13 @@ func Import(ctx context.Context, path string) (m module.Version, dir string, err if errors.Is(err, os.ErrNotExist) { // Return "cannot find module providing package […]" instead of whatever // low-level error QueryPackage produced. - return module.Version{}, "", &ImportMissingError{Path: path, QueryErr: err} + return module.Version{}, &ImportMissingError{Path: path, QueryErr: err} } else { - return module.Version{}, "", err + return module.Version{}, err } } - m = candidates[0].Mod + + m := candidates[0].Mod newMissingVersion := "" for _, c := range candidates { cm := c.Mod @@ -310,13 +326,20 @@ func Import(ctx context.Context, path string) (m module.Version, dir string, err // version (e.g., v1.0.0) of a module, but we have a newer version // of the same module in the build list (e.g., v1.0.1-beta), and // the package is not present there. + // + // TODO(#41113): This is probably incorrect when there are multiple + // candidates, such as when a nested module is split out but only one + // half of the split is tagged. 
m = cm newMissingVersion = bm.Version break } } } - return m, "", &ImportMissingError{Path: path, Module: m, newMissingVersion: newMissingVersion} + if newMissingVersion != "" { + return m, &ImportMissingError{Path: path, Module: m, newMissingVersion: newMissingVersion} + } + return m, nil } // maybeInModule reports whether, syntactically, diff --git a/src/cmd/go/internal/modload/import_test.go b/src/cmd/go/internal/modload/import_test.go index 47ce89a084..22d5b82e21 100644 --- a/src/cmd/go/internal/modload/import_test.go +++ b/src/cmd/go/internal/modload/import_test.go @@ -10,15 +10,20 @@ import ( "regexp" "strings" "testing" + + "golang.org/x/mod/module" ) var importTests = []struct { path string + m module.Version err string }{ { path: "golang.org/x/net/context", - err: "missing module for import: golang.org/x/net@.* provides golang.org/x/net/context", + m: module.Version{ + Path: "golang.org/x/net", + }, }, { path: "golang.org/x/net", @@ -26,15 +31,23 @@ var importTests = []struct { }, { path: "golang.org/x/text", - err: "missing module for import: golang.org/x/text@.* provides golang.org/x/text", + m: module.Version{ + Path: "golang.org/x/text", + }, }, { path: "github.com/rsc/quote/buggy", - err: "missing module for import: github.com/rsc/quote@v1.5.2 provides github.com/rsc/quote/buggy", + m: module.Version{ + Path: "github.com/rsc/quote", + Version: "v1.5.2", + }, }, { path: "github.com/rsc/quote", - err: "missing module for import: github.com/rsc/quote@v1.5.2 provides github.com/rsc/quote", + m: module.Version{ + Path: "github.com/rsc/quote", + Version: "v1.5.2", + }, }, { path: "golang.org/x/foo/bar", @@ -42,7 +55,7 @@ var importTests = []struct { }, } -func TestImport(t *testing.T) { +func TestQueryImport(t *testing.T) { testenv.MustHaveExternalNetwork(t) testenv.MustHaveExecPath(t, "git") defer func(old bool) { @@ -55,12 +68,23 @@ func TestImport(t *testing.T) { for _, tt := range importTests { t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t 
*testing.T) { // Note that there is no build list, so Import should always fail. - m, dir, err := Import(ctx, tt.path) - if err == nil { - t.Fatalf("Import(%q) = %v, %v, nil; expected error", tt.path, m, dir) + m, err := queryImport(ctx, tt.path) + + if tt.err == "" { + if err != nil { + t.Fatalf("queryImport(_, %q): %v", tt.path, err) + } + } else { + if err == nil { + t.Fatalf("queryImport(_, %q) = %v, nil; expected error", tt.path, m) + } + if !regexp.MustCompile(tt.err).MatchString(err.Error()) { + t.Fatalf("queryImport(_, %q): error %q, want error matching %#q", tt.path, err, tt.err) + } } - if !regexp.MustCompile(tt.err).MatchString(err.Error()) { - t.Fatalf("Import(%q): error %q, want error matching %#q", tt.path, err, tt.err) + + if m.Path != tt.m.Path || (tt.m.Version != "" && m.Version != tt.m.Version) { + t.Errorf("queryImport(_, %q) = %v, _; want %v", tt.path, m, tt.m) } }) } diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 8a3af534a5..2096dfb636 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -881,7 +881,7 @@ func loadFromRoots(params loaderParams) *loader { ld.buildStacks() - modAddedBy := resolveMissingImports(addedModuleFor, ld.pkgs) + modAddedBy := ld.resolveMissingImports(addedModuleFor) if len(modAddedBy) == 0 { break } @@ -937,38 +937,45 @@ func loadFromRoots(params loaderParams) *loader { // The newly-resolved packages are added to the addedModuleFor map, and // resolveMissingImports returns a map from each newly-added module version to // the first package for which that module was added. 
-func resolveMissingImports(addedModuleFor map[string]bool, pkgs []*loadPkg) (modAddedBy map[module.Version]*loadPkg) { - haveMod := make(map[module.Version]bool) - for _, m := range buildList { - haveMod[m] = true - } - - modAddedBy = make(map[module.Version]*loadPkg) - for _, pkg := range pkgs { +func (ld *loader) resolveMissingImports(addedModuleFor map[string]bool) (modAddedBy map[module.Version]*loadPkg) { + var needPkgs []*loadPkg + for _, pkg := range ld.pkgs { if pkg.isTest() { // If we are missing a test, we are also missing its non-test version, and // we should only add the missing import once. continue } - if err, ok := pkg.err.(*ImportMissingError); ok && err.Module.Path != "" { - if err.newMissingVersion != "" { - base.Fatalf("go: %s: package provided by %s at latest version %s but not at required version %s", pkg.stackText(), err.Module.Path, err.Module.Version, err.newMissingVersion) - } - fmt.Fprintf(os.Stderr, "go: found %s in %s %s\n", pkg.path, err.Module.Path, err.Module.Version) - if addedModuleFor[pkg.path] { - base.Fatalf("go: %s: looping trying to add package", pkg.stackText()) - } - addedModuleFor[pkg.path] = true - if !haveMod[err.Module] { - haveMod[err.Module] = true - modAddedBy[err.Module] = pkg - buildList = append(buildList, err.Module) - } + if pkg.err != errImportMissing { + // Leave other errors for Import or load.Packages to report. continue } - // Leave other errors for Import or load.Packages to report. + + needPkgs = append(needPkgs, pkg) + + pkg := pkg + ld.work.Add(func() { + pkg.mod, pkg.err = queryImport(context.TODO(), pkg.path) + }) + } + <-ld.work.Idle() + + modAddedBy = map[module.Version]*loadPkg{} + for _, pkg := range needPkgs { + if pkg.err != nil { + continue + } + + fmt.Fprintf(os.Stderr, "go: found %s in %s %s\n", pkg.path, pkg.mod.Path, pkg.mod.Version) + if addedModuleFor[pkg.path] { + // TODO(bcmills): This should only be an error if pkg.mod is the same + // version we already tried to add previously. 
+ base.Fatalf("go: %s: looping trying to add package", pkg.stackText()) + } + if modAddedBy[pkg.mod] == nil { + modAddedBy[pkg.mod] = pkg + buildList = append(buildList, pkg.mod) + } } - base.ExitIfErrors() return modAddedBy } @@ -1079,7 +1086,7 @@ func (ld *loader) load(pkg *loadPkg) { return } - pkg.mod, pkg.dir, pkg.err = Import(context.TODO(), pkg.path) + pkg.mod, pkg.dir, pkg.err = importFromBuildList(context.TODO(), pkg.path) if pkg.dir == "" { return } diff --git a/src/cmd/go/testdata/script/list_bad_import.txt b/src/cmd/go/testdata/script/list_bad_import.txt index b8f9d586f3..dbec35069c 100644 --- a/src/cmd/go/testdata/script/list_bad_import.txt +++ b/src/cmd/go/testdata/script/list_bad_import.txt @@ -15,10 +15,11 @@ stdout 'incomplete' stdout 'bad dep: .*example.com[/\\]notfound' # Listing with -deps should also fail. -# BUG: Today, it does not. -# ! go list -deps example.com/direct -# stderr example.com[/\\]notfound -go list -deps example.com/direct +! go list -deps example.com/direct +stderr example.com[/\\]notfound + +# But -e -deps should succeed. +go list -e -deps example.com/direct stdout example.com/notfound @@ -31,10 +32,11 @@ stdout incomplete stdout 'bad dep: .*example.com[/\\]notfound' # Again, -deps should fail. -# BUG: Again, it does not. -# ! go list -deps example.com/indirect -# stderr example.com[/\\]notfound -go list -deps example.com/indirect +! go list -deps example.com/indirect +stderr example.com[/\\]notfound + +# But -deps -e should succeed. 
+go list -e -deps example.com/indirect stdout example.com/notfound diff --git a/src/cmd/go/testdata/script/list_test_err.txt b/src/cmd/go/testdata/script/list_test_err.txt index a174b5e9ad..c6f1ecf400 100644 --- a/src/cmd/go/testdata/script/list_test_err.txt +++ b/src/cmd/go/testdata/script/list_test_err.txt @@ -22,6 +22,9 @@ go list -e -test -deps -f '{{.ImportPath}} {{.Error | printf "%q"}}' syntaxerr stdout 'pkgdep ' stdout 'testdep_a ' stdout 'testdep_b ' +stdout 'syntaxerr ' +stdout 'syntaxerr \[syntaxerr.test\] ' +stdout 'syntaxerr_test \[syntaxerr.test\] ' stdout 'syntaxerr\.test "[^"]*expected declaration' ! stderr 'expected declaration' diff --git a/src/cmd/go/testdata/script/mod_list_bad_import.txt b/src/cmd/go/testdata/script/mod_list_bad_import.txt index 8a66e0b72a..b3e2fff67d 100644 --- a/src/cmd/go/testdata/script/mod_list_bad_import.txt +++ b/src/cmd/go/testdata/script/mod_list_bad_import.txt @@ -12,10 +12,11 @@ stdout 'incomplete' stdout 'bad dep: .*example.com/notfound' # Listing with -deps should also fail. -# BUG: Today, it does not. -# ! go list -deps example.com/direct -# stderr example.com/notfound -go list -deps example.com/direct +! go list -deps example.com/direct +stderr example.com/notfound + +# But -e -deps should succeed. +go list -e -deps example.com/direct stdout example.com/notfound @@ -28,10 +29,11 @@ stdout incomplete stdout 'bad dep: .*example.com/notfound' # Again, -deps should fail. -# BUG: Again, it does not. -# ! go list -deps example.com/indirect -# stderr example.com/notfound -go list -deps example.com/indirect +! go list -deps example.com/indirect +stderr example.com/notfound + +# But -e -deps should succeed. 
+go list -e -deps example.com/indirect stdout example.com/notfound diff --git a/src/cmd/go/testdata/script/mod_missingpkg_prerelease.txt b/src/cmd/go/testdata/script/mod_missingpkg_prerelease.txt index 319ff85587..1ba8d3d22a 100644 --- a/src/cmd/go/testdata/script/mod_missingpkg_prerelease.txt +++ b/src/cmd/go/testdata/script/mod_missingpkg_prerelease.txt @@ -1,7 +1,7 @@ env GO111MODULE=on -! go list use.go -stderr 'example.com/missingpkg/deprecated: package provided by example.com/missingpkg at latest version v1.0.0 but not at required version v1.0.1-beta' +! go list -deps use.go +stderr '^use.go:4:2: package example.com/missingpkg/deprecated provided by example.com/missingpkg at latest version v1.0.0 but not at required version v1.0.1-beta$' -- go.mod -- module m -- cgit v1.2.3-54-g00ecf From 2c8d2a0c51f4085e56b5ab05ed9fb17fc6d08261 Mon Sep 17 00:00:00 2001 From: Steven Hartland Date: Mon, 31 Aug 2020 21:37:40 +0100 Subject: net/http: fix data race due to writeLoop goroutine left running Fix a data race for clients that mutate requests after receiving a response error which is caused by the writeLoop goroutine left running, this can be seen on cancelled requests. Fixes #37669 Change-Id: Ia4743c6b8abde3a7503de362cc6a3782e19e7f60 Reviewed-on: https://go-review.googlesource.com/c/go/+/251858 Reviewed-by: Bryan C. Mills Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot --- src/net/http/transport.go | 10 ++++- src/net/http/transport_test.go | 99 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 108 insertions(+), 1 deletion(-) diff --git a/src/net/http/transport.go b/src/net/http/transport.go index c23042b1e3..b97c4268b5 100644 --- a/src/net/http/transport.go +++ b/src/net/http/transport.go @@ -1967,6 +1967,15 @@ func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritte return nil } + // Wait for the writeLoop goroutine to terminate to avoid data + // races on callers who mutate the request on failure. 
+ // + // When resc in pc.roundTrip and hence rc.ch receives a responseAndError + // with a non-nil error it implies that the persistConn is either closed + // or closing. Waiting on pc.writeLoopDone is hence safe as all callers + // close closech which in turn ensures writeLoop returns. + <-pc.writeLoopDone + // If the request was canceled, that's better than network // failures that were likely the result of tearing down the // connection. @@ -1992,7 +2001,6 @@ func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritte return err } if pc.isBroken() { - <-pc.writeLoopDone if pc.nwrite == startBytesWritten { return nothingWrittenError{err} } diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go index 2d9ca10bf0..f4b7623630 100644 --- a/src/net/http/transport_test.go +++ b/src/net/http/transport_test.go @@ -25,6 +25,7 @@ import ( "io" "io/ioutil" "log" + mrand "math/rand" "net" . "net/http" "net/http/httptest" @@ -6284,3 +6285,101 @@ func TestTransportRejectsSignInContentLength(t *testing.T) { t.Fatalf("Error mismatch\nGot: %q\nWanted substring: %q", got, want) } } + +// dumpConn is a net.Conn which writes to Writer and reads from Reader +type dumpConn struct { + io.Writer + io.Reader +} + +func (c *dumpConn) Close() error { return nil } +func (c *dumpConn) LocalAddr() net.Addr { return nil } +func (c *dumpConn) RemoteAddr() net.Addr { return nil } +func (c *dumpConn) SetDeadline(t time.Time) error { return nil } +func (c *dumpConn) SetReadDeadline(t time.Time) error { return nil } +func (c *dumpConn) SetWriteDeadline(t time.Time) error { return nil } + +// delegateReader is a reader that delegates to another reader, +// once it arrives on a channel. 
+type delegateReader struct { + c chan io.Reader + r io.Reader // nil until received from c +} + +func (r *delegateReader) Read(p []byte) (int, error) { + if r.r == nil { + var ok bool + if r.r, ok = <-r.c; !ok { + return 0, errors.New("delegate closed") + } + } + return r.r.Read(p) +} + +func testTransportRace(req *Request) { + save := req.Body + pr, pw := io.Pipe() + defer pr.Close() + defer pw.Close() + dr := &delegateReader{c: make(chan io.Reader)} + + t := &Transport{ + Dial: func(net, addr string) (net.Conn, error) { + return &dumpConn{pw, dr}, nil + }, + } + defer t.CloseIdleConnections() + + quitReadCh := make(chan struct{}) + // Wait for the request before replying with a dummy response: + go func() { + defer close(quitReadCh) + + req, err := ReadRequest(bufio.NewReader(pr)) + if err == nil { + // Ensure all the body is read; otherwise + // we'll get a partial dump. + io.Copy(ioutil.Discard, req.Body) + req.Body.Close() + } + select { + case dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\nConnection: close\r\n\r\n"): + case quitReadCh <- struct{}{}: + // Ensure delegate is closed so Read doesn't block forever. + close(dr.c) + } + }() + + t.RoundTrip(req) + + // Ensure the reader returns before we reset req.Body to prevent + // a data race on req.Body. + pw.Close() + <-quitReadCh + + req.Body = save +} + +// Issue 37669 +// Test that a cancellation doesn't result in a data race due to the writeLoop +// goroutine being left running, if the caller mutates the processed Request +// upon completion. 
+func TestErrorWriteLoopRace(t *testing.T) { + if testing.Short() { + return + } + t.Parallel() + for i := 0; i < 1000; i++ { + delay := time.Duration(mrand.Intn(5)) * time.Millisecond + ctx, cancel := context.WithTimeout(context.Background(), delay) + defer cancel() + + r := bytes.NewBuffer(make([]byte, 10000)) + req, err := NewRequestWithContext(ctx, MethodPost, "http://example.com", r) + if err != nil { + t.Fatal(err) + } + + testTransportRace(req) + } +} -- cgit v1.2.3-54-g00ecf From cd91ab5d9601c975286f1ac83cd289e34aa117f8 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Fri, 28 Aug 2020 21:32:05 -0400 Subject: cmd/go/internal/modload: fix spurious import resolution error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Due to a bug in CL 173017, if QueryPackages found multiple candidates for the given package and *at least* one of those candidates was not available to add, we would reject *all* such candidates — even those that were still viable. Now, we return the first viable candidate, and only return an error if *no* candidate is viable given the current build list. Fixes #41113 Change-Id: Idb2e77244be7c0f5dd511efb142c3059925d7336 Reviewed-on: https://go-review.googlesource.com/c/go/+/251446 Run-TryBot: Bryan C. 
Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod Reviewed-by: Michael Matloob --- src/cmd/go/internal/modload/import.go | 26 ++++++++++---------- ...xample.com_split-incompatible_subpkg_v0.1.0.txt | 14 +++++++++++ ....com_split-incompatible_v2.0.0+incompatible.txt | 10 ++++++++ ..._split-incompatible_v2.1.0-pre+incompatible.txt | 10 ++++++++ .../go/testdata/script/mod_import_issue41113.txt | 28 ++++++++++++++++++++++ 5 files changed, 76 insertions(+), 12 deletions(-) create mode 100644 src/cmd/go/testdata/mod/example.com_split-incompatible_subpkg_v0.1.0.txt create mode 100644 src/cmd/go/testdata/mod/example.com_split-incompatible_v2.0.0+incompatible.txt create mode 100644 src/cmd/go/testdata/mod/example.com_split-incompatible_v2.1.0-pre+incompatible.txt create mode 100644 src/cmd/go/testdata/script/mod_import_issue41113.txt diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go index e04d66c5b1..c625184b8b 100644 --- a/src/cmd/go/internal/modload/import.go +++ b/src/cmd/go/internal/modload/import.go @@ -312,10 +312,10 @@ func queryImport(ctx context.Context, path string) (module.Version, error) { } } - m := candidates[0].Mod - newMissingVersion := "" - for _, c := range candidates { + candidate0MissingVersion := "" + for i, c := range candidates { cm := c.Mod + canAdd := true for _, bm := range buildList { if bm.Path == cm.Path && semver.Compare(bm.Version, cm.Version) > 0 { // QueryPackage proposed that we add module cm to provide the package, @@ -326,20 +326,22 @@ func queryImport(ctx context.Context, path string) (module.Version, error) { // version (e.g., v1.0.0) of a module, but we have a newer version // of the same module in the build list (e.g., v1.0.1-beta), and // the package is not present there. - // - // TODO(#41113): This is probably incorrect when there are multiple - // candidates, such as when a nested module is split out but only one - // half of the split is tagged. 
- m = cm - newMissingVersion = bm.Version + canAdd = false + if i == 0 { + candidate0MissingVersion = bm.Version + } break } } + if canAdd { + return cm, nil + } } - if newMissingVersion != "" { - return m, &ImportMissingError{Path: path, Module: m, newMissingVersion: newMissingVersion} + return module.Version{}, &ImportMissingError{ + Path: path, + Module: candidates[0].Mod, + newMissingVersion: candidate0MissingVersion, } - return m, nil } // maybeInModule reports whether, syntactically, diff --git a/src/cmd/go/testdata/mod/example.com_split-incompatible_subpkg_v0.1.0.txt b/src/cmd/go/testdata/mod/example.com_split-incompatible_subpkg_v0.1.0.txt new file mode 100644 index 0000000000..8f9e49176c --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_split-incompatible_subpkg_v0.1.0.txt @@ -0,0 +1,14 @@ +Written by hand. +Test case for getting a package that has been moved to a nested module, +with a +incompatible verison (and thus no go.mod file) at the root module. + +-- .mod -- +module example.com/split-incompatible/subpkg +-- .info -- +{"Version": "v0.1.0"} +-- go.mod -- +module example.com/split-incompatible/subpkg + +go 1.16 +-- subpkg.go -- +package subpkg diff --git a/src/cmd/go/testdata/mod/example.com_split-incompatible_v2.0.0+incompatible.txt b/src/cmd/go/testdata/mod/example.com_split-incompatible_v2.0.0+incompatible.txt new file mode 100644 index 0000000000..35c3f27710 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_split-incompatible_v2.0.0+incompatible.txt @@ -0,0 +1,10 @@ +Written by hand. +Test case for getting a package that has been moved to a nested module, +with a +incompatible verison (and thus no go.mod file) at the root module. 
+ +-- .mod -- +module example.com/split-incompatible +-- .info -- +{"Version": "v2.0.0+incompatible"} +-- subpkg/subpkg.go -- +package subpkg diff --git a/src/cmd/go/testdata/mod/example.com_split-incompatible_v2.1.0-pre+incompatible.txt b/src/cmd/go/testdata/mod/example.com_split-incompatible_v2.1.0-pre+incompatible.txt new file mode 100644 index 0000000000..917fc0f559 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_split-incompatible_v2.1.0-pre+incompatible.txt @@ -0,0 +1,10 @@ +Written by hand. +Test case for getting a package that has been moved to a nested module, +with a +incompatible verison (and thus no go.mod file) at the root module. + +-- .mod -- +module example.com/split-incompatible +-- .info -- +{"Version": "v2.1.0-pre+incompatible"} +-- README.txt -- +subpkg has moved to module example.com/split-incompatible/subpkg diff --git a/src/cmd/go/testdata/script/mod_import_issue41113.txt b/src/cmd/go/testdata/script/mod_import_issue41113.txt new file mode 100644 index 0000000000..e98ac63d48 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_import_issue41113.txt @@ -0,0 +1,28 @@ +# Regression test for https://golang.org/issue/41113. +# +# When resolving a missing import path, the inability to add the package from +# one module path should not interfere with adding a nested path. + +# Initially, our module depends on split-incompatible v2.1.0-pre+incompatible, +# from which an imported package has been removed (and relocated to the nested +# split-incompatible/subpkg module). modload.QueryPackage will suggest +# split-incompatible v2.0.0+incompatible, which we cannot use (because it would +# be an implicit downgrade), and split-incompatible/subpkg v0.1.0, which we +# *should* use. + +go mod tidy + +go list -m all +stdout '^example.com/split-incompatible/subpkg v0\.1\.0$' +! 
stdout '^example.com/split-incompatible .*' + +-- go.mod -- +module golang.org/issue/41113 + +go 1.16 + +require example.com/split-incompatible v2.1.0-pre+incompatible +-- x.go -- +package issue41113 + +import _ "example.com/split-incompatible/subpkg" -- cgit v1.2.3-54-g00ecf From b4944ef310ed43fad53c6128344e4bed2b346c88 Mon Sep 17 00:00:00 2001 From: Tzu-Chiao Yeh Date: Sun, 6 Sep 2020 09:43:34 +0800 Subject: cmd: update golang.org/x/tools to v0.0.0-20200901153117-6e59e24738da Includes the latest fix on vet to warn unused context.WithValue result. Fixes #41149 Change-Id: I06c204f40ef12b0f62f59b1bbdf1fe06ccd6565d Reviewed-on: https://go-review.googlesource.com/c/go/+/252941 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. Mills --- src/cmd/go.mod | 2 +- src/cmd/go.sum | 15 +- .../go/analysis/passes/structtag/structtag.go | 6 +- .../go/analysis/passes/unmarshal/unmarshal.go | 7 +- .../analysis/passes/unusedresult/unusedresult.go | 2 +- .../x/tools/internal/analysisinternal/analysis.go | 343 +++++++++++++++++- .../golang.org/x/tools/internal/lsp/fuzzy/input.go | 168 +++++++++ .../x/tools/internal/lsp/fuzzy/matcher.go | 398 +++++++++++++++++++++ src/cmd/vendor/modules.txt | 3 +- 9 files changed, 912 insertions(+), 32 deletions(-) create mode 100644 src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go create mode 100644 src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go diff --git a/src/cmd/go.mod b/src/cmd/go.mod index 68ce1705e4..0952dbb84c 100644 --- a/src/cmd/go.mod +++ b/src/cmd/go.mod @@ -9,6 +9,6 @@ require ( golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3 // indirect - golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 + golang.org/x/tools v0.0.0-20200901153117-6e59e24738da golang.org/x/xerrors v0.0.0-20200806184451-1a77d5e9f316 // indirect ) diff --git a/src/cmd/go.sum b/src/cmd/go.sum index 
cb64a5d475..adbc5a96ac 100644 --- a/src/cmd/go.sum +++ b/src/cmd/go.sum @@ -6,33 +6,34 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200414190113-039b1ae3a340 h1:S1+yTUaFPXuDZnPDbO+TrDFIjPzQraYH8/CwSlu9Fac= github.com/ianlancetaylor/demangle v0.0.0-20200414190113-039b1ae3a340/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/arch v0.0.0-20200511175325-f7c78586839d h1:YvwchuJby5xEAPdBGmdAVSiVME50C+RJfJJwJJsGEV8= golang.org/x/arch v0.0.0-20200511175325-f7c78586839d/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 h1:xUIPaMhvROX9dhPvRCenIJtU78+lbEenGbgqB5hfHCQ= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3 h1:5B6i6EAiSYyejWfvc5Rc9BbI3rzIsrrXfAQBWnYfn+w= golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200901153117-6e59e24738da h1:8nFbt74voFOsM+Hb5XtF+1SNbbf3dzikH5osZO1hyyo= +golang.org/x/tools v0.0.0-20200901153117-6e59e24738da/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200806184451-1a77d5e9f316 h1:Jhw4VC65LaKnpq9FvcK+a8ZzrFm3D+UygvMMrhkOw70= golang.org/x/xerrors v0.0.0-20200806184451-1a77d5e9f316/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go index e09160379f..f0b15051c5 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go @@ -116,7 +116,11 @@ func checkCanonicalFieldTag(pass *analysis.Pass, field *types.Var, tag string, s } for _, enc := range [...]string{"json", "xml"} { - if reflect.StructTag(tag).Get(enc) != "" { + switch reflect.StructTag(tag).Get(enc) { + // Ignore warning if the field not exported and the tag is marked as + // ignored. 
+ case "", "-": + default: pass.Reportf(field.Pos(), "struct field %s has %s tag but is not exported", field.Name(), enc) return } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go index f9cc993cbb..92b37caff9 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go @@ -30,7 +30,7 @@ var Analyzer = &analysis.Analyzer{ func run(pass *analysis.Pass) (interface{}, error) { switch pass.Pkg.Path() { - case "encoding/gob", "encoding/json", "encoding/xml": + case "encoding/gob", "encoding/json", "encoding/xml", "encoding/asn1": // These packages know how to use their own APIs. // Sometimes they are testing what happens to incorrect programs. return nil, nil @@ -53,9 +53,10 @@ func run(pass *analysis.Pass) (interface{}, error) { recv := fn.Type().(*types.Signature).Recv() if fn.Name() == "Unmarshal" && recv == nil { // "encoding/json".Unmarshal - // "encoding/xml".Unmarshal + // "encoding/xml".Unmarshal + // "encoding/asn1".Unmarshal switch fn.Pkg().Path() { - case "encoding/json", "encoding/xml": + case "encoding/json", "encoding/xml", "encoding/asn1": argidx = 1 // func([]byte, interface{}) } } else if fn.Name() == "Decode" && recv != nil { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go index 76d4ab2382..bececee7e9 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go @@ -44,7 +44,7 @@ var funcs, stringMethods stringSetFlag func init() { // TODO(adonovan): provide a comment syntax to allow users to // add their functions to this set using facts. 
- funcs.Set("errors.New,fmt.Errorf,fmt.Sprintf,fmt.Sprint,sort.Reverse") + funcs.Set("errors.New,fmt.Errorf,fmt.Sprintf,fmt.Sprint,sort.Reverse,context.WithValue,context.WithCancel,context.WithDeadline,context.WithTimeout") Analyzer.Flags.Var(&funcs, "funcs", "comma-separated list of functions whose results must be used") diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index 26586810c7..01f6e829f7 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -14,6 +14,12 @@ import ( "strings" "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/lsp/fuzzy" +) + +var ( + GetTypeErrors func(p interface{}) []types.Error + SetTypeErrors func(p interface{}, errors []types.Error) ) func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { @@ -45,32 +51,34 @@ func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.T default: panic("unknown basic type") } - case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice: + case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array: return ast.NewIdent("nil") case *types.Struct: - texpr := typeExpr(fset, f, pkg, typ) // typ because we want the name here. + texpr := TypeExpr(fset, f, pkg, typ) // typ because we want the name here. 
if texpr == nil { return nil } return &ast.CompositeLit{ Type: texpr, } - case *types.Array: - texpr := typeExpr(fset, f, pkg, u.Elem()) - if texpr == nil { - return nil - } - return &ast.CompositeLit{ - Type: &ast.ArrayType{ - Elt: texpr, - Len: &ast.BasicLit{Kind: token.INT, Value: fmt.Sprintf("%v", u.Len())}, - }, - } } return nil } -func typeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { +// IsZeroValue checks whether the given expression is a 'zero value' (as determined by output of +// analysisinternal.ZeroValue) +func IsZeroValue(expr ast.Expr) bool { + switch e := expr.(type) { + case *ast.BasicLit: + return e.Value == "0" || e.Value == `""` + case *ast.Ident: + return e.Name == "nil" || e.Name == "false" + default: + return false + } +} + +func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { switch t := typ.(type) { case *types.Basic: switch t.Kind() { @@ -79,7 +87,96 @@ func typeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty default: return ast.NewIdent(t.Name()) } + case *types.Pointer: + x := TypeExpr(fset, f, pkg, t.Elem()) + if x == nil { + return nil + } + return &ast.UnaryExpr{ + Op: token.MUL, + X: x, + } + case *types.Array: + elt := TypeExpr(fset, f, pkg, t.Elem()) + if elt == nil { + return nil + } + return &ast.ArrayType{ + Len: &ast.BasicLit{ + Kind: token.INT, + Value: fmt.Sprintf("%d", t.Len()), + }, + Elt: elt, + } + case *types.Slice: + elt := TypeExpr(fset, f, pkg, t.Elem()) + if elt == nil { + return nil + } + return &ast.ArrayType{ + Elt: elt, + } + case *types.Map: + key := TypeExpr(fset, f, pkg, t.Key()) + value := TypeExpr(fset, f, pkg, t.Elem()) + if key == nil || value == nil { + return nil + } + return &ast.MapType{ + Key: key, + Value: value, + } + case *types.Chan: + dir := ast.ChanDir(t.Dir()) + if t.Dir() == types.SendRecv { + dir = ast.SEND | ast.RECV + } + value := TypeExpr(fset, f, pkg, t.Elem()) + if value == nil { + 
return nil + } + return &ast.ChanType{ + Dir: dir, + Value: value, + } + case *types.Signature: + var params []*ast.Field + for i := 0; i < t.Params().Len(); i++ { + p := TypeExpr(fset, f, pkg, t.Params().At(i).Type()) + if p == nil { + return nil + } + params = append(params, &ast.Field{ + Type: p, + Names: []*ast.Ident{ + { + Name: t.Params().At(i).Name(), + }, + }, + }) + } + var returns []*ast.Field + for i := 0; i < t.Results().Len(); i++ { + r := TypeExpr(fset, f, pkg, t.Results().At(i).Type()) + if r == nil { + return nil + } + returns = append(returns, &ast.Field{ + Type: r, + }) + } + return &ast.FuncType{ + Params: &ast.FieldList{ + List: params, + }, + Results: &ast.FieldList{ + List: returns, + }, + } case *types.Named: + if t.Obj().Pkg() == nil { + return ast.NewIdent(t.Obj().Name()) + } if t.Obj().Pkg() == pkg { return ast.NewIdent(t.Obj().Name()) } @@ -101,14 +198,15 @@ func typeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty X: ast.NewIdent(pkgName), Sel: ast.NewIdent(t.Obj().Name()), } + case *types.Struct: + return ast.NewIdent(t.String()) + case *types.Interface: + return ast.NewIdent(t.String()) default: - return nil // TODO: anonymous structs, but who does that + return nil } } -var GetTypeErrors = func(p interface{}) []types.Error { return nil } -var SetTypeErrors = func(p interface{}, errors []types.Error) {} - type TypeErrorPass string const ( @@ -116,3 +214,212 @@ const ( NoResultValues TypeErrorPass = "noresultvalues" UndeclaredName TypeErrorPass = "undeclaredname" ) + +// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable. +// Some examples: +// +// Basic Example: +// z := 1 +// y := z + x +// If x is undeclared, then this function would return `y := z + x`, so that we +// can insert `x := ` on the line before `y := z + x`. 
+// +// If stmt example: +// if z == 1 { +// } else if z == y {} +// If y is undeclared, then this function would return `if z == 1 {`, because we cannot +// insert a statement between an if and an else if statement. As a result, we need to find +// the top of the if chain to insert `y := ` before. +func StmtToInsertVarBefore(path []ast.Node) ast.Stmt { + enclosingIndex := -1 + for i, p := range path { + if _, ok := p.(ast.Stmt); ok { + enclosingIndex = i + break + } + } + if enclosingIndex == -1 { + return nil + } + enclosingStmt := path[enclosingIndex] + switch enclosingStmt.(type) { + case *ast.IfStmt: + // The enclosingStmt is inside of the if declaration, + // We need to check if we are in an else-if stmt and + // get the base if statement. + return baseIfStmt(path, enclosingIndex) + case *ast.CaseClause: + // Get the enclosing switch stmt if the enclosingStmt is + // inside of the case statement. + for i := enclosingIndex + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.SwitchStmt); ok { + return node + } else if node, ok := path[i].(*ast.TypeSwitchStmt); ok { + return node + } + } + } + if len(path) <= enclosingIndex+1 { + return enclosingStmt.(ast.Stmt) + } + // Check if the enclosing statement is inside another node. + switch expr := path[enclosingIndex+1].(type) { + case *ast.IfStmt: + // Get the base if statement. + return baseIfStmt(path, enclosingIndex+1) + case *ast.ForStmt: + if expr.Init == enclosingStmt || expr.Post == enclosingStmt { + return expr + } + } + return enclosingStmt.(ast.Stmt) +} + +// baseIfStmt walks up the if/else-if chain until we get to +// the top of the current if chain. +func baseIfStmt(path []ast.Node, index int) ast.Stmt { + stmt := path[index] + for i := index + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.IfStmt); ok && node.Else == stmt { + stmt = node + continue + } + break + } + return stmt.(ast.Stmt) +} + +// WalkASTWithParent walks the AST rooted at n. 
The semantics are +// similar to ast.Inspect except it does not call f(nil). +func WalkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) { + var ancestors []ast.Node + ast.Inspect(n, func(n ast.Node) (recurse bool) { + if n == nil { + ancestors = ancestors[:len(ancestors)-1] + return false + } + + var parent ast.Node + if len(ancestors) > 0 { + parent = ancestors[len(ancestors)-1] + } + ancestors = append(ancestors, n) + return f(n, parent) + }) +} + +// FindMatchingIdents finds all identifiers in 'node' that match any of the given types. +// 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within +// the scope of each of identifier we select. Otherwise, we will insert a variable at 'pos' that +// is unrecognized. +func FindMatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]*ast.Ident { + matches := map[types.Type][]*ast.Ident{} + // Initialize matches to contain the variable types we are searching for. + for _, typ := range typs { + if typ == nil { + continue + } + matches[typ] = []*ast.Ident{} + } + seen := map[types.Object]struct{}{} + ast.Inspect(node, func(n ast.Node) bool { + if n == nil { + return false + } + // Prevent circular definitions. If 'pos' is within an assignment statement, do not + // allow any identifiers in that assignment statement to be selected. Otherwise, + // we could do the following, where 'x' satisfies the type of 'f0': + // + // x := fakeStruct{f0: x} + // + assignment, ok := n.(*ast.AssignStmt) + if ok && pos > assignment.Pos() && pos <= assignment.End() { + return false + } + if n.End() > pos { + return n.Pos() <= pos + } + ident, ok := n.(*ast.Ident) + if !ok || ident.Name == "_" { + return true + } + obj := info.Defs[ident] + if obj == nil || obj.Type() == nil { + return true + } + if _, ok := obj.(*types.TypeName); ok { + return true + } + // Prevent duplicates in matches' values. 
+ if _, ok = seen[obj]; ok { + return true + } + seen[obj] = struct{}{} + // Find the scope for the given position. Then, check whether the object + // exists within the scope. + innerScope := pkg.Scope().Innermost(pos) + if innerScope == nil { + return true + } + _, foundObj := innerScope.LookupParent(ident.Name, pos) + if foundObj != obj { + return true + } + // The object must match one of the types that we are searching for. + if idents, ok := matches[obj.Type()]; ok { + matches[obj.Type()] = append(idents, ast.NewIdent(ident.Name)) + } + // If the object type does not exactly match any of the target types, greedily + // find the first target type that the object type can satisfy. + for typ := range matches { + if obj.Type() == typ { + continue + } + if equivalentTypes(obj.Type(), typ) { + matches[typ] = append(matches[typ], ast.NewIdent(ident.Name)) + } + } + return true + }) + return matches +} + +func equivalentTypes(want, got types.Type) bool { + if want == got || types.Identical(want, got) { + return true + } + // Code segment to help check for untyped equality from (golang/go#32146). + if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 { + if lhs, ok := got.Underlying().(*types.Basic); ok { + return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType + } + } + return types.AssignableTo(want, got) +} + +// FindBestMatch employs fuzzy matching to evaluate the similarity of each given identifier to the +// given pattern. We return the identifier whose name is most similar to the pattern. +func FindBestMatch(pattern string, idents []*ast.Ident) ast.Expr { + fuzz := fuzzy.NewMatcher(pattern) + var bestFuzz ast.Expr + highScore := float32(0) // minimum score is 0 (no match) + for _, ident := range idents { + // TODO: Improve scoring algorithm. + score := fuzz.Score(ident.Name) + if score > highScore { + highScore = score + bestFuzz = ident + } else if score == 0 { + // Order matters in the fuzzy matching algorithm. 
If we find no match + // when matching the target to the identifier, try matching the identifier + // to the target. + revFuzz := fuzzy.NewMatcher(ident.Name) + revScore := revFuzz.Score(pattern) + if revScore > highScore { + highScore = revScore + bestFuzz = ident + } + } + } + return bestFuzz +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go b/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go new file mode 100644 index 0000000000..ac377035ec --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go @@ -0,0 +1,168 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fuzzy + +import ( + "unicode" +) + +// RuneRole specifies the role of a rune in the context of an input. +type RuneRole byte + +const ( + // RNone specifies a rune without any role in the input (i.e., whitespace/non-ASCII). + RNone RuneRole = iota + // RSep specifies a rune with the role of segment separator. + RSep + // RTail specifies a rune which is a lower-case tail in a word in the input. + RTail + // RUCTail specifies a rune which is an upper-case tail in a word in the input. + RUCTail + // RHead specifies a rune which is the first character in a word in the input. + RHead +) + +// RuneRoles detects the roles of each byte rune in an input string and stores it in the output +// slice. The rune role depends on the input type. Stops when it parsed all the runes in the string +// or when it filled the output. If output is nil, then it gets created. 
+func RuneRoles(str string, reuse []RuneRole) []RuneRole { + var output []RuneRole + if cap(reuse) < len(str) { + output = make([]RuneRole, 0, len(str)) + } else { + output = reuse[:0] + } + + prev, prev2 := rtNone, rtNone + for i := 0; i < len(str); i++ { + r := rune(str[i]) + + role := RNone + + curr := rtLower + if str[i] <= unicode.MaxASCII { + curr = runeType(rt[str[i]] - '0') + } + + if curr == rtLower { + if prev == rtNone || prev == rtPunct { + role = RHead + } else { + role = RTail + } + } else if curr == rtUpper { + role = RHead + + if prev == rtUpper { + // This and previous characters are both upper case. + + if i+1 == len(str) { + // This is last character, previous was also uppercase -> this is UCTail + // i.e., (current char is C): aBC / BC / ABC + role = RUCTail + } + } + } else if curr == rtPunct { + switch r { + case '.', ':': + role = RSep + } + } + if curr != rtLower { + if i > 1 && output[i-1] == RHead && prev2 == rtUpper && (output[i-2] == RHead || output[i-2] == RUCTail) { + // The previous two characters were uppercase. The current one is not a lower case, so the + // previous one can't be a HEAD. Make it a UCTail. + // i.e., (last char is current char - B must be a UCTail): ABC / ZABC / AB. + output[i-1] = RUCTail + } + } + + output = append(output, role) + prev2 = prev + prev = curr + } + return output +} + +type runeType byte + +const ( + rtNone runeType = iota + rtPunct + rtLower + rtUpper +) + +const rt = "00000000000000000000000000000000000000000000001122222222221000000333333333333333333333333330000002222222222222222222222222200000" + +// LastSegment returns the substring representing the last segment from the input, where each +// byte has an associated RuneRole in the roles slice. This makes sense only for inputs of Symbol +// or Filename type. +func LastSegment(input string, roles []RuneRole) string { + // Exclude ending separators. 
+ end := len(input) - 1 + for end >= 0 && roles[end] == RSep { + end-- + } + if end < 0 { + return "" + } + + start := end - 1 + for start >= 0 && roles[start] != RSep { + start-- + } + + return input[start+1 : end+1] +} + +// ToLower transforms the input string to lower case, which is stored in the output byte slice. +// The lower casing considers only ASCII values - non ASCII values are left unmodified. +// Stops when parsed all input or when it filled the output slice. If output is nil, then it gets +// created. +func ToLower(input string, reuse []byte) []byte { + output := reuse + if cap(reuse) < len(input) { + output = make([]byte, len(input)) + } + + for i := 0; i < len(input); i++ { + r := rune(input[i]) + if r <= unicode.MaxASCII { + if 'A' <= r && r <= 'Z' { + r += 'a' - 'A' + } + } + output[i] = byte(r) + } + return output[:len(input)] +} + +// WordConsumer defines a consumer for a word delimited by the [start,end) byte offsets in an input +// (start is inclusive, end is exclusive). +type WordConsumer func(start, end int) + +// Words find word delimiters in an input based on its bytes' mappings to rune roles. The offset +// delimiters for each word are fed to the provided consumer function. +func Words(roles []RuneRole, consume WordConsumer) { + var wordStart int + for i, r := range roles { + switch r { + case RUCTail, RTail: + case RHead, RNone, RSep: + if i != wordStart { + consume(wordStart, i) + } + wordStart = i + if r != RHead { + // Skip this character. + wordStart = i + 1 + } + } + } + if wordStart != len(roles) { + consume(wordStart, len(roles)) + } +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go b/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go new file mode 100644 index 0000000000..16a643097d --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go @@ -0,0 +1,398 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fuzzy implements a fuzzy matching algorithm. +package fuzzy + +import ( + "bytes" + "fmt" +) + +const ( + // MaxInputSize is the maximum size of the input scored against the fuzzy matcher. Longer inputs + // will be truncated to this size. + MaxInputSize = 127 + // MaxPatternSize is the maximum size of the pattern used to construct the fuzzy matcher. Longer + // inputs are truncated to this size. + MaxPatternSize = 63 +) + +type scoreVal int + +func (s scoreVal) val() int { + return int(s) >> 1 +} + +func (s scoreVal) prevK() int { + return int(s) & 1 +} + +func score(val int, prevK int /*0 or 1*/) scoreVal { + return scoreVal(val<<1 + prevK) +} + +// Matcher implements a fuzzy matching algorithm for scoring candidates against a pattern. +// The matcher does not support parallel usage. +type Matcher struct { + pattern string + patternLower []byte // lower-case version of the pattern + patternShort []byte // first characters of the pattern + caseSensitive bool // set if the pattern is mix-cased + + patternRoles []RuneRole // the role of each character in the pattern + roles []RuneRole // the role of each character in the tested string + + scores [MaxInputSize + 1][MaxPatternSize + 1][2]scoreVal + + scoreScale float32 + + lastCandidateLen int // in bytes + lastCandidateMatched bool + + // Here we save the last candidate in lower-case. This is basically a byte slice we reuse for + // performance reasons, so the slice is not reallocated for every candidate. + lowerBuf [MaxInputSize]byte + rolesBuf [MaxInputSize]RuneRole +} + +func (m *Matcher) bestK(i, j int) int { + if m.scores[i][j][0].val() < m.scores[i][j][1].val() { + return 1 + } + return 0 +} + +// NewMatcher returns a new fuzzy matcher for scoring candidates against the provided pattern. 
+func NewMatcher(pattern string) *Matcher { + if len(pattern) > MaxPatternSize { + pattern = pattern[:MaxPatternSize] + } + + m := &Matcher{ + pattern: pattern, + patternLower: ToLower(pattern, nil), + } + + for i, c := range m.patternLower { + if pattern[i] != c { + m.caseSensitive = true + break + } + } + + if len(pattern) > 3 { + m.patternShort = m.patternLower[:3] + } else { + m.patternShort = m.patternLower + } + + m.patternRoles = RuneRoles(pattern, nil) + + if len(pattern) > 0 { + maxCharScore := 4 + m.scoreScale = 1 / float32(maxCharScore*len(pattern)) + } + + return m +} + +// Score returns the score returned by matching the candidate to the pattern. +// This is not designed for parallel use. Multiple candidates must be scored sequentially. +// Returns a score between 0 and 1 (0 - no match, 1 - perfect match). +func (m *Matcher) Score(candidate string) float32 { + if len(candidate) > MaxInputSize { + candidate = candidate[:MaxInputSize] + } + lower := ToLower(candidate, m.lowerBuf[:]) + m.lastCandidateLen = len(candidate) + + if len(m.pattern) == 0 { + // Empty patterns perfectly match candidates. + return 1 + } + + if m.match(candidate, lower) { + sc := m.computeScore(candidate, lower) + if sc > minScore/2 && !m.poorMatch() { + m.lastCandidateMatched = true + if len(m.pattern) == len(candidate) { + // Perfect match. + return 1 + } + + if sc < 0 { + sc = 0 + } + normalizedScore := float32(sc) * m.scoreScale + if normalizedScore > 1 { + normalizedScore = 1 + } + + return normalizedScore + } + } + + m.lastCandidateMatched = false + return 0 +} + +const minScore = -10000 + +// MatchedRanges returns matches ranges for the last scored string as a flattened array of +// [begin, end) byte offset pairs. 
+func (m *Matcher) MatchedRanges() []int { + if len(m.pattern) == 0 || !m.lastCandidateMatched { + return nil + } + i, j := m.lastCandidateLen, len(m.pattern) + if m.scores[i][j][0].val() < minScore/2 && m.scores[i][j][1].val() < minScore/2 { + return nil + } + + var ret []int + k := m.bestK(i, j) + for i > 0 { + take := (k == 1) + k = m.scores[i][j][k].prevK() + if take { + if len(ret) == 0 || ret[len(ret)-1] != i { + ret = append(ret, i) + ret = append(ret, i-1) + } else { + ret[len(ret)-1] = i - 1 + } + j-- + } + i-- + } + // Reverse slice. + for i := 0; i < len(ret)/2; i++ { + ret[i], ret[len(ret)-1-i] = ret[len(ret)-1-i], ret[i] + } + return ret +} + +func (m *Matcher) match(candidate string, candidateLower []byte) bool { + i, j := 0, 0 + for ; i < len(candidateLower) && j < len(m.patternLower); i++ { + if candidateLower[i] == m.patternLower[j] { + j++ + } + } + if j != len(m.patternLower) { + return false + } + + // The input passes the simple test against pattern, so it is time to classify its characters. + // Character roles are used below to find the last segment. + m.roles = RuneRoles(candidate, m.rolesBuf[:]) + + return true +} + +func (m *Matcher) computeScore(candidate string, candidateLower []byte) int { + pattLen, candLen := len(m.pattern), len(candidate) + + for j := 0; j <= len(m.pattern); j++ { + m.scores[0][j][0] = minScore << 1 + m.scores[0][j][1] = minScore << 1 + } + m.scores[0][0][0] = score(0, 0) // Start with 0. + + segmentsLeft, lastSegStart := 1, 0 + for i := 0; i < candLen; i++ { + if m.roles[i] == RSep { + segmentsLeft++ + lastSegStart = i + 1 + } + } + + // A per-character bonus for a consecutive match. + consecutiveBonus := 2 + wordIdx := 0 // Word count within segment. 
+ for i := 1; i <= candLen; i++ { + + role := m.roles[i-1] + isHead := role == RHead + + if isHead { + wordIdx++ + } else if role == RSep && segmentsLeft > 1 { + wordIdx = 0 + segmentsLeft-- + } + + var skipPenalty int + if i == 1 || (i-1) == lastSegStart { + // Skipping the start of first or last segment. + skipPenalty++ + } + + for j := 0; j <= pattLen; j++ { + // By default, we don't have a match. Fill in the skip data. + m.scores[i][j][1] = minScore << 1 + + // Compute the skip score. + k := 0 + if m.scores[i-1][j][0].val() < m.scores[i-1][j][1].val() { + k = 1 + } + + skipScore := m.scores[i-1][j][k].val() + // Do not penalize missing characters after the last matched segment. + if j != pattLen { + skipScore -= skipPenalty + } + m.scores[i][j][0] = score(skipScore, k) + + if j == 0 || candidateLower[i-1] != m.patternLower[j-1] { + // Not a match. + continue + } + pRole := m.patternRoles[j-1] + + if role == RTail && pRole == RHead { + if j > 1 { + // Not a match: a head in the pattern matches a tail character in the candidate. + continue + } + // Special treatment for the first character of the pattern. We allow + // matches in the middle of a word if they are long enough, at least + // min(3, pattern.length) characters. + if !bytes.HasPrefix(candidateLower[i-1:], m.patternShort) { + continue + } + } + + // Compute the char score. + var charScore int + // Bonus 1: the char is in the candidate's last segment. + if segmentsLeft <= 1 { + charScore++ + } + // Bonus 2: Case match or a Head in the pattern aligns with one in the word. + // Single-case patterns lack segmentation signals and we assume any character + // can be a head of a segment. + if candidate[i-1] == m.pattern[j-1] || role == RHead && (!m.caseSensitive || pRole == RHead) { + charScore++ + } + + // Penalty 1: pattern char is Head, candidate char is Tail. + if role == RTail && pRole == RHead { + charScore-- + } + // Penalty 2: first pattern character matched in the middle of a word. 
+ if j == 1 && role == RTail { + charScore -= 4 + } + + // Third dimension encodes whether there is a gap between the previous match and the current + // one. + for k := 0; k < 2; k++ { + sc := m.scores[i-1][j-1][k].val() + charScore + + isConsecutive := k == 1 || i-1 == 0 || i-1 == lastSegStart + if isConsecutive { + // Bonus 3: a consecutive match. First character match also gets a bonus to + // ensure prefix final match score normalizes to 1.0. + // Logically, this is a part of charScore, but we have to compute it here because it + // only applies for consecutive matches (k == 1). + sc += consecutiveBonus + } + if k == 0 { + // Penalty 3: Matching inside a segment (and previous char wasn't matched). Penalize for the lack + // of alignment. + if role == RTail || role == RUCTail { + sc -= 3 + } + } + + if sc > m.scores[i][j][1].val() { + m.scores[i][j][1] = score(sc, k) + } + } + } + } + + result := m.scores[len(candidate)][len(m.pattern)][m.bestK(len(candidate), len(m.pattern))].val() + + return result +} + +// ScoreTable returns the score table computed for the provided candidate. Used only for debugging. 
+func (m *Matcher) ScoreTable(candidate string) string { + var buf bytes.Buffer + + var line1, line2, separator bytes.Buffer + line1.WriteString("\t") + line2.WriteString("\t") + for j := 0; j < len(m.pattern); j++ { + line1.WriteString(fmt.Sprintf("%c\t\t", m.pattern[j])) + separator.WriteString("----------------") + } + + buf.WriteString(line1.String()) + buf.WriteString("\n") + buf.WriteString(separator.String()) + buf.WriteString("\n") + + for i := 1; i <= len(candidate); i++ { + line1.Reset() + line2.Reset() + + line1.WriteString(fmt.Sprintf("%c\t", candidate[i-1])) + line2.WriteString("\t") + + for j := 1; j <= len(m.pattern); j++ { + line1.WriteString(fmt.Sprintf("M%6d(%c)\t", m.scores[i][j][0].val(), dir(m.scores[i][j][0].prevK()))) + line2.WriteString(fmt.Sprintf("H%6d(%c)\t", m.scores[i][j][1].val(), dir(m.scores[i][j][1].prevK()))) + } + buf.WriteString(line1.String()) + buf.WriteString("\n") + buf.WriteString(line2.String()) + buf.WriteString("\n") + buf.WriteString(separator.String()) + buf.WriteString("\n") + } + + return buf.String() +} + +func dir(prevK int) rune { + if prevK == 0 { + return 'M' + } + return 'H' +} + +func (m *Matcher) poorMatch() bool { + if len(m.pattern) < 2 { + return false + } + + i, j := m.lastCandidateLen, len(m.pattern) + k := m.bestK(i, j) + + var counter, len int + for i > 0 { + take := (k == 1) + k = m.scores[i][j][k].prevK() + if take { + len++ + if k == 0 && len < 3 && m.roles[i-1] == RTail { + // Short match in the middle of a word + counter++ + if counter > 1 { + return true + } + } + j-- + } else { + len = 0 + } + i-- + } + return false +} diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt index c0c008e038..c827365400 100644 --- a/src/cmd/vendor/modules.txt +++ b/src/cmd/vendor/modules.txt @@ -45,7 +45,7 @@ golang.org/x/mod/zip golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 +# golang.org/x/tools 
v0.0.0-20200901153117-6e59e24738da ## explicit golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/internal/analysisflags @@ -84,6 +84,7 @@ golang.org/x/tools/go/cfg golang.org/x/tools/go/types/objectpath golang.org/x/tools/go/types/typeutil golang.org/x/tools/internal/analysisinternal +golang.org/x/tools/internal/lsp/fuzzy # golang.org/x/xerrors v0.0.0-20200806184451-1a77d5e9f316 ## explicit golang.org/x/xerrors -- cgit v1.2.3-54-g00ecf From d27ebc7b8630993269c36e7728a7f30543ffa048 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Fri, 13 Mar 2020 16:32:37 -0400 Subject: cmd/go/internal/modload: implement the "all" pattern for lazy loading The new semantics of the "all" package pattern can be implemented without actually changing module loading per se. This change implements those semantics, so that the change can be decoupled from the changes to the module requirement graph. For #36460 Change-Id: I0ee8b17afa8b728dc470a42a540fcc01764a4442 Reviewed-on: https://go-review.googlesource.com/c/go/+/240623 Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod Reviewed-by: Michael Matloob --- doc/go1.16.html | 14 ++++- src/cmd/go/internal/modcmd/tidy.go | 12 ++++ src/cmd/go/internal/modcmd/why.go | 2 + src/cmd/go/internal/modload/load.go | 16 ++++-- src/cmd/go/internal/modload/modfile.go | 22 ++++++++ src/cmd/go/testdata/script/mod_all.txt | 100 +++++++++++++++++++++++++++++++-- 6 files changed, 154 insertions(+), 12 deletions(-) diff --git a/doc/go1.16.html b/doc/go1.16.html index 0ffaecc5a9..95e63d0d5a 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -52,7 +52,7 @@ Do not send CLs removing the interior tags from such phrases. TODO: write and link to tutorial or blog post

-

+

When using go test, a test that calls os.Exit(0) during execution of a test function will now be considered to fail. @@ -62,6 +62,18 @@ Do not send CLs removing the interior tags from such phrases. that is still considered to be a passing test.

+

The all pattern

+ +

+ When the main module's go.mod file + declares go 1.16 or higher, the all + package pattern now matches only those packages that are transitively imported + by a package or test found in the main module. (Packages imported by tests + of packages imported by the main module are no longer included.) This is + the same set of packages retained + by go mod vendor since Go 1.11. +

+

TODO

diff --git a/src/cmd/go/internal/modcmd/tidy.go b/src/cmd/go/internal/modcmd/tidy.go index c7c53d7c0c..4dcb62e02f 100644 --- a/src/cmd/go/internal/modcmd/tidy.go +++ b/src/cmd/go/internal/modcmd/tidy.go @@ -40,6 +40,18 @@ func runTidy(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go mod tidy: no arguments allowed") } + // Tidy aims to make 'go test' reproducible for any package in 'all', so we + // need to include test dependencies. For modules that specify go 1.15 or + // earlier this is a no-op (because 'all' saturates transitive test + // dependencies). + // + // However, with lazy loading (go 1.16+) 'all' includes only the packages that + // are transitively imported by the main module, not the test dependencies of + // those packages. In order to make 'go test' reproducible for the packages + // that are in 'all' but outside of the main module, we must explicitly + // request that their test dependencies be included. + modload.LoadTests = true + modload.LoadALL(ctx) modload.TidyBuildList() modload.TrimGoSum() diff --git a/src/cmd/go/internal/modcmd/why.go b/src/cmd/go/internal/modcmd/why.go index b16887d318..30b15fc153 100644 --- a/src/cmd/go/internal/modcmd/why.go +++ b/src/cmd/go/internal/modcmd/why.go @@ -65,6 +65,8 @@ func runWhy(ctx context.Context, cmd *base.Command, args []string) { loadALL := modload.LoadALL if *whyVendor { loadALL = modload.LoadVendor + } else { + modload.LoadTests = true } if *whyM { listU := false diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 2096dfb636..9cedc219b6 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -231,7 +231,7 @@ func ImportPathsQuiet(ctx context.Context, patterns []string, tags map[string]bo loaded = loadFromRoots(loaderParams{ tags: tags, allPatternIsRoot: allPatternIsRoot, - allClosesOverTests: true, // until lazy loading in Go 1.16+ + allClosesOverTests: index.allPatternClosesOverTests(), 
listRoots: func() (roots []string) { updateMatches(nil) @@ -450,7 +450,7 @@ func ImportFromFiles(ctx context.Context, gofiles []string) { roots = append(roots, testImports...) return roots }, - allClosesOverTests: true, // until lazy loading. + allClosesOverTests: index.allPatternClosesOverTests(), }) WriteGoMod() } @@ -501,7 +501,7 @@ func ReloadBuildList() []module.Version { loaded = loadFromRoots(loaderParams{ tags: imports.Tags(), listRoots: func() []string { return nil }, - allClosesOverTests: true, // until lazy loading, but doesn't matter because the root list is empty. + allClosesOverTests: index.allPatternClosesOverTests(), // but doesn't matter because the root list is empty. }) return buildList } @@ -512,9 +512,13 @@ func ReloadBuildList() []module.Version { // It adds modules to the build list as needed to satisfy new imports. // This set is useful for deciding whether a particular import is needed // anywhere in a module. +// +// In modules that specify "go 1.16" or higher, ALL follows only one layer of +// test dependencies. In "go 1.15" or lower, ALL follows the imports of tests of +// dependencies of tests. func LoadALL(ctx context.Context) []string { InitMod(ctx) - return loadAll(ctx, true) + return loadAll(ctx, index.allPatternClosesOverTests()) } // LoadVendor is like LoadALL but only follows test dependencies @@ -523,7 +527,9 @@ func LoadALL(ctx context.Context) []string { // This set is useful for identifying the which packages to include in a vendor directory. func LoadVendor(ctx context.Context) []string { InitMod(ctx) - return loadAll(ctx, false) + // 'go mod vendor' has never followed test dependencies since Go 1.11. 
+ const closeOverTests = false + return loadAll(ctx, closeOverTests) } func loadAll(ctx context.Context, closeOverTests bool) []string { diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go index a45c4a63be..18dd293ac9 100644 --- a/src/cmd/go/internal/modload/modfile.go +++ b/src/cmd/go/internal/modload/modfile.go @@ -25,6 +25,11 @@ import ( "golang.org/x/mod/semver" ) +// lazyLoadingVersion is the Go version (plus leading "v") at which lazy module +// loading takes effect. +const lazyLoadingVersionV = "v1.16" +const go116EnableLazyLoading = true + var modFile *modfile.File // A modFileIndex is an index of data corresponding to a modFile @@ -249,6 +254,23 @@ func indexModFile(data []byte, modFile *modfile.File, needsFix bool) *modFileInd return i } +// allPatternClosesOverTests reports whether the "all" pattern includes +// dependencies of tests outside the main module (as in Go 1.11–1.15). +// (Otherwise — as in Go 1.16+ — the "all" pattern includes only the packages +// transitively *imported by* the packages and tests in the main module.) +func (i *modFileIndex) allPatternClosesOverTests() bool { + if !go116EnableLazyLoading { + return true + } + if i != nil && semver.Compare(i.goVersionV, lazyLoadingVersionV) < 0 { + // The module explicitly predates the change in "all" for lazy loading, so + // continue to use the older interpretation. (If i == nil, we not in any + // module at all and should use the latest semantics.) + return true + } + return false +} + // modFileIsDirty reports whether the go.mod file differs meaningfully // from what was indexed. 
// If modFile has been changed (even cosmetically) since it was first read, diff --git a/src/cmd/go/testdata/script/mod_all.txt b/src/cmd/go/testdata/script/mod_all.txt index 9f4b0a4e4d..aac66292d6 100644 --- a/src/cmd/go/testdata/script/mod_all.txt +++ b/src/cmd/go/testdata/script/mod_all.txt @@ -187,17 +187,105 @@ stdout '^example.com/main_test \[example.com/main.test\]$' stdout '^example.com/main/testonly.test$' stdout '^example.com/main/testonly_test \[example.com/main/testonly.test\]$' -# TODO(#36460): +rm vendor + +# Convert all modules to go 1.16 to enable lazy loading. +go mod edit -go=1.16 a/go.mod +go mod edit -go=1.16 b/go.mod +go mod edit -go=1.16 c/go.mod +go mod edit -go=1.16 d/go.mod +go mod edit -go=1.16 q/go.mod +go mod edit -go=1.16 r/go.mod +go mod edit -go=1.16 s/go.mod +go mod edit -go=1.16 t/go.mod +go mod edit -go=1.16 u/go.mod +go mod edit -go=1.16 w/go.mod +go mod edit -go=1.16 x/go.mod +go mod edit -go=1.16 + +# With lazy loading, 'go list all' with neither -mod=vendor nor -test should +# match -mod=vendor without -test in 1.15. -# With lazy loading, 'go list all' without -mod=vendor should match -# 'go mod vendor'. +go list -f $PKGFMT all +stdout -count=8 '^.' +stdout '^example.com/a$' +stdout '^example.com/b$' +stdout '^example.com/main$' +stdout '^example.com/main/testonly$' +stdout '^example.com/q$' +stdout '^example.com/r$' +stdout '^example.com/t$' +stdout '^example.com/u$' -# 'go list -test all' should expand that to cover test dependencies -# of packages imported by the main module. +# 'go list -test all' should expand that to include the test variants of the +# packages in 'all', but not the dependencies of outside tests. -# 'go list -m all' should cover the packages in 'go list -test all'. +go list -test -f $PKGFMT all +stdout -count=25 '^.' 
+stdout '^example.com/a$' +stdout '^example.com/b$' +stdout '^example.com/main$' +stdout '^example.com/main/testonly$' +stdout '^example.com/q$' +stdout '^example.com/r$' +stdout '^example.com/t$' +stdout '^example.com/u$' +stdout '^example.com/a.test$' +stdout '^example.com/a_test \[example.com/a.test\]$' +stdout '^example.com/b.test$' +stdout '^example.com/b_test \[example.com/b.test\]$' +stdout '^example.com/main.test$' +stdout '^example.com/main \[example.com/main.test\]$' +stdout '^example.com/main_test \[example.com/main.test\]$' +stdout '^example.com/main/testonly.test$' +stdout '^example.com/main/testonly_test \[example.com/main/testonly.test\]$' +stdout '^example.com/q.test$' +stdout '^example.com/q_test \[example.com/q.test\]$' +stdout '^example.com/r.test$' +stdout '^example.com/r_test \[example.com/r.test\]$' +stdout '^example.com/t.test$' +stdout '^example.com/t_test \[example.com/t.test\]$' +stdout '^example.com/u.test$' +stdout '^example.com/u_test \[example.com/u.test\]$' + +# 'go list -test -deps all' should include the dependencies of those tests, +# but not the tests of the dependencies of outside tests. + +go list -test -deps -f $PKGFMT all +stdout -count=28 '^.' 
+stdout '^example.com/a$' +stdout '^example.com/b$' +stdout '^example.com/c$' +stdout '^example.com/main$' +stdout '^example.com/main/testonly$' +stdout '^example.com/q$' +stdout '^example.com/r$' +stdout '^example.com/s$' +stdout '^example.com/t$' +stdout '^example.com/u$' +stdout '^example.com/w$' +stdout '^example.com/a.test$' +stdout '^example.com/a_test \[example.com/a.test\]$' +stdout '^example.com/b.test$' +stdout '^example.com/b_test \[example.com/b.test\]$' +stdout '^example.com/main.test$' +stdout '^example.com/main \[example.com/main.test\]$' +stdout '^example.com/main_test \[example.com/main.test\]$' +stdout '^example.com/main/testonly.test$' +stdout '^example.com/main/testonly_test \[example.com/main/testonly.test\]$' +stdout '^example.com/q.test$' +stdout '^example.com/q_test \[example.com/q.test\]$' +stdout '^example.com/r.test$' +stdout '^example.com/r_test \[example.com/r.test\]$' +stdout '^example.com/t.test$' +stdout '^example.com/t_test \[example.com/t.test\]$' +stdout '^example.com/u.test$' +stdout '^example.com/u_test \[example.com/u.test\]$' +# TODO(#36460): +# 'go list -m all' should exactly cover the packages in 'go list -test all'. + -- go.mod -- module example.com/main -- cgit v1.2.3-54-g00ecf From 363fb4bcc814069e3d80e22bd022599179ec1c62 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Wed, 1 Jul 2020 22:38:45 -0400 Subject: cmd/go/internal/modload: consolidate buildList and associated functions into one file Change-Id: I310c37c7f0ce5581f07cf6e27d1f6361d03b92ef Reviewed-on: https://go-review.googlesource.com/c/go/+/244077 Run-TryBot: Bryan C. 
Mills TryBot-Result: Gobot Gobot Reviewed-by: Michael Matloob Reviewed-by: Jay Conrod --- src/cmd/go/internal/modget/get.go | 4 ++ src/cmd/go/internal/modload/buildlist.go | 117 +++++++++++++++++++++++++++++++ src/cmd/go/internal/modload/load.go | 103 --------------------------- 3 files changed, 121 insertions(+), 103 deletions(-) create mode 100644 src/cmd/go/internal/modload/buildlist.go diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index 4ca7f5b529..126b1f4bd4 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -628,6 +628,10 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { if err != nil { base.Fatalf("go: %v", err) } + + // TODO(bcmills) What should happen here under lazy loading? + // Downgrading may intentionally violate the lazy-loading invariants. + modload.SetBuildList(buildList) modload.ReloadBuildList() // note: does not update go.mod base.ExitIfErrors() diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go new file mode 100644 index 0000000000..2302b044e8 --- /dev/null +++ b/src/cmd/go/internal/modload/buildlist.go @@ -0,0 +1,117 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modload + +import ( + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/imports" + "cmd/go/internal/mvs" + "context" + "fmt" + "os" + + "golang.org/x/mod/module" +) + +// buildList is the list of modules to use for building packages. +// It is initialized by calling ImportPaths, ImportFromFiles, +// LoadALL, or LoadBuildList, each of which uses loaded.load. +// +// Ideally, exactly ONE of those functions would be called, +// and exactly once. Most of the time, that's true. +// During "go get" it may not be. TODO(rsc): Figure out if +// that restriction can be established, or else document why not. 
+// +var buildList []module.Version + +// LoadBuildList loads and returns the build list from go.mod. +// The loading of the build list happens automatically in ImportPaths: +// LoadBuildList need only be called if ImportPaths is not +// (typically in commands that care about the module but +// no particular package). +func LoadBuildList(ctx context.Context) []module.Version { + InitMod(ctx) + ReloadBuildList() + WriteGoMod() + return buildList +} + +// ReloadBuildList resets the state of loaded packages, then loads and returns +// the build list set in SetBuildList. +func ReloadBuildList() []module.Version { + loaded = loadFromRoots(loaderParams{ + tags: imports.Tags(), + listRoots: func() []string { return nil }, + allClosesOverTests: index.allPatternClosesOverTests(), // but doesn't matter because the root list is empty. + }) + return buildList +} + +// BuildList returns the module build list, +// typically constructed by a previous call to +// LoadBuildList or ImportPaths. +// The caller must not modify the returned list. +func BuildList() []module.Version { + return buildList +} + +// SetBuildList sets the module build list. +// The caller is responsible for ensuring that the list is valid. +// SetBuildList does not retain a reference to the original list. +func SetBuildList(list []module.Version) { + buildList = append([]module.Version{}, list...) +} + +// TidyBuildList trims the build list to the minimal requirements needed to +// retain the same versions of all packages from the preceding Load* or +// ImportPaths* call. 
+func TidyBuildList() { + used := map[module.Version]bool{Target: true} + for _, pkg := range loaded.pkgs { + used[pkg.mod] = true + } + + keep := []module.Version{Target} + var direct []string + for _, m := range buildList[1:] { + if used[m] { + keep = append(keep, m) + if loaded.direct[m.Path] { + direct = append(direct, m.Path) + } + } else if cfg.BuildV { + if _, ok := index.require[m]; ok { + fmt.Fprintf(os.Stderr, "unused %s\n", m.Path) + } + } + } + + min, err := mvs.Req(Target, direct, &mvsReqs{buildList: keep}) + if err != nil { + base.Fatalf("go: %v", err) + } + buildList = append([]module.Version{Target}, min...) +} + +// checkMultiplePaths verifies that a given module path is used as itself +// or as a replacement for another module, but not both at the same time. +// +// (See https://golang.org/issue/26607 and https://golang.org/issue/34650.) +func checkMultiplePaths() { + firstPath := make(map[module.Version]string, len(buildList)) + for _, mod := range buildList { + src := mod + if rep := Replacement(mod); rep.Path != "" { + src = rep + } + if prev, ok := firstPath[src]; !ok { + firstPath[src] = mod.Path + } else if prev != mod.Path { + base.Errorf("go: %s@%s used for two different module paths (%s and %s)", src.Path, src.Version, prev, mod.Path) + } + } + base.ExitIfErrors() +} diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 9cedc219b6..6050646594 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -118,22 +118,10 @@ import ( "cmd/go/internal/par" "cmd/go/internal/search" "cmd/go/internal/str" - "cmd/go/internal/trace" "golang.org/x/mod/module" ) -// buildList is the list of modules to use for building packages. -// It is initialized by calling ImportPaths, ImportFromFiles, -// LoadALL, or LoadBuildList, each of which uses loaded.load. -// -// Ideally, exactly ONE of those functions would be called, -// and exactly once. Most of the time, that's true. 
-// During "go get" it may not be. TODO(rsc): Figure out if -// that restriction can be established, or else document why not. -// -var buildList []module.Version - // loaded is the most recently-used package loader. // It holds details about individual packages. var loaded *loader @@ -250,26 +238,6 @@ func ImportPathsQuiet(ctx context.Context, patterns []string, tags map[string]bo return matches } -// checkMultiplePaths verifies that a given module path is used as itself -// or as a replacement for another module, but not both at the same time. -// -// (See https://golang.org/issue/26607 and https://golang.org/issue/34650.) -func checkMultiplePaths() { - firstPath := make(map[module.Version]string, len(buildList)) - for _, mod := range buildList { - src := mod - if rep := Replacement(mod); rep.Path != "" { - src = rep - } - if prev, ok := firstPath[src]; !ok { - firstPath[src] = mod.Path - } else if prev != mod.Path { - base.Errorf("go: %s@%s used for two different module paths (%s and %s)", src.Path, src.Version, prev, mod.Path) - } - } - base.ExitIfErrors() -} - // matchLocalDirs is like m.MatchDirs, but tries to avoid scanning directories // outside of the standard library and active modules. func matchLocalDirs(m *search.Match) { @@ -481,31 +449,6 @@ func DirImportPath(dir string) string { return "." } -// LoadBuildList loads and returns the build list from go.mod. -// The loading of the build list happens automatically in ImportPaths: -// LoadBuildList need only be called if ImportPaths is not -// (typically in commands that care about the module but -// no particular package). -func LoadBuildList(ctx context.Context) []module.Version { - ctx, span := trace.StartSpan(ctx, "LoadBuildList") - defer span.Done() - InitMod(ctx) - ReloadBuildList() - WriteGoMod() - return buildList -} - -// ReloadBuildList resets the state of loaded packages, then loads and returns -// the build list set in SetBuildList. 
-func ReloadBuildList() []module.Version { - loaded = loadFromRoots(loaderParams{ - tags: imports.Tags(), - listRoots: func() []string { return nil }, - allClosesOverTests: index.allPatternClosesOverTests(), // but doesn't matter because the root list is empty. - }) - return buildList -} - // LoadALL returns the set of all packages in the current module // and their dependencies in any other modules, without filtering // due to build tags, except "+build ignore". @@ -571,52 +514,6 @@ func TargetPackages(ctx context.Context, pattern string) *search.Match { return m } -// BuildList returns the module build list, -// typically constructed by a previous call to -// LoadBuildList or ImportPaths. -// The caller must not modify the returned list. -func BuildList() []module.Version { - return buildList -} - -// SetBuildList sets the module build list. -// The caller is responsible for ensuring that the list is valid. -// SetBuildList does not retain a reference to the original list. -func SetBuildList(list []module.Version) { - buildList = append([]module.Version{}, list...) -} - -// TidyBuildList trims the build list to the minimal requirements needed to -// retain the same versions of all packages from the preceding Load* or -// ImportPaths* call. -func TidyBuildList() { - used := map[module.Version]bool{Target: true} - for _, pkg := range loaded.pkgs { - used[pkg.mod] = true - } - - keep := []module.Version{Target} - var direct []string - for _, m := range buildList[1:] { - if used[m] { - keep = append(keep, m) - if loaded.direct[m.Path] { - direct = append(direct, m.Path) - } - } else if cfg.BuildV { - if _, ok := index.require[m]; ok { - fmt.Fprintf(os.Stderr, "unused %s\n", m.Path) - } - } - } - - min, err := mvs.Req(Target, direct, &mvsReqs{buildList: keep}) - if err != nil { - base.Fatalf("go: %v", err) - } - buildList = append([]module.Version{Target}, min...) -} - // ImportMap returns the actual package import path // for an import path found in source code. 
// If the given import path does not appear in the source code -- cgit v1.2.3-54-g00ecf From 521393e7e05cd9272ae6023387fa92839d72eb4f Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Mon, 27 Jul 2020 13:57:12 -0400 Subject: cmd/go/internal/modget: move MVS code to a separate file For #36460 Change-Id: Ie81c03df18c6987527da765d5f6575556340cb01 Reviewed-on: https://go-review.googlesource.com/c/go/+/249877 Run-TryBot: Bryan C. Mills TryBot-Result: Gobot Gobot Reviewed-by: Michael Matloob Reviewed-by: Jay Conrod --- src/cmd/go/internal/modget/get.go | 188 +---------------------------------- src/cmd/go/internal/modget/mvs.go | 202 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 203 insertions(+), 187 deletions(-) create mode 100644 src/cmd/go/internal/modget/mvs.go diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index 126b1f4bd4..cf9ad66b3d 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -290,7 +290,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { // what was requested. modload.DisallowWriteGoMod() - // Allow looking up modules for import paths outside of a module. + // Allow looking up modules for import paths when outside of a module. // 'go get' is expected to do this, unlike other commands. modload.AllowMissingModuleImports() @@ -885,192 +885,6 @@ func getQuery(ctx context.Context, path, vers string, prevM module.Version, forc return m, nil } -// An upgrader adapts an underlying mvs.Reqs to apply an -// upgrade policy to a list of targets and their dependencies. -type upgrader struct { - mvs.Reqs - - // cmdline maps a module path to a query made for that module at a - // specific target version. Each query corresponds to a module - // matched by a command line argument. - cmdline map[string]*query - - // upgrade is a set of modules providing dependencies of packages - // matched by command line arguments. 
If -u or -u=patch is set, - // these modules are upgraded accordingly. - upgrade map[string]bool -} - -// newUpgrader creates an upgrader. cmdline contains queries made at -// specific versions for modules matched by command line arguments. pkgs -// is the set of packages matched by command line arguments. If -u or -u=patch -// is set, modules providing dependencies of pkgs are upgraded accordingly. -func newUpgrader(cmdline map[string]*query, pkgs map[string]bool) *upgrader { - u := &upgrader{ - Reqs: modload.Reqs(), - cmdline: cmdline, - } - if getU != "" { - u.upgrade = make(map[string]bool) - - // Traverse package import graph. - // Initialize work queue with root packages. - seen := make(map[string]bool) - var work []string - add := func(path string) { - if !seen[path] { - seen[path] = true - work = append(work, path) - } - } - for pkg := range pkgs { - add(pkg) - } - for len(work) > 0 { - pkg := work[0] - work = work[1:] - m := modload.PackageModule(pkg) - u.upgrade[m.Path] = true - - // testImports is empty unless test imports were actually loaded, - // i.e., -t was set or "all" was one of the arguments. - imports, testImports := modload.PackageImports(pkg) - for _, imp := range imports { - add(imp) - } - for _, imp := range testImports { - add(imp) - } - } - } - return u -} - -// Required returns the requirement list for m. -// For the main module, we override requirements with the modules named -// one the command line, and we include new requirements. Otherwise, -// we defer to u.Reqs. 
-func (u *upgrader) Required(m module.Version) ([]module.Version, error) { - rs, err := u.Reqs.Required(m) - if err != nil { - return nil, err - } - if m != modload.Target { - return rs, nil - } - - overridden := make(map[string]bool) - for i, m := range rs { - if q := u.cmdline[m.Path]; q != nil && q.m.Version != "none" { - rs[i] = q.m - overridden[q.m.Path] = true - } - } - for _, q := range u.cmdline { - if !overridden[q.m.Path] && q.m.Path != modload.Target.Path && q.m.Version != "none" { - rs = append(rs, q.m) - } - } - return rs, nil -} - -// Upgrade returns the desired upgrade for m. -// -// If m was requested at a specific version on the command line, then -// Upgrade returns that version. -// -// If -u is set and m provides a dependency of a package matched by -// command line arguments, then Upgrade may provider a newer tagged version. -// If m is a tagged version, then Upgrade will return the latest tagged -// version (with the same minor version number if -u=patch). -// If m is a pseudo-version, then Upgrade returns the latest tagged version -// only if that version has a time-stamp newer than m. This special case -// prevents accidental downgrades when already using a pseudo-version -// newer than the latest tagged version. -// -// If none of the above cases apply, then Upgrade returns m. -func (u *upgrader) Upgrade(m module.Version) (module.Version, error) { - // Allow pkg@vers on the command line to override the upgrade choice v. - // If q's version is < m.Version, then we're going to downgrade anyway, - // and it's cleaner to avoid moving back and forth and picking up - // extraneous other newer dependencies. - // If q's version is > m.Version, then we're going to upgrade past - // m.Version anyway, and again it's cleaner to avoid moving back and forth - // picking up extraneous other newer dependencies. - if q := u.cmdline[m.Path]; q != nil { - return q.m, nil - } - - if !u.upgrade[m.Path] { - // Not involved in upgrade. Leave alone. 
- return m, nil - } - - // Run query required by upgrade semantics. - // Note that Query "latest" is not the same as using repo.Latest, - // which may return a pseudoversion for the latest commit. - // Query "latest" returns the newest tagged version or the newest - // prerelease version if there are no non-prereleases, or repo.Latest - // if there aren't any tagged versions. - // If we're querying "upgrade" or "patch", Query will compare the current - // version against the chosen version and will return the current version - // if it is newer. - info, err := modload.Query(context.TODO(), m.Path, string(getU), m.Version, modload.CheckAllowed) - if err != nil { - // Report error but return m, to let version selection continue. - // (Reporting the error will fail the command at the next base.ExitIfErrors.) - - // Special case: if the error is for m.Version itself and m.Version has a - // replacement, then keep it and don't report the error: the fact that the - // version is invalid is likely the reason it was replaced to begin with. - var vErr *module.InvalidVersionError - if errors.As(err, &vErr) && vErr.Version == m.Version && modload.Replacement(m).Path != "" { - return m, nil - } - - // Special case: if the error is "no matching versions" then don't - // even report the error. Because Query does not consider pseudo-versions, - // it may happen that we have a pseudo-version but during -u=patch - // the query v0.0 matches no versions (not even the one we're using). - var noMatch *modload.NoMatchingVersionError - if !errors.As(err, &noMatch) { - base.Errorf("go get: upgrading %s@%s: %v", m.Path, m.Version, err) - } - return m, nil - } - - if info.Version != m.Version { - logOncef("go: %s %s => %s", m.Path, getU, info.Version) - } - return module.Version{Path: m.Path, Version: info.Version}, nil -} - -// buildListForLostUpgrade returns the build list for the module graph -// rooted at lost. Unlike mvs.BuildList, the target module (lost) is not -// treated specially. 
The returned build list may contain a newer version -// of lost. -// -// buildListForLostUpgrade is used after a downgrade has removed a module -// requested at a specific version. This helps us understand the requirements -// implied by each downgrade. -func buildListForLostUpgrade(lost module.Version, reqs mvs.Reqs) ([]module.Version, error) { - return mvs.BuildList(lostUpgradeRoot, &lostUpgradeReqs{Reqs: reqs, lost: lost}) -} - -var lostUpgradeRoot = module.Version{Path: "lost-upgrade-root", Version: ""} - -type lostUpgradeReqs struct { - mvs.Reqs - lost module.Version -} - -func (r *lostUpgradeReqs) Required(mod module.Version) ([]module.Version, error) { - if mod == lostUpgradeRoot { - return []module.Version{r.lost}, nil - } - return r.Reqs.Required(mod) -} - // reportRetractions prints warnings if any modules in the build list are // retracted. func reportRetractions(ctx context.Context) { diff --git a/src/cmd/go/internal/modget/mvs.go b/src/cmd/go/internal/modget/mvs.go new file mode 100644 index 0000000000..19fffd2947 --- /dev/null +++ b/src/cmd/go/internal/modget/mvs.go @@ -0,0 +1,202 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modget + +import ( + "context" + "errors" + + "cmd/go/internal/base" + "cmd/go/internal/modload" + "cmd/go/internal/mvs" + + "golang.org/x/mod/module" +) + +// An upgrader adapts an underlying mvs.Reqs to apply an +// upgrade policy to a list of targets and their dependencies. +type upgrader struct { + mvs.Reqs + + // cmdline maps a module path to a query made for that module at a + // specific target version. Each query corresponds to a module + // matched by a command line argument. + cmdline map[string]*query + + // upgrade is a set of modules providing dependencies of packages + // matched by command line arguments. If -u or -u=patch is set, + // these modules are upgraded accordingly. 
+ upgrade map[string]bool +} + +// newUpgrader creates an upgrader. cmdline contains queries made at +// specific versions for modules matched by command line arguments. pkgs +// is the set of packages matched by command line arguments. If -u or -u=patch +// is set, modules providing dependencies of pkgs are upgraded accordingly. +func newUpgrader(cmdline map[string]*query, pkgs map[string]bool) *upgrader { + u := &upgrader{ + Reqs: modload.Reqs(), + cmdline: cmdline, + } + if getU != "" { + u.upgrade = make(map[string]bool) + + // Traverse package import graph. + // Initialize work queue with root packages. + seen := make(map[string]bool) + var work []string + add := func(path string) { + if !seen[path] { + seen[path] = true + work = append(work, path) + } + } + for pkg := range pkgs { + add(pkg) + } + for len(work) > 0 { + pkg := work[0] + work = work[1:] + m := modload.PackageModule(pkg) + u.upgrade[m.Path] = true + + // testImports is empty unless test imports were actually loaded, + // i.e., -t was set or "all" was one of the arguments. + imports, testImports := modload.PackageImports(pkg) + for _, imp := range imports { + add(imp) + } + for _, imp := range testImports { + add(imp) + } + } + } + return u +} + +// Required returns the requirement list for m. +// For the main module, we override requirements with the modules named +// one the command line, and we include new requirements. Otherwise, +// we defer to u.Reqs. 
+func (u *upgrader) Required(m module.Version) ([]module.Version, error) { + rs, err := u.Reqs.Required(m) + if err != nil { + return nil, err + } + if m != modload.Target { + return rs, nil + } + + overridden := make(map[string]bool) + for i, m := range rs { + if q := u.cmdline[m.Path]; q != nil && q.m.Version != "none" { + rs[i] = q.m + overridden[q.m.Path] = true + } + } + for _, q := range u.cmdline { + if !overridden[q.m.Path] && q.m.Path != modload.Target.Path && q.m.Version != "none" { + rs = append(rs, q.m) + } + } + return rs, nil +} + +// Upgrade returns the desired upgrade for m. +// +// If m was requested at a specific version on the command line, then +// Upgrade returns that version. +// +// If -u is set and m provides a dependency of a package matched by +// command line arguments, then Upgrade may provider a newer tagged version. +// If m is a tagged version, then Upgrade will return the latest tagged +// version (with the same minor version number if -u=patch). +// If m is a pseudo-version, then Upgrade returns the latest tagged version +// only if that version has a time-stamp newer than m. This special case +// prevents accidental downgrades when already using a pseudo-version +// newer than the latest tagged version. +// +// If none of the above cases apply, then Upgrade returns m. +func (u *upgrader) Upgrade(m module.Version) (module.Version, error) { + // Allow pkg@vers on the command line to override the upgrade choice v. + // If q's version is < m.Version, then we're going to downgrade anyway, + // and it's cleaner to avoid moving back and forth and picking up + // extraneous other newer dependencies. + // If q's version is > m.Version, then we're going to upgrade past + // m.Version anyway, and again it's cleaner to avoid moving back and forth + // picking up extraneous other newer dependencies. + if q := u.cmdline[m.Path]; q != nil { + return q.m, nil + } + + if !u.upgrade[m.Path] { + // Not involved in upgrade. Leave alone. 
+ return m, nil + } + + // Run query required by upgrade semantics. + // Note that Query "latest" is not the same as using repo.Latest, + // which may return a pseudoversion for the latest commit. + // Query "latest" returns the newest tagged version or the newest + // prerelease version if there are no non-prereleases, or repo.Latest + // if there aren't any tagged versions. + // If we're querying "upgrade" or "patch", Query will compare the current + // version against the chosen version and will return the current version + // if it is newer. + info, err := modload.Query(context.TODO(), m.Path, string(getU), m.Version, modload.CheckAllowed) + if err != nil { + // Report error but return m, to let version selection continue. + // (Reporting the error will fail the command at the next base.ExitIfErrors.) + + // Special case: if the error is for m.Version itself and m.Version has a + // replacement, then keep it and don't report the error: the fact that the + // version is invalid is likely the reason it was replaced to begin with. + var vErr *module.InvalidVersionError + if errors.As(err, &vErr) && vErr.Version == m.Version && modload.Replacement(m).Path != "" { + return m, nil + } + + // Special case: if the error is "no matching versions" then don't + // even report the error. Because Query does not consider pseudo-versions, + // it may happen that we have a pseudo-version but during -u=patch + // the query v0.0 matches no versions (not even the one we're using). + var noMatch *modload.NoMatchingVersionError + if !errors.As(err, &noMatch) { + base.Errorf("go get: upgrading %s@%s: %v", m.Path, m.Version, err) + } + return m, nil + } + + if info.Version != m.Version { + logOncef("go: %s %s => %s", m.Path, getU, info.Version) + } + return module.Version{Path: m.Path, Version: info.Version}, nil +} + +// buildListForLostUpgrade returns the build list for the module graph +// rooted at lost. Unlike mvs.BuildList, the target module (lost) is not +// treated specially. 
The returned build list may contain a newer version +// of lost. +// +// buildListForLostUpgrade is used after a downgrade has removed a module +// requested at a specific version. This helps us understand the requirements +// implied by each downgrade. +func buildListForLostUpgrade(lost module.Version, reqs mvs.Reqs) ([]module.Version, error) { + return mvs.BuildList(lostUpgradeRoot, &lostUpgradeReqs{Reqs: reqs, lost: lost}) +} + +var lostUpgradeRoot = module.Version{Path: "lost-upgrade-root", Version: ""} + +type lostUpgradeReqs struct { + mvs.Reqs + lost module.Version +} + +func (r *lostUpgradeReqs) Required(mod module.Version) ([]module.Version, error) { + if mod == lostUpgradeRoot { + return []module.Version{r.lost}, nil + } + return r.Reqs.Required(mod) +} -- cgit v1.2.3-54-g00ecf From 564b350c08a1906e8f6a876fef4cca71f6516d4c Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Mon, 27 Jul 2020 12:57:36 -0400 Subject: cmd/go/internal/modload: rename LoadBuildList and BuildList MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With lazy loading, the “build list” can be refined as packages are loaded. Rename functions that return the build list to more precisely describe the set of modules returned by the call. Also eliminate a redundant call to LoadBuildList (right before ListModules, which itself begins with the same call). For #36460 Change-Id: I0fc4f9dd7602e0df5e166e329ee5d516d810ca53 Reviewed-on: https://go-review.googlesource.com/c/go/+/249878 Run-TryBot: Bryan C. 
Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod Reviewed-by: Michael Matloob --- src/cmd/go/internal/list/list.go | 2 -- src/cmd/go/internal/modcmd/graph.go | 2 +- src/cmd/go/internal/modcmd/vendor.go | 2 +- src/cmd/go/internal/modcmd/verify.go | 2 +- src/cmd/go/internal/modget/get.go | 10 +++---- src/cmd/go/internal/modload/build.go | 2 +- src/cmd/go/internal/modload/buildlist.go | 47 ++++++++++++++++++-------------- src/cmd/go/internal/modload/list.go | 2 +- 8 files changed, 36 insertions(+), 33 deletions(-) diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go index 65003dc883..23500dd9d8 100644 --- a/src/cmd/go/internal/list/list.go +++ b/src/cmd/go/internal/list/list.go @@ -437,8 +437,6 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { } } - modload.LoadBuildList(ctx) - mods := modload.ListModules(ctx, args, *listU, *listVersions, *listRetracted) if !*listE { for _, m := range mods { diff --git a/src/cmd/go/internal/modcmd/graph.go b/src/cmd/go/internal/modcmd/graph.go index 6da12b9cab..513536a010 100644 --- a/src/cmd/go/internal/modcmd/graph.go +++ b/src/cmd/go/internal/modcmd/graph.go @@ -48,7 +48,7 @@ func runGraph(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go: cannot find main module; see 'go help modules'") } } - modload.LoadBuildList(ctx) + modload.LoadAllModules(ctx) reqs := modload.MinReqs() format := func(m module.Version) string { diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go index e5353b5c7f..30334f3a42 100644 --- a/src/cmd/go/internal/modcmd/vendor.go +++ b/src/cmd/go/internal/modcmd/vendor.go @@ -77,7 +77,7 @@ func runVendor(ctx context.Context, cmd *base.Command, args []string) { } var buf bytes.Buffer - for _, m := range modload.BuildList()[1:] { + for _, m := range modload.LoadedModules()[1:] { if pkgs := modpkgs[m]; len(pkgs) > 0 || isExplicit[m] { line := moduleLine(m, modload.Replacement(m)) buf.WriteString(line) 
diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go index 73ab714d10..d542825823 100644 --- a/src/cmd/go/internal/modcmd/verify.go +++ b/src/cmd/go/internal/modcmd/verify.go @@ -60,7 +60,7 @@ func runVerify(ctx context.Context, cmd *base.Command, args []string) { sem := make(chan token, runtime.GOMAXPROCS(0)) // Use a slice of result channels, so that the output is deterministic. - mods := modload.LoadBuildList(ctx)[1:] + mods := modload.LoadAllModules(ctx)[1:] errsChans := make([]<-chan []error, len(mods)) for i, mod := range mods { diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index cf9ad66b3d..a2a8287d84 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -278,7 +278,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { } modload.LoadTests = *getT - buildList := modload.LoadBuildList(ctx) + buildList := modload.LoadAllModules(ctx) buildList = buildList[:len(buildList):len(buildList)] // copy on append versionByPath := make(map[string]string) for _, m := range buildList { @@ -599,7 +599,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { base.ExitIfErrors() // Stop if no changes have been made to the build list. - buildList = modload.BuildList() + buildList = modload.LoadedModules() eq := len(buildList) == len(prevBuildList) for i := 0; eq && i < len(buildList); i++ { eq = buildList[i] == prevBuildList[i] @@ -617,7 +617,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { // Handle downgrades. 
var down []module.Version - for _, m := range modload.BuildList() { + for _, m := range modload.LoadedModules() { q := byPath[m.Path] if q != nil && semver.Compare(m.Version, q.m.Version) > 0 { down = append(down, module.Version{Path: m.Path, Version: q.m.Version}) @@ -641,7 +641,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { var lostUpgrades []*query if len(down) > 0 { versionByPath = make(map[string]string) - for _, m := range modload.BuildList() { + for _, m := range modload.LoadedModules() { versionByPath[m.Path] = m.Version } for _, q := range byPath { @@ -892,7 +892,7 @@ func reportRetractions(ctx context.Context) { // Use modload.ListModules, since that provides information in the same format // as 'go list -m'. Don't query for "all", since that's not allowed outside a // module. - buildList := modload.BuildList() + buildList := modload.LoadedModules() args := make([]string, 0, len(buildList)) for _, m := range buildList { if m.Version == "" { diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index e9f9a82fab..9ca6230500 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -76,7 +76,7 @@ func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic { return moduleInfo(ctx, m, fromBuildList, listRetracted) } - for _, m := range BuildList() { + for _, m := range LoadedModules() { if m.Path == path { fromBuildList := true return moduleInfo(ctx, m, fromBuildList, listRetracted) diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go index 2302b044e8..581a1b944a 100644 --- a/src/cmd/go/internal/modload/buildlist.go +++ b/src/cmd/go/internal/modload/buildlist.go @@ -27,34 +27,28 @@ import ( // var buildList []module.Version -// LoadBuildList loads and returns the build list from go.mod. 
-// The loading of the build list happens automatically in ImportPaths: -// LoadBuildList need only be called if ImportPaths is not -// (typically in commands that care about the module but -// no particular package). -func LoadBuildList(ctx context.Context) []module.Version { +// LoadAllModules loads and returns the list of modules matching the "all" +// module pattern, starting with the Target module and in a deterministic +// (stable) order, without loading any packages. +// +// Modules are loaded automatically (and lazily) in ImportPaths: +// LoadAllModules need only be called if ImportPaths is not, +// typically in commands that care about modules but no particular package. +// +// The caller must not modify the returned list. +func LoadAllModules(ctx context.Context) []module.Version { InitMod(ctx) ReloadBuildList() WriteGoMod() return buildList } -// ReloadBuildList resets the state of loaded packages, then loads and returns -// the build list set in SetBuildList. -func ReloadBuildList() []module.Version { - loaded = loadFromRoots(loaderParams{ - tags: imports.Tags(), - listRoots: func() []string { return nil }, - allClosesOverTests: index.allPatternClosesOverTests(), // but doesn't matter because the root list is empty. - }) - return buildList -} - -// BuildList returns the module build list, -// typically constructed by a previous call to -// LoadBuildList or ImportPaths. +// LoadedModules returns the list of module requirements loaded or set by a +// previous call (typically LoadAllModules or ImportPaths), starting with the +// Target module and in a deterministic (stable) order. +// // The caller must not modify the returned list. -func BuildList() []module.Version { +func LoadedModules() []module.Version { return buildList } @@ -65,6 +59,17 @@ func SetBuildList(list []module.Version) { buildList = append([]module.Version{}, list...) } +// ReloadBuildList resets the state of loaded packages, then loads and returns +// the build list set in SetBuildList. 
+func ReloadBuildList() []module.Version { + loaded = loadFromRoots(loaderParams{ + tags: imports.Tags(), + listRoots: func() []string { return nil }, + allClosesOverTests: index.allPatternClosesOverTests(), // but doesn't matter because the root list is empty. + }) + return buildList +} + // TidyBuildList trims the build list to the minimal requirements needed to // retain the same versions of all packages from the preceding Load* or // ImportPaths* call. diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go index 8c7b9a3950..3491f941cd 100644 --- a/src/cmd/go/internal/modload/list.go +++ b/src/cmd/go/internal/modload/list.go @@ -58,7 +58,7 @@ func ListModules(ctx context.Context, args []string, listU, listVersions, listRe } func listModules(ctx context.Context, args []string, listVersions, listRetracted bool) []*modinfo.ModulePublic { - LoadBuildList(ctx) + LoadAllModules(ctx) if len(args) == 0 { return []*modinfo.ModulePublic{moduleInfo(ctx, buildList[0], true, listRetracted)} } -- cgit v1.2.3-54-g00ecf From aa476ba6f43ebc4e7ddb6599a7ad35d9fbf1ec6d Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Tue, 1 Sep 2020 00:34:03 -0400 Subject: cmd/go/internal/modload: refactor pathInModuleCache I found the control flow of this function a bit tricky to reason about due to nesting and interaction between conditions and iteration. This change factors out a helper function that can return early instead of mixing conditionals and 'continue' statements. Also remove the (unused) ModuleUsedDirectly function. For #36460 Change-Id: I60a2a5a1b32989e5a17a14e1a8c858b280cda8f2 Reviewed-on: https://go-review.googlesource.com/c/go/+/251998 Run-TryBot: Bryan C. 
Mills TryBot-Result: Gobot Gobot Reviewed-by: Jay Conrod Reviewed-by: Michael Matloob --- src/cmd/go/internal/modload/load.go | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 6050646594..1664d8c5be 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -374,7 +374,7 @@ var ( // pathInModuleCache returns the import path of the directory dir, // if dir is in the module cache copy of a module in our build list. func pathInModuleCache(dir string) string { - for _, m := range buildList[1:] { + tryMod := func(m module.Version) (string, bool) { var root string var err error if repl := Replacement(m); repl.Path != "" && repl.Version == "" { @@ -388,13 +388,26 @@ func pathInModuleCache(dir string) string { root, err = modfetch.DownloadDir(m) } if err != nil { - continue + return "", false } - if sub := search.InDir(dir, root); sub != "" { - sub = filepath.ToSlash(sub) - if !strings.Contains(sub, "/vendor/") && !strings.HasPrefix(sub, "vendor/") && !strings.Contains(sub, "@") { - return path.Join(m.Path, filepath.ToSlash(sub)) - } + + sub := search.InDir(dir, root) + if sub == "" { + return "", false + } + sub = filepath.ToSlash(sub) + if strings.Contains(sub, "/vendor/") || strings.HasPrefix(sub, "vendor/") || strings.Contains(sub, "@") { + return "", false + } + + return path.Join(m.Path, filepath.ToSlash(sub)), true + } + + for _, m := range buildList[1:] { + if importPath, ok := tryMod(m); ok { + // checkMultiplePaths ensures that a module can be used for at most one + // requirement, so this must be it. + return importPath } } return "" @@ -568,12 +581,6 @@ func PackageImports(path string) (imports, testImports []string) { return imports, testImports } -// ModuleUsedDirectly reports whether the main module directly imports -// some package in the module with the given path. 
-func ModuleUsedDirectly(path string) bool { - return loaded.direct[path] -} - // Lookup returns the source directory, import path, and any loading error for // the package at path as imported from the package in parentDir. // Lookup requires that one of the Load functions in this package has already -- cgit v1.2.3-54-g00ecf From dfdc3880b01d46d1d8125ab9eea0606b2fa5b819 Mon Sep 17 00:00:00 2001 From: fanzha02 Date: Thu, 20 Aug 2020 17:02:18 +0800 Subject: cmd/internal/obj/arm64: enable some SIMD instructions Enable VBSL, VBIT, VCMTST, VUXTL VUXTL2 and FMOVQ SIMD instructions required by the issue #40725. And FMOVQ instrucion is used to move a large constant to a Vn register. Add test cases. Fixes #40725 Change-Id: I1cac1922a0a0165d698a4b73a41f7a5f0a0ad549 Reviewed-on: https://go-review.googlesource.com/c/go/+/249758 Reviewed-by: Cherry Zhang --- src/cmd/asm/internal/asm/testdata/arm64.s | 15 +++ src/cmd/asm/internal/asm/testdata/arm64error.s | 5 + src/cmd/internal/obj/arm64/a.out.go | 6 ++ src/cmd/internal/obj/arm64/anames.go | 6 ++ src/cmd/internal/obj/arm64/asm7.go | 121 ++++++++++++++++++++++--- 5 files changed, 139 insertions(+), 14 deletions(-) diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s index f0c716a2b5..451ca749ba 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64.s +++ b/src/cmd/asm/internal/asm/testdata/arm64.s @@ -145,6 +145,17 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 VZIP2 V10.D2, V13.D2, V3.D2 // a379ca4e VZIP1 V17.S2, V4.S2, V26.S2 // 9a38910e VZIP2 V25.S2, V14.S2, V25.S2 // d979990e + VUXTL V30.B8, V30.H8 // dea7082f + VUXTL V30.H4, V29.S4 // dda7102f + VUXTL V29.S2, V2.D2 // a2a7202f + VUXTL2 V30.H8, V30.S4 // dea7106f + VUXTL2 V29.S4, V2.D2 // a2a7206f + VUXTL2 V30.B16, V2.H8 // c2a7086f + VBIT V21.B16, V25.B16, V4.B16 // 241fb56e + VBSL V23.B16, V3.B16, V7.B16 // 671c776e + VCMTST V2.B8, V29.B8, V2.B8 // a28f220e + VCMTST V2.D2, V23.D2, V3.D2 // e38ee24e + VSUB V2.B8, V30.B8, V30.B8 // de87222e 
MOVD (R2)(R6.SXTW), R4 // 44c866f8 MOVD (R3)(R6), R5 // MOVD (R3)(R6*1), R5 // 656866f8 MOVD (R2)(R6), R4 // MOVD (R2)(R6*1), R4 // 446866f8 @@ -186,6 +197,10 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 FMOVS $(0.96875), F3 // 03f02d1e FMOVD $(28.0), F4 // 0490671e +// move a large constant to a Vd. + FMOVD $0x8040201008040201, V20 // FMOVD $-9205322385119247871, V20 + FMOVQ $0x8040201008040202, V29 // FMOVQ $-9205322385119247870, V29 + FMOVS (R2)(R6), F4 // FMOVS (R2)(R6*1), F4 // 446866bc FMOVS (R2)(R6<<2), F4 // 447866bc FMOVD (R2)(R6), F4 // FMOVD (R2)(R6*1), F4 // 446866fc diff --git a/src/cmd/asm/internal/asm/testdata/arm64error.s b/src/cmd/asm/internal/asm/testdata/arm64error.s index 9f377817a9..2a911b4cce 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64error.s +++ b/src/cmd/asm/internal/asm/testdata/arm64error.s @@ -340,4 +340,9 @@ TEXT errors(SB),$0 MRS PMSWINC_EL0, R3 // ERROR "system register is not readable" MRS OSLAR_EL1, R3 // ERROR "system register is not readable" VLD3R.P 24(R15), [V15.H4,V16.H4,V17.H4] // ERROR "invalid post-increment offset" + VBIT V1.H4, V12.H4, V3.H4 // ERROR "invalid arrangement" + VBSL V1.D2, V12.D2, V3.D2 // ERROR "invalid arrangement" + VUXTL V30.D2, V30.H8 // ERROR "operand mismatch" + VUXTL2 V20.B8, V21.H8 // ERROR "operand mismatch" + VUXTL V3.D2, V4.B8 // ERROR "operand mismatch" RET diff --git a/src/cmd/internal/obj/arm64/a.out.go b/src/cmd/internal/obj/arm64/a.out.go index 03e0278a33..ab065e07e5 100644 --- a/src/cmd/internal/obj/arm64/a.out.go +++ b/src/cmd/internal/obj/arm64/a.out.go @@ -874,6 +874,7 @@ const ( AFLDPS AFMOVD AFMOVS + AFMOVQ AFMULD AFMULS AFNEGD @@ -987,9 +988,14 @@ const ( AVUSHR AVSHL AVSRI + AVBSL + AVBIT AVTBL AVZIP1 AVZIP2 + AVCMTST + AVUXTL + AVUXTL2 ALAST AB = obj.AJMP ABL = obj.ACALL diff --git a/src/cmd/internal/obj/arm64/anames.go b/src/cmd/internal/obj/arm64/anames.go index 65ecd007ea..8961f04b0c 100644 --- a/src/cmd/internal/obj/arm64/anames.go +++ b/src/cmd/internal/obj/arm64/anames.go @@ 
-381,6 +381,7 @@ var Anames = []string{ "FLDPS", "FMOVD", "FMOVS", + "FMOVQ", "FMULD", "FMULS", "FNEGD", @@ -494,8 +495,13 @@ var Anames = []string{ "VUSHR", "VSHL", "VSRI", + "VBSL", + "VBIT", "VTBL", "VZIP1", "VZIP2", + "VCMTST", + "VUXTL", + "VUXTL2", "LAST", } diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 0b90e31392..7ce18d0f13 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -393,6 +393,11 @@ var optab = []Optab{ {AMOVK, C_VCON, C_NONE, C_NONE, C_REG, 33, 4, 0, 0, 0}, {AMOVD, C_AACON, C_NONE, C_NONE, C_REG, 4, 4, REGFROM, 0, 0}, + // Move a large constant to a Vn. + {AFMOVQ, C_VCON, C_NONE, C_NONE, C_VREG, 101, 4, 0, LFROM, 0}, + {AFMOVD, C_VCON, C_NONE, C_NONE, C_VREG, 101, 4, 0, LFROM, 0}, + {AFMOVS, C_LCON, C_NONE, C_NONE, C_VREG, 101, 4, 0, LFROM, 0}, + /* jump operations */ {AB, C_NONE, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, {ABL, C_NONE, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, @@ -403,12 +408,14 @@ var optab = []Optab{ {obj.ARET, C_NONE, C_NONE, C_NONE, C_REG, 6, 4, 0, 0, 0}, {obj.ARET, C_NONE, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0}, {ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 7, 4, 0, 0, 0}, - {AADRP, C_SBRA, C_NONE, C_NONE, C_REG, 60, 4, 0, 0, 0}, - {AADR, C_SBRA, C_NONE, C_NONE, C_REG, 61, 4, 0, 0, 0}, {ACBZ, C_REG, C_NONE, C_NONE, C_SBRA, 39, 4, 0, 0, 0}, {ATBZ, C_VCON, C_REG, C_NONE, C_SBRA, 40, 4, 0, 0, 0}, {AERET, C_NONE, C_NONE, C_NONE, C_NONE, 41, 4, 0, 0, 0}, + // get a PC-relative address + {AADRP, C_SBRA, C_NONE, C_NONE, C_REG, 60, 4, 0, 0, 0}, + {AADR, C_SBRA, C_NONE, C_NONE, C_REG, 61, 4, 0, 0, 0}, + {ACLREX, C_NONE, C_NONE, C_NONE, C_VCON, 38, 4, 0, 0, 0}, {ACLREX, C_NONE, C_NONE, C_NONE, C_NONE, 38, 4, 0, 0, 0}, {ABFM, C_VCON, C_REG, C_VCON, C_REG, 42, 4, 0, 0, 0}, @@ -473,6 +480,7 @@ var optab = []Optab{ {AVTBL, C_ARNG, C_NONE, C_LIST, C_ARNG, 100, 4, 0, 0, 0}, {AVUSHR, C_VCON, C_ARNG, C_NONE, C_ARNG, 95, 4, 0, 0, 0}, {AVZIP1, C_ARNG, C_ARNG, C_NONE, 
C_ARNG, 72, 4, 0, 0, 0}, + {AVUXTL, C_ARNG, C_NONE, C_NONE, C_ARNG, 102, 4, 0, 0, 0}, /* conditional operations */ {ACSEL, C_COND, C_REG, C_REG, C_REG, 18, 4, 0, 0, 0}, @@ -2657,7 +2665,7 @@ func buildop(ctxt *obj.Link) { case AFCSELD: oprangeset(AFCSELS, t) - case AFMOVS, AFMOVD: + case AFMOVS, AFMOVD, AFMOVQ: break case AFCVTZSD: @@ -2740,6 +2748,9 @@ func buildop(ctxt *obj.Link) { oprangeset(AVCMEQ, t) oprangeset(AVORR, t) oprangeset(AVEOR, t) + oprangeset(AVBSL, t) + oprangeset(AVBIT, t) + oprangeset(AVCMTST, t) case AVADD: oprangeset(AVSUB, t) @@ -2787,6 +2798,9 @@ func buildop(ctxt *obj.Link) { case AVZIP1: oprangeset(AVZIP2, t) + case AVUXTL: + oprangeset(AVUXTL2, t) + case AVLD1R: oprangeset(AVLD2, t) oprangeset(AVLD2R, t) @@ -4163,7 +4177,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { rel.Add = 0 rel.Type = objabi.R_ARM64_GOTPCREL - case 72: /* vaddp/vand/vcmeq/vorr/vadd/veor/vfmla/vfmls Vm., Vn., Vd. */ + case 72: /* vaddp/vand/vcmeq/vorr/vadd/veor/vfmla/vfmls/vbit/vbsl/vcmtst/vsub Vm., Vn., Vd. 
*/ af := int((p.From.Reg >> 5) & 15) af3 := int((p.Reg >> 5) & 15) at := int((p.To.Reg >> 5) & 15) @@ -4204,17 +4218,24 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { c.ctxt.Diag("invalid arrangement: %v", p) } - if (p.As == AVORR || p.As == AVAND || p.As == AVEOR) && - (af != ARNG_16B && af != ARNG_8B) { - c.ctxt.Diag("invalid arrangement: %v", p) - } else if (p.As == AVFMLA || p.As == AVFMLS) && - (af != ARNG_2D && af != ARNG_2S && af != ARNG_4S) { - c.ctxt.Diag("invalid arrangement: %v", p) - } else if p.As == AVORR { - size = 2 - } else if p.As == AVAND || p.As == AVEOR { + switch p.As { + case AVORR, AVAND, AVEOR, AVBIT, AVBSL: + if af != ARNG_16B && af != ARNG_8B { + c.ctxt.Diag("invalid arrangement: %v", p) + } + case AVFMLA, AVFMLS: + if af != ARNG_2D && af != ARNG_2S && af != ARNG_4S { + c.ctxt.Diag("invalid arrangement: %v", p) + } + } + switch p.As { + case AVAND, AVEOR: size = 0 - } else if p.As == AVFMLA || p.As == AVFMLS { + case AVBSL: + size = 1 + case AVORR, AVBIT: + size = 2 + case AVFMLA, AVFMLS: if af == ARNG_2D { size = 1 } else { @@ -5096,6 +5117,59 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 = q<<30 | 0xe<<24 | len<<13 o1 |= (uint32(rf&31) << 16) | uint32(offset&31)<<5 | uint32(rt&31) + case 101: // FOMVQ/FMOVD $vcon, Vd -> load from constant pool. + o1 = c.omovlit(p.As, p, &p.From, int(p.To.Reg)) + + case 102: // VUXTL{2} Vn., Vd. 
+ af := int((p.From.Reg >> 5) & 15) + at := int((p.To.Reg >> 5) & 15) + var Q, immh uint32 + switch at { + case ARNG_8H: + if af == ARNG_8B { + immh = 1 + Q = 0 + } else if af == ARNG_16B { + immh = 1 + Q = 1 + } else { + c.ctxt.Diag("operand mismatch: %v\n", p) + } + case ARNG_4S: + if af == ARNG_4H { + immh = 2 + Q = 0 + } else if af == ARNG_8H { + immh = 2 + Q = 1 + } else { + c.ctxt.Diag("operand mismatch: %v\n", p) + } + case ARNG_2D: + if af == ARNG_2S { + immh = 4 + Q = 0 + } else if af == ARNG_4S { + immh = 4 + Q = 1 + } else { + c.ctxt.Diag("operand mismatch: %v\n", p) + } + default: + c.ctxt.Diag("operand mismatch: %v\n", p) + } + + if p.As == AVUXTL && Q == 1 { + c.ctxt.Diag("operand mismatch: %v\n", p) + } + if p.As == AVUXTL2 && Q == 0 { + c.ctxt.Diag("operand mismatch: %v\n", p) + } + + o1 = c.oprrr(p, p.As) + rf := int((p.From.Reg) & 31) + rt := int((p.To.Reg) & 31) + o1 |= Q<<30 | immh<<19 | uint32((rf&31)<<5) | uint32(rt&31) } out[0] = o1 out[1] = o2 @@ -5662,6 +5736,9 @@ func (c *ctxt7) oprrr(p *obj.Prog, a obj.As) uint32 { case AVADD: return 7<<25 | 1<<21 | 1<<15 | 1<<10 + case AVSUB: + return 0x17<<25 | 1<<21 | 1<<15 | 1<<10 + case AVADDP: return 7<<25 | 1<<21 | 1<<15 | 15<<10 @@ -5724,6 +5801,18 @@ func (c *ctxt7) oprrr(p *obj.Prog, a obj.As) uint32 { case AVLD2R, AVLD4R: return 0xD<<24 | 3<<21 + + case AVBIT: + return 1<<29 | 0x75<<21 | 7<<10 + + case AVBSL: + return 1<<29 | 0x73<<21 | 7<<10 + + case AVCMTST: + return 0xE<<24 | 1<<21 | 0x23<<10 + + case AVUXTL, AVUXTL2: + return 0x5e<<23 | 0x29<<10 } c.ctxt.Diag("%v: bad rrr %d %v", p, a, a) @@ -6566,6 +6655,10 @@ func (c *ctxt7) omovlit(as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32 { fp = 1 w = 1 /* 64-bit SIMD/FP */ + case AFMOVQ: + fp = 1 + w = 2 /* 128-bit SIMD/FP */ + case AMOVD: if p.Pool.As == ADWORD { w = 1 /* 64-bit */ -- cgit v1.2.3-54-g00ecf From 07d19b2597af253ed78ef43ba6e7a49db9a8f4ba Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Thu, 10 Sep 2020 13:21:41 +0700 Subject: 
all: check GOROOT_BOOTSTRAP executable before bootsrappping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise we’d still be writing that we're building cmd/dist even if GOROOT_BOOTSTRAP we’re wrongly set. Change-Id: I940e18c3cebae5664f85babc9919e9eb215d5093 Reviewed-on: https://go-review.googlesource.com/c/go/+/253877 Run-TryBot: Cuong Manh Le TryBot-Result: Gobot Gobot Reviewed-by: Emmanuel Odeke --- src/make.bash | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/make.bash b/src/make.bash index 880a0f43d5..3a84658c72 100755 --- a/src/make.bash +++ b/src/make.bash @@ -162,16 +162,16 @@ IFS=$'\n'; for go_exe in $(type -ap go); do fi fi done; unset IFS -GOROOT_BOOTSTRAP_VERSION=$($GOROOT_BOOTSTRAP/bin/go version | sed 's/go version //') -echo "Building Go cmd/dist using $GOROOT_BOOTSTRAP. ($GOROOT_BOOTSTRAP_VERSION)" -if $verbose; then - echo cmd/dist -fi if [ ! -x "$GOROOT_BOOTSTRAP/bin/go" ]; then echo "ERROR: Cannot find $GOROOT_BOOTSTRAP/bin/go." >&2 echo "Set \$GOROOT_BOOTSTRAP to a working Go tree >= Go 1.4." >&2 exit 1 fi +GOROOT_BOOTSTRAP_VERSION=$($GOROOT_BOOTSTRAP/bin/go version | sed 's/go version //') +echo "Building Go cmd/dist using $GOROOT_BOOTSTRAP. ($GOROOT_BOOTSTRAP_VERSION)" +if $verbose; then + echo cmd/dist +fi if [ "$GOROOT_BOOTSTRAP" = "$GOROOT" ]; then echo "ERROR: \$GOROOT_BOOTSTRAP must not be set to \$GOROOT" >&2 echo "Set \$GOROOT_BOOTSTRAP to a working Go tree >= Go 1.4." >&2 -- cgit v1.2.3-54-g00ecf From 8098dbb30e3d0d0b4d467f823c4bbdb8dcefc92f Mon Sep 17 00:00:00 2001 From: ipriver Date: Thu, 10 Sep 2020 10:08:21 +0000 Subject: runtime: update docs for GOMAXPROCS https://github.com/golang/go/blob/master/doc/effective_go.html#L3211 is used to update the docs comment for `GOMAXPROCS` function. 
Fixes #41275 Change-Id: I39f58e93a267c6e9f3ac6638ed51acbe5284ada2 GitHub-Last-Rev: e45c8ac5873979397d747838fd8d41e252aec489 GitHub-Pull-Request: golang/go#41276 Reviewed-on: https://go-review.googlesource.com/c/go/+/253537 Reviewed-by: Keith Randall --- src/runtime/debug.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/runtime/debug.go b/src/runtime/debug.go index 76eeb2e41a..f411b22676 100644 --- a/src/runtime/debug.go +++ b/src/runtime/debug.go @@ -10,9 +10,8 @@ import ( ) // GOMAXPROCS sets the maximum number of CPUs that can be executing -// simultaneously and returns the previous setting. If n < 1, it does not -// change the current setting. -// The number of logical CPUs on the local machine can be queried with NumCPU. +// simultaneously and returns the previous setting. It defaults to +// the value of runtime.NumCPU. If n < 1, it does not change the current setting. // This call will go away when the scheduler improves. func GOMAXPROCS(n int) int { if GOARCH == "wasm" && n > 1 { -- cgit v1.2.3-54-g00ecf From 9b2df72b63ff977004756e9b847f926b4fb8d8a8 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 10 Sep 2020 11:07:48 -0400 Subject: cmd/link: add copyright header Change-Id: I44f57019bb8e659d4aa3da8b13e8bd9a20b9d2e1 Reviewed-on: https://go-review.googlesource.com/c/go/+/253920 Reviewed-by: Than McIntosh --- src/cmd/link/link_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go index 72ff01c932..98798be465 100644 --- a/src/cmd/link/link_test.go +++ b/src/cmd/link/link_test.go @@ -1,3 +1,7 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ package main import ( -- cgit v1.2.3-54-g00ecf From d7ab277eed4d2e5ede4f3361adf42d4ad76ced8f Mon Sep 17 00:00:00 2001 From: Junchen Li Date: Mon, 31 Aug 2020 13:32:33 +0800 Subject: cmd/asm: add more SIMD instructions on arm64 This CL adds USHLL, USHLL2, UZP1, UZP2, and BIF instructions requested by #40725. And since UXTL* are aliases of USHLL*, this CL also merges them into one case. Updates #40725 Change-Id: I404a4fdaf953319f72eea548175bec1097a2a816 Reviewed-on: https://go-review.googlesource.com/c/go/+/253659 Reviewed-by: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot --- src/cmd/asm/internal/asm/testdata/arm64.s | 20 +++++ src/cmd/asm/internal/asm/testdata/arm64error.s | 8 ++ src/cmd/internal/obj/arm64/a.out.go | 9 +- src/cmd/internal/obj/arm64/anames.go | 9 +- src/cmd/internal/obj/arm64/asm7.go | 109 +++++++++++++------------ 5 files changed, 100 insertions(+), 55 deletions(-) diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s index 451ca749ba..e106ff2ae1 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64.s +++ b/src/cmd/asm/internal/asm/testdata/arm64.s @@ -156,6 +156,26 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 VCMTST V2.B8, V29.B8, V2.B8 // a28f220e VCMTST V2.D2, V23.D2, V3.D2 // e38ee24e VSUB V2.B8, V30.B8, V30.B8 // de87222e + VUZP1 V0.B8, V30.B8, V1.B8 // c11b000e + VUZP1 V1.B16, V29.B16, V2.B16 // a21b014e + VUZP1 V2.H4, V28.H4, V3.H4 // 831b420e + VUZP1 V3.H8, V27.H8, V4.H8 // 641b434e + VUZP1 V28.S2, V2.S2, V5.S2 // 45189c0e + VUZP1 V29.S4, V1.S4, V6.S4 // 26189d4e + VUZP1 V30.D2, V0.D2, V7.D2 // 0718de4e + VUZP2 V0.D2, V30.D2, V1.D2 // c15bc04e + VUZP2 V30.D2, V0.D2, V29.D2 // 1d58de4e + VUSHLL $0, V30.B8, V30.H8 // dea7082f + VUSHLL $0, V30.H4, V29.S4 // dda7102f + VUSHLL $0, V29.S2, V2.D2 // a2a7202f + VUSHLL2 $0, V30.B16, V2.H8 // c2a7086f + VUSHLL2 $0, V30.H8, V30.S4 // dea7106f + VUSHLL2 $0, V29.S4, V2.D2 // a2a7206f + VUSHLL $7, V30.B8, V30.H8 // dea70f2f + VUSHLL $15, V30.H4, 
V29.S4 // dda71f2f + VUSHLL2 $31, V30.S4, V2.D2 // c2a73f6f + VBIF V0.B8, V30.B8, V1.B8 // c11fe02e + VBIF V30.B16, V0.B16, V2.B16 // 021cfe6e MOVD (R2)(R6.SXTW), R4 // 44c866f8 MOVD (R3)(R6), R5 // MOVD (R3)(R6*1), R5 // 656866f8 MOVD (R2)(R6), R4 // MOVD (R2)(R6*1), R4 // 446866f8 diff --git a/src/cmd/asm/internal/asm/testdata/arm64error.s b/src/cmd/asm/internal/asm/testdata/arm64error.s index 2a911b4cce..20b1f3e9f0 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64error.s +++ b/src/cmd/asm/internal/asm/testdata/arm64error.s @@ -345,4 +345,12 @@ TEXT errors(SB),$0 VUXTL V30.D2, V30.H8 // ERROR "operand mismatch" VUXTL2 V20.B8, V21.H8 // ERROR "operand mismatch" VUXTL V3.D2, V4.B8 // ERROR "operand mismatch" + VUZP1 V0.B8, V30.B8, V1.B16 // ERROR "operand mismatch" + VUZP2 V0.Q1, V30.Q1, V1.Q1 // ERROR "invalid arrangement" + VUSHLL $0, V30.D2, V30.H8 // ERROR "operand mismatch" + VUSHLL2 $0, V20.B8, V21.H8 // ERROR "operand mismatch" + VUSHLL $8, V30.B8, V30.H8 // ERROR "shift amount out of range" + VUSHLL2 $32, V30.S4, V2.D2 // ERROR "shift amount out of range" + VBIF V0.B8, V1.B8, V2.B16 // ERROR "operand mismatch" + VBIF V0.D2, V1.D2, V2.D2 // ERROR "invalid arrangement" RET diff --git a/src/cmd/internal/obj/arm64/a.out.go b/src/cmd/internal/obj/arm64/a.out.go index ab065e07e5..2839da1437 100644 --- a/src/cmd/internal/obj/arm64/a.out.go +++ b/src/cmd/internal/obj/arm64/a.out.go @@ -954,6 +954,7 @@ const ( AVADD AVADDP AVAND + AVBIF AVCMEQ AVCNT AVEOR @@ -986,6 +987,12 @@ const ( AVEXT AVRBIT AVUSHR + AVUSHLL + AVUSHLL2 + AVUXTL + AVUXTL2 + AVUZP1 + AVUZP2 AVSHL AVSRI AVBSL @@ -994,8 +1001,6 @@ const ( AVZIP1 AVZIP2 AVCMTST - AVUXTL - AVUXTL2 ALAST AB = obj.AJMP ABL = obj.ACALL diff --git a/src/cmd/internal/obj/arm64/anames.go b/src/cmd/internal/obj/arm64/anames.go index 8961f04b0c..48c066abfd 100644 --- a/src/cmd/internal/obj/arm64/anames.go +++ b/src/cmd/internal/obj/arm64/anames.go @@ -461,6 +461,7 @@ var Anames = []string{ "VADD", "VADDP", "VAND", + 
"VBIF", "VCMEQ", "VCNT", "VEOR", @@ -493,6 +494,12 @@ var Anames = []string{ "VEXT", "VRBIT", "VUSHR", + "VUSHLL", + "VUSHLL2", + "VUXTL", + "VUXTL2", + "VUZP1", + "VUZP2", "VSHL", "VSRI", "VBSL", @@ -501,7 +508,5 @@ var Anames = []string{ "VZIP1", "VZIP2", "VCMTST", - "VUXTL", - "VUXTL2", "LAST", } diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 7ce18d0f13..df4bbbbd35 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -480,6 +480,7 @@ var optab = []Optab{ {AVTBL, C_ARNG, C_NONE, C_LIST, C_ARNG, 100, 4, 0, 0, 0}, {AVUSHR, C_VCON, C_ARNG, C_NONE, C_ARNG, 95, 4, 0, 0, 0}, {AVZIP1, C_ARNG, C_ARNG, C_NONE, C_ARNG, 72, 4, 0, 0, 0}, + {AVUSHLL, C_VCON, C_ARNG, C_NONE, C_ARNG, 102, 4, 0, 0, 0}, {AVUXTL, C_ARNG, C_NONE, C_NONE, C_ARNG, 102, 4, 0, 0, 0}, /* conditional operations */ @@ -2751,6 +2752,9 @@ func buildop(ctxt *obj.Link) { oprangeset(AVBSL, t) oprangeset(AVBIT, t) oprangeset(AVCMTST, t) + oprangeset(AVUZP1, t) + oprangeset(AVUZP2, t) + oprangeset(AVBIF, t) case AVADD: oprangeset(AVSUB, t) @@ -2801,6 +2805,9 @@ func buildop(ctxt *obj.Link) { case AVUXTL: oprangeset(AVUXTL2, t) + case AVUSHLL: + oprangeset(AVUSHLL2, t) + case AVLD1R: oprangeset(AVLD2, t) oprangeset(AVLD2R, t) @@ -4177,7 +4184,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { rel.Add = 0 rel.Type = objabi.R_ARM64_GOTPCREL - case 72: /* vaddp/vand/vcmeq/vorr/vadd/veor/vfmla/vfmls/vbit/vbsl/vcmtst/vsub Vm., Vn., Vd. */ + case 72: /* vaddp/vand/vcmeq/vorr/vadd/veor/vfmla/vfmls/vbit/vbsl/vcmtst/vsub/vbif/vuzip1/vuzip2 Vm., Vn., Vd. 
*/ af := int((p.From.Reg >> 5) & 15) af3 := int((p.Reg >> 5) & 15) at := int((p.To.Reg >> 5) & 15) @@ -4219,7 +4226,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { } switch p.As { - case AVORR, AVAND, AVEOR, AVBIT, AVBSL: + case AVORR, AVAND, AVEOR, AVBIT, AVBSL, AVBIF: if af != ARNG_16B && af != ARNG_8B { c.ctxt.Diag("invalid arrangement: %v", p) } @@ -4233,7 +4240,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { size = 0 case AVBSL: size = 1 - case AVORR, AVBIT: + case AVORR, AVBIT, AVBIF: size = 2 case AVFMLA, AVFMLS: if af == ARNG_2D { @@ -5120,56 +5127,44 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { case 101: // FOMVQ/FMOVD $vcon, Vd -> load from constant pool. o1 = c.omovlit(p.As, p, &p.From, int(p.To.Reg)) - case 102: // VUXTL{2} Vn., Vd. - af := int((p.From.Reg >> 5) & 15) - at := int((p.To.Reg >> 5) & 15) - var Q, immh uint32 - switch at { - case ARNG_8H: - if af == ARNG_8B { - immh = 1 - Q = 0 - } else if af == ARNG_16B { - immh = 1 - Q = 1 - } else { - c.ctxt.Diag("operand mismatch: %v\n", p) - } - case ARNG_4S: - if af == ARNG_4H { - immh = 2 - Q = 0 - } else if af == ARNG_8H { - immh = 2 - Q = 1 - } else { - c.ctxt.Diag("operand mismatch: %v\n", p) - } - case ARNG_2D: - if af == ARNG_2S { - immh = 4 - Q = 0 - } else if af == ARNG_4S { - immh = 4 - Q = 1 - } else { - c.ctxt.Diag("operand mismatch: %v\n", p) - } + case 102: /* vushll, vushll2, vuxtl, vuxtl2 */ + o1 = c.opirr(p, p.As) + rf := p.Reg + af := uint8((p.Reg >> 5) & 15) + at := uint8((p.To.Reg >> 5) & 15) + shift := int(p.From.Offset) + if p.As == AVUXTL || p.As == AVUXTL2 { + rf = p.From.Reg + af = uint8((p.From.Reg >> 5) & 15) + shift = 0 + } + + pack := func(q, x, y uint8) uint32 { + return uint32(q)<<16 | uint32(x)<<8 | uint32(y) + } + + var Q uint8 = uint8(o1>>30) & 1 + var immh, width uint8 + switch pack(Q, af, at) { + case pack(0, ARNG_8B, ARNG_8H): + immh, width = 1, 8 + case pack(1, ARNG_16B, ARNG_8H): + immh, width = 1, 8 + case 
pack(0, ARNG_4H, ARNG_4S): + immh, width = 2, 16 + case pack(1, ARNG_8H, ARNG_4S): + immh, width = 2, 16 + case pack(0, ARNG_2S, ARNG_2D): + immh, width = 4, 32 + case pack(1, ARNG_4S, ARNG_2D): + immh, width = 4, 32 default: c.ctxt.Diag("operand mismatch: %v\n", p) } - - if p.As == AVUXTL && Q == 1 { - c.ctxt.Diag("operand mismatch: %v\n", p) + if !(0 <= shift && shift <= int(width-1)) { + c.ctxt.Diag("shift amount out of range: %v\n", p) } - if p.As == AVUXTL2 && Q == 0 { - c.ctxt.Diag("operand mismatch: %v\n", p) - } - - o1 = c.oprrr(p, p.As) - rf := int((p.From.Reg) & 31) - rt := int((p.To.Reg) & 31) - o1 |= Q<<30 | immh<<19 | uint32((rf&31)<<5) | uint32(rt&31) + o1 |= uint32(immh)<<19 | uint32(shift)<<16 | uint32(rf&31)<<5 | uint32(p.To.Reg&31) } out[0] = o1 out[1] = o2 @@ -5802,6 +5797,9 @@ func (c *ctxt7) oprrr(p *obj.Prog, a obj.As) uint32 { case AVLD2R, AVLD4R: return 0xD<<24 | 3<<21 + case AVBIF: + return 1<<29 | 7<<25 | 7<<21 | 7<<10 + case AVBIT: return 1<<29 | 0x75<<21 | 7<<10 @@ -5811,8 +5809,11 @@ func (c *ctxt7) oprrr(p *obj.Prog, a obj.As) uint32 { case AVCMTST: return 0xE<<24 | 1<<21 | 0x23<<10 - case AVUXTL, AVUXTL2: - return 0x5e<<23 | 0x29<<10 + case AVUZP1: + return 7<<25 | 3<<11 + + case AVUZP2: + return 7<<25 | 1<<14 | 3<<11 } c.ctxt.Diag("%v: bad rrr %d %v", p, a, a) @@ -6011,6 +6012,12 @@ func (c *ctxt7) opirr(p *obj.Prog, a obj.As) uint32 { case AVSRI: return 0x5E<<23 | 17<<10 + + case AVUSHLL, AVUXTL: + return 1<<29 | 15<<24 | 0x29<<10 + + case AVUSHLL2, AVUXTL2: + return 3<<29 | 15<<24 | 0x29<<10 } c.ctxt.Diag("%v: bad irr %v", p, a) -- cgit v1.2.3-54-g00ecf From a1762c2cc67822d86cb37747a56f0d4a07d24ced Mon Sep 17 00:00:00 2001 From: eric fang Date: Wed, 13 May 2020 06:38:39 +0000 Subject: unicode/utf8: refactor benchmarks for FullRune function BenchmarkFullASCIIRune tests the performance of function utf8.FullRune, which will be inlined in BenchmarkFullASCIIRune. 
Since the return value of FullRune is not referenced, it will be removed as dead code. This CL makes the FullRune functions return value referenced by a global variable to avoid this point. In addition, this CL adds one more benchmark to cover more code paths, and puts them together as sub benchmarks of BenchmarkFullRune. Change-Id: I6e79f4c087adf70e351498a4b58d7482dcd1ec4a Reviewed-on: https://go-review.googlesource.com/c/go/+/233979 Run-TryBot: eric fang TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/unicode/utf8/utf8_test.go | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/src/unicode/utf8/utf8_test.go b/src/unicode/utf8/utf8_test.go index 359461bd05..eaf1b5ffee 100644 --- a/src/unicode/utf8/utf8_test.go +++ b/src/unicode/utf8/utf8_test.go @@ -597,16 +597,24 @@ func BenchmarkDecodeJapaneseRune(b *testing.B) { } } -func BenchmarkFullASCIIRune(b *testing.B) { - a := []byte{'a'} - for i := 0; i < b.N; i++ { - FullRune(a) - } -} - -func BenchmarkFullJapaneseRune(b *testing.B) { - nihon := []byte("本") - for i := 0; i < b.N; i++ { - FullRune(nihon) +// boolSink is used to reference the return value of benchmarked +// functions to avoid dead code elimination. +var boolSink bool + +func BenchmarkFullRune(b *testing.B) { + benchmarks := []struct { + name string + data []byte + }{ + {"ASCII", []byte("a")}, + {"Incomplete", []byte("\xf0\x90\x80")}, + {"Japanese", []byte("本")}, + } + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + boolSink = FullRune(bm.data) + } + }) } } -- cgit v1.2.3-54-g00ecf From d277a361231485999cc2b7433e3244e559c7d7da Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 10 Sep 2020 17:18:57 -0400 Subject: runtime: remove darwin/arm specifc code That port is gone. 
Change-Id: I212d435e290d1890d6cd5531be98bb692650595e Reviewed-on: https://go-review.googlesource.com/c/go/+/254077 Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/runtime/stack.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/runtime/stack.go b/src/runtime/stack.go index 403b3c313e..821c2e8436 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -66,7 +66,7 @@ const ( // to each stack below the usual guard area for OS-specific // purposes like signal handling. Used on Windows, Plan 9, // and iOS because they do not use a separate stack. - _StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024 + sys.GoosDarwin*sys.GoarchArm64*1024 + _StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm64*1024 // The minimum size of stack used by Go code _StackMin = 2048 -- cgit v1.2.3-54-g00ecf From 03a686069191e3515c7f27f6d90b66d272e0e3a2 Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Wed, 9 Sep 2020 15:58:05 -0400 Subject: cmd/go: update tests to work with -mod=readonly on by default For #40728 Change-Id: Ic2b025ff75c6e73c0cb58c1737e44e2a41c71571 Reviewed-on: https://go-review.googlesource.com/c/go/+/253837 Run-TryBot: Jay Conrod TryBot-Result: Gobot Gobot Reviewed-by: Michael Matloob Reviewed-by: Bryan C. 
Mills --- .../mod/example.com_retract_missingmod_v1.0.0.txt | 2 ++ src/cmd/go/testdata/script/mod_auth.txt | 2 +- src/cmd/go/testdata/script/mod_case.txt | 7 ++++++- src/cmd/go/testdata/script/mod_concurrent.txt | 1 + src/cmd/go/testdata/script/mod_doc.txt | 2 ++ src/cmd/go/testdata/script/mod_domain_root.txt | 2 +- src/cmd/go/testdata/script/mod_download.txt | 2 +- src/cmd/go/testdata/script/mod_download_partial.txt | 9 +++++++-- src/cmd/go/testdata/script/mod_get_incompatible.txt | 2 +- src/cmd/go/testdata/script/mod_get_indirect.txt | 2 +- .../go/testdata/script/mod_get_latest_pseudo.txt | 2 +- .../go/testdata/script/mod_get_trailing_slash.txt | 3 +++ src/cmd/go/testdata/script/mod_import.txt | 2 +- src/cmd/go/testdata/script/mod_in_testdata_dir.txt | 6 +++--- src/cmd/go/testdata/script/mod_init_dep.txt | 21 ++++----------------- .../go/testdata/script/mod_install_versioned.txt | 2 ++ src/cmd/go/testdata/script/mod_internal.txt | 14 ++++++++++---- src/cmd/go/testdata/script/mod_invalid_version.txt | 1 + src/cmd/go/testdata/script/mod_list.txt | 8 ++++---- src/cmd/go/testdata/script/mod_list_dir.txt | 5 +++++ src/cmd/go/testdata/script/mod_list_direct.txt | 2 +- src/cmd/go/testdata/script/mod_list_replace_dir.txt | 12 ++++++++++-- src/cmd/go/testdata/script/mod_list_upgrade.txt | 4 ++++ src/cmd/go/testdata/script/mod_load_badchain.txt | 4 ++-- src/cmd/go/testdata/script/mod_load_badmod.txt | 7 +++---- src/cmd/go/testdata/script/mod_load_badzip.txt | 4 +--- .../testdata/script/mod_missingpkg_prerelease.txt | 2 +- src/cmd/go/testdata/script/mod_modinfo.txt | 1 + src/cmd/go/testdata/script/mod_multirepo.txt | 1 + src/cmd/go/testdata/script/mod_notall.txt | 1 + src/cmd/go/testdata/script/mod_permissions.txt | 2 +- src/cmd/go/testdata/script/mod_query.txt | 10 ++++++++++ src/cmd/go/testdata/script/mod_replace.txt | 4 ++-- src/cmd/go/testdata/script/mod_replace_gopkgin.txt | 1 + src/cmd/go/testdata/script/mod_replace_import.txt | 1 + 
src/cmd/go/testdata/script/mod_require_exclude.txt | 8 ++++---- src/cmd/go/testdata/script/mod_retention.txt | 6 +++--- src/cmd/go/testdata/script/mod_retract.txt | 5 ++++- src/cmd/go/testdata/script/mod_retract_replace.txt | 14 ++++++++++++-- src/cmd/go/testdata/script/mod_sum_lookup.txt | 5 +++-- src/cmd/go/testdata/script/mod_sumdb_golang.txt | 4 ++-- src/cmd/go/testdata/script/mod_symlink.txt | 5 ++++- src/cmd/go/testdata/script/mod_test.txt | 1 + src/cmd/go/testdata/script/mod_tidy_replace.txt | 1 + src/cmd/go/testdata/script/mod_upgrade_patch.txt | 1 + src/cmd/go/testdata/script/mod_vcs_missing.txt | 4 ++-- src/cmd/go/testdata/script/mod_vendor_build.txt | 3 +++ src/cmd/go/testdata/script/mod_verify.txt | 2 +- src/cmd/go/testdata/script/mod_why.txt | 3 +++ src/cmd/go/testdata/script/modfile_flag.txt | 6 +++--- src/cmd/go/testdata/script/version.txt | 1 + src/cmd/go/testdata/script/version_replace.txt | 2 +- 52 files changed, 146 insertions(+), 76 deletions(-) diff --git a/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.0.0.txt b/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.0.0.txt index 2023c7b096..1d8d81071e 100644 --- a/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.0.0.txt +++ b/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.0.0.txt @@ -6,3 +6,5 @@ module example.com/retract/missingmod go 1.14 -- .info -- {"Version":"v1.0.0"} +-- missingmod.go -- +package missingmod diff --git a/src/cmd/go/testdata/script/mod_auth.txt b/src/cmd/go/testdata/script/mod_auth.txt index 5bcbcd1a18..544acbc1f8 100644 --- a/src/cmd/go/testdata/script/mod_auth.txt +++ b/src/cmd/go/testdata/script/mod_auth.txt @@ -7,7 +7,7 @@ env GOSUMDB=off # Without credentials, downloading a module from a path that requires HTTPS # basic auth should fail. env NETRC=$WORK/empty -! go list all +! go mod tidy stderr '^\tserver response: ACCESS DENIED, buddy$' stderr '^\tserver response: File\? 
What file\?$' diff --git a/src/cmd/go/testdata/script/mod_case.txt b/src/cmd/go/testdata/script/mod_case.txt index ee818c2c07..6f8d869c44 100644 --- a/src/cmd/go/testdata/script/mod_case.txt +++ b/src/cmd/go/testdata/script/mod_case.txt @@ -1,6 +1,6 @@ env GO111MODULE=on -go get rsc.io/QUOTE +go get -d go list -m all stdout '^rsc.io/quote v1.5.2' stdout '^rsc.io/QUOTE v1.5.2' @@ -18,3 +18,8 @@ stdout '!q!u!o!t!e@v1.5.3-!p!r!e' -- go.mod -- module x + +-- use.go -- +package use + +import _ "rsc.io/QUOTE/QUOTE" diff --git a/src/cmd/go/testdata/script/mod_concurrent.txt b/src/cmd/go/testdata/script/mod_concurrent.txt index e03e5e5edb..8c21525158 100644 --- a/src/cmd/go/testdata/script/mod_concurrent.txt +++ b/src/cmd/go/testdata/script/mod_concurrent.txt @@ -1,6 +1,7 @@ env GO111MODULE=on # Concurrent builds should succeed, even if they need to download modules. +go get -d ./x ./y go build ./x & go build ./y wait diff --git a/src/cmd/go/testdata/script/mod_doc.txt b/src/cmd/go/testdata/script/mod_doc.txt index aac3db00be..595ad679fc 100644 --- a/src/cmd/go/testdata/script/mod_doc.txt +++ b/src/cmd/go/testdata/script/mod_doc.txt @@ -1,6 +1,7 @@ # go doc should find module documentation env GO111MODULE=on +env GOFLAGS=-mod=mod [short] skip # Check when module x is inside GOPATH/src. @@ -48,6 +49,7 @@ stderr '^doc: cannot find module providing package example.com/hello: module loo # path used in source code, not to the absolute path relative to GOROOT. 
cd $GOROOT/src +env GOFLAGS= go doc cryptobyte stdout '// import "golang.org/x/crypto/cryptobyte"' diff --git a/src/cmd/go/testdata/script/mod_domain_root.txt b/src/cmd/go/testdata/script/mod_domain_root.txt index e34cc29fa6..14745b5812 100644 --- a/src/cmd/go/testdata/script/mod_domain_root.txt +++ b/src/cmd/go/testdata/script/mod_domain_root.txt @@ -2,7 +2,7 @@ # (example.com not example.com/something) env GO111MODULE=on -go build +go get -d -- go.mod -- module x diff --git a/src/cmd/go/testdata/script/mod_download.txt b/src/cmd/go/testdata/script/mod_download.txt index 5acb83266b..b9bf67cad5 100644 --- a/src/cmd/go/testdata/script/mod_download.txt +++ b/src/cmd/go/testdata/script/mod_download.txt @@ -46,7 +46,7 @@ go mod edit -require rsc.io/quote@v1.5.3-pre1 ! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.zip # module loading will page in the info and mod files -go list -m all +go list -m -mod=mod all exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.info exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.mod ! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.3-pre1.zip diff --git a/src/cmd/go/testdata/script/mod_download_partial.txt b/src/cmd/go/testdata/script/mod_download_partial.txt index 4978982dab..8d31970160 100644 --- a/src/cmd/go/testdata/script/mod_download_partial.txt +++ b/src/cmd/go/testdata/script/mod_download_partial.txt @@ -1,5 +1,5 @@ -# Download a module -go mod download -modcacherw rsc.io/quote +# Download modules and populate go.sum. +go get -d -modcacherw exists $GOPATH/pkg/mod/rsc.io/quote@v1.5.2/go.mod # 'go mod verify' should fail if we delete a file. 
@@ -61,4 +61,9 @@ go 1.14 require rsc.io/quote v1.5.2 +-- use.go -- +package use + +import _ "rsc.io/quote" + -- empty -- diff --git a/src/cmd/go/testdata/script/mod_get_incompatible.txt b/src/cmd/go/testdata/script/mod_get_incompatible.txt index b210715a5d..b28718a694 100644 --- a/src/cmd/go/testdata/script/mod_get_incompatible.txt +++ b/src/cmd/go/testdata/script/mod_get_incompatible.txt @@ -1,6 +1,6 @@ env GO111MODULE=on -go list x +go get -d x go list -m all stdout 'rsc.io/breaker v2.0.0\+incompatible' diff --git a/src/cmd/go/testdata/script/mod_get_indirect.txt b/src/cmd/go/testdata/script/mod_get_indirect.txt index f25e170a49..e1cc1ab411 100644 --- a/src/cmd/go/testdata/script/mod_get_indirect.txt +++ b/src/cmd/go/testdata/script/mod_get_indirect.txt @@ -27,7 +27,7 @@ grep 'golang.org/x/text v0.3.0 // indirect$' go.mod # indirect tag should be removed upon seeing direct import. cp $WORK/tmp/uselang.go x.go -go list +go get -d grep 'rsc.io/quote v1.5.2$' go.mod grep 'golang.org/x/text [v0-9a-f\.-]+$' go.mod diff --git a/src/cmd/go/testdata/script/mod_get_latest_pseudo.txt b/src/cmd/go/testdata/script/mod_get_latest_pseudo.txt index 825ee8cf89..241a0c2f0d 100644 --- a/src/cmd/go/testdata/script/mod_get_latest_pseudo.txt +++ b/src/cmd/go/testdata/script/mod_get_latest_pseudo.txt @@ -5,6 +5,6 @@ env GO111MODULE=on go mod init m -go list example.com/notags +go get -d example.com/notags go list -m all stdout '^example.com/notags v0.0.0-20190507143103-cc8cbe209b64$' diff --git a/src/cmd/go/testdata/script/mod_get_trailing_slash.txt b/src/cmd/go/testdata/script/mod_get_trailing_slash.txt index 7b5d90c50b..3b38d8ba7d 100644 --- a/src/cmd/go/testdata/script/mod_get_trailing_slash.txt +++ b/src/cmd/go/testdata/script/mod_get_trailing_slash.txt @@ -1,3 +1,6 @@ +# Populate go.sum +go mod download + # go list should succeed to load a package ending with ".go" if the path does # not correspond to an existing local file. 
Listing a pattern ending with # ".go/" should try to list a package regardless of whether a file exists at the diff --git a/src/cmd/go/testdata/script/mod_import.txt b/src/cmd/go/testdata/script/mod_import.txt index 3985b43144..28358b5b0c 100644 --- a/src/cmd/go/testdata/script/mod_import.txt +++ b/src/cmd/go/testdata/script/mod_import.txt @@ -1,7 +1,7 @@ env GO111MODULE=on # latest rsc.io/quote should be v1.5.2 not v1.5.3-pre1 -go list +go get -d go list -m all stdout 'rsc.io/quote v1.5.2' diff --git a/src/cmd/go/testdata/script/mod_in_testdata_dir.txt b/src/cmd/go/testdata/script/mod_in_testdata_dir.txt index f582569798..66f79faa6d 100644 --- a/src/cmd/go/testdata/script/mod_in_testdata_dir.txt +++ b/src/cmd/go/testdata/script/mod_in_testdata_dir.txt @@ -8,8 +8,8 @@ env GO111MODULE=on cd $WORK/testdata go mod init testdata.tld/foo -# Building a package within that module should resolve its dependencies. -go build +# Getting a package within that module should resolve its dependencies. +go get -d grep 'rsc.io/quote' go.mod # Tidying the module should preserve those dependencies. @@ -26,7 +26,7 @@ exists vendor/rsc.io/quote cd $WORK/_ignored go mod init testdata.tld/foo -go build +go get grep 'rsc.io/quote' go.mod go mod tidy diff --git a/src/cmd/go/testdata/script/mod_init_dep.txt b/src/cmd/go/testdata/script/mod_init_dep.txt index 755076eae8..f8cf1d563a 100644 --- a/src/cmd/go/testdata/script/mod_init_dep.txt +++ b/src/cmd/go/testdata/script/mod_init_dep.txt @@ -1,24 +1,14 @@ env GO111MODULE=on +env GOFLAGS=-mod=mod # modconv uses git directly to examine what old 'go get' would [!net] skip [!exec:git] skip -# go build should populate go.mod from Gopkg.lock -cp go.mod1 go.mod -go build +# go mod init should populate go.mod from Gopkg.lock +go mod init x stderr 'copying requirements from Gopkg.lock' go list -m all -! 
stderr 'copying requirements from Gopkg.lock' -stdout 'rsc.io/sampler v1.0.0' - -# go list should populate go.mod from Gopkg.lock -cp go.mod1 go.mod -go list -stderr 'copying requirements from Gopkg.lock' -go list -! stderr 'copying requirements from Gopkg.lock' -go list -m all stdout 'rsc.io/sampler v1.0.0' # test dep replacement @@ -26,9 +16,6 @@ cd y go mod init cmpenv go.mod go.mod.replace --- go.mod1 -- -module x - -- x.go -- package x @@ -54,4 +41,4 @@ go $goversion replace z v1.0.0 => rsc.io/quote v1.0.0 -require rsc.io/quote v1.0.0 \ No newline at end of file +require rsc.io/quote v1.0.0 diff --git a/src/cmd/go/testdata/script/mod_install_versioned.txt b/src/cmd/go/testdata/script/mod_install_versioned.txt index 03986d06a0..c6bce418b4 100644 --- a/src/cmd/go/testdata/script/mod_install_versioned.txt +++ b/src/cmd/go/testdata/script/mod_install_versioned.txt @@ -1,9 +1,11 @@ env GO111MODULE=on +go get -d rsc.io/fortune go list -f '{{.Target}}' rsc.io/fortune ! stdout fortune@v1 stdout 'fortune(\.exe)?$' +go get -d rsc.io/fortune/v2 go list -f '{{.Target}}' rsc.io/fortune/v2 ! stdout v2 stdout 'fortune(\.exe)?$' diff --git a/src/cmd/go/testdata/script/mod_internal.txt b/src/cmd/go/testdata/script/mod_internal.txt index 1193d528ec..687269d18f 100644 --- a/src/cmd/go/testdata/script/mod_internal.txt +++ b/src/cmd/go/testdata/script/mod_internal.txt @@ -3,30 +3,34 @@ env GO111MODULE=on # golang.org/x/internal should be importable from other golang.org/x modules. go mod edit -module=golang.org/x/anything -go build . +go get -d . # ...and their tests... go test stdout PASS # ...but that should not leak into other modules. +go get -d ./baddep ! go build ./baddep stderr golang.org[/\\]notx[/\\]useinternal stderr 'use of internal package golang.org/x/.* not allowed' # Internal packages in the standard library should not leak into modules. +go get -d ./fromstd ! 
go build ./fromstd stderr 'use of internal package internal/testenv not allowed' # Dependencies should be able to use their own internal modules... go mod edit -module=golang.org/notx -go build ./throughdep +go get -d ./throughdep # ... but other modules should not, even if they have transitive dependencies. +go get -d . ! go build . stderr 'use of internal package golang.org/x/.* not allowed' # And transitive dependencies still should not leak. +go get -d ./baddep ! go build ./baddep stderr golang.org[/\\]notx[/\\]useinternal stderr 'use of internal package golang.org/x/.* not allowed' @@ -34,15 +38,17 @@ stderr 'use of internal package golang.org/x/.* not allowed' # Replacing an internal module should keep it internal to the same paths. go mod edit -module=golang.org/notx go mod edit -replace golang.org/x/internal=./replace/golang.org/notx/internal -go build ./throughdep +go get -d ./throughdep +go get -d ./baddep ! go build ./baddep stderr golang.org[/\\]notx[/\\]useinternal stderr 'use of internal package golang.org/x/.* not allowed' go mod edit -replace golang.org/x/internal=./vendor/golang.org/x/internal -go build ./throughdep +go get -d ./throughdep +go get -d ./baddep ! 
go build ./baddep stderr golang.org[/\\]notx[/\\]useinternal stderr 'use of internal package golang.org/x/.* not allowed' diff --git a/src/cmd/go/testdata/script/mod_invalid_version.txt b/src/cmd/go/testdata/script/mod_invalid_version.txt index 6dddd4b036..f9dfdd6346 100644 --- a/src/cmd/go/testdata/script/mod_invalid_version.txt +++ b/src/cmd/go/testdata/script/mod_invalid_version.txt @@ -4,6 +4,7 @@ env GO111MODULE=on env GOPROXY=direct env GOSUMDB=off +env GOFLAGS=-mod=mod # Regression test for golang.org/issue/27173: if the user (or go.mod file) # requests a pseudo-version that does not match both the module path and commit diff --git a/src/cmd/go/testdata/script/mod_list.txt b/src/cmd/go/testdata/script/mod_list.txt index 17b33fcc7b..1ba6d7c910 100644 --- a/src/cmd/go/testdata/script/mod_list.txt +++ b/src/cmd/go/testdata/script/mod_list.txt @@ -2,12 +2,12 @@ env GO111MODULE=on [short] skip # list {{.Dir}} shows main module and go.mod but not not-yet-downloaded dependency dir. -go list -m -f '{{.Path}} {{.Main}} {{.GoMod}} {{.Dir}}' all +go list -mod=mod -m -f '{{.Path}} {{.Main}} {{.GoMod}} {{.Dir}}' all stdout '^x true .*[\\/]src[\\/]go.mod .*[\\/]src$' stdout '^rsc.io/quote false .*[\\/]v1.5.2.mod $' # list {{.Dir}} shows dependency after download (and go list without -m downloads it) -go list -f '{{.Dir}}' rsc.io/quote +go list -mod=mod -f '{{.Dir}}' rsc.io/quote stdout '.*mod[\\/]rsc.io[\\/]quote@v1.5.2$' # downloaded dependencies are read-only @@ -20,7 +20,7 @@ go clean -modcache # list {{.Dir}} shows replaced directories cp go.mod2 go.mod -go list -f {{.Dir}} rsc.io/quote +go list -mod=mod -f {{.Dir}} rsc.io/quote go list -m -f '{{.Path}} {{.Version}} {{.Dir}}{{with .Replace}} {{.GoMod}} => {{.Version}} {{.Dir}} {{.GoMod}}{{end}}' all stdout 'mod[\\/]rsc.io[\\/]quote@v1.5.1' stdout 'v1.3.0.*mod[\\/]rsc.io[\\/]sampler@v1.3.1 .*[\\/]v1.3.1.mod => v1.3.1.*sampler@v1.3.1 .*[\\/]v1.3.1.mod' @@ -30,7 +30,7 @@ go list std stdout ^math/big # rsc.io/quote/buggy 
should be listable as a package -go list rsc.io/quote/buggy +go list -mod=mod rsc.io/quote/buggy # rsc.io/quote/buggy should not be listable as a module go list -m -e -f '{{.Error.Err}}' nonexist rsc.io/quote/buggy diff --git a/src/cmd/go/testdata/script/mod_list_dir.txt b/src/cmd/go/testdata/script/mod_list_dir.txt index 6653435a06..1adab8f027 100644 --- a/src/cmd/go/testdata/script/mod_list_dir.txt +++ b/src/cmd/go/testdata/script/mod_list_dir.txt @@ -2,6 +2,9 @@ # go list with path to directory should work +# populate go.sum +go get -d + env GO111MODULE=off go list -f '{{.ImportPath}}' $GOROOT/src/math stdout ^math$ @@ -29,3 +32,5 @@ require rsc.io/quote v1.5.2 -- x.go -- package x + +import _ "rsc.io/quote" diff --git a/src/cmd/go/testdata/script/mod_list_direct.txt b/src/cmd/go/testdata/script/mod_list_direct.txt index 8f85871189..62a472f475 100644 --- a/src/cmd/go/testdata/script/mod_list_direct.txt +++ b/src/cmd/go/testdata/script/mod_list_direct.txt @@ -10,7 +10,7 @@ env GOSUMDB=off # For a while, (*modfetch.codeRepo).Stat was not checking for a go.mod file, # which would produce a hard error at the subsequent call to GoMod. -go list all +go get -d -- go.mod -- module example.com diff --git a/src/cmd/go/testdata/script/mod_list_replace_dir.txt b/src/cmd/go/testdata/script/mod_list_replace_dir.txt index cad7fe2528..f2f2d2b2bb 100644 --- a/src/cmd/go/testdata/script/mod_list_replace_dir.txt +++ b/src/cmd/go/testdata/script/mod_list_replace_dir.txt @@ -2,8 +2,11 @@ # module within the module cache. # Verifies golang.org/issue/29548 -env GO111MODULE=on -go mod download rsc.io/quote@v1.5.1 rsc.io/quote@v1.5.2 +# Populate go.sum and download dependencies. +go get -d + +# Ensure v1.5.2 is also in the cache so we can list it. +go mod download rsc.io/quote@v1.5.2 ! 
go list $GOPATH/pkg/mod/rsc.io/quote@v1.5.2 stderr '^directory ..[/\\]pkg[/\\]mod[/\\]rsc.io[/\\]quote@v1.5.2 outside available modules$' @@ -17,3 +20,8 @@ module example.com/quoter require rsc.io/quote v1.5.2 replace rsc.io/quote => rsc.io/quote v1.5.1 + +-- use.go -- +package use + +import _ "rsc.io/quote" diff --git a/src/cmd/go/testdata/script/mod_list_upgrade.txt b/src/cmd/go/testdata/script/mod_list_upgrade.txt index 474df0dc26..0cef04b89a 100644 --- a/src/cmd/go/testdata/script/mod_list_upgrade.txt +++ b/src/cmd/go/testdata/script/mod_list_upgrade.txt @@ -1,5 +1,9 @@ env GO111MODULE=on +# Populate go.sum +go list -m -mod=mod all + +# Check for upgrades. go list -m -u all stdout 'rsc.io/quote v1.2.0 \[v1\.5\.2\]' diff --git a/src/cmd/go/testdata/script/mod_load_badchain.txt b/src/cmd/go/testdata/script/mod_load_badchain.txt index 67d9a1584f..e943179c54 100644 --- a/src/cmd/go/testdata/script/mod_load_badchain.txt +++ b/src/cmd/go/testdata/script/mod_load_badchain.txt @@ -28,10 +28,10 @@ cmp stderr list-expected # Try listing a package that imports a package # in a module without a requirement. go mod edit -droprequire example.com/badchain/a -! go list m/use +! go list -mod=mod m/use cmp stderr list-missing-expected -! go list -test m/testuse +! go list -mod=mod -test m/testuse cmp stderr list-missing-test-expected -- go.mod.orig -- diff --git a/src/cmd/go/testdata/script/mod_load_badmod.txt b/src/cmd/go/testdata/script/mod_load_badmod.txt index 68c8b3792b..fa22e1808b 100644 --- a/src/cmd/go/testdata/script/mod_load_badmod.txt +++ b/src/cmd/go/testdata/script/mod_load_badmod.txt @@ -1,14 +1,13 @@ # Unknown lines should be ignored in dependency go.mod files. -env GO111MODULE=on -go list -m all +go list -m -mod=mod all # ... and in replaced dependency go.mod files. cp go.mod go.mod.usesub -go list -m all +go list -m -mod=mod all # ... but not in the main module. cp go.mod.bad go.mod -! go list -m all +! 
go list -m -mod=mod all stderr 'unknown directive: hello' -- go.mod -- diff --git a/src/cmd/go/testdata/script/mod_load_badzip.txt b/src/cmd/go/testdata/script/mod_load_badzip.txt index c5ba18e9f0..65374d2a6d 100644 --- a/src/cmd/go/testdata/script/mod_load_badzip.txt +++ b/src/cmd/go/testdata/script/mod_load_badzip.txt @@ -5,10 +5,8 @@ env GO111MODULE=on stderr 'zip for rsc.io/badzip@v1.0.0 has unexpected file rsc.io/badzip@v1.0.0.txt' ! grep rsc.io/badzip go.mod -# TODO(golang.org/issue/31730): 'go build' should print the error below if the -# requirement is not present. go mod edit -require rsc.io/badzip@v1.0.0 -! go build rsc.io/badzip +! go build -mod=mod rsc.io/badzip stderr 'zip for rsc.io/badzip@v1.0.0 has unexpected file rsc.io/badzip@v1.0.0.txt' -- go.mod -- diff --git a/src/cmd/go/testdata/script/mod_missingpkg_prerelease.txt b/src/cmd/go/testdata/script/mod_missingpkg_prerelease.txt index 1ba8d3d22a..9c250e7d1c 100644 --- a/src/cmd/go/testdata/script/mod_missingpkg_prerelease.txt +++ b/src/cmd/go/testdata/script/mod_missingpkg_prerelease.txt @@ -1,6 +1,6 @@ env GO111MODULE=on -! go list -deps use.go +! go list -mod=mod -deps use.go stderr '^use.go:4:2: package example.com/missingpkg/deprecated provided by example.com/missingpkg at latest version v1.0.0 but not at required version v1.0.1-beta$' -- go.mod -- diff --git a/src/cmd/go/testdata/script/mod_modinfo.txt b/src/cmd/go/testdata/script/mod_modinfo.txt index fb31f9e43b..d9e9fdec21 100644 --- a/src/cmd/go/testdata/script/mod_modinfo.txt +++ b/src/cmd/go/testdata/script/mod_modinfo.txt @@ -6,6 +6,7 @@ env GO111MODULE=on cd x go mod edit -require=rsc.io/quote@v1.5.2 go mod edit -replace=rsc.io/quote@v1.5.2=rsc.io/quote@v1.0.0 +go mod tidy # populate go.sum # Build a binary and ensure that it can output its own debug info. # The debug info should be accessible before main starts (golang.org/issue/29628). 
diff --git a/src/cmd/go/testdata/script/mod_multirepo.txt b/src/cmd/go/testdata/script/mod_multirepo.txt index 7f977e80f6..0f335a11f0 100644 --- a/src/cmd/go/testdata/script/mod_multirepo.txt +++ b/src/cmd/go/testdata/script/mod_multirepo.txt @@ -7,6 +7,7 @@ go list -deps -f {{.Dir}} # v2 import should use a downloaded module # both without an explicit go.mod entry ... cp tmp/use_v2.go x.go +go get -d . go list -deps -f {{.Dir}} stdout 'pkg[\\/]mod[\\/]rsc.io[\\/]quote[\\/]v2@v2.0.1$' diff --git a/src/cmd/go/testdata/script/mod_notall.txt b/src/cmd/go/testdata/script/mod_notall.txt index 29ca6066fa..1657c8d2d0 100644 --- a/src/cmd/go/testdata/script/mod_notall.txt +++ b/src/cmd/go/testdata/script/mod_notall.txt @@ -5,6 +5,7 @@ # module, but not should not include test dependencies of packages imported only # by other root patterns. +env GOFLAGS=-mod=mod cp go.mod go.mod.orig go list -deps all x/otherroot diff --git a/src/cmd/go/testdata/script/mod_permissions.txt b/src/cmd/go/testdata/script/mod_permissions.txt index 11fb4754f8..2d32dcd10f 100644 --- a/src/cmd/go/testdata/script/mod_permissions.txt +++ b/src/cmd/go/testdata/script/mod_permissions.txt @@ -12,7 +12,7 @@ chmod 0640 go.mod chmod 0604 go.sum go mod edit -module=golang.org/issue/34634 -go build . +go get -d cmp go.mod go.mod.want cmp go.sum go.sum.want diff --git a/src/cmd/go/testdata/script/mod_query.txt b/src/cmd/go/testdata/script/mod_query.txt index e87ca302f0..e10185709d 100644 --- a/src/cmd/go/testdata/script/mod_query.txt +++ b/src/cmd/go/testdata/script/mod_query.txt @@ -1,5 +1,10 @@ env GO111MODULE=on +# Populate go.sum. +# TODO(golang.org/issue/41297): we shouldn't need go.sum. None of the commands +# below depend on the build list. 
+go mod download + go list -m -versions rsc.io/quote stdout '^rsc.io/quote v1.0.0 v1.1.0 v1.2.0 v1.2.1 v1.3.0 v1.4.0 v1.5.0 v1.5.1 v1.5.2 v1.5.3-pre1$' @@ -30,3 +35,8 @@ stdout 'no matching versions for query ">v1.5.3"' -- go.mod -- module x require rsc.io/quote v1.0.0 + +-- use.go -- +package use + +import _ "rsc.io/quote" diff --git a/src/cmd/go/testdata/script/mod_replace.txt b/src/cmd/go/testdata/script/mod_replace.txt index c21f172002..dc9667f1d0 100644 --- a/src/cmd/go/testdata/script/mod_replace.txt +++ b/src/cmd/go/testdata/script/mod_replace.txt @@ -4,7 +4,7 @@ env GO111MODULE=on cp go.mod go.mod.orig # Make sure the test builds without replacement. -go build -o a1.exe . +go build -mod=mod -o a1.exe . exec ./a1.exe stdout 'Don''t communicate by sharing memory' @@ -32,7 +32,7 @@ stderr 'rsc.io/quote/v3@v3.0.0 used for two different module paths \(not-rsc.io/ # Modules that do not (yet) exist upstream can be replaced too. cp go.mod.orig go.mod go mod edit -replace=not-rsc.io/quote/v3@v3.1.0=./local/rsc.io/quote/v3 -go build -o a5.exe ./usenewmodule +go build -mod=mod -o a5.exe ./usenewmodule ! 
stderr 'finding not-rsc.io/quote/v3' grep 'not-rsc.io/quote/v3 v3.1.0' go.mod exec ./a5.exe diff --git a/src/cmd/go/testdata/script/mod_replace_gopkgin.txt b/src/cmd/go/testdata/script/mod_replace_gopkgin.txt index 674c99cb0c..df752d9716 100644 --- a/src/cmd/go/testdata/script/mod_replace_gopkgin.txt +++ b/src/cmd/go/testdata/script/mod_replace_gopkgin.txt @@ -11,6 +11,7 @@ env GO111MODULE=on env GOPROXY=direct env GOSUMDB=off +env GOFLAGS=-mod=mod # Replacing gopkg.in/[…].vN with a repository with a root go.mod file # specifying […].vN and a compatible version should succeed, even if diff --git a/src/cmd/go/testdata/script/mod_replace_import.txt b/src/cmd/go/testdata/script/mod_replace_import.txt index 54b1a12448..b4de5c50f7 100644 --- a/src/cmd/go/testdata/script/mod_replace_import.txt +++ b/src/cmd/go/testdata/script/mod_replace_import.txt @@ -7,6 +7,7 @@ cp go.mod go.mod.orig cmp go.mod go.mod.orig # 'go list' should resolve imports using replacements. +go get -d go list all stdout 'example.com/a/b$' stdout 'example.com/x/v3$' diff --git a/src/cmd/go/testdata/script/mod_require_exclude.txt b/src/cmd/go/testdata/script/mod_require_exclude.txt index 1a0fc3097b..9156d4ce5d 100644 --- a/src/cmd/go/testdata/script/mod_require_exclude.txt +++ b/src/cmd/go/testdata/script/mod_require_exclude.txt @@ -20,7 +20,7 @@ cmp go.mod go.mod.orig # With the selected version excluded, commands that load only modules should # drop the excluded module. -go list -m all +go list -m -mod=mod all stderr '^go: dropping requirement on excluded version rsc.io/sampler v1\.99\.99$' stdout '^x$' ! stdout '^rsc.io/sampler' @@ -30,7 +30,7 @@ cmp go.mod go.moddrop # from the next-highest version. cp go.mod.orig go.mod -go list -f '{{with .Module}}{{.Path}} {{.Version}}{{end}}' all +go list -mod=mod -f '{{with .Module}}{{.Path}} {{.Version}}{{end}}' all stderr '^go: dropping requirement on excluded version rsc.io/sampler v1\.99\.99$' stdout '^x $' ! 
stdout '^rsc.io/sampler v1.99.99' @@ -38,13 +38,13 @@ stdout '^rsc.io/sampler v1.3.0' # build with newer version available cp go.mod2 go.mod -go list -f '{{with .Module}}{{.Path}} {{.Version}}{{end}}' all +go list -mod=mod -f '{{with .Module}}{{.Path}} {{.Version}}{{end}}' all stderr '^go: dropping requirement on excluded version rsc.io/quote v1\.5\.1$' stdout 'rsc.io/quote v1.5.2' # build with excluded newer version cp go.mod3 go.mod -go list -f '{{with .Module}}{{.Path}} {{.Version}}{{end}}' all +go list -mod=mod -f '{{with .Module}}{{.Path}} {{.Version}}{{end}}' all ! stderr '^go: dropping requirement' stdout 'rsc.io/quote v1.5.1' diff --git a/src/cmd/go/testdata/script/mod_retention.txt b/src/cmd/go/testdata/script/mod_retention.txt index 1d83e6c07e..a4441c4b3c 100644 --- a/src/cmd/go/testdata/script/mod_retention.txt +++ b/src/cmd/go/testdata/script/mod_retention.txt @@ -7,7 +7,7 @@ env GO111MODULE=on # Control case: verify that go.mod.tidy is actually tidy. cp go.mod.tidy go.mod -go list all +go list -mod=mod all cmp go.mod go.mod.tidy @@ -35,7 +35,7 @@ cmp go.mod go.mod.tidy # "// indirect" comments should be removed if direct dependencies are seen. # changes. cp go.mod.indirect go.mod -go list all +go list -mod=mod all cmp go.mod go.mod.tidy # "// indirect" comments should be added if appropriate. @@ -63,7 +63,7 @@ cmp go.mod go.mod.tidy # A missing "go" version directive should be added. # However, that should not remove other redundant requirements. cp go.mod.nogo go.mod -go list all +go list -mod=mod all cmpenv go.mod go.mod.currentgo diff --git a/src/cmd/go/testdata/script/mod_retract.txt b/src/cmd/go/testdata/script/mod_retract.txt index 5d21902043..a52e05bc72 100644 --- a/src/cmd/go/testdata/script/mod_retract.txt +++ b/src/cmd/go/testdata/script/mod_retract.txt @@ -1,5 +1,8 @@ cp go.mod go.mod.orig +# Populate go.sum. +go mod download + # 'go list pkg' does not report an error when a retracted version is used. 
go list -e -f '{{if .Error}}{{.Error}}{{end}}' ./use ! stdout . @@ -17,7 +20,7 @@ exists $GOPATH/pkg/mod/cache/download/example.com/retract/@v/v1.0.0-bad.mod # Importing a package from a module with a retracted latest version will # select the latest non-retracted version. -go list ./use_self_prev +go get -d ./use_self_prev go list -m example.com/retract/self/prev stdout '^example.com/retract/self/prev v1.1.0$' exists $GOPATH/pkg/mod/cache/download/example.com/retract/self/prev/@v/v1.9.0.mod diff --git a/src/cmd/go/testdata/script/mod_retract_replace.txt b/src/cmd/go/testdata/script/mod_retract_replace.txt index b710485fa7..7aec438dda 100644 --- a/src/cmd/go/testdata/script/mod_retract_replace.txt +++ b/src/cmd/go/testdata/script/mod_retract_replace.txt @@ -1,6 +1,9 @@ # If the latest unretracted version of a module is replaced, 'go list' should # obtain retractions from the replacement. +# Populate go.sum. +go get -d + # The latest version, v1.9.0, is not available on the proxy. ! go list -m -retracted example.com/retract/missingmod stderr '^go list -m: loading module retractions: example.com/retract/missingmod@v1.9.0:.*404 Not Found$' @@ -24,9 +27,9 @@ go list -m -retracted -f '{{range .Retracted}}{{.}}{{end}}' example.com/retract go list -m -retracted -f '{{if .Replace}}replaced{{end}}' example.com/retract ! stdout . 
go mod edit -replace example.com/retract@v1.0.0-good=example.com/retract@v1.0.0-bad -go list -m -retracted -f '{{range .Retracted}}{{.}}{{end}}' example.com/retract +go list -m -mod=mod -retracted -f '{{range .Retracted}}{{.}}{{end}}' example.com/retract stdout '^bad$' -go list -m -retracted -f '{{with .Replace}}{{range .Retracted}}{{.}}{{end}}{{end}}' example.com/retract +go list -m -mod=mod -retracted -f '{{with .Replace}}{{range .Retracted}}{{.}}{{end}}{{end}}' example.com/retract stdout '^bad$' -- go.mod -- @@ -38,6 +41,13 @@ require ( example.com/retract v1.0.0-good example.com/retract/missingmod v1.0.0 ) +-- use.go -- +package use + +import ( + _ "example.com/retract" + _ "example.com/retract/missingmod" +) -- missingmod-v1.0.0/go.mod -- module example.com/retract/missingmod diff --git a/src/cmd/go/testdata/script/mod_sum_lookup.txt b/src/cmd/go/testdata/script/mod_sum_lookup.txt index ed80a44984..e021921380 100644 --- a/src/cmd/go/testdata/script/mod_sum_lookup.txt +++ b/src/cmd/go/testdata/script/mod_sum_lookup.txt @@ -1,13 +1,14 @@ # When we attempt to resolve an import that doesn't exist, we should not save # hashes for downloaded modules. # Verifies golang.org/issue/36260. -go list -e -tags=ignore ./noexist +# TODO(golang.org/issue/26603): use 'go mod tidy -e' when implemented. +go list -e -mod=mod -tags=ignore ./noexist ! exists go.sum # When an import is resolved successfully, we should only save hashes for # the module that provides the package, not for other modules looked up. # Verifies golang.org/issue/31580. -go list ./exist +go get -d ./exist grep '^example.com/join v1.1.0 h1:' go.sum ! 
grep '^example.com/join/subpkg' go.sum cp go.sum go.list.sum diff --git a/src/cmd/go/testdata/script/mod_sumdb_golang.txt b/src/cmd/go/testdata/script/mod_sumdb_golang.txt index d9fb63acb0..cc0b0da474 100644 --- a/src/cmd/go/testdata/script/mod_sumdb_golang.txt +++ b/src/cmd/go/testdata/script/mod_sumdb_golang.txt @@ -34,7 +34,7 @@ cmp go.sum saved.sum # Should use the checksum database to validate new go.sum lines, # but not need to fetch any new data from the proxy. rm go.sum -go list -x rsc.io/quote +go list -mod=mod -x rsc.io/quote ! stderr github ! stderr proxy.golang.org/rsc.io/quote stderr sum.golang.org/tile @@ -45,7 +45,7 @@ cmp go.sum saved.sum env TESTGOPROXY404=1 go clean -modcache rm go.sum -go list -x rsc.io/quote +go list -mod=mod -x rsc.io/quote stderr 'proxy.golang.org.*404 testing' stderr github.com/rsc cmp go.sum saved.sum diff --git a/src/cmd/go/testdata/script/mod_symlink.txt b/src/cmd/go/testdata/script/mod_symlink.txt index 49bece2b84..dbc23fb8f0 100644 --- a/src/cmd/go/testdata/script/mod_symlink.txt +++ b/src/cmd/go/testdata/script/mod_symlink.txt @@ -1,16 +1,19 @@ env GO111MODULE=on [!symlink] skip -# 'go list' should resolve modules of imported packages. +# 'go get -d' should resolve modules of imported packages. +go get -d go list -deps -f '{{.Module}}' . stdout golang.org/x/text +go get -d ./subpkg go list -deps -f '{{.Module}}' ./subpkg stdout golang.org/x/text # Create a copy of the module using symlinks in src/links. 
mkdir links symlink links/go.mod -> $GOPATH/src/go.mod +symlink links/go.sum -> $GOPATH/src/go.sum symlink links/issue.go -> $GOPATH/src/issue.go mkdir links/subpkg symlink links/subpkg/issue.go -> $GOPATH/src/subpkg/issue.go diff --git a/src/cmd/go/testdata/script/mod_test.txt b/src/cmd/go/testdata/script/mod_test.txt index 8f2da2f2a5..50f00355c1 100644 --- a/src/cmd/go/testdata/script/mod_test.txt +++ b/src/cmd/go/testdata/script/mod_test.txt @@ -1,4 +1,5 @@ env GO111MODULE=on +env GOFLAGS=-mod=mod [short] skip # TODO(bcmills): Convert the 'go test' calls below to 'go list -test' once 'go diff --git a/src/cmd/go/testdata/script/mod_tidy_replace.txt b/src/cmd/go/testdata/script/mod_tidy_replace.txt index c3158f8610..7b00bf1384 100644 --- a/src/cmd/go/testdata/script/mod_tidy_replace.txt +++ b/src/cmd/go/testdata/script/mod_tidy_replace.txt @@ -1,4 +1,5 @@ env GO111MODULE=on +env GOFLAGS=-mod=mod [short] skip # golang.org/issue/30166: 'go mod tidy' should not crash if a replaced module is diff --git a/src/cmd/go/testdata/script/mod_upgrade_patch.txt b/src/cmd/go/testdata/script/mod_upgrade_patch.txt index 3939e54c1b..1ef25b9aef 100644 --- a/src/cmd/go/testdata/script/mod_upgrade_patch.txt +++ b/src/cmd/go/testdata/script/mod_upgrade_patch.txt @@ -2,6 +2,7 @@ env GO111MODULE=on [short] skip # Initially, we are at v1.0.0 for all dependencies. +go get -d cp go.mod go.mod.orig go list -m all stdout '^patch.example.com/direct v1.0.0' diff --git a/src/cmd/go/testdata/script/mod_vcs_missing.txt b/src/cmd/go/testdata/script/mod_vcs_missing.txt index a755935b53..f8be43cf4c 100644 --- a/src/cmd/go/testdata/script/mod_vcs_missing.txt +++ b/src/cmd/go/testdata/script/mod_vcs_missing.txt @@ -5,14 +5,14 @@ env GO111MODULE=on env GOPROXY=direct cd empty -! go list launchpad.net/gocheck +! go get -d launchpad.net/gocheck stderr '"bzr": executable file not found' cd .. # 1.11 used to give the cryptic error "cannot find module for path" here, but # only for a main package. 
cd main -! go build +! go build -mod=mod stderr '"bzr": executable file not found' cd .. diff --git a/src/cmd/go/testdata/script/mod_vendor_build.txt b/src/cmd/go/testdata/script/mod_vendor_build.txt index 0c359cea6e..4efda55e08 100644 --- a/src/cmd/go/testdata/script/mod_vendor_build.txt +++ b/src/cmd/go/testdata/script/mod_vendor_build.txt @@ -1,6 +1,9 @@ env GO111MODULE=on [short] skip +# Populate go.mod and go.sum. +go mod tidy + # initial conditions: using sampler v1.3.0, not listed in go.mod. go list -deps stdout rsc.io/sampler diff --git a/src/cmd/go/testdata/script/mod_verify.txt b/src/cmd/go/testdata/script/mod_verify.txt index 3918400435..43812d069f 100644 --- a/src/cmd/go/testdata/script/mod_verify.txt +++ b/src/cmd/go/testdata/script/mod_verify.txt @@ -56,7 +56,7 @@ go mod tidy # Packages below module root should not be mentioned in go.sum. rm go.sum go mod edit -droprequire rsc.io/quote -go list rsc.io/quote/buggy # re-resolves import path and updates go.mod +go get -d rsc.io/quote/buggy grep '^rsc.io/quote v1.5.2/go.mod ' go.sum ! grep buggy go.sum diff --git a/src/cmd/go/testdata/script/mod_why.txt b/src/cmd/go/testdata/script/mod_why.txt index 10a4f9fbea..c0ff4647a7 100644 --- a/src/cmd/go/testdata/script/mod_why.txt +++ b/src/cmd/go/testdata/script/mod_why.txt @@ -1,6 +1,9 @@ env GO111MODULE=on [short] skip +# Populate go.sum. +go mod tidy + go list -test all stdout rsc.io/quote stdout golang.org/x/text/language diff --git a/src/cmd/go/testdata/script/modfile_flag.txt b/src/cmd/go/testdata/script/modfile_flag.txt index f05bf03fbf..0ad0880817 100644 --- a/src/cmd/go/testdata/script/modfile_flag.txt +++ b/src/cmd/go/testdata/script/modfile_flag.txt @@ -37,10 +37,10 @@ go mod why rsc.io/quote # 'go list' and other commands with build flags should work. # They should update the alternate go.mod when a dependency is missing. go mod edit -droprequire rsc.io/quote -go list . +go list -mod=mod . grep rsc.io/quote go.alt.mod -go build -n . -go test -n . 
+go build -n -mod=mod . +go test -n -mod=mod . go get -d rsc.io/quote diff --git a/src/cmd/go/testdata/script/version.txt b/src/cmd/go/testdata/script/version.txt index 0123ac6d53..81ca698620 100644 --- a/src/cmd/go/testdata/script/version.txt +++ b/src/cmd/go/testdata/script/version.txt @@ -14,6 +14,7 @@ env GO111MODULE=on [short] skip # Check that 'go version' and 'go version -m' work on a binary built in module mode. +go get -d rsc.io/fortune go build -o fortune.exe rsc.io/fortune go version fortune.exe stdout '^fortune.exe: .+' diff --git a/src/cmd/go/testdata/script/version_replace.txt b/src/cmd/go/testdata/script/version_replace.txt index b657086f09..ec98f4e3f3 100644 --- a/src/cmd/go/testdata/script/version_replace.txt +++ b/src/cmd/go/testdata/script/version_replace.txt @@ -1,7 +1,7 @@ [short] skip go mod download example.com/printversion@v0.1.0 example.com/printversion@v1.0.0 - +go get -d example.com/printversion@v0.1.0 go install example.com/printversion go run example.com/printversion -- cgit v1.2.3-54-g00ecf From b22af9b407dc29d1a733976484904ad0ab168466 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 10 Sep 2020 22:41:29 -0400 Subject: cmd/link: record only the first occurance in Reachparent graph In the deadcode pass, a type symbol may be marked twice, one without UsedInIface, one with. For the second time, don't update the Reachparent graph, so it only records the path of the first time the symbol is reached. This ensures the Reachparent graph is acyclic. TODO: add a test. 
(This only affects GOEXPERIMENT=fieldtrack) Change-Id: I68e8a1a69c3830bc8aee5df946151dc22dcb2b29 Reviewed-on: https://go-review.googlesource.com/c/go/+/254297 Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot Reviewed-by: Than McIntosh --- src/cmd/link/internal/ld/deadcode.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go index 0269429723..35545f950e 100644 --- a/src/cmd/link/internal/ld/deadcode.go +++ b/src/cmd/link/internal/ld/deadcode.go @@ -209,7 +209,7 @@ func (d *deadcodePass) mark(symIdx, parent loader.Sym) { if symIdx != 0 && !d.ldr.AttrReachable(symIdx) { d.wq.push(symIdx) d.ldr.SetAttrReachable(symIdx, true) - if objabi.Fieldtrack_enabled != 0 { + if objabi.Fieldtrack_enabled != 0 && d.ldr.Reachparent[symIdx] == 0 { d.ldr.Reachparent[symIdx] = parent } if *flagDumpDep { -- cgit v1.2.3-54-g00ecf From 6e3df749b1058ecfaf5f6601f6f8678c0971da8e Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Wed, 9 Sep 2020 16:35:56 -0400 Subject: cmd/go: refactor -mod flag parsing Keep track of whether the -mod flag was set explicitly. When -mod=readonly is the default, we'll want to adjust our error messages if it's set explicitly. Also, register the -mod, -modcacherw, and -modfile flags in functions in internal/base instead of internal/work. 'go mod' commands that don't load packages shouldn't depend on internal/work. For #40728 Change-Id: I272aea9e19908ba37e151baac4ea8630e90f241f Reviewed-on: https://go-review.googlesource.com/c/go/+/253744 Run-TryBot: Jay Conrod TryBot-Result: Gobot Gobot Reviewed-by: Michael Matloob Reviewed-by: Bryan C. 
Mills --- src/cmd/go/internal/base/flag.go | 35 +++++++++++++++++++++++++++++++--- src/cmd/go/internal/cfg/cfg.go | 3 ++- src/cmd/go/internal/fmtcmd/fmt.go | 3 ++- src/cmd/go/internal/modcmd/download.go | 3 +-- src/cmd/go/internal/modcmd/edit.go | 3 +-- src/cmd/go/internal/modcmd/graph.go | 3 +-- src/cmd/go/internal/modcmd/init.go | 3 +-- src/cmd/go/internal/modcmd/tidy.go | 3 +-- src/cmd/go/internal/modcmd/vendor.go | 3 +-- src/cmd/go/internal/modcmd/verify.go | 3 +-- src/cmd/go/internal/modcmd/why.go | 3 +-- src/cmd/go/internal/modload/init.go | 23 ++++++++++++---------- src/cmd/go/internal/work/build.go | 14 +++----------- src/cmd/go/internal/work/init.go | 2 +- 14 files changed, 61 insertions(+), 43 deletions(-) diff --git a/src/cmd/go/internal/base/flag.go b/src/cmd/go/internal/base/flag.go index 6727196816..c97c744520 100644 --- a/src/cmd/go/internal/base/flag.go +++ b/src/cmd/go/internal/base/flag.go @@ -28,13 +28,42 @@ func (v *StringsFlag) String() string { return "" } +// explicitStringFlag is like a regular string flag, but it also tracks whether +// the string was set explicitly to a non-empty value. +type explicitStringFlag struct { + value *string + explicit *bool +} + +func (f explicitStringFlag) String() string { + if f.value == nil { + return "" + } + return *f.value +} + +func (f explicitStringFlag) Set(v string) error { + *f.value = v + if v != "" { + *f.explicit = true + } + return nil +} + // AddBuildFlagsNX adds the -n and -x build flags to the flag set. func AddBuildFlagsNX(flags *flag.FlagSet) { flags.BoolVar(&cfg.BuildN, "n", false, "") flags.BoolVar(&cfg.BuildX, "x", false, "") } -// AddLoadFlags adds the -mod build flag to the flag set. -func AddLoadFlags(flags *flag.FlagSet) { - flags.StringVar(&cfg.BuildMod, "mod", "", "") +// AddModFlag adds the -mod build flag to the flag set. 
+func AddModFlag(flags *flag.FlagSet) { + flags.Var(explicitStringFlag{value: &cfg.BuildMod, explicit: &cfg.BuildModExplicit}, "mod", "") +} + +// AddModCommonFlags adds the module-related flags common to build commands +// and 'go mod' subcommands. +func AddModCommonFlags(flags *flag.FlagSet) { + flags.BoolVar(&cfg.ModCacheRW, "modcacherw", false, "") + flags.StringVar(&cfg.ModFile, "modfile", "", "") } diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go index f9bbcd9180..f874b880a6 100644 --- a/src/cmd/go/internal/cfg/cfg.go +++ b/src/cmd/go/internal/cfg/cfg.go @@ -27,7 +27,8 @@ var ( BuildBuildmode string // -buildmode flag BuildContext = defaultContext() BuildMod string // -mod flag - BuildModReason string // reason -mod flag is set, if set by default + BuildModExplicit bool // whether -mod was set explicitly + BuildModReason string // reason -mod was set, if set by default BuildI bool // -i flag BuildLinkshared bool // -linkshared flag BuildMSan bool // -msan flag diff --git a/src/cmd/go/internal/fmtcmd/fmt.go b/src/cmd/go/internal/fmtcmd/fmt.go index f96cff429c..b0c1c59b40 100644 --- a/src/cmd/go/internal/fmtcmd/fmt.go +++ b/src/cmd/go/internal/fmtcmd/fmt.go @@ -23,7 +23,8 @@ import ( func init() { base.AddBuildFlagsNX(&CmdFmt.Flag) - base.AddLoadFlags(&CmdFmt.Flag) + base.AddModFlag(&CmdFmt.Flag) + base.AddModCommonFlags(&CmdFmt.Flag) } var CmdFmt = &base.Command{ diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go index 41f294d475..0ea5638e70 100644 --- a/src/cmd/go/internal/modcmd/download.go +++ b/src/cmd/go/internal/modcmd/download.go @@ -14,7 +14,6 @@ import ( "cmd/go/internal/cfg" "cmd/go/internal/modfetch" "cmd/go/internal/modload" - "cmd/go/internal/work" "golang.org/x/mod/module" ) @@ -64,7 +63,7 @@ func init() { // TODO(jayconrod): https://golang.org/issue/35849 Apply -x to other 'go mod' commands. 
cmdDownload.Flag.BoolVar(&cfg.BuildX, "x", false, "") - work.AddModCommonFlags(cmdDownload) + base.AddModCommonFlags(&cmdDownload.Flag) } type moduleJSON struct { diff --git a/src/cmd/go/internal/modcmd/edit.go b/src/cmd/go/internal/modcmd/edit.go index 18bdd34cd0..03a774b824 100644 --- a/src/cmd/go/internal/modcmd/edit.go +++ b/src/cmd/go/internal/modcmd/edit.go @@ -19,7 +19,6 @@ import ( "cmd/go/internal/lockedfile" "cmd/go/internal/modfetch" "cmd/go/internal/modload" - "cmd/go/internal/work" "golang.org/x/mod/modfile" "golang.org/x/mod/module" @@ -154,7 +153,7 @@ func init() { cmdEdit.Flag.Var(flagFunc(flagRetract), "retract", "") cmdEdit.Flag.Var(flagFunc(flagDropRetract), "dropretract", "") - work.AddModCommonFlags(cmdEdit) + base.AddModCommonFlags(&cmdEdit.Flag) base.AddBuildFlagsNX(&cmdEdit.Flag) } diff --git a/src/cmd/go/internal/modcmd/graph.go b/src/cmd/go/internal/modcmd/graph.go index 513536a010..a149b65605 100644 --- a/src/cmd/go/internal/modcmd/graph.go +++ b/src/cmd/go/internal/modcmd/graph.go @@ -15,7 +15,6 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/modload" - "cmd/go/internal/work" "golang.org/x/mod/module" ) @@ -33,7 +32,7 @@ path@version, except for the main module, which has no @version suffix. } func init() { - work.AddModCommonFlags(cmdGraph) + base.AddModCommonFlags(&cmdGraph.Flag) } func runGraph(ctx context.Context, cmd *base.Command, args []string) { diff --git a/src/cmd/go/internal/modcmd/init.go b/src/cmd/go/internal/modcmd/init.go index b6cffd332d..21b235653e 100644 --- a/src/cmd/go/internal/modcmd/init.go +++ b/src/cmd/go/internal/modcmd/init.go @@ -9,7 +9,6 @@ package modcmd import ( "cmd/go/internal/base" "cmd/go/internal/modload" - "cmd/go/internal/work" "context" "os" "strings" @@ -30,7 +29,7 @@ To override this guess, supply the module path as an argument. 
} func init() { - work.AddModCommonFlags(cmdInit) + base.AddModCommonFlags(&cmdInit.Flag) } func runInit(ctx context.Context, cmd *base.Command, args []string) { diff --git a/src/cmd/go/internal/modcmd/tidy.go b/src/cmd/go/internal/modcmd/tidy.go index 4dcb62e02f..30df674ef6 100644 --- a/src/cmd/go/internal/modcmd/tidy.go +++ b/src/cmd/go/internal/modcmd/tidy.go @@ -10,7 +10,6 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/modload" - "cmd/go/internal/work" "context" ) @@ -32,7 +31,7 @@ to standard error. func init() { cmdTidy.Run = runTidy // break init cycle cmdTidy.Flag.BoolVar(&cfg.BuildV, "v", false, "") - work.AddModCommonFlags(cmdTidy) + base.AddModCommonFlags(&cmdTidy.Flag) } func runTidy(ctx context.Context, cmd *base.Command, args []string) { diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go index 30334f3a42..91d2509452 100644 --- a/src/cmd/go/internal/modcmd/vendor.go +++ b/src/cmd/go/internal/modcmd/vendor.go @@ -19,7 +19,6 @@ import ( "cmd/go/internal/cfg" "cmd/go/internal/imports" "cmd/go/internal/modload" - "cmd/go/internal/work" "golang.org/x/mod/module" "golang.org/x/mod/semver" @@ -41,7 +40,7 @@ modules and packages to standard error. func init() { cmdVendor.Flag.BoolVar(&cfg.BuildV, "v", false, "") - work.AddModCommonFlags(cmdVendor) + base.AddModCommonFlags(&cmdVendor.Flag) } func runVendor(ctx context.Context, cmd *base.Command, args []string) { diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go index d542825823..7700588bde 100644 --- a/src/cmd/go/internal/modcmd/verify.go +++ b/src/cmd/go/internal/modcmd/verify.go @@ -17,7 +17,6 @@ import ( "cmd/go/internal/cfg" "cmd/go/internal/modfetch" "cmd/go/internal/modload" - "cmd/go/internal/work" "golang.org/x/mod/module" "golang.org/x/mod/sumdb/dirhash" @@ -38,7 +37,7 @@ non-zero status. 
} func init() { - work.AddModCommonFlags(cmdVerify) + base.AddModCommonFlags(&cmdVerify.Flag) } func runVerify(ctx context.Context, cmd *base.Command, args []string) { diff --git a/src/cmd/go/internal/modcmd/why.go b/src/cmd/go/internal/modcmd/why.go index 30b15fc153..8454fdfec6 100644 --- a/src/cmd/go/internal/modcmd/why.go +++ b/src/cmd/go/internal/modcmd/why.go @@ -11,7 +11,6 @@ import ( "cmd/go/internal/base" "cmd/go/internal/modload" - "cmd/go/internal/work" "golang.org/x/mod/module" ) @@ -58,7 +57,7 @@ var ( func init() { cmdWhy.Run = runWhy // break init cycle - work.AddModCommonFlags(cmdWhy) + base.AddModCommonFlags(&cmdWhy.Flag) } func runWhy(ctx context.Context, cmd *base.Command, args []string) { diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 8e8fb9e6a1..1f50dcb11c 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -518,17 +518,20 @@ func modFileToBuildList() { // setDefaultBuildMod sets a default value for cfg.BuildMod // if it is currently empty. func setDefaultBuildMod() { - if cfg.BuildMod != "" { + if cfg.BuildModExplicit { // Don't override an explicit '-mod=' argument. return } - cfg.BuildMod = "mod" + if cfg.CmdName == "get" || strings.HasPrefix(cfg.CmdName, "mod ") { - // Don't set -mod implicitly for commands whose purpose is to - // manipulate the build list. + // 'get' and 'go mod' commands may update go.mod automatically. + // TODO(jayconrod): should this narrower? Should 'go mod download' or + // 'go mod graph' update go.mod by default? + cfg.BuildMod = "mod" return } if modRoot == "" { + cfg.BuildMod = "mod" return } @@ -546,18 +549,18 @@ func setDefaultBuildMod() { } } - // Since a vendor directory exists, we have a non-trivial reason for - // choosing -mod=mod, although it probably won't be used for anything. - // Record the reason anyway for consistency. - // It may be overridden if we switch to mod=readonly below. 
- cfg.BuildModReason = fmt.Sprintf("Go version in go.mod is %s.", modGo) + // Since a vendor directory exists, we should record why we didn't use it. + // This message won't normally be shown, but it may appear with import errors. + cfg.BuildModReason = fmt.Sprintf("Go version in go.mod is %s, so vendor directory was not used.", modGo) } p := ModFilePath() if fi, err := os.Stat(p); err == nil && !hasWritePerm(p, fi) { cfg.BuildMod = "readonly" cfg.BuildModReason = "go.mod file is read-only." + return } + cfg.BuildMod = "mod" } func legacyModInit() { @@ -857,7 +860,7 @@ func WriteGoMod() { // prefer to report a dirty go.mod over a dirty go.sum if cfg.BuildModReason != "" { base.Fatalf("go: updates to go.mod needed, disabled by -mod=readonly\n\t(%s)", cfg.BuildModReason) - } else { + } else if cfg.BuildModExplicit { base.Fatalf("go: updates to go.mod needed, disabled by -mod=readonly") } } diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go index d020aa6e9f..e99982ed36 100644 --- a/src/cmd/go/internal/work/build.go +++ b/src/cmd/go/internal/work/build.go @@ -240,13 +240,12 @@ const ( // AddBuildFlags adds the flags common to the build, clean, get, // install, list, run, and test commands. 
func AddBuildFlags(cmd *base.Command, mask BuildFlagMask) { + base.AddBuildFlagsNX(&cmd.Flag) cmd.Flag.BoolVar(&cfg.BuildA, "a", false, "") - cmd.Flag.BoolVar(&cfg.BuildN, "n", false, "") cmd.Flag.IntVar(&cfg.BuildP, "p", cfg.BuildP, "") if mask&OmitVFlag == 0 { cmd.Flag.BoolVar(&cfg.BuildV, "v", false, "") } - cmd.Flag.BoolVar(&cfg.BuildX, "x", false, "") cmd.Flag.Var(&load.BuildAsmflags, "asmflags", "") cmd.Flag.Var(buildCompiler{}, "compiler", "") @@ -254,10 +253,10 @@ func AddBuildFlags(cmd *base.Command, mask BuildFlagMask) { cmd.Flag.Var(&load.BuildGcflags, "gcflags", "") cmd.Flag.Var(&load.BuildGccgoflags, "gccgoflags", "") if mask&OmitModFlag == 0 { - cmd.Flag.StringVar(&cfg.BuildMod, "mod", "", "") + base.AddModFlag(&cmd.Flag) } if mask&OmitModCommonFlags == 0 { - AddModCommonFlags(cmd) + base.AddModCommonFlags(&cmd.Flag) } cmd.Flag.StringVar(&cfg.BuildContext.InstallSuffix, "installsuffix", "", "") cmd.Flag.Var(&load.BuildLdflags, "ldflags", "") @@ -275,13 +274,6 @@ func AddBuildFlags(cmd *base.Command, mask BuildFlagMask) { cmd.Flag.StringVar(&cfg.DebugTrace, "debug-trace", "", "") } -// AddModCommonFlags adds the module-related flags common to build commands -// and 'go mod' subcommands. -func AddModCommonFlags(cmd *base.Command) { - cmd.Flag.BoolVar(&cfg.ModCacheRW, "modcacherw", false, "") - cmd.Flag.StringVar(&cfg.ModFile, "modfile", "", "") -} - // tagsFlag is the implementation of the -tags flag. type tagsFlag []string diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go index dad3b10111..f78020032c 100644 --- a/src/cmd/go/internal/work/init.go +++ b/src/cmd/go/internal/work/init.go @@ -252,7 +252,7 @@ func buildModeInit() { switch cfg.BuildMod { case "": - // ok + // Behavior will be determined automatically, as if no flag were passed. 
case "readonly", "vendor", "mod": if !cfg.ModulesEnabled && !inGOFLAGS("-mod") { base.Fatalf("build flag -mod=%s only valid when using modules", cfg.BuildMod) -- cgit v1.2.3-54-g00ecf From 9214677e7df1e6130249bc83d721130b00d829c4 Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Wed, 9 Sep 2020 16:41:55 -0400 Subject: cmd/go: refactor modload.Import for better -mod=readonly errors When -mod=readonly is set, Import will now allow imports from replacements without explicit requirements. With -mod=mod, this would add a new requirement but does not trigger a module lookup, so it's determinisitic. Before reporting an error for an unknown import with -mod=readonly, check whether the import is valid. If there's a typo in the import, that's more relevant. For #40728 Change-Id: I05e138ff76ba3d0eb2e3010c15589fa363deb8d3 Reviewed-on: https://go-review.googlesource.com/c/go/+/253745 Run-TryBot: Jay Conrod TryBot-Result: Gobot Gobot Reviewed-by: Michael Matloob Reviewed-by: Bryan C. Mills --- src/cmd/go/internal/modload/import.go | 53 +++++++++++++++++------ src/cmd/go/testdata/script/mod_build_info_err.txt | 2 +- 2 files changed, 41 insertions(+), 14 deletions(-) diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go index c625184b8b..10b1e7f4b8 100644 --- a/src/cmd/go/internal/modload/import.go +++ b/src/cmd/go/internal/modload/import.go @@ -107,6 +107,25 @@ func (e *AmbiguousImportError) Error() string { var _ load.ImportPathError = &AmbiguousImportError{} +type invalidImportError struct { + importPath string + err error +} + +func (e *invalidImportError) ImportPath() string { + return e.importPath +} + +func (e *invalidImportError) Error() string { + return e.err.Error() +} + +func (e *invalidImportError) Unwrap() error { + return e.err +} + +var _ load.ImportPathError = &invalidImportError{} + // importFromBuildList finds the module and directory in the build list // containing the package with the given import path. 
The answer must be unique: // importFromBuildList returns an error if multiple modules attempt to provide @@ -207,17 +226,6 @@ func importFromBuildList(ctx context.Context, path string) (m module.Version, di func queryImport(ctx context.Context, path string) (module.Version, error) { pathIsStd := search.IsStandardImportPath(path) - if cfg.BuildMod == "readonly" { - var queryErr error - if !pathIsStd { - if cfg.BuildModReason == "" { - queryErr = fmt.Errorf("import lookup disabled by -mod=%s", cfg.BuildMod) - } else { - queryErr = fmt.Errorf("import lookup disabled by -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason) - } - } - return module.Version{}, &ImportMissingError{Path: path, QueryErr: queryErr} - } if modRoot == "" && !allowMissingModuleImports { return module.Version{}, &ImportMissingError{ Path: path, @@ -226,8 +234,9 @@ func queryImport(ctx context.Context, path string) (module.Version, error) { } // Not on build list. - // To avoid spurious remote fetches, next try the latest replacement for each module. - // (golang.org/issue/26241) + // To avoid spurious remote fetches, next try the latest replacement for each + // module (golang.org/issue/26241). This should give a useful message + // in -mod=readonly, and it will allow us to add a requirement with -mod=mod. if modFile != nil { latest := map[string]string{} // path -> version for _, r := range modFile.Replace { @@ -288,6 +297,11 @@ func queryImport(ctx context.Context, path string) (module.Version, error) { } } + // Before any further lookup, check that the path is valid. 
+ if err := module.CheckImportPath(path); err != nil { + return module.Version{}, &invalidImportError{importPath: path, err: err} + } + if pathIsStd { // This package isn't in the standard library, isn't in any module already // in the build list, and isn't in any other module that the user has @@ -299,6 +313,19 @@ func queryImport(ctx context.Context, path string) (module.Version, error) { return module.Version{}, &ImportMissingError{Path: path} } + if cfg.BuildMod == "readonly" { + var queryErr error + if cfg.BuildModExplicit { + queryErr = fmt.Errorf("import lookup disabled by -mod=%s", cfg.BuildMod) + } else if cfg.BuildModReason != "" { + queryErr = fmt.Errorf("import lookup disabled by -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason) + } + return module.Version{}, &ImportMissingError{Path: path, QueryErr: queryErr} + } + + // Look up module containing the package, for addition to the build list. + // Goal is to determine the module, download it to dir, + // and return m, dir, ImpportMissingError. fmt.Fprintf(os.Stderr, "go: finding module for package %s\n", path) candidates, err := QueryPackage(ctx, path, "latest", CheckAllowed) diff --git a/src/cmd/go/testdata/script/mod_build_info_err.txt b/src/cmd/go/testdata/script/mod_build_info_err.txt index 87a099b219..a6853b5c86 100644 --- a/src/cmd/go/testdata/script/mod_build_info_err.txt +++ b/src/cmd/go/testdata/script/mod_build_info_err.txt @@ -2,7 +2,7 @@ # Verifies golang.org/issue/34393. 
go list -e -deps -f '{{with .Error}}{{.Pos}}: {{.Err}}{{end}}' ./main -stdout 'bad[/\\]bad.go:3:8: malformed module path "🐧.example.com/string": invalid char ''🐧''' +stdout 'bad[/\\]bad.go:3:8: malformed import path "🐧.example.com/string": invalid char ''🐧''' -- go.mod -- module m -- cgit v1.2.3-54-g00ecf From a531bd5a59177dfef354df8b5b5b529a2a55d015 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Tue, 8 Sep 2020 20:41:51 -0400 Subject: cmd/internal/objfile: recognize Mach-O __DATA_CONST segment as read-only data Updates #38830. Change-Id: I826c6b0a42bc8e48fcda556250ca4a95c73987eb Reviewed-on: https://go-review.googlesource.com/c/go/+/253918 Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot Reviewed-by: Than McIntosh --- src/cmd/internal/objfile/macho.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/internal/objfile/macho.go b/src/cmd/internal/objfile/macho.go index fdb7e76dfc..1d6963f7c4 100644 --- a/src/cmd/internal/objfile/macho.go +++ b/src/cmd/internal/objfile/macho.go @@ -60,7 +60,7 @@ func (f *machoFile) symbols() ([]Sym, error) { } else if int(s.Sect) <= len(f.macho.Sections) { sect := f.macho.Sections[s.Sect-1] switch sect.Seg { - case "__TEXT": + case "__TEXT", "__DATA_CONST": sym.Code = 'R' case "__DATA": sym.Code = 'D' -- cgit v1.2.3-54-g00ecf From ffd95aadcddc34ec2c83971346f04cf7234e0fca Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 3 Sep 2020 12:59:09 -0400 Subject: cmd/link: put read-only data in __DATA_CONST segment On darwin, we put read-only data in __TEXT segment on AMD64 in exe (non-PIE) buildmode, and in __DATA on everywhere else. This is not ideal: things in __DATA segment are not read-only, and being mapped R/W may use more run-time resources. In fact, newer darwin systems support a __DATA_CONST segment, which the dynamic linker will map it read-only after applying relocations. Use that. Fixes #38830. 
Change-Id: Ic281e6c6ca8ef5fec4bb7c5b71c50dd5393e78ae Reviewed-on: https://go-review.googlesource.com/c/go/+/253919 Reviewed-by: Than McIntosh --- src/cmd/link/internal/ld/data.go | 20 ++++++++++++------- src/cmd/link/internal/ld/macho.go | 41 +++++++++++++++++++++++--------------- src/cmd/link/internal/ld/target.go | 6 +++++- 3 files changed, 43 insertions(+), 24 deletions(-) diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index 8324a98a26..a730125cf2 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -930,7 +930,7 @@ func writeBlock(ctxt *Link, out *OutBuf, ldr *loader.Loader, syms []loader.Sym, break } if val < addr { - ldr.Errorf(s, "phase error: addr=%#x but sym=%#x type=%d", addr, val, ldr.SymType(s)) + ldr.Errorf(s, "phase error: addr=%#x but sym=%#x type=%v sect=%v", addr, val, ldr.SymType(s), ldr.SymSect(s).Name) errorexit() } if addr < val { @@ -1308,9 +1308,9 @@ func (state *dodataState) makeRelroForSharedLib(target *Link) { // relro Type before it reaches here. isRelro = true case sym.SFUNCTAB: - if target.IsAIX() && ldr.SymName(s) == "runtime.etypes" { + if ldr.SymName(s) == "runtime.etypes" { // runtime.etypes must be at the end of - // the relro datas. + // the relro data. isRelro = true } } @@ -1706,7 +1706,7 @@ func (state *dodataState) allocateDataSections(ctxt *Link) { ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.ebss", 0), sect) bssGcEnd := state.datsize - int64(sect.Vaddr) - // Emit gcdata for bcc symbols now that symbol values have been assigned. + // Emit gcdata for bss symbols now that symbol values have been assigned. 
gcsToEmit := []struct { symName string symKind sym.SymKind @@ -1826,13 +1826,16 @@ func (state *dodataState) allocateDataSections(ctxt *Link) { const fallbackPerm = 04 relroSecPerm := fallbackPerm genrelrosecname := func(suffix string) string { + if suffix == "" { + return ".rodata" + } return suffix } seg := segro if ctxt.UseRelro() { segrelro := &Segrelrodata - if ctxt.LinkMode == LinkExternal && ctxt.HeadType != objabi.Haix { + if ctxt.LinkMode == LinkExternal && !ctxt.IsAIX() && !ctxt.IsDarwin() { // Using a separate segment with an external // linker results in some programs moving // their data sections unexpectedly, which @@ -1845,9 +1848,12 @@ func (state *dodataState) allocateDataSections(ctxt *Link) { state.datsize = 0 } - genrelrosecname = func(suffix string) string { - return ".data.rel.ro" + suffix + if !ctxt.IsDarwin() { // We don't need the special names on darwin. + genrelrosecname = func(suffix string) string { + return ".data.rel.ro" + suffix + } } + relroReadOnly := []sym.SymKind{} for _, symnro := range sym.ReadOnly { symn := sym.RelROMap[symnro] diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index f6356729a6..9765ce18d3 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -499,16 +499,7 @@ func machoadddynlib(lib string, linkmode LinkMode) { func machoshbits(ctxt *Link, mseg *MachoSeg, sect *sym.Section, segname string) { buf := "__" + strings.Replace(sect.Name[1:], ".", "_", -1) - var msect *MachoSect - if sect.Rwx&1 == 0 && segname != "__DWARF" && (ctxt.Arch.Family == sys.ARM64 || - (ctxt.Arch.Family == sys.AMD64 && ctxt.BuildMode != BuildModeExe)) { - // Darwin external linker on arm64, and on amd64 in c-shared/c-archive buildmode - // complains about absolute relocs in __TEXT, so if the section is not - // executable, put it in __DATA segment. 
- msect = newMachoSect(mseg, buf, "__DATA") - } else { - msect = newMachoSect(mseg, buf, segname) - } + msect := newMachoSect(mseg, buf, segname) if sect.Rellen > 0 { msect.reloc = uint32(sect.Reloff) @@ -633,13 +624,28 @@ func asmbMacho(ctxt *Link) { machoshbits(ctxt, ms, sect, "__TEXT") } + /* rodata */ + if ctxt.LinkMode != LinkExternal && Segrelrodata.Length > 0 { + ms = newMachoSeg("__DATA_CONST", 20) + ms.vaddr = Segrelrodata.Vaddr + ms.vsize = Segrelrodata.Length + ms.fileoffset = Segrelrodata.Fileoff + ms.filesize = Segrelrodata.Filelen + ms.prot1 = 3 + ms.prot2 = 3 + ms.flag = 0x10 // SG_READ_ONLY + } + + for _, sect := range Segrelrodata.Sections { + machoshbits(ctxt, ms, sect, "__DATA_CONST") + } + /* data */ if ctxt.LinkMode != LinkExternal { - w := int64(Segdata.Length) ms = newMachoSeg("__DATA", 20) - ms.vaddr = uint64(va) + uint64(v) - ms.vsize = uint64(w) - ms.fileoffset = uint64(v) + ms.vaddr = Segdata.Vaddr + ms.vsize = Segdata.Length + ms.fileoffset = Segdata.Fileoff ms.filesize = Segdata.Filelen ms.prot1 = 3 ms.prot2 = 3 @@ -695,7 +701,7 @@ func asmbMacho(ctxt *Link) { if ctxt.LinkMode != LinkExternal { ms := newMachoSeg("__LINKEDIT", 0) - ms.vaddr = uint64(va) + uint64(v) + uint64(Rnd(int64(Segdata.Length), int64(*FlagRound))) + ms.vaddr = uint64(Rnd(int64(Segdata.Vaddr+Segdata.Length), int64(*FlagRound))) ms.vsize = uint64(s1) + uint64(s2) + uint64(s3) + uint64(s4) ms.fileoffset = uint64(linkoff) ms.filesize = ms.vsize @@ -1008,7 +1014,7 @@ func doMachoLink(ctxt *Link) int64 { size := int(ldr.SymSize(s1) + ldr.SymSize(s2) + ldr.SymSize(s3) + ldr.SymSize(s4)) if size > 0 { - linkoff = Rnd(int64(uint64(HEADR)+Segtext.Length), int64(*FlagRound)) + Rnd(int64(Segdata.Filelen), int64(*FlagRound)) + Rnd(int64(Segdwarf.Filelen), int64(*FlagRound)) + linkoff = Rnd(int64(uint64(HEADR)+Segtext.Length), int64(*FlagRound)) + Rnd(int64(Segrelrodata.Filelen), int64(*FlagRound)) + Rnd(int64(Segdata.Filelen), int64(*FlagRound)) + Rnd(int64(Segdwarf.Filelen), 
int64(*FlagRound)) ctxt.Out.SeekSet(linkoff) ctxt.Out.Write(ldr.Data(s1)) @@ -1086,6 +1092,9 @@ func machoEmitReloc(ctxt *Link) { for _, sect := range Segtext.Sections[1:] { relocSect(ctxt, sect, ctxt.datap) } + for _, sect := range Segrelrodata.Sections { + relocSect(ctxt, sect, ctxt.datap) + } for _, sect := range Segdata.Sections { relocSect(ctxt, sect, ctxt.datap) } diff --git a/src/cmd/link/internal/ld/target.go b/src/cmd/link/internal/ld/target.go index 102b6c5436..f68de8fff1 100644 --- a/src/cmd/link/internal/ld/target.go +++ b/src/cmd/link/internal/ld/target.go @@ -74,8 +74,12 @@ func (t *Target) IsDynlinkingGo() bool { func (t *Target) UseRelro() bool { switch t.BuildMode { case BuildModeCArchive, BuildModeCShared, BuildModeShared, BuildModePIE, BuildModePlugin: - return t.IsELF || t.HeadType == objabi.Haix + return t.IsELF || t.HeadType == objabi.Haix || t.HeadType == objabi.Hdarwin default: + if t.HeadType == objabi.Hdarwin && t.IsARM64() { + // On darwin/ARM64, everything is PIE. + return true + } return t.linkShared || (t.HeadType == objabi.Haix && t.LinkMode == LinkExternal) } } -- cgit v1.2.3-54-g00ecf From 1ed4f12f4a6b9d783cf9a6fc3a292a433b8539c6 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 10 Sep 2020 11:14:27 -0400 Subject: cmd/link: add a test to test RODATA is indeed read-only Updates #38830. 
Change-Id: Ie1f6ccef40a773f038aac587dfc26bf70a1a8536 Reviewed-on: https://go-review.googlesource.com/c/go/+/253921 Run-TryBot: Cherry Zhang Reviewed-by: Than McIntosh TryBot-Result: Gobot Gobot --- src/cmd/link/link_test.go | 14 ++++++++++++++ src/cmd/link/testdata/testRO/x.go | 22 ++++++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 src/cmd/link/testdata/testRO/x.go diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go index 98798be465..4e60996d8e 100644 --- a/src/cmd/link/link_test.go +++ b/src/cmd/link/link_test.go @@ -800,3 +800,17 @@ func TestContentAddressableSymbols(t *testing.T) { t.Errorf("command %s failed: %v\n%s", cmd, err, out) } } + +func TestReadOnly(t *testing.T) { + // Test that read-only data is indeed read-only. + testenv.MustHaveGoBuild(t) + + t.Parallel() + + src := filepath.Join("testdata", "testRO", "x.go") + cmd := exec.Command(testenv.GoToolPath(t), "run", src) + out, err := cmd.CombinedOutput() + if err == nil { + t.Errorf("running test program did not fail. output:\n%s", out) + } +} diff --git a/src/cmd/link/testdata/testRO/x.go b/src/cmd/link/testdata/testRO/x.go new file mode 100644 index 0000000000..d77db6d563 --- /dev/null +++ b/src/cmd/link/testdata/testRO/x.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that read-only data is indeed read-only. This +// program attempts to modify read-only data, and it +// should fail. 
+ +package main + +import "unsafe" + +var s = "hello" + +func main() { + println(s) + *(*struct { + p *byte + l int + })(unsafe.Pointer(&s)).p = 'H' + println(s) +} -- cgit v1.2.3-54-g00ecf From b459bc8152210c14b66e23351690ff774cd68d2c Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Fri, 11 Sep 2020 09:31:30 -0400 Subject: cmd/go: make 'go mod download' update go.sum after downloads are complete 'go mod download' calls WriteGoMod once via modload.ListModules when it loads the build list. This saves sums for go.mod files needed by MVS, but the write occurs before any zip files are downloaded. With this change, 'go mod download' calls WriteGoMod again (and thus, modfetch.WriteGoSum) after downloading and verifying module zip files, so the sums of the zip files will be saved, too. Fixes #41341 Change-Id: I7d56754aa255256ed45fd93cb154c2e6ea5f45a9 Reviewed-on: https://go-review.googlesource.com/c/go/+/254357 Run-TryBot: Jay Conrod TryBot-Result: Gobot Gobot Reviewed-by: Bryan C. Mills --- src/cmd/go/internal/modcmd/download.go | 3 +++ src/cmd/go/testdata/script/mod_download.txt | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go index 0ea5638e70..6227fd9f33 100644 --- a/src/cmd/go/internal/modcmd/download.go +++ b/src/cmd/go/internal/modcmd/download.go @@ -187,4 +187,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { } base.ExitIfErrors() } + + // Update go.mod and especially go.sum if needed. 
+ modload.WriteGoMod() } diff --git a/src/cmd/go/testdata/script/mod_download.txt b/src/cmd/go/testdata/script/mod_download.txt index b9bf67cad5..c53bbe4567 100644 --- a/src/cmd/go/testdata/script/mod_download.txt +++ b/src/cmd/go/testdata/script/mod_download.txt @@ -107,6 +107,14 @@ stderr '^go mod download: skipping argument m that resolves to the main module\n go mod download m@latest stderr '^go mod download: skipping argument m@latest that resolves to the main module\n' +# download updates go.mod and populates go.sum +cd update +! exists go.sum +go mod download +grep '^rsc.io/sampler v1.3.0 ' go.sum +go list -m rsc.io/sampler +stdout '^rsc.io/sampler v1.3.0$' + # allow go mod download without go.mod env GO111MODULE=auto rm go.mod @@ -122,3 +130,13 @@ stderr 'get '$GOPROXY -- go.mod -- module m + +-- update/go.mod -- +module m + +go 1.16 + +require ( + rsc.io/quote v1.5.2 + rsc.io/sampler v1.2.1 // older version than in build list +) -- cgit v1.2.3-54-g00ecf From 86ee84c40e2770ff189b6a4d835849107d9c749a Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Wed, 2 Sep 2020 13:25:11 -0400 Subject: cmd/go: move get.Insecure to cfg.Insecure to break dependency cycle Change-Id: If9c73ff5adc7e080a48ecc6b35ce40822193d66f Reviewed-on: https://go-review.googlesource.com/c/go/+/254363 Run-TryBot: Jay Conrod Reviewed-by: Bryan C. 
Mills Reviewed-by: Michael Matloob TryBot-Result: Gobot Gobot --- src/cmd/go/internal/cfg/cfg.go | 2 ++ src/cmd/go/internal/get/get.go | 6 ++---- src/cmd/go/internal/modfetch/insecure.go | 3 +-- src/cmd/go/internal/modfetch/sumdb.go | 3 +-- src/cmd/go/internal/modget/get.go | 6 +++--- 5 files changed, 9 insertions(+), 11 deletions(-) diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go index f874b880a6..9bf1db73ef 100644 --- a/src/cmd/go/internal/cfg/cfg.go +++ b/src/cmd/go/internal/cfg/cfg.go @@ -49,6 +49,8 @@ var ( ModCacheRW bool // -modcacherw flag ModFile string // -modfile flag + Insecure bool // -insecure flag + CmdName string // "build", "install", "list", "mod tidy", etc. DebugActiongraph string // -debug-actiongraph flag (undocumented, unstable) diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go index d0be3fe1e7..9e4825eb37 100644 --- a/src/cmd/go/internal/get/get.go +++ b/src/cmd/go/internal/get/get.go @@ -108,14 +108,12 @@ var ( getT = CmdGet.Flag.Bool("t", false, "") getU = CmdGet.Flag.Bool("u", false, "") getFix = CmdGet.Flag.Bool("fix", false, "") - - Insecure bool ) func init() { work.AddBuildFlags(CmdGet, work.OmitModFlag|work.OmitModCommonFlags) CmdGet.Run = runGet // break init loop - CmdGet.Flag.BoolVar(&Insecure, "insecure", Insecure, "") + CmdGet.Flag.BoolVar(&cfg.Insecure, "insecure", cfg.Insecure, "") } func runGet(ctx context.Context, cmd *base.Command, args []string) { @@ -431,7 +429,7 @@ func downloadPackage(p *load.Package) error { return fmt.Errorf("%s: invalid import path: %v", p.ImportPath, err) } security := web.SecureOnly - if Insecure || module.MatchPrefixPatterns(cfg.GOINSECURE, importPrefix) { + if cfg.Insecure || module.MatchPrefixPatterns(cfg.GOINSECURE, importPrefix) { security = web.Insecure } diff --git a/src/cmd/go/internal/modfetch/insecure.go b/src/cmd/go/internal/modfetch/insecure.go index b692669cba..012d05f29d 100644 --- a/src/cmd/go/internal/modfetch/insecure.go +++ 
b/src/cmd/go/internal/modfetch/insecure.go @@ -6,12 +6,11 @@ package modfetch import ( "cmd/go/internal/cfg" - "cmd/go/internal/get" "golang.org/x/mod/module" ) // allowInsecure reports whether we are allowed to fetch this path in an insecure manner. func allowInsecure(path string) bool { - return get.Insecure || module.MatchPrefixPatterns(cfg.GOINSECURE, path) + return cfg.Insecure || module.MatchPrefixPatterns(cfg.GOINSECURE, path) } diff --git a/src/cmd/go/internal/modfetch/sumdb.go b/src/cmd/go/internal/modfetch/sumdb.go index 783c4a433b..47a2571531 100644 --- a/src/cmd/go/internal/modfetch/sumdb.go +++ b/src/cmd/go/internal/modfetch/sumdb.go @@ -22,7 +22,6 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cfg" - "cmd/go/internal/get" "cmd/go/internal/lockedfile" "cmd/go/internal/web" @@ -33,7 +32,7 @@ import ( // useSumDB reports whether to use the Go checksum database for the given module. func useSumDB(mod module.Version) bool { - return cfg.GOSUMDB != "off" && !get.Insecure && !module.MatchPrefixPatterns(cfg.GONOSUMDB, mod.Path) + return cfg.GOSUMDB != "off" && !cfg.Insecure && !module.MatchPrefixPatterns(cfg.GONOSUMDB, mod.Path) } // lookupSumDB returns the Go checksum database's go.sum lines for the given module, diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index a2a8287d84..829cfe055a 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -17,7 +17,7 @@ import ( "sync" "cmd/go/internal/base" - "cmd/go/internal/get" + "cmd/go/internal/cfg" "cmd/go/internal/imports" "cmd/go/internal/load" "cmd/go/internal/modload" @@ -181,7 +181,7 @@ var ( getM = CmdGet.Flag.Bool("m", false, "") getT = CmdGet.Flag.Bool("t", false, "") getU upgradeFlag - // -insecure is get.Insecure + // -insecure is cfg.Insecure // -v is cfg.BuildV ) @@ -206,7 +206,7 @@ func (v *upgradeFlag) String() string { return "" } func init() { work.AddBuildFlags(CmdGet, work.OmitModFlag) CmdGet.Run = runGet // break init 
loop - CmdGet.Flag.BoolVar(&get.Insecure, "insecure", get.Insecure, "") + CmdGet.Flag.BoolVar(&cfg.Insecure, "insecure", cfg.Insecure, "") CmdGet.Flag.Var(&getU, "u", "") } -- cgit v1.2.3-54-g00ecf From 07c1788357cfe6a4ee5f6f6a54d4fe9f579fa844 Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Wed, 2 Sep 2020 14:53:02 -0400 Subject: cmd/go: move repository resolution from internal/get to internal/vcs This is a refactoring intended to break the dependency from internal/modfetch to internal/get. No change in functionality is intended. Change-Id: If51aba7139cc0b62ecc9ba454c055c99e8f36f0f Reviewed-on: https://go-review.googlesource.com/c/go/+/254364 Run-TryBot: Jay Conrod Reviewed-by: Bryan C. Mills Reviewed-by: Michael Matloob TryBot-Result: Gobot Gobot --- src/cmd/go/internal/get/discovery.go | 97 --- src/cmd/go/internal/get/get.go | 35 +- src/cmd/go/internal/get/pkg_test.go | 131 ---- src/cmd/go/internal/get/vcs.go | 1182 ---------------------------- src/cmd/go/internal/get/vcs_test.go | 475 ------------ src/cmd/go/internal/modfetch/repo.go | 12 +- src/cmd/go/internal/str/str_test.go | 27 + src/cmd/go/internal/vcs/discovery.go | 97 +++ src/cmd/go/internal/vcs/discovery_test.go | 110 +++ src/cmd/go/internal/vcs/vcs.go | 1187 +++++++++++++++++++++++++++++ src/cmd/go/internal/vcs/vcs_test.go | 475 ++++++++++++ 11 files changed, 1920 insertions(+), 1908 deletions(-) delete mode 100644 src/cmd/go/internal/get/discovery.go delete mode 100644 src/cmd/go/internal/get/pkg_test.go delete mode 100644 src/cmd/go/internal/get/vcs.go delete mode 100644 src/cmd/go/internal/get/vcs_test.go create mode 100644 src/cmd/go/internal/str/str_test.go create mode 100644 src/cmd/go/internal/vcs/discovery.go create mode 100644 src/cmd/go/internal/vcs/discovery_test.go create mode 100644 src/cmd/go/internal/vcs/vcs.go create mode 100644 src/cmd/go/internal/vcs/vcs_test.go diff --git a/src/cmd/go/internal/get/discovery.go b/src/cmd/go/internal/get/discovery.go deleted file mode 100644 index 
afa6ef455f..0000000000 --- a/src/cmd/go/internal/get/discovery.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package get - -import ( - "encoding/xml" - "fmt" - "io" - "strings" -) - -// charsetReader returns a reader that converts from the given charset to UTF-8. -// Currently it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful -// error which is printed by go get, so the user can find why the package -// wasn't downloaded if the encoding is not supported. Note that, in -// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters -// greater than 0x7f are not rejected). -func charsetReader(charset string, input io.Reader) (io.Reader, error) { - switch strings.ToLower(charset) { - case "utf-8", "ascii": - return input, nil - default: - return nil, fmt.Errorf("can't decode XML document using charset %q", charset) - } -} - -// parseMetaGoImports returns meta imports from the HTML in r. -// Parsing ends at the end of the section or the beginning of the . 
-func parseMetaGoImports(r io.Reader, mod ModuleMode) ([]metaImport, error) { - d := xml.NewDecoder(r) - d.CharsetReader = charsetReader - d.Strict = false - var imports []metaImport - for { - t, err := d.RawToken() - if err != nil { - if err != io.EOF && len(imports) == 0 { - return nil, err - } - break - } - if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") { - break - } - if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") { - break - } - e, ok := t.(xml.StartElement) - if !ok || !strings.EqualFold(e.Name.Local, "meta") { - continue - } - if attrValue(e.Attr, "name") != "go-import" { - continue - } - if f := strings.Fields(attrValue(e.Attr, "content")); len(f) == 3 { - imports = append(imports, metaImport{ - Prefix: f[0], - VCS: f[1], - RepoRoot: f[2], - }) - } - } - - // Extract mod entries if we are paying attention to them. - var list []metaImport - var have map[string]bool - if mod == PreferMod { - have = make(map[string]bool) - for _, m := range imports { - if m.VCS == "mod" { - have[m.Prefix] = true - list = append(list, m) - } - } - } - - // Append non-mod entries, ignoring those superseded by a mod entry. - for _, m := range imports { - if m.VCS != "mod" && !have[m.Prefix] { - list = append(list, m) - } - } - return list, nil -} - -// attrValue returns the attribute value for the case-insensitive key -// `name', or the empty string if nothing is found. 
-func attrValue(attrs []xml.Attr, name string) string { - for _, a := range attrs { - if strings.EqualFold(a.Name.Local, name) { - return a.Value - } - } - return "" -} diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go index 9e4825eb37..3f7a66384a 100644 --- a/src/cmd/go/internal/get/get.go +++ b/src/cmd/go/internal/get/get.go @@ -18,6 +18,7 @@ import ( "cmd/go/internal/load" "cmd/go/internal/search" "cmd/go/internal/str" + "cmd/go/internal/vcs" "cmd/go/internal/web" "cmd/go/internal/work" @@ -406,7 +407,7 @@ func download(arg string, parent *load.Package, stk *load.ImportStack, mode int) // to make the first copy of or update a copy of the given package. func downloadPackage(p *load.Package) error { var ( - vcs *vcsCmd + vcsCmd *vcs.Cmd repo, rootPath string err error blindRepo bool // set if the repo has unusual configuration @@ -435,16 +436,16 @@ func downloadPackage(p *load.Package) error { if p.Internal.Build.SrcRoot != "" { // Directory exists. Look for checkout along path to src. - vcs, rootPath, err = vcsFromDir(p.Dir, p.Internal.Build.SrcRoot) + vcsCmd, rootPath, err = vcs.FromDir(p.Dir, p.Internal.Build.SrcRoot) if err != nil { return err } repo = "" // should be unused; make distinctive // Double-check where it came from. - if *getU && vcs.remoteRepo != nil { + if *getU && vcsCmd.RemoteRepo != nil { dir := filepath.Join(p.Internal.Build.SrcRoot, filepath.FromSlash(rootPath)) - remote, err := vcs.remoteRepo(vcs, dir) + remote, err := vcsCmd.RemoteRepo(vcsCmd, dir) if err != nil { // Proceed anyway. The package is present; we likely just don't understand // the repo configuration (e.g. unusual remote protocol). 
@@ -452,10 +453,10 @@ func downloadPackage(p *load.Package) error { } repo = remote if !*getF && err == nil { - if rr, err := RepoRootForImportPath(importPrefix, IgnoreMod, security); err == nil { + if rr, err := vcs.RepoRootForImportPath(importPrefix, vcs.IgnoreMod, security); err == nil { repo := rr.Repo - if rr.vcs.resolveRepo != nil { - resolved, err := rr.vcs.resolveRepo(rr.vcs, dir, repo) + if rr.VCS.ResolveRepo != nil { + resolved, err := rr.VCS.ResolveRepo(rr.VCS, dir, repo) if err == nil { repo = resolved } @@ -469,13 +470,13 @@ func downloadPackage(p *load.Package) error { } else { // Analyze the import path to determine the version control system, // repository, and the import path for the root of the repository. - rr, err := RepoRootForImportPath(importPrefix, IgnoreMod, security) + rr, err := vcs.RepoRootForImportPath(importPrefix, vcs.IgnoreMod, security) if err != nil { return err } - vcs, repo, rootPath = rr.vcs, rr.Repo, rr.Root + vcsCmd, repo, rootPath = rr.VCS, rr.Repo, rr.Root } - if !blindRepo && !vcs.isSecure(repo) && security != web.Insecure { + if !blindRepo && !vcsCmd.IsSecure(repo) && security != web.Insecure { return fmt.Errorf("cannot download, %v uses insecure protocol", repo) } @@ -498,7 +499,7 @@ func downloadPackage(p *load.Package) error { } root := filepath.Join(p.Internal.Build.SrcRoot, filepath.FromSlash(rootPath)) - if err := checkNestedVCS(vcs, root, p.Internal.Build.SrcRoot); err != nil { + if err := vcs.CheckNested(vcsCmd, root, p.Internal.Build.SrcRoot); err != nil { return err } @@ -514,7 +515,7 @@ func downloadPackage(p *load.Package) error { // Check that this is an appropriate place for the repo to be checked out. // The target directory must either not exist or have a repo checked out already. - meta := filepath.Join(root, "."+vcs.cmd) + meta := filepath.Join(root, "."+vcsCmd.Cmd) if _, err := os.Stat(meta); err != nil { // Metadata file or directory does not exist. Prepare to checkout new copy. 
// Some version control tools require the target directory not to exist. @@ -535,12 +536,12 @@ func downloadPackage(p *load.Package) error { fmt.Fprintf(os.Stderr, "created GOPATH=%s; see 'go help gopath'\n", p.Internal.Build.Root) } - if err = vcs.create(root, repo); err != nil { + if err = vcsCmd.Create(root, repo); err != nil { return err } } else { // Metadata directory does exist; download incremental updates. - if err = vcs.download(root); err != nil { + if err = vcsCmd.Download(root); err != nil { return err } } @@ -549,12 +550,12 @@ func downloadPackage(p *load.Package) error { // Do not show tag sync in -n; it's noise more than anything, // and since we're not running commands, no tag will be found. // But avoid printing nothing. - fmt.Fprintf(os.Stderr, "# cd %s; %s sync/update\n", root, vcs.cmd) + fmt.Fprintf(os.Stderr, "# cd %s; %s sync/update\n", root, vcsCmd.Cmd) return nil } // Select and sync to appropriate version of the repository. - tags, err := vcs.tags(root) + tags, err := vcsCmd.Tags(root) if err != nil { return err } @@ -562,7 +563,7 @@ func downloadPackage(p *load.Package) error { if i := strings.Index(vers, " "); i >= 0 { vers = vers[:i] } - if err := vcs.tagSync(root, selectTag(vers, tags)); err != nil { + if err := vcsCmd.TagSync(root, selectTag(vers, tags)); err != nil { return err } diff --git a/src/cmd/go/internal/get/pkg_test.go b/src/cmd/go/internal/get/pkg_test.go deleted file mode 100644 index fc6a179c2e..0000000000 --- a/src/cmd/go/internal/get/pkg_test.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package get - -import ( - "cmd/go/internal/str" - "reflect" - "strings" - "testing" -) - -var foldDupTests = []struct { - list []string - f1, f2 string -}{ - {str.StringList("math/rand", "math/big"), "", ""}, - {str.StringList("math", "strings"), "", ""}, - {str.StringList("strings"), "", ""}, - {str.StringList("strings", "strings"), "strings", "strings"}, - {str.StringList("Rand", "rand", "math", "math/rand", "math/Rand"), "Rand", "rand"}, -} - -func TestFoldDup(t *testing.T) { - for _, tt := range foldDupTests { - f1, f2 := str.FoldDup(tt.list) - if f1 != tt.f1 || f2 != tt.f2 { - t.Errorf("foldDup(%q) = %q, %q, want %q, %q", tt.list, f1, f2, tt.f1, tt.f2) - } - } -} - -var parseMetaGoImportsTests = []struct { - in string - mod ModuleMode - out []metaImport -}{ - { - ``, - IgnoreMod, - []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}}, - }, - { - ` - `, - IgnoreMod, - []metaImport{ - {"foo/bar", "git", "https://github.com/rsc/foo/bar"}, - {"baz/quux", "git", "http://github.com/rsc/baz/quux"}, - }, - }, - { - ` - `, - IgnoreMod, - []metaImport{ - {"foo/bar", "git", "https://github.com/rsc/foo/bar"}, - }, - }, - { - ` - `, - IgnoreMod, - []metaImport{ - {"foo/bar", "git", "https://github.com/rsc/foo/bar"}, - }, - }, - { - ` - `, - PreferMod, - []metaImport{ - {"foo/bar", "mod", "http://github.com/rsc/baz/quux"}, - }, - }, - { - ` - - `, - IgnoreMod, - []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}}, - }, - { - ` - `, - IgnoreMod, - []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}}, - }, - { - ``, - IgnoreMod, - []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}}, - }, - { - // XML doesn't like
. - `Page Not Found
DRAFT
`, - IgnoreMod, - []metaImport{{"chitin.io/chitin", "git", "https://github.com/chitin-io/chitin"}}, - }, - { - ` - - `, - IgnoreMod, - []metaImport{{"myitcv.io", "git", "https://github.com/myitcv/x"}}, - }, - { - ` - - `, - PreferMod, - []metaImport{ - {"myitcv.io/blah2", "mod", "https://raw.githubusercontent.com/myitcv/pubx/master"}, - {"myitcv.io", "git", "https://github.com/myitcv/x"}, - }, - }, -} - -func TestParseMetaGoImports(t *testing.T) { - for i, tt := range parseMetaGoImportsTests { - out, err := parseMetaGoImports(strings.NewReader(tt.in), tt.mod) - if err != nil { - t.Errorf("test#%d: %v", i, err) - continue - } - if !reflect.DeepEqual(out, tt.out) { - t.Errorf("test#%d:\n\thave %q\n\twant %q", i, out, tt.out) - } - } -} diff --git a/src/cmd/go/internal/get/vcs.go b/src/cmd/go/internal/get/vcs.go deleted file mode 100644 index 24c32935d0..0000000000 --- a/src/cmd/go/internal/get/vcs.go +++ /dev/null @@ -1,1182 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package get - -import ( - "encoding/json" - "errors" - "fmt" - "internal/lazyregexp" - "internal/singleflight" - "log" - urlpkg "net/url" - "os" - "os/exec" - "path/filepath" - "regexp" - "strings" - "sync" - - "cmd/go/internal/base" - "cmd/go/internal/cfg" - "cmd/go/internal/load" - "cmd/go/internal/web" -) - -// A vcsCmd describes how to use a version control system -// like Mercurial, Git, or Subversion. 
-type vcsCmd struct { - name string - cmd string // name of binary to invoke command - - createCmd []string // commands to download a fresh copy of a repository - downloadCmd []string // commands to download updates into an existing repository - - tagCmd []tagCmd // commands to list tags - tagLookupCmd []tagCmd // commands to lookup tags before running tagSyncCmd - tagSyncCmd []string // commands to sync to specific tag - tagSyncDefault []string // commands to sync to default tag - - scheme []string - pingCmd string - - remoteRepo func(v *vcsCmd, rootDir string) (remoteRepo string, err error) - resolveRepo func(v *vcsCmd, rootDir, remoteRepo string) (realRepo string, err error) -} - -var defaultSecureScheme = map[string]bool{ - "https": true, - "git+ssh": true, - "bzr+ssh": true, - "svn+ssh": true, - "ssh": true, -} - -func (v *vcsCmd) isSecure(repo string) bool { - u, err := urlpkg.Parse(repo) - if err != nil { - // If repo is not a URL, it's not secure. - return false - } - return v.isSecureScheme(u.Scheme) -} - -func (v *vcsCmd) isSecureScheme(scheme string) bool { - switch v.cmd { - case "git": - // GIT_ALLOW_PROTOCOL is an environment variable defined by Git. It is a - // colon-separated list of schemes that are allowed to be used with git - // fetch/clone. Any scheme not mentioned will be considered insecure. - if allow := os.Getenv("GIT_ALLOW_PROTOCOL"); allow != "" { - for _, s := range strings.Split(allow, ":") { - if s == scheme { - return true - } - } - return false - } - } - return defaultSecureScheme[scheme] -} - -// A tagCmd describes a command to list available tags -// that can be passed to tagSyncCmd. -type tagCmd struct { - cmd string // command to list tags - pattern string // regexp to extract tags from list -} - -// vcsList lists the known version control systems -var vcsList = []*vcsCmd{ - vcsHg, - vcsGit, - vcsSvn, - vcsBzr, - vcsFossil, -} - -// vcsByCmd returns the version control system for the given -// command name (hg, git, svn, bzr). 
-func vcsByCmd(cmd string) *vcsCmd { - for _, vcs := range vcsList { - if vcs.cmd == cmd { - return vcs - } - } - return nil -} - -// vcsHg describes how to use Mercurial. -var vcsHg = &vcsCmd{ - name: "Mercurial", - cmd: "hg", - - createCmd: []string{"clone -U -- {repo} {dir}"}, - downloadCmd: []string{"pull"}, - - // We allow both tag and branch names as 'tags' - // for selecting a version. This lets people have - // a go.release.r60 branch and a go1 branch - // and make changes in both, without constantly - // editing .hgtags. - tagCmd: []tagCmd{ - {"tags", `^(\S+)`}, - {"branches", `^(\S+)`}, - }, - tagSyncCmd: []string{"update -r {tag}"}, - tagSyncDefault: []string{"update default"}, - - scheme: []string{"https", "http", "ssh"}, - pingCmd: "identify -- {scheme}://{repo}", - remoteRepo: hgRemoteRepo, -} - -func hgRemoteRepo(vcsHg *vcsCmd, rootDir string) (remoteRepo string, err error) { - out, err := vcsHg.runOutput(rootDir, "paths default") - if err != nil { - return "", err - } - return strings.TrimSpace(string(out)), nil -} - -// vcsGit describes how to use Git. -var vcsGit = &vcsCmd{ - name: "Git", - cmd: "git", - - createCmd: []string{"clone -- {repo} {dir}", "-go-internal-cd {dir} submodule update --init --recursive"}, - downloadCmd: []string{"pull --ff-only", "submodule update --init --recursive"}, - - tagCmd: []tagCmd{ - // tags/xxx matches a git tag named xxx - // origin/xxx matches a git branch named xxx on the default remote repository - {"show-ref", `(?:tags|origin)/(\S+)$`}, - }, - tagLookupCmd: []tagCmd{ - {"show-ref tags/{tag} origin/{tag}", `((?:tags|origin)/\S+)$`}, - }, - tagSyncCmd: []string{"checkout {tag}", "submodule update --init --recursive"}, - // both createCmd and downloadCmd update the working dir. - // No need to do more here. We used to 'checkout master' - // but that doesn't work if the default branch is not named master. - // DO NOT add 'checkout master' here. - // See golang.org/issue/9032. 
- tagSyncDefault: []string{"submodule update --init --recursive"}, - - scheme: []string{"git", "https", "http", "git+ssh", "ssh"}, - - // Leave out the '--' separator in the ls-remote command: git 2.7.4 does not - // support such a separator for that command, and this use should be safe - // without it because the {scheme} value comes from the predefined list above. - // See golang.org/issue/33836. - pingCmd: "ls-remote {scheme}://{repo}", - - remoteRepo: gitRemoteRepo, -} - -// scpSyntaxRe matches the SCP-like addresses used by Git to access -// repositories by SSH. -var scpSyntaxRe = lazyregexp.New(`^([a-zA-Z0-9_]+)@([a-zA-Z0-9._-]+):(.*)$`) - -func gitRemoteRepo(vcsGit *vcsCmd, rootDir string) (remoteRepo string, err error) { - cmd := "config remote.origin.url" - errParse := errors.New("unable to parse output of git " + cmd) - errRemoteOriginNotFound := errors.New("remote origin not found") - outb, err := vcsGit.run1(rootDir, cmd, nil, false) - if err != nil { - // if it doesn't output any message, it means the config argument is correct, - // but the config value itself doesn't exist - if outb != nil && len(outb) == 0 { - return "", errRemoteOriginNotFound - } - return "", err - } - out := strings.TrimSpace(string(outb)) - - var repoURL *urlpkg.URL - if m := scpSyntaxRe.FindStringSubmatch(out); m != nil { - // Match SCP-like syntax and convert it to a URL. - // Eg, "git@github.com:user/repo" becomes - // "ssh://git@github.com/user/repo". - repoURL = &urlpkg.URL{ - Scheme: "ssh", - User: urlpkg.User(m[1]), - Host: m[2], - Path: m[3], - } - } else { - repoURL, err = urlpkg.Parse(out) - if err != nil { - return "", err - } - } - - // Iterate over insecure schemes too, because this function simply - // reports the state of the repo. If we can't see insecure schemes then - // we can't report the actual repo URL. 
- for _, s := range vcsGit.scheme { - if repoURL.Scheme == s { - return repoURL.String(), nil - } - } - return "", errParse -} - -// vcsBzr describes how to use Bazaar. -var vcsBzr = &vcsCmd{ - name: "Bazaar", - cmd: "bzr", - - createCmd: []string{"branch -- {repo} {dir}"}, - - // Without --overwrite bzr will not pull tags that changed. - // Replace by --overwrite-tags after http://pad.lv/681792 goes in. - downloadCmd: []string{"pull --overwrite"}, - - tagCmd: []tagCmd{{"tags", `^(\S+)`}}, - tagSyncCmd: []string{"update -r {tag}"}, - tagSyncDefault: []string{"update -r revno:-1"}, - - scheme: []string{"https", "http", "bzr", "bzr+ssh"}, - pingCmd: "info -- {scheme}://{repo}", - remoteRepo: bzrRemoteRepo, - resolveRepo: bzrResolveRepo, -} - -func bzrRemoteRepo(vcsBzr *vcsCmd, rootDir string) (remoteRepo string, err error) { - outb, err := vcsBzr.runOutput(rootDir, "config parent_location") - if err != nil { - return "", err - } - return strings.TrimSpace(string(outb)), nil -} - -func bzrResolveRepo(vcsBzr *vcsCmd, rootDir, remoteRepo string) (realRepo string, err error) { - outb, err := vcsBzr.runOutput(rootDir, "info "+remoteRepo) - if err != nil { - return "", err - } - out := string(outb) - - // Expect: - // ... - // (branch root|repository branch): - // ... - - found := false - for _, prefix := range []string{"\n branch root: ", "\n repository branch: "} { - i := strings.Index(out, prefix) - if i >= 0 { - out = out[i+len(prefix):] - found = true - break - } - } - if !found { - return "", fmt.Errorf("unable to parse output of bzr info") - } - - i := strings.Index(out, "\n") - if i < 0 { - return "", fmt.Errorf("unable to parse output of bzr info") - } - out = out[:i] - return strings.TrimSpace(out), nil -} - -// vcsSvn describes how to use Subversion. -var vcsSvn = &vcsCmd{ - name: "Subversion", - cmd: "svn", - - createCmd: []string{"checkout -- {repo} {dir}"}, - downloadCmd: []string{"update"}, - - // There is no tag command in subversion. 
- // The branch information is all in the path names. - - scheme: []string{"https", "http", "svn", "svn+ssh"}, - pingCmd: "info -- {scheme}://{repo}", - remoteRepo: svnRemoteRepo, -} - -func svnRemoteRepo(vcsSvn *vcsCmd, rootDir string) (remoteRepo string, err error) { - outb, err := vcsSvn.runOutput(rootDir, "info") - if err != nil { - return "", err - } - out := string(outb) - - // Expect: - // - // ... - // URL: - // ... - // - // Note that we're not using the Repository Root line, - // because svn allows checking out subtrees. - // The URL will be the URL of the subtree (what we used with 'svn co') - // while the Repository Root may be a much higher parent. - i := strings.Index(out, "\nURL: ") - if i < 0 { - return "", fmt.Errorf("unable to parse output of svn info") - } - out = out[i+len("\nURL: "):] - i = strings.Index(out, "\n") - if i < 0 { - return "", fmt.Errorf("unable to parse output of svn info") - } - out = out[:i] - return strings.TrimSpace(out), nil -} - -// fossilRepoName is the name go get associates with a fossil repository. In the -// real world the file can be named anything. -const fossilRepoName = ".fossil" - -// vcsFossil describes how to use Fossil (fossil-scm.org) -var vcsFossil = &vcsCmd{ - name: "Fossil", - cmd: "fossil", - - createCmd: []string{"-go-internal-mkdir {dir} clone -- {repo} " + filepath.Join("{dir}", fossilRepoName), "-go-internal-cd {dir} open .fossil"}, - downloadCmd: []string{"up"}, - - tagCmd: []tagCmd{{"tag ls", `(.*)`}}, - tagSyncCmd: []string{"up tag:{tag}"}, - tagSyncDefault: []string{"up trunk"}, - - scheme: []string{"https", "http"}, - remoteRepo: fossilRemoteRepo, -} - -func fossilRemoteRepo(vcsFossil *vcsCmd, rootDir string) (remoteRepo string, err error) { - out, err := vcsFossil.runOutput(rootDir, "remote-url") - if err != nil { - return "", err - } - return strings.TrimSpace(string(out)), nil -} - -func (v *vcsCmd) String() string { - return v.name -} - -// run runs the command line cmd in the given directory. 
-// keyval is a list of key, value pairs. run expands -// instances of {key} in cmd into value, but only after -// splitting cmd into individual arguments. -// If an error occurs, run prints the command line and the -// command's combined stdout+stderr to standard error. -// Otherwise run discards the command's output. -func (v *vcsCmd) run(dir string, cmd string, keyval ...string) error { - _, err := v.run1(dir, cmd, keyval, true) - return err -} - -// runVerboseOnly is like run but only generates error output to standard error in verbose mode. -func (v *vcsCmd) runVerboseOnly(dir string, cmd string, keyval ...string) error { - _, err := v.run1(dir, cmd, keyval, false) - return err -} - -// runOutput is like run but returns the output of the command. -func (v *vcsCmd) runOutput(dir string, cmd string, keyval ...string) ([]byte, error) { - return v.run1(dir, cmd, keyval, true) -} - -// run1 is the generalized implementation of run and runOutput. -func (v *vcsCmd) run1(dir string, cmdline string, keyval []string, verbose bool) ([]byte, error) { - m := make(map[string]string) - for i := 0; i < len(keyval); i += 2 { - m[keyval[i]] = keyval[i+1] - } - args := strings.Fields(cmdline) - for i, arg := range args { - args[i] = expand(m, arg) - } - - if len(args) >= 2 && args[0] == "-go-internal-mkdir" { - var err error - if filepath.IsAbs(args[1]) { - err = os.Mkdir(args[1], os.ModePerm) - } else { - err = os.Mkdir(filepath.Join(dir, args[1]), os.ModePerm) - } - if err != nil { - return nil, err - } - args = args[2:] - } - - if len(args) >= 2 && args[0] == "-go-internal-cd" { - if filepath.IsAbs(args[1]) { - dir = args[1] - } else { - dir = filepath.Join(dir, args[1]) - } - args = args[2:] - } - - _, err := exec.LookPath(v.cmd) - if err != nil { - fmt.Fprintf(os.Stderr, - "go: missing %s command. See https://golang.org/s/gogetcmd\n", - v.name) - return nil, err - } - - cmd := exec.Command(v.cmd, args...) 
- cmd.Dir = dir - cmd.Env = base.AppendPWD(os.Environ(), cmd.Dir) - if cfg.BuildX { - fmt.Fprintf(os.Stderr, "cd %s\n", dir) - fmt.Fprintf(os.Stderr, "%s %s\n", v.cmd, strings.Join(args, " ")) - } - out, err := cmd.Output() - if err != nil { - if verbose || cfg.BuildV { - fmt.Fprintf(os.Stderr, "# cd %s; %s %s\n", dir, v.cmd, strings.Join(args, " ")) - if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { - os.Stderr.Write(ee.Stderr) - } else { - fmt.Fprintf(os.Stderr, err.Error()) - } - } - } - return out, err -} - -// ping pings to determine scheme to use. -func (v *vcsCmd) ping(scheme, repo string) error { - return v.runVerboseOnly(".", v.pingCmd, "scheme", scheme, "repo", repo) -} - -// create creates a new copy of repo in dir. -// The parent of dir must exist; dir must not. -func (v *vcsCmd) create(dir, repo string) error { - for _, cmd := range v.createCmd { - if err := v.run(".", cmd, "dir", dir, "repo", repo); err != nil { - return err - } - } - return nil -} - -// download downloads any new changes for the repo in dir. -func (v *vcsCmd) download(dir string) error { - for _, cmd := range v.downloadCmd { - if err := v.run(dir, cmd); err != nil { - return err - } - } - return nil -} - -// tags returns the list of available tags for the repo in dir. -func (v *vcsCmd) tags(dir string) ([]string, error) { - var tags []string - for _, tc := range v.tagCmd { - out, err := v.runOutput(dir, tc.cmd) - if err != nil { - return nil, err - } - re := regexp.MustCompile(`(?m-s)` + tc.pattern) - for _, m := range re.FindAllStringSubmatch(string(out), -1) { - tags = append(tags, m[1]) - } - } - return tags, nil -} - -// tagSync syncs the repo in dir to the named tag, -// which either is a tag returned by tags or is v.tagDefault. 
-func (v *vcsCmd) tagSync(dir, tag string) error { - if v.tagSyncCmd == nil { - return nil - } - if tag != "" { - for _, tc := range v.tagLookupCmd { - out, err := v.runOutput(dir, tc.cmd, "tag", tag) - if err != nil { - return err - } - re := regexp.MustCompile(`(?m-s)` + tc.pattern) - m := re.FindStringSubmatch(string(out)) - if len(m) > 1 { - tag = m[1] - break - } - } - } - - if tag == "" && v.tagSyncDefault != nil { - for _, cmd := range v.tagSyncDefault { - if err := v.run(dir, cmd); err != nil { - return err - } - } - return nil - } - - for _, cmd := range v.tagSyncCmd { - if err := v.run(dir, cmd, "tag", tag); err != nil { - return err - } - } - return nil -} - -// A vcsPath describes how to convert an import path into a -// version control system and repository name. -type vcsPath struct { - prefix string // prefix this description applies to - regexp *lazyregexp.Regexp // compiled pattern for import path - repo string // repository to use (expand with match of re) - vcs string // version control system to use (expand with match of re) - check func(match map[string]string) error // additional checks - schemelessRepo bool // if true, the repo pattern lacks a scheme -} - -// vcsFromDir inspects dir and its parents to determine the -// version control system and code repository to use. -// On return, root is the import path -// corresponding to the root of the repository. -func vcsFromDir(dir, srcRoot string) (vcs *vcsCmd, root string, err error) { - // Clean and double-check that dir is in (a subdirectory of) srcRoot. 
- dir = filepath.Clean(dir) - srcRoot = filepath.Clean(srcRoot) - if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator { - return nil, "", fmt.Errorf("directory %q is outside source root %q", dir, srcRoot) - } - - var vcsRet *vcsCmd - var rootRet string - - origDir := dir - for len(dir) > len(srcRoot) { - for _, vcs := range vcsList { - if _, err := os.Stat(filepath.Join(dir, "."+vcs.cmd)); err == nil { - root := filepath.ToSlash(dir[len(srcRoot)+1:]) - // Record first VCS we find, but keep looking, - // to detect mistakes like one kind of VCS inside another. - if vcsRet == nil { - vcsRet = vcs - rootRet = root - continue - } - // Allow .git inside .git, which can arise due to submodules. - if vcsRet == vcs && vcs.cmd == "git" { - continue - } - // Otherwise, we have one VCS inside a different VCS. - return nil, "", fmt.Errorf("directory %q uses %s, but parent %q uses %s", - filepath.Join(srcRoot, rootRet), vcsRet.cmd, filepath.Join(srcRoot, root), vcs.cmd) - } - } - - // Move to parent. - ndir := filepath.Dir(dir) - if len(ndir) >= len(dir) { - // Shouldn't happen, but just in case, stop. - break - } - dir = ndir - } - - if vcsRet != nil { - return vcsRet, rootRet, nil - } - - return nil, "", fmt.Errorf("directory %q is not using a known version control system", origDir) -} - -// checkNestedVCS checks for an incorrectly-nested VCS-inside-VCS -// situation for dir, checking parents up until srcRoot. -func checkNestedVCS(vcs *vcsCmd, dir, srcRoot string) error { - if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator { - return fmt.Errorf("directory %q is outside source root %q", dir, srcRoot) - } - - otherDir := dir - for len(otherDir) > len(srcRoot) { - for _, otherVCS := range vcsList { - if _, err := os.Stat(filepath.Join(otherDir, "."+otherVCS.cmd)); err == nil { - // Allow expected vcs in original dir. - if otherDir == dir && otherVCS == vcs { - continue - } - // Allow .git inside .git, which can arise due to submodules. 
- if otherVCS == vcs && vcs.cmd == "git" { - continue - } - // Otherwise, we have one VCS inside a different VCS. - return fmt.Errorf("directory %q uses %s, but parent %q uses %s", dir, vcs.cmd, otherDir, otherVCS.cmd) - } - } - // Move to parent. - newDir := filepath.Dir(otherDir) - if len(newDir) >= len(otherDir) { - // Shouldn't happen, but just in case, stop. - break - } - otherDir = newDir - } - - return nil -} - -// RepoRoot describes the repository root for a tree of source code. -type RepoRoot struct { - Repo string // repository URL, including scheme - Root string // import path corresponding to root of repo - IsCustom bool // defined by served tags (as opposed to hard-coded pattern) - VCS string // vcs type ("mod", "git", ...) - - vcs *vcsCmd // internal: vcs command access -} - -func httpPrefix(s string) string { - for _, prefix := range [...]string{"http:", "https:"} { - if strings.HasPrefix(s, prefix) { - return prefix - } - } - return "" -} - -// ModuleMode specifies whether to prefer modules when looking up code sources. -type ModuleMode int - -const ( - IgnoreMod ModuleMode = iota - PreferMod -) - -// RepoRootForImportPath analyzes importPath to determine the -// version control system, and code repository to use. -func RepoRootForImportPath(importPath string, mod ModuleMode, security web.SecurityMode) (*RepoRoot, error) { - rr, err := repoRootFromVCSPaths(importPath, security, vcsPaths) - if err == errUnknownSite { - rr, err = repoRootForImportDynamic(importPath, mod, security) - if err != nil { - err = load.ImportErrorf(importPath, "unrecognized import path %q: %v", importPath, err) - } - } - if err != nil { - rr1, err1 := repoRootFromVCSPaths(importPath, security, vcsPathsAfterDynamic) - if err1 == nil { - rr = rr1 - err = nil - } - } - - // Should have been taken care of above, but make sure. - if err == nil && strings.Contains(importPath, "...") && strings.Contains(rr.Root, "...") { - // Do not allow wildcards in the repo root. 
- rr = nil - err = load.ImportErrorf(importPath, "cannot expand ... in %q", importPath) - } - return rr, err -} - -var errUnknownSite = errors.New("dynamic lookup required to find mapping") - -// repoRootFromVCSPaths attempts to map importPath to a repoRoot -// using the mappings defined in vcsPaths. -func repoRootFromVCSPaths(importPath string, security web.SecurityMode, vcsPaths []*vcsPath) (*RepoRoot, error) { - // A common error is to use https://packagepath because that's what - // hg and git require. Diagnose this helpfully. - if prefix := httpPrefix(importPath); prefix != "" { - // The importPath has been cleaned, so has only one slash. The pattern - // ignores the slashes; the error message puts them back on the RHS at least. - return nil, fmt.Errorf("%q not allowed in import path", prefix+"//") - } - for _, srv := range vcsPaths { - if !strings.HasPrefix(importPath, srv.prefix) { - continue - } - m := srv.regexp.FindStringSubmatch(importPath) - if m == nil { - if srv.prefix != "" { - return nil, load.ImportErrorf(importPath, "invalid %s import path %q", srv.prefix, importPath) - } - continue - } - - // Build map of named subexpression matches for expand. - match := map[string]string{ - "prefix": srv.prefix, - "import": importPath, - } - for i, name := range srv.regexp.SubexpNames() { - if name != "" && match[name] == "" { - match[name] = m[i] - } - } - if srv.vcs != "" { - match["vcs"] = expand(match, srv.vcs) - } - if srv.repo != "" { - match["repo"] = expand(match, srv.repo) - } - if srv.check != nil { - if err := srv.check(match); err != nil { - return nil, err - } - } - vcs := vcsByCmd(match["vcs"]) - if vcs == nil { - return nil, fmt.Errorf("unknown version control system %q", match["vcs"]) - } - var repoURL string - if !srv.schemelessRepo { - repoURL = match["repo"] - } else { - scheme := vcs.scheme[0] // default to first scheme - repo := match["repo"] - if vcs.pingCmd != "" { - // If we know how to test schemes, scan to find one. 
- for _, s := range vcs.scheme { - if security == web.SecureOnly && !vcs.isSecureScheme(s) { - continue - } - if vcs.ping(s, repo) == nil { - scheme = s - break - } - } - } - repoURL = scheme + "://" + repo - } - rr := &RepoRoot{ - Repo: repoURL, - Root: match["root"], - VCS: vcs.cmd, - vcs: vcs, - } - return rr, nil - } - return nil, errUnknownSite -} - -// urlForImportPath returns a partially-populated URL for the given Go import path. -// -// The URL leaves the Scheme field blank so that web.Get will try any scheme -// allowed by the selected security mode. -func urlForImportPath(importPath string) (*urlpkg.URL, error) { - slash := strings.Index(importPath, "/") - if slash < 0 { - slash = len(importPath) - } - host, path := importPath[:slash], importPath[slash:] - if !strings.Contains(host, ".") { - return nil, errors.New("import path does not begin with hostname") - } - if len(path) == 0 { - path = "/" - } - return &urlpkg.URL{Host: host, Path: path, RawQuery: "go-get=1"}, nil -} - -// repoRootForImportDynamic finds a *RepoRoot for a custom domain that's not -// statically known by repoRootForImportPathStatic. -// -// This handles custom import paths like "name.tld/pkg/foo" or just "name.tld". -func repoRootForImportDynamic(importPath string, mod ModuleMode, security web.SecurityMode) (*RepoRoot, error) { - url, err := urlForImportPath(importPath) - if err != nil { - return nil, err - } - resp, err := web.Get(security, url) - if err != nil { - msg := "https fetch: %v" - if security == web.Insecure { - msg = "http/" + msg - } - return nil, fmt.Errorf(msg, err) - } - body := resp.Body - defer body.Close() - imports, err := parseMetaGoImports(body, mod) - if len(imports) == 0 { - if respErr := resp.Err(); respErr != nil { - // If the server's status was not OK, prefer to report that instead of - // an XML parse error. - return nil, respErr - } - } - if err != nil { - return nil, fmt.Errorf("parsing %s: %v", importPath, err) - } - // Find the matched meta import. 
- mmi, err := matchGoImport(imports, importPath) - if err != nil { - if _, ok := err.(ImportMismatchError); !ok { - return nil, fmt.Errorf("parse %s: %v", url, err) - } - return nil, fmt.Errorf("parse %s: no go-import meta tags (%s)", resp.URL, err) - } - if cfg.BuildV { - log.Printf("get %q: found meta tag %#v at %s", importPath, mmi, url) - } - // If the import was "uni.edu/bob/project", which said the - // prefix was "uni.edu" and the RepoRoot was "evilroot.com", - // make sure we don't trust Bob and check out evilroot.com to - // "uni.edu" yet (possibly overwriting/preempting another - // non-evil student). Instead, first verify the root and see - // if it matches Bob's claim. - if mmi.Prefix != importPath { - if cfg.BuildV { - log.Printf("get %q: verifying non-authoritative meta tag", importPath) - } - var imports []metaImport - url, imports, err = metaImportsForPrefix(mmi.Prefix, mod, security) - if err != nil { - return nil, err - } - metaImport2, err := matchGoImport(imports, importPath) - if err != nil || mmi != metaImport2 { - return nil, fmt.Errorf("%s and %s disagree about go-import for %s", resp.URL, url, mmi.Prefix) - } - } - - if err := validateRepoRoot(mmi.RepoRoot); err != nil { - return nil, fmt.Errorf("%s: invalid repo root %q: %v", resp.URL, mmi.RepoRoot, err) - } - vcs := vcsByCmd(mmi.VCS) - if vcs == nil && mmi.VCS != "mod" { - return nil, fmt.Errorf("%s: unknown vcs %q", resp.URL, mmi.VCS) - } - - rr := &RepoRoot{ - Repo: mmi.RepoRoot, - Root: mmi.Prefix, - IsCustom: true, - VCS: mmi.VCS, - vcs: vcs, - } - return rr, nil -} - -// validateRepoRoot returns an error if repoRoot does not seem to be -// a valid URL with scheme. 
-func validateRepoRoot(repoRoot string) error { - url, err := urlpkg.Parse(repoRoot) - if err != nil { - return err - } - if url.Scheme == "" { - return errors.New("no scheme") - } - if url.Scheme == "file" { - return errors.New("file scheme disallowed") - } - return nil -} - -var fetchGroup singleflight.Group -var ( - fetchCacheMu sync.Mutex - fetchCache = map[string]fetchResult{} // key is metaImportsForPrefix's importPrefix -) - -// metaImportsForPrefix takes a package's root import path as declared in a tag -// and returns its HTML discovery URL and the parsed metaImport lines -// found on the page. -// -// The importPath is of the form "golang.org/x/tools". -// It is an error if no imports are found. -// url will still be valid if err != nil. -// The returned url will be of the form "https://golang.org/x/tools?go-get=1" -func metaImportsForPrefix(importPrefix string, mod ModuleMode, security web.SecurityMode) (*urlpkg.URL, []metaImport, error) { - setCache := func(res fetchResult) (fetchResult, error) { - fetchCacheMu.Lock() - defer fetchCacheMu.Unlock() - fetchCache[importPrefix] = res - return res, nil - } - - resi, _, _ := fetchGroup.Do(importPrefix, func() (resi interface{}, err error) { - fetchCacheMu.Lock() - if res, ok := fetchCache[importPrefix]; ok { - fetchCacheMu.Unlock() - return res, nil - } - fetchCacheMu.Unlock() - - url, err := urlForImportPath(importPrefix) - if err != nil { - return setCache(fetchResult{err: err}) - } - resp, err := web.Get(security, url) - if err != nil { - return setCache(fetchResult{url: url, err: fmt.Errorf("fetching %s: %v", importPrefix, err)}) - } - body := resp.Body - defer body.Close() - imports, err := parseMetaGoImports(body, mod) - if len(imports) == 0 { - if respErr := resp.Err(); respErr != nil { - // If the server's status was not OK, prefer to report that instead of - // an XML parse error. 
- return setCache(fetchResult{url: url, err: respErr}) - } - } - if err != nil { - return setCache(fetchResult{url: url, err: fmt.Errorf("parsing %s: %v", resp.URL, err)}) - } - if len(imports) == 0 { - err = fmt.Errorf("fetching %s: no go-import meta tag found in %s", importPrefix, resp.URL) - } - return setCache(fetchResult{url: url, imports: imports, err: err}) - }) - res := resi.(fetchResult) - return res.url, res.imports, res.err -} - -type fetchResult struct { - url *urlpkg.URL - imports []metaImport - err error -} - -// metaImport represents the parsed tags from HTML files. -type metaImport struct { - Prefix, VCS, RepoRoot string -} - -// pathPrefix reports whether sub is a prefix of s, -// only considering entire path components. -func pathPrefix(s, sub string) bool { - // strings.HasPrefix is necessary but not sufficient. - if !strings.HasPrefix(s, sub) { - return false - } - // The remainder after the prefix must either be empty or start with a slash. - rem := s[len(sub):] - return rem == "" || rem[0] == '/' -} - -// A ImportMismatchError is returned where metaImport/s are present -// but none match our import path. -type ImportMismatchError struct { - importPath string - mismatches []string // the meta imports that were discarded for not matching our importPath -} - -func (m ImportMismatchError) Error() string { - formattedStrings := make([]string, len(m.mismatches)) - for i, pre := range m.mismatches { - formattedStrings[i] = fmt.Sprintf("meta tag %s did not match import path %s", pre, m.importPath) - } - return strings.Join(formattedStrings, ", ") -} - -// matchGoImport returns the metaImport from imports matching importPath. -// An error is returned if there are multiple matches. -// An ImportMismatchError is returned if none match. 
-func matchGoImport(imports []metaImport, importPath string) (metaImport, error) { - match := -1 - - errImportMismatch := ImportMismatchError{importPath: importPath} - for i, im := range imports { - if !pathPrefix(importPath, im.Prefix) { - errImportMismatch.mismatches = append(errImportMismatch.mismatches, im.Prefix) - continue - } - - if match >= 0 { - if imports[match].VCS == "mod" && im.VCS != "mod" { - // All the mod entries precede all the non-mod entries. - // We have a mod entry and don't care about the rest, - // matching or not. - break - } - return metaImport{}, fmt.Errorf("multiple meta tags match import path %q", importPath) - } - match = i - } - - if match == -1 { - return metaImport{}, errImportMismatch - } - return imports[match], nil -} - -// expand rewrites s to replace {k} with match[k] for each key k in match. -func expand(match map[string]string, s string) string { - // We want to replace each match exactly once, and the result of expansion - // must not depend on the iteration order through the map. - // A strings.Replacer has exactly the properties we're looking for. 
- oldNew := make([]string, 0, 2*len(match)) - for k, v := range match { - oldNew = append(oldNew, "{"+k+"}", v) - } - return strings.NewReplacer(oldNew...).Replace(s) -} - -// vcsPaths defines the meaning of import paths referring to -// commonly-used VCS hosting sites (github.com/user/dir) -// and import paths referring to a fully-qualified importPath -// containing a VCS type (foo.com/repo.git/dir) -var vcsPaths = []*vcsPath{ - // Github - { - prefix: "github.com/", - regexp: lazyregexp.New(`^(?Pgithub\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`), - vcs: "git", - repo: "https://{root}", - check: noVCSSuffix, - }, - - // Bitbucket - { - prefix: "bitbucket.org/", - regexp: lazyregexp.New(`^(?Pbitbucket\.org/(?P[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`), - repo: "https://{root}", - check: bitbucketVCS, - }, - - // IBM DevOps Services (JazzHub) - { - prefix: "hub.jazz.net/git/", - regexp: lazyregexp.New(`^(?Phub\.jazz\.net/git/[a-z0-9]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`), - vcs: "git", - repo: "https://{root}", - check: noVCSSuffix, - }, - - // Git at Apache - { - prefix: "git.apache.org/", - regexp: lazyregexp.New(`^(?Pgit\.apache\.org/[a-z0-9_.\-]+\.git)(/[A-Za-z0-9_.\-]+)*$`), - vcs: "git", - repo: "https://{root}", - }, - - // Git at OpenStack - { - prefix: "git.openstack.org/", - regexp: lazyregexp.New(`^(?Pgit\.openstack\.org/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(\.git)?(/[A-Za-z0-9_.\-]+)*$`), - vcs: "git", - repo: "https://{root}", - }, - - // chiselapp.com for fossil - { - prefix: "chiselapp.com/", - regexp: lazyregexp.New(`^(?Pchiselapp\.com/user/[A-Za-z0-9]+/repository/[A-Za-z0-9_.\-]+)$`), - vcs: "fossil", - repo: "https://{root}", - }, - - // General syntax for any server. - // Must be last. 
- { - regexp: lazyregexp.New(`(?P(?P([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?(/~?[A-Za-z0-9_.\-]+)+?)\.(?Pbzr|fossil|git|hg|svn))(/~?[A-Za-z0-9_.\-]+)*$`), - schemelessRepo: true, - }, -} - -// vcsPathsAfterDynamic gives additional vcsPaths entries -// to try after the dynamic HTML check. -// This gives those sites a chance to introduce tags -// as part of a graceful transition away from the hard-coded logic. -var vcsPathsAfterDynamic = []*vcsPath{ - // Launchpad. See golang.org/issue/11436. - { - prefix: "launchpad.net/", - regexp: lazyregexp.New(`^(?Plaunchpad\.net/((?P[A-Za-z0-9_.\-]+)(?P/[A-Za-z0-9_.\-]+)?|~[A-Za-z0-9_.\-]+/(\+junk|[A-Za-z0-9_.\-]+)/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`), - vcs: "bzr", - repo: "https://{root}", - check: launchpadVCS, - }, -} - -// noVCSSuffix checks that the repository name does not -// end in .foo for any version control system foo. -// The usual culprit is ".git". -func noVCSSuffix(match map[string]string) error { - repo := match["repo"] - for _, vcs := range vcsList { - if strings.HasSuffix(repo, "."+vcs.cmd) { - return fmt.Errorf("invalid version control suffix in %s path", match["prefix"]) - } - } - return nil -} - -// bitbucketVCS determines the version control system for a -// Bitbucket repository, by using the Bitbucket API. -func bitbucketVCS(match map[string]string) error { - if err := noVCSSuffix(match); err != nil { - return err - } - - var resp struct { - SCM string `json:"scm"` - } - url := &urlpkg.URL{ - Scheme: "https", - Host: "api.bitbucket.org", - Path: expand(match, "/2.0/repositories/{bitname}"), - RawQuery: "fields=scm", - } - data, err := web.GetBytes(url) - if err != nil { - if httpErr, ok := err.(*web.HTTPError); ok && httpErr.StatusCode == 403 { - // this may be a private repository. If so, attempt to determine which - // VCS it uses. See issue 5375. 
- root := match["root"] - for _, vcs := range []string{"git", "hg"} { - if vcsByCmd(vcs).ping("https", root) == nil { - resp.SCM = vcs - break - } - } - } - - if resp.SCM == "" { - return err - } - } else { - if err := json.Unmarshal(data, &resp); err != nil { - return fmt.Errorf("decoding %s: %v", url, err) - } - } - - if vcsByCmd(resp.SCM) != nil { - match["vcs"] = resp.SCM - if resp.SCM == "git" { - match["repo"] += ".git" - } - return nil - } - - return fmt.Errorf("unable to detect version control system for bitbucket.org/ path") -} - -// launchpadVCS solves the ambiguity for "lp.net/project/foo". In this case, -// "foo" could be a series name registered in Launchpad with its own branch, -// and it could also be the name of a directory within the main project -// branch one level up. -func launchpadVCS(match map[string]string) error { - if match["project"] == "" || match["series"] == "" { - return nil - } - url := &urlpkg.URL{ - Scheme: "https", - Host: "code.launchpad.net", - Path: expand(match, "/{project}{series}/.bzr/branch-format"), - } - _, err := web.GetBytes(url) - if err != nil { - match["root"] = expand(match, "launchpad.net/{project}") - match["repo"] = expand(match, "https://{root}") - } - return nil -} diff --git a/src/cmd/go/internal/get/vcs_test.go b/src/cmd/go/internal/get/vcs_test.go deleted file mode 100644 index 195bc231eb..0000000000 --- a/src/cmd/go/internal/get/vcs_test.go +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package get - -import ( - "errors" - "internal/testenv" - "io/ioutil" - "os" - "path" - "path/filepath" - "testing" - - "cmd/go/internal/web" -) - -// Test that RepoRootForImportPath determines the correct RepoRoot for a given importPath. -// TODO(cmang): Add tests for SVN and BZR. 
-func TestRepoRootForImportPath(t *testing.T) { - testenv.MustHaveExternalNetwork(t) - - tests := []struct { - path string - want *RepoRoot - }{ - { - "github.com/golang/groupcache", - &RepoRoot{ - vcs: vcsGit, - Repo: "https://github.com/golang/groupcache", - }, - }, - // Unicode letters in directories are not valid. - { - "github.com/user/unicode/испытание", - nil, - }, - // IBM DevOps Services tests - { - "hub.jazz.net/git/user1/pkgname", - &RepoRoot{ - vcs: vcsGit, - Repo: "https://hub.jazz.net/git/user1/pkgname", - }, - }, - { - "hub.jazz.net/git/user1/pkgname/submodule/submodule/submodule", - &RepoRoot{ - vcs: vcsGit, - Repo: "https://hub.jazz.net/git/user1/pkgname", - }, - }, - { - "hub.jazz.net", - nil, - }, - { - "hubajazz.net", - nil, - }, - { - "hub2.jazz.net", - nil, - }, - { - "hub.jazz.net/someotherprefix", - nil, - }, - { - "hub.jazz.net/someotherprefix/user1/pkgname", - nil, - }, - // Spaces are not valid in user names or package names - { - "hub.jazz.net/git/User 1/pkgname", - nil, - }, - { - "hub.jazz.net/git/user1/pkg name", - nil, - }, - // Dots are not valid in user names - { - "hub.jazz.net/git/user.1/pkgname", - nil, - }, - { - "hub.jazz.net/git/user/pkg.name", - &RepoRoot{ - vcs: vcsGit, - Repo: "https://hub.jazz.net/git/user/pkg.name", - }, - }, - // User names cannot have uppercase letters - { - "hub.jazz.net/git/USER/pkgname", - nil, - }, - // OpenStack tests - { - "git.openstack.org/openstack/swift", - &RepoRoot{ - vcs: vcsGit, - Repo: "https://git.openstack.org/openstack/swift", - }, - }, - // Trailing .git is less preferred but included for - // compatibility purposes while the same source needs to - // be compilable on both old and new go - { - "git.openstack.org/openstack/swift.git", - &RepoRoot{ - vcs: vcsGit, - Repo: "https://git.openstack.org/openstack/swift.git", - }, - }, - { - "git.openstack.org/openstack/swift/go/hummingbird", - &RepoRoot{ - vcs: vcsGit, - Repo: "https://git.openstack.org/openstack/swift", - }, - }, - { - 
"git.openstack.org", - nil, - }, - { - "git.openstack.org/openstack", - nil, - }, - // Spaces are not valid in package name - { - "git.apache.org/package name/path/to/lib", - nil, - }, - // Should have ".git" suffix - { - "git.apache.org/package-name/path/to/lib", - nil, - }, - { - "gitbapache.org", - nil, - }, - { - "git.apache.org/package-name.git", - &RepoRoot{ - vcs: vcsGit, - Repo: "https://git.apache.org/package-name.git", - }, - }, - { - "git.apache.org/package-name_2.x.git/path/to/lib", - &RepoRoot{ - vcs: vcsGit, - Repo: "https://git.apache.org/package-name_2.x.git", - }, - }, - { - "chiselapp.com/user/kyle/repository/fossilgg", - &RepoRoot{ - vcs: vcsFossil, - Repo: "https://chiselapp.com/user/kyle/repository/fossilgg", - }, - }, - { - // must have a user/$name/repository/$repo path - "chiselapp.com/kyle/repository/fossilgg", - nil, - }, - { - "chiselapp.com/user/kyle/fossilgg", - nil, - }, - } - - for _, test := range tests { - got, err := RepoRootForImportPath(test.path, IgnoreMod, web.SecureOnly) - want := test.want - - if want == nil { - if err == nil { - t.Errorf("RepoRootForImportPath(%q): Error expected but not received", test.path) - } - continue - } - if err != nil { - t.Errorf("RepoRootForImportPath(%q): %v", test.path, err) - continue - } - if got.vcs.name != want.vcs.name || got.Repo != want.Repo { - t.Errorf("RepoRootForImportPath(%q) = VCS(%s) Repo(%s), want VCS(%s) Repo(%s)", test.path, got.vcs, got.Repo, want.vcs, want.Repo) - } - } -} - -// Test that vcsFromDir correctly inspects a given directory and returns the right VCS and root. 
-func TestFromDir(t *testing.T) { - tempDir, err := ioutil.TempDir("", "vcstest") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - for j, vcs := range vcsList { - dir := filepath.Join(tempDir, "example.com", vcs.name, "."+vcs.cmd) - if j&1 == 0 { - err := os.MkdirAll(dir, 0755) - if err != nil { - t.Fatal(err) - } - } else { - err := os.MkdirAll(filepath.Dir(dir), 0755) - if err != nil { - t.Fatal(err) - } - f, err := os.Create(dir) - if err != nil { - t.Fatal(err) - } - f.Close() - } - - want := RepoRoot{ - vcs: vcs, - Root: path.Join("example.com", vcs.name), - } - var got RepoRoot - got.vcs, got.Root, err = vcsFromDir(dir, tempDir) - if err != nil { - t.Errorf("FromDir(%q, %q): %v", dir, tempDir, err) - continue - } - if got.vcs.name != want.vcs.name || got.Root != want.Root { - t.Errorf("FromDir(%q, %q) = VCS(%s) Root(%s), want VCS(%s) Root(%s)", dir, tempDir, got.vcs, got.Root, want.vcs, want.Root) - } - } -} - -func TestIsSecure(t *testing.T) { - tests := []struct { - vcs *vcsCmd - url string - secure bool - }{ - {vcsGit, "http://example.com/foo.git", false}, - {vcsGit, "https://example.com/foo.git", true}, - {vcsBzr, "http://example.com/foo.bzr", false}, - {vcsBzr, "https://example.com/foo.bzr", true}, - {vcsSvn, "http://example.com/svn", false}, - {vcsSvn, "https://example.com/svn", true}, - {vcsHg, "http://example.com/foo.hg", false}, - {vcsHg, "https://example.com/foo.hg", true}, - {vcsGit, "ssh://user@example.com/foo.git", true}, - {vcsGit, "user@server:path/to/repo.git", false}, - {vcsGit, "user@server:", false}, - {vcsGit, "server:repo.git", false}, - {vcsGit, "server:path/to/repo.git", false}, - {vcsGit, "example.com:path/to/repo.git", false}, - {vcsGit, "path/that/contains/a:colon/repo.git", false}, - {vcsHg, "ssh://user@example.com/path/to/repo.hg", true}, - {vcsFossil, "http://example.com/foo", false}, - {vcsFossil, "https://example.com/foo", true}, - } - - for _, test := range tests { - secure := test.vcs.isSecure(test.url) - 
if secure != test.secure { - t.Errorf("%s isSecure(%q) = %t; want %t", test.vcs, test.url, secure, test.secure) - } - } -} - -func TestIsSecureGitAllowProtocol(t *testing.T) { - tests := []struct { - vcs *vcsCmd - url string - secure bool - }{ - // Same as TestIsSecure to verify same behavior. - {vcsGit, "http://example.com/foo.git", false}, - {vcsGit, "https://example.com/foo.git", true}, - {vcsBzr, "http://example.com/foo.bzr", false}, - {vcsBzr, "https://example.com/foo.bzr", true}, - {vcsSvn, "http://example.com/svn", false}, - {vcsSvn, "https://example.com/svn", true}, - {vcsHg, "http://example.com/foo.hg", false}, - {vcsHg, "https://example.com/foo.hg", true}, - {vcsGit, "user@server:path/to/repo.git", false}, - {vcsGit, "user@server:", false}, - {vcsGit, "server:repo.git", false}, - {vcsGit, "server:path/to/repo.git", false}, - {vcsGit, "example.com:path/to/repo.git", false}, - {vcsGit, "path/that/contains/a:colon/repo.git", false}, - {vcsHg, "ssh://user@example.com/path/to/repo.hg", true}, - // New behavior. 
- {vcsGit, "ssh://user@example.com/foo.git", false}, - {vcsGit, "foo://example.com/bar.git", true}, - {vcsHg, "foo://example.com/bar.hg", false}, - {vcsSvn, "foo://example.com/svn", false}, - {vcsBzr, "foo://example.com/bar.bzr", false}, - } - - defer os.Unsetenv("GIT_ALLOW_PROTOCOL") - os.Setenv("GIT_ALLOW_PROTOCOL", "https:foo") - for _, test := range tests { - secure := test.vcs.isSecure(test.url) - if secure != test.secure { - t.Errorf("%s isSecure(%q) = %t; want %t", test.vcs, test.url, secure, test.secure) - } - } -} - -func TestMatchGoImport(t *testing.T) { - tests := []struct { - imports []metaImport - path string - mi metaImport - err error - }{ - { - imports: []metaImport{ - {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - }, - path: "example.com/user/foo", - mi: metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - }, - { - imports: []metaImport{ - {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - }, - path: "example.com/user/foo/", - mi: metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - }, - { - imports: []metaImport{ - {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - {Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - }, - path: "example.com/user/foo", - mi: metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - }, - { - imports: []metaImport{ - {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - {Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - }, - path: "example.com/user/fooa", - mi: metaImport{Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - }, - { - imports: []metaImport{ - {Prefix: 
"example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - }, - path: "example.com/user/foo/bar", - err: errors.New("should not be allowed to create nested repo"), - }, - { - imports: []metaImport{ - {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - }, - path: "example.com/user/foo/bar/baz", - err: errors.New("should not be allowed to create nested repo"), - }, - { - imports: []metaImport{ - {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - }, - path: "example.com/user/foo/bar/baz/qux", - err: errors.New("should not be allowed to create nested repo"), - }, - { - imports: []metaImport{ - {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - }, - path: "example.com/user/foo/bar/baz/", - err: errors.New("should not be allowed to create nested repo"), - }, - { - imports: []metaImport{ - {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - }, - path: "example.com", - err: errors.New("pathologically short path"), - }, - { - imports: []metaImport{ - {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, - }, - path: "different.example.com/user/foo", - err: errors.New("meta tags do not match import path"), - }, - { - imports: []metaImport{ - {Prefix: "myitcv.io/blah2", VCS: "mod", RepoRoot: "https://raw.githubusercontent.com/myitcv/pubx/master"}, - {Prefix: "myitcv.io", VCS: 
"git", RepoRoot: "https://github.com/myitcv/x"}, - }, - path: "myitcv.io/blah2/foo", - mi: metaImport{Prefix: "myitcv.io/blah2", VCS: "mod", RepoRoot: "https://raw.githubusercontent.com/myitcv/pubx/master"}, - }, - { - imports: []metaImport{ - {Prefix: "myitcv.io/blah2", VCS: "mod", RepoRoot: "https://raw.githubusercontent.com/myitcv/pubx/master"}, - {Prefix: "myitcv.io", VCS: "git", RepoRoot: "https://github.com/myitcv/x"}, - }, - path: "myitcv.io/other", - mi: metaImport{Prefix: "myitcv.io", VCS: "git", RepoRoot: "https://github.com/myitcv/x"}, - }, - } - - for _, test := range tests { - mi, err := matchGoImport(test.imports, test.path) - if mi != test.mi { - t.Errorf("unexpected metaImport; got %v, want %v", mi, test.mi) - } - - got := err - want := test.err - if (got == nil) != (want == nil) { - t.Errorf("unexpected error; got %v, want %v", got, want) - } - } -} - -func TestValidateRepoRoot(t *testing.T) { - tests := []struct { - root string - ok bool - }{ - { - root: "", - ok: false, - }, - { - root: "http://", - ok: true, - }, - { - root: "git+ssh://", - ok: true, - }, - { - root: "http#://", - ok: false, - }, - { - root: "-config", - ok: false, - }, - { - root: "-config://", - ok: false, - }, - } - - for _, test := range tests { - err := validateRepoRoot(test.root) - ok := err == nil - if ok != test.ok { - want := "error" - if test.ok { - want = "nil" - } - t.Errorf("validateRepoRoot(%q) = %q, want %s", test.root, err, want) - } - } -} diff --git a/src/cmd/go/internal/modfetch/repo.go b/src/cmd/go/internal/modfetch/repo.go index 34f805d58a..eed4dd4258 100644 --- a/src/cmd/go/internal/modfetch/repo.go +++ b/src/cmd/go/internal/modfetch/repo.go @@ -13,9 +13,9 @@ import ( "time" "cmd/go/internal/cfg" - "cmd/go/internal/get" "cmd/go/internal/modfetch/codehost" "cmd/go/internal/par" + "cmd/go/internal/vcs" web "cmd/go/internal/web" "golang.org/x/mod/module" @@ -261,13 +261,13 @@ func lookupDirect(path string) (Repo, error) { if allowInsecure(path) { security = 
web.Insecure } - rr, err := get.RepoRootForImportPath(path, get.PreferMod, security) + rr, err := vcs.RepoRootForImportPath(path, vcs.PreferMod, security) if err != nil { // We don't know where to find code for a module with this path. return nil, notExistError{err: err} } - if rr.VCS == "mod" { + if rr.VCS.Name == "mod" { // Fetch module from proxy with base URL rr.Repo. return newProxyRepo(rr.Repo, path) } @@ -279,8 +279,8 @@ func lookupDirect(path string) (Repo, error) { return newCodeRepo(code, rr.Root, path) } -func lookupCodeRepo(rr *get.RepoRoot) (codehost.Repo, error) { - code, err := codehost.NewRepo(rr.VCS, rr.Repo) +func lookupCodeRepo(rr *vcs.RepoRoot) (codehost.Repo, error) { + code, err := codehost.NewRepo(rr.VCS.Cmd, rr.Repo) if err != nil { if _, ok := err.(*codehost.VCSError); ok { return nil, err @@ -306,7 +306,7 @@ func ImportRepoRev(path, rev string) (Repo, *RevInfo, error) { if allowInsecure(path) { security = web.Insecure } - rr, err := get.RepoRootForImportPath(path, get.IgnoreMod, security) + rr, err := vcs.RepoRootForImportPath(path, vcs.IgnoreMod, security) if err != nil { return nil, nil, err } diff --git a/src/cmd/go/internal/str/str_test.go b/src/cmd/go/internal/str/str_test.go new file mode 100644 index 0000000000..147ce1a63e --- /dev/null +++ b/src/cmd/go/internal/str/str_test.go @@ -0,0 +1,27 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package str + +import "testing" + +var foldDupTests = []struct { + list []string + f1, f2 string +}{ + {StringList("math/rand", "math/big"), "", ""}, + {StringList("math", "strings"), "", ""}, + {StringList("strings"), "", ""}, + {StringList("strings", "strings"), "strings", "strings"}, + {StringList("Rand", "rand", "math", "math/rand", "math/Rand"), "Rand", "rand"}, +} + +func TestFoldDup(t *testing.T) { + for _, tt := range foldDupTests { + f1, f2 := FoldDup(tt.list) + if f1 != tt.f1 || f2 != tt.f2 { + t.Errorf("foldDup(%q) = %q, %q, want %q, %q", tt.list, f1, f2, tt.f1, tt.f2) + } + } +} diff --git a/src/cmd/go/internal/vcs/discovery.go b/src/cmd/go/internal/vcs/discovery.go new file mode 100644 index 0000000000..327b44cb9a --- /dev/null +++ b/src/cmd/go/internal/vcs/discovery.go @@ -0,0 +1,97 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcs + +import ( + "encoding/xml" + "fmt" + "io" + "strings" +) + +// charsetReader returns a reader that converts from the given charset to UTF-8. +// Currently it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful +// error which is printed by go get, so the user can find why the package +// wasn't downloaded if the encoding is not supported. Note that, in +// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters +// greater than 0x7f are not rejected). +func charsetReader(charset string, input io.Reader) (io.Reader, error) { + switch strings.ToLower(charset) { + case "utf-8", "ascii": + return input, nil + default: + return nil, fmt.Errorf("can't decode XML document using charset %q", charset) + } +} + +// parseMetaGoImports returns meta imports from the HTML in r. +// Parsing ends at the end of the section or the beginning of the . 
+func parseMetaGoImports(r io.Reader, mod ModuleMode) ([]metaImport, error) { + d := xml.NewDecoder(r) + d.CharsetReader = charsetReader + d.Strict = false + var imports []metaImport + for { + t, err := d.RawToken() + if err != nil { + if err != io.EOF && len(imports) == 0 { + return nil, err + } + break + } + if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") { + break + } + if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") { + break + } + e, ok := t.(xml.StartElement) + if !ok || !strings.EqualFold(e.Name.Local, "meta") { + continue + } + if attrValue(e.Attr, "name") != "go-import" { + continue + } + if f := strings.Fields(attrValue(e.Attr, "content")); len(f) == 3 { + imports = append(imports, metaImport{ + Prefix: f[0], + VCS: f[1], + RepoRoot: f[2], + }) + } + } + + // Extract mod entries if we are paying attention to them. + var list []metaImport + var have map[string]bool + if mod == PreferMod { + have = make(map[string]bool) + for _, m := range imports { + if m.VCS == "mod" { + have[m.Prefix] = true + list = append(list, m) + } + } + } + + // Append non-mod entries, ignoring those superseded by a mod entry. + for _, m := range imports { + if m.VCS != "mod" && !have[m.Prefix] { + list = append(list, m) + } + } + return list, nil +} + +// attrValue returns the attribute value for the case-insensitive key +// `name', or the empty string if nothing is found. +func attrValue(attrs []xml.Attr, name string) string { + for _, a := range attrs { + if strings.EqualFold(a.Name.Local, name) { + return a.Value + } + } + return "" +} diff --git a/src/cmd/go/internal/vcs/discovery_test.go b/src/cmd/go/internal/vcs/discovery_test.go new file mode 100644 index 0000000000..eb99fdf64c --- /dev/null +++ b/src/cmd/go/internal/vcs/discovery_test.go @@ -0,0 +1,110 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcs + +import ( + "reflect" + "strings" + "testing" +) + +var parseMetaGoImportsTests = []struct { + in string + mod ModuleMode + out []metaImport +}{ + { + ``, + IgnoreMod, + []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}}, + }, + { + ` + `, + IgnoreMod, + []metaImport{ + {"foo/bar", "git", "https://github.com/rsc/foo/bar"}, + {"baz/quux", "git", "http://github.com/rsc/baz/quux"}, + }, + }, + { + ` + `, + IgnoreMod, + []metaImport{ + {"foo/bar", "git", "https://github.com/rsc/foo/bar"}, + }, + }, + { + ` + `, + IgnoreMod, + []metaImport{ + {"foo/bar", "git", "https://github.com/rsc/foo/bar"}, + }, + }, + { + ` + `, + PreferMod, + []metaImport{ + {"foo/bar", "mod", "http://github.com/rsc/baz/quux"}, + }, + }, + { + ` + + `, + IgnoreMod, + []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}}, + }, + { + ` + `, + IgnoreMod, + []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}}, + }, + { + ``, + IgnoreMod, + []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}}, + }, + { + // XML doesn't like
. + `Page Not Found
DRAFT
`, + IgnoreMod, + []metaImport{{"chitin.io/chitin", "git", "https://github.com/chitin-io/chitin"}}, + }, + { + ` + + `, + IgnoreMod, + []metaImport{{"myitcv.io", "git", "https://github.com/myitcv/x"}}, + }, + { + ` + + `, + PreferMod, + []metaImport{ + {"myitcv.io/blah2", "mod", "https://raw.githubusercontent.com/myitcv/pubx/master"}, + {"myitcv.io", "git", "https://github.com/myitcv/x"}, + }, + }, +} + +func TestParseMetaGoImports(t *testing.T) { + for i, tt := range parseMetaGoImportsTests { + out, err := parseMetaGoImports(strings.NewReader(tt.in), tt.mod) + if err != nil { + t.Errorf("test#%d: %v", i, err) + continue + } + if !reflect.DeepEqual(out, tt.out) { + t.Errorf("test#%d:\n\thave %q\n\twant %q", i, out, tt.out) + } + } +} diff --git a/src/cmd/go/internal/vcs/vcs.go b/src/cmd/go/internal/vcs/vcs.go new file mode 100644 index 0000000000..e535998d89 --- /dev/null +++ b/src/cmd/go/internal/vcs/vcs.go @@ -0,0 +1,1187 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcs + +import ( + "encoding/json" + "errors" + "fmt" + "internal/lazyregexp" + "internal/singleflight" + "log" + urlpkg "net/url" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "sync" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/load" + "cmd/go/internal/web" +) + +// A vcsCmd describes how to use a version control system +// like Mercurial, Git, or Subversion. 
+type Cmd struct { + Name string + Cmd string // name of binary to invoke command + + CreateCmd []string // commands to download a fresh copy of a repository + DownloadCmd []string // commands to download updates into an existing repository + + TagCmd []tagCmd // commands to list tags + TagLookupCmd []tagCmd // commands to lookup tags before running tagSyncCmd + TagSyncCmd []string // commands to sync to specific tag + TagSyncDefault []string // commands to sync to default tag + + Scheme []string + PingCmd string + + RemoteRepo func(v *Cmd, rootDir string) (remoteRepo string, err error) + ResolveRepo func(v *Cmd, rootDir, remoteRepo string) (realRepo string, err error) +} + +var defaultSecureScheme = map[string]bool{ + "https": true, + "git+ssh": true, + "bzr+ssh": true, + "svn+ssh": true, + "ssh": true, +} + +func (v *Cmd) IsSecure(repo string) bool { + u, err := urlpkg.Parse(repo) + if err != nil { + // If repo is not a URL, it's not secure. + return false + } + return v.isSecureScheme(u.Scheme) +} + +func (v *Cmd) isSecureScheme(scheme string) bool { + switch v.Cmd { + case "git": + // GIT_ALLOW_PROTOCOL is an environment variable defined by Git. It is a + // colon-separated list of schemes that are allowed to be used with git + // fetch/clone. Any scheme not mentioned will be considered insecure. + if allow := os.Getenv("GIT_ALLOW_PROTOCOL"); allow != "" { + for _, s := range strings.Split(allow, ":") { + if s == scheme { + return true + } + } + return false + } + } + return defaultSecureScheme[scheme] +} + +// A tagCmd describes a command to list available tags +// that can be passed to tagSyncCmd. +type tagCmd struct { + cmd string // command to list tags + pattern string // regexp to extract tags from list +} + +// vcsList lists the known version control systems +var vcsList = []*Cmd{ + vcsHg, + vcsGit, + vcsSvn, + vcsBzr, + vcsFossil, +} + +// vcsMod is a stub for the "mod" scheme. 
It's returned by +// repoRootForImportPathDynamic, but is otherwise not treated as a VCS command. +var vcsMod = &Cmd{Name: "mod"} + +// vcsByCmd returns the version control system for the given +// command name (hg, git, svn, bzr). +func vcsByCmd(cmd string) *Cmd { + for _, vcs := range vcsList { + if vcs.Cmd == cmd { + return vcs + } + } + return nil +} + +// vcsHg describes how to use Mercurial. +var vcsHg = &Cmd{ + Name: "Mercurial", + Cmd: "hg", + + CreateCmd: []string{"clone -U -- {repo} {dir}"}, + DownloadCmd: []string{"pull"}, + + // We allow both tag and branch names as 'tags' + // for selecting a version. This lets people have + // a go.release.r60 branch and a go1 branch + // and make changes in both, without constantly + // editing .hgtags. + TagCmd: []tagCmd{ + {"tags", `^(\S+)`}, + {"branches", `^(\S+)`}, + }, + TagSyncCmd: []string{"update -r {tag}"}, + TagSyncDefault: []string{"update default"}, + + Scheme: []string{"https", "http", "ssh"}, + PingCmd: "identify -- {scheme}://{repo}", + RemoteRepo: hgRemoteRepo, +} + +func hgRemoteRepo(vcsHg *Cmd, rootDir string) (remoteRepo string, err error) { + out, err := vcsHg.runOutput(rootDir, "paths default") + if err != nil { + return "", err + } + return strings.TrimSpace(string(out)), nil +} + +// vcsGit describes how to use Git. +var vcsGit = &Cmd{ + Name: "Git", + Cmd: "git", + + CreateCmd: []string{"clone -- {repo} {dir}", "-go-internal-cd {dir} submodule update --init --recursive"}, + DownloadCmd: []string{"pull --ff-only", "submodule update --init --recursive"}, + + TagCmd: []tagCmd{ + // tags/xxx matches a git tag named xxx + // origin/xxx matches a git branch named xxx on the default remote repository + {"show-ref", `(?:tags|origin)/(\S+)$`}, + }, + TagLookupCmd: []tagCmd{ + {"show-ref tags/{tag} origin/{tag}", `((?:tags|origin)/\S+)$`}, + }, + TagSyncCmd: []string{"checkout {tag}", "submodule update --init --recursive"}, + // both createCmd and downloadCmd update the working dir. 
+ // No need to do more here. We used to 'checkout master' + // but that doesn't work if the default branch is not named master. + // DO NOT add 'checkout master' here. + // See golang.org/issue/9032. + TagSyncDefault: []string{"submodule update --init --recursive"}, + + Scheme: []string{"git", "https", "http", "git+ssh", "ssh"}, + + // Leave out the '--' separator in the ls-remote command: git 2.7.4 does not + // support such a separator for that command, and this use should be safe + // without it because the {scheme} value comes from the predefined list above. + // See golang.org/issue/33836. + PingCmd: "ls-remote {scheme}://{repo}", + + RemoteRepo: gitRemoteRepo, +} + +// scpSyntaxRe matches the SCP-like addresses used by Git to access +// repositories by SSH. +var scpSyntaxRe = lazyregexp.New(`^([a-zA-Z0-9_]+)@([a-zA-Z0-9._-]+):(.*)$`) + +func gitRemoteRepo(vcsGit *Cmd, rootDir string) (remoteRepo string, err error) { + cmd := "config remote.origin.url" + errParse := errors.New("unable to parse output of git " + cmd) + errRemoteOriginNotFound := errors.New("remote origin not found") + outb, err := vcsGit.run1(rootDir, cmd, nil, false) + if err != nil { + // if it doesn't output any message, it means the config argument is correct, + // but the config value itself doesn't exist + if outb != nil && len(outb) == 0 { + return "", errRemoteOriginNotFound + } + return "", err + } + out := strings.TrimSpace(string(outb)) + + var repoURL *urlpkg.URL + if m := scpSyntaxRe.FindStringSubmatch(out); m != nil { + // Match SCP-like syntax and convert it to a URL. + // Eg, "git@github.com:user/repo" becomes + // "ssh://git@github.com/user/repo". + repoURL = &urlpkg.URL{ + Scheme: "ssh", + User: urlpkg.User(m[1]), + Host: m[2], + Path: m[3], + } + } else { + repoURL, err = urlpkg.Parse(out) + if err != nil { + return "", err + } + } + + // Iterate over insecure schemes too, because this function simply + // reports the state of the repo. 
If we can't see insecure schemes then + // we can't report the actual repo URL. + for _, s := range vcsGit.Scheme { + if repoURL.Scheme == s { + return repoURL.String(), nil + } + } + return "", errParse +} + +// vcsBzr describes how to use Bazaar. +var vcsBzr = &Cmd{ + Name: "Bazaar", + Cmd: "bzr", + + CreateCmd: []string{"branch -- {repo} {dir}"}, + + // Without --overwrite bzr will not pull tags that changed. + // Replace by --overwrite-tags after http://pad.lv/681792 goes in. + DownloadCmd: []string{"pull --overwrite"}, + + TagCmd: []tagCmd{{"tags", `^(\S+)`}}, + TagSyncCmd: []string{"update -r {tag}"}, + TagSyncDefault: []string{"update -r revno:-1"}, + + Scheme: []string{"https", "http", "bzr", "bzr+ssh"}, + PingCmd: "info -- {scheme}://{repo}", + RemoteRepo: bzrRemoteRepo, + ResolveRepo: bzrResolveRepo, +} + +func bzrRemoteRepo(vcsBzr *Cmd, rootDir string) (remoteRepo string, err error) { + outb, err := vcsBzr.runOutput(rootDir, "config parent_location") + if err != nil { + return "", err + } + return strings.TrimSpace(string(outb)), nil +} + +func bzrResolveRepo(vcsBzr *Cmd, rootDir, remoteRepo string) (realRepo string, err error) { + outb, err := vcsBzr.runOutput(rootDir, "info "+remoteRepo) + if err != nil { + return "", err + } + out := string(outb) + + // Expect: + // ... + // (branch root|repository branch): + // ... + + found := false + for _, prefix := range []string{"\n branch root: ", "\n repository branch: "} { + i := strings.Index(out, prefix) + if i >= 0 { + out = out[i+len(prefix):] + found = true + break + } + } + if !found { + return "", fmt.Errorf("unable to parse output of bzr info") + } + + i := strings.Index(out, "\n") + if i < 0 { + return "", fmt.Errorf("unable to parse output of bzr info") + } + out = out[:i] + return strings.TrimSpace(out), nil +} + +// vcsSvn describes how to use Subversion. 
+var vcsSvn = &Cmd{ + Name: "Subversion", + Cmd: "svn", + + CreateCmd: []string{"checkout -- {repo} {dir}"}, + DownloadCmd: []string{"update"}, + + // There is no tag command in subversion. + // The branch information is all in the path names. + + Scheme: []string{"https", "http", "svn", "svn+ssh"}, + PingCmd: "info -- {scheme}://{repo}", + RemoteRepo: svnRemoteRepo, +} + +func svnRemoteRepo(vcsSvn *Cmd, rootDir string) (remoteRepo string, err error) { + outb, err := vcsSvn.runOutput(rootDir, "info") + if err != nil { + return "", err + } + out := string(outb) + + // Expect: + // + // ... + // URL: + // ... + // + // Note that we're not using the Repository Root line, + // because svn allows checking out subtrees. + // The URL will be the URL of the subtree (what we used with 'svn co') + // while the Repository Root may be a much higher parent. + i := strings.Index(out, "\nURL: ") + if i < 0 { + return "", fmt.Errorf("unable to parse output of svn info") + } + out = out[i+len("\nURL: "):] + i = strings.Index(out, "\n") + if i < 0 { + return "", fmt.Errorf("unable to parse output of svn info") + } + out = out[:i] + return strings.TrimSpace(out), nil +} + +// fossilRepoName is the name go get associates with a fossil repository. In the +// real world the file can be named anything. 
+const fossilRepoName = ".fossil" + +// vcsFossil describes how to use Fossil (fossil-scm.org) +var vcsFossil = &Cmd{ + Name: "Fossil", + Cmd: "fossil", + + CreateCmd: []string{"-go-internal-mkdir {dir} clone -- {repo} " + filepath.Join("{dir}", fossilRepoName), "-go-internal-cd {dir} open .fossil"}, + DownloadCmd: []string{"up"}, + + TagCmd: []tagCmd{{"tag ls", `(.*)`}}, + TagSyncCmd: []string{"up tag:{tag}"}, + TagSyncDefault: []string{"up trunk"}, + + Scheme: []string{"https", "http"}, + RemoteRepo: fossilRemoteRepo, +} + +func fossilRemoteRepo(vcsFossil *Cmd, rootDir string) (remoteRepo string, err error) { + out, err := vcsFossil.runOutput(rootDir, "remote-url") + if err != nil { + return "", err + } + return strings.TrimSpace(string(out)), nil +} + +func (v *Cmd) String() string { + return v.Name +} + +// run runs the command line cmd in the given directory. +// keyval is a list of key, value pairs. run expands +// instances of {key} in cmd into value, but only after +// splitting cmd into individual arguments. +// If an error occurs, run prints the command line and the +// command's combined stdout+stderr to standard error. +// Otherwise run discards the command's output. +func (v *Cmd) run(dir string, cmd string, keyval ...string) error { + _, err := v.run1(dir, cmd, keyval, true) + return err +} + +// runVerboseOnly is like run but only generates error output to standard error in verbose mode. +func (v *Cmd) runVerboseOnly(dir string, cmd string, keyval ...string) error { + _, err := v.run1(dir, cmd, keyval, false) + return err +} + +// runOutput is like run but returns the output of the command. +func (v *Cmd) runOutput(dir string, cmd string, keyval ...string) ([]byte, error) { + return v.run1(dir, cmd, keyval, true) +} + +// run1 is the generalized implementation of run and runOutput. 
+func (v *Cmd) run1(dir string, cmdline string, keyval []string, verbose bool) ([]byte, error) { + m := make(map[string]string) + for i := 0; i < len(keyval); i += 2 { + m[keyval[i]] = keyval[i+1] + } + args := strings.Fields(cmdline) + for i, arg := range args { + args[i] = expand(m, arg) + } + + if len(args) >= 2 && args[0] == "-go-internal-mkdir" { + var err error + if filepath.IsAbs(args[1]) { + err = os.Mkdir(args[1], os.ModePerm) + } else { + err = os.Mkdir(filepath.Join(dir, args[1]), os.ModePerm) + } + if err != nil { + return nil, err + } + args = args[2:] + } + + if len(args) >= 2 && args[0] == "-go-internal-cd" { + if filepath.IsAbs(args[1]) { + dir = args[1] + } else { + dir = filepath.Join(dir, args[1]) + } + args = args[2:] + } + + _, err := exec.LookPath(v.Cmd) + if err != nil { + fmt.Fprintf(os.Stderr, + "go: missing %s command. See https://golang.org/s/gogetcmd\n", + v.Name) + return nil, err + } + + cmd := exec.Command(v.Cmd, args...) + cmd.Dir = dir + cmd.Env = base.AppendPWD(os.Environ(), cmd.Dir) + if cfg.BuildX { + fmt.Fprintf(os.Stderr, "cd %s\n", dir) + fmt.Fprintf(os.Stderr, "%s %s\n", v.Cmd, strings.Join(args, " ")) + } + out, err := cmd.Output() + if err != nil { + if verbose || cfg.BuildV { + fmt.Fprintf(os.Stderr, "# cd %s; %s %s\n", dir, v.Cmd, strings.Join(args, " ")) + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + os.Stderr.Write(ee.Stderr) + } else { + fmt.Fprintf(os.Stderr, err.Error()) + } + } + } + return out, err +} + +// Ping pings to determine scheme to use. +func (v *Cmd) Ping(scheme, repo string) error { + return v.runVerboseOnly(".", v.PingCmd, "scheme", scheme, "repo", repo) +} + +// Create creates a new copy of repo in dir. +// The parent of dir must exist; dir must not. 
+func (v *Cmd) Create(dir, repo string) error { + for _, cmd := range v.CreateCmd { + if err := v.run(".", cmd, "dir", dir, "repo", repo); err != nil { + return err + } + } + return nil +} + +// Download downloads any new changes for the repo in dir. +func (v *Cmd) Download(dir string) error { + for _, cmd := range v.DownloadCmd { + if err := v.run(dir, cmd); err != nil { + return err + } + } + return nil +} + +// Tags returns the list of available tags for the repo in dir. +func (v *Cmd) Tags(dir string) ([]string, error) { + var tags []string + for _, tc := range v.TagCmd { + out, err := v.runOutput(dir, tc.cmd) + if err != nil { + return nil, err + } + re := regexp.MustCompile(`(?m-s)` + tc.pattern) + for _, m := range re.FindAllStringSubmatch(string(out), -1) { + tags = append(tags, m[1]) + } + } + return tags, nil +} + +// tagSync syncs the repo in dir to the named tag, +// which either is a tag returned by tags or is v.tagDefault. +func (v *Cmd) TagSync(dir, tag string) error { + if v.TagSyncCmd == nil { + return nil + } + if tag != "" { + for _, tc := range v.TagLookupCmd { + out, err := v.runOutput(dir, tc.cmd, "tag", tag) + if err != nil { + return err + } + re := regexp.MustCompile(`(?m-s)` + tc.pattern) + m := re.FindStringSubmatch(string(out)) + if len(m) > 1 { + tag = m[1] + break + } + } + } + + if tag == "" && v.TagSyncDefault != nil { + for _, cmd := range v.TagSyncDefault { + if err := v.run(dir, cmd); err != nil { + return err + } + } + return nil + } + + for _, cmd := range v.TagSyncCmd { + if err := v.run(dir, cmd, "tag", tag); err != nil { + return err + } + } + return nil +} + +// A vcsPath describes how to convert an import path into a +// version control system and repository name. 
+type vcsPath struct { + prefix string // prefix this description applies to + regexp *lazyregexp.Regexp // compiled pattern for import path + repo string // repository to use (expand with match of re) + vcs string // version control system to use (expand with match of re) + check func(match map[string]string) error // additional checks + schemelessRepo bool // if true, the repo pattern lacks a scheme +} + +// FromDir inspects dir and its parents to determine the +// version control system and code repository to use. +// On return, root is the import path +// corresponding to the root of the repository. +func FromDir(dir, srcRoot string) (vcs *Cmd, root string, err error) { + // Clean and double-check that dir is in (a subdirectory of) srcRoot. + dir = filepath.Clean(dir) + srcRoot = filepath.Clean(srcRoot) + if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator { + return nil, "", fmt.Errorf("directory %q is outside source root %q", dir, srcRoot) + } + + var vcsRet *Cmd + var rootRet string + + origDir := dir + for len(dir) > len(srcRoot) { + for _, vcs := range vcsList { + if _, err := os.Stat(filepath.Join(dir, "."+vcs.Cmd)); err == nil { + root := filepath.ToSlash(dir[len(srcRoot)+1:]) + // Record first VCS we find, but keep looking, + // to detect mistakes like one kind of VCS inside another. + if vcsRet == nil { + vcsRet = vcs + rootRet = root + continue + } + // Allow .git inside .git, which can arise due to submodules. + if vcsRet == vcs && vcs.Cmd == "git" { + continue + } + // Otherwise, we have one VCS inside a different VCS. + return nil, "", fmt.Errorf("directory %q uses %s, but parent %q uses %s", + filepath.Join(srcRoot, rootRet), vcsRet.Cmd, filepath.Join(srcRoot, root), vcs.Cmd) + } + } + + // Move to parent. + ndir := filepath.Dir(dir) + if len(ndir) >= len(dir) { + // Shouldn't happen, but just in case, stop. 
+ break + } + dir = ndir + } + + if vcsRet != nil { + return vcsRet, rootRet, nil + } + + return nil, "", fmt.Errorf("directory %q is not using a known version control system", origDir) +} + +// CheckNested checks for an incorrectly-nested VCS-inside-VCS +// situation for dir, checking parents up until srcRoot. +func CheckNested(vcs *Cmd, dir, srcRoot string) error { + if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator { + return fmt.Errorf("directory %q is outside source root %q", dir, srcRoot) + } + + otherDir := dir + for len(otherDir) > len(srcRoot) { + for _, otherVCS := range vcsList { + if _, err := os.Stat(filepath.Join(otherDir, "."+otherVCS.Cmd)); err == nil { + // Allow expected vcs in original dir. + if otherDir == dir && otherVCS == vcs { + continue + } + // Allow .git inside .git, which can arise due to submodules. + if otherVCS == vcs && vcs.Cmd == "git" { + continue + } + // Otherwise, we have one VCS inside a different VCS. + return fmt.Errorf("directory %q uses %s, but parent %q uses %s", dir, vcs.Cmd, otherDir, otherVCS.Cmd) + } + } + // Move to parent. + newDir := filepath.Dir(otherDir) + if len(newDir) >= len(otherDir) { + // Shouldn't happen, but just in case, stop. + break + } + otherDir = newDir + } + + return nil +} + +// RepoRoot describes the repository root for a tree of source code. +type RepoRoot struct { + Repo string // repository URL, including scheme + Root string // import path corresponding to root of repo + IsCustom bool // defined by served tags (as opposed to hard-coded pattern) + VCS *Cmd +} + +func httpPrefix(s string) string { + for _, prefix := range [...]string{"http:", "https:"} { + if strings.HasPrefix(s, prefix) { + return prefix + } + } + return "" +} + +// ModuleMode specifies whether to prefer modules when looking up code sources. 
+type ModuleMode int + +const ( + IgnoreMod ModuleMode = iota + PreferMod +) + +// RepoRootForImportPath analyzes importPath to determine the +// version control system, and code repository to use. +func RepoRootForImportPath(importPath string, mod ModuleMode, security web.SecurityMode) (*RepoRoot, error) { + rr, err := repoRootFromVCSPaths(importPath, security, vcsPaths) + if err == errUnknownSite { + rr, err = repoRootForImportDynamic(importPath, mod, security) + if err != nil { + err = load.ImportErrorf(importPath, "unrecognized import path %q: %v", importPath, err) + } + } + if err != nil { + rr1, err1 := repoRootFromVCSPaths(importPath, security, vcsPathsAfterDynamic) + if err1 == nil { + rr = rr1 + err = nil + } + } + + // Should have been taken care of above, but make sure. + if err == nil && strings.Contains(importPath, "...") && strings.Contains(rr.Root, "...") { + // Do not allow wildcards in the repo root. + rr = nil + err = load.ImportErrorf(importPath, "cannot expand ... in %q", importPath) + } + return rr, err +} + +var errUnknownSite = errors.New("dynamic lookup required to find mapping") + +// repoRootFromVCSPaths attempts to map importPath to a repoRoot +// using the mappings defined in vcsPaths. +func repoRootFromVCSPaths(importPath string, security web.SecurityMode, vcsPaths []*vcsPath) (*RepoRoot, error) { + // A common error is to use https://packagepath because that's what + // hg and git require. Diagnose this helpfully. + if prefix := httpPrefix(importPath); prefix != "" { + // The importPath has been cleaned, so has only one slash. The pattern + // ignores the slashes; the error message puts them back on the RHS at least. 
+ return nil, fmt.Errorf("%q not allowed in import path", prefix+"//") + } + for _, srv := range vcsPaths { + if !strings.HasPrefix(importPath, srv.prefix) { + continue + } + m := srv.regexp.FindStringSubmatch(importPath) + if m == nil { + if srv.prefix != "" { + return nil, load.ImportErrorf(importPath, "invalid %s import path %q", srv.prefix, importPath) + } + continue + } + + // Build map of named subexpression matches for expand. + match := map[string]string{ + "prefix": srv.prefix, + "import": importPath, + } + for i, name := range srv.regexp.SubexpNames() { + if name != "" && match[name] == "" { + match[name] = m[i] + } + } + if srv.vcs != "" { + match["vcs"] = expand(match, srv.vcs) + } + if srv.repo != "" { + match["repo"] = expand(match, srv.repo) + } + if srv.check != nil { + if err := srv.check(match); err != nil { + return nil, err + } + } + vcs := vcsByCmd(match["vcs"]) + if vcs == nil { + return nil, fmt.Errorf("unknown version control system %q", match["vcs"]) + } + var repoURL string + if !srv.schemelessRepo { + repoURL = match["repo"] + } else { + scheme := vcs.Scheme[0] // default to first scheme + repo := match["repo"] + if vcs.PingCmd != "" { + // If we know how to test schemes, scan to find one. + for _, s := range vcs.Scheme { + if security == web.SecureOnly && !vcs.isSecureScheme(s) { + continue + } + if vcs.Ping(s, repo) == nil { + scheme = s + break + } + } + } + repoURL = scheme + "://" + repo + } + rr := &RepoRoot{ + Repo: repoURL, + Root: match["root"], + VCS: vcs, + } + return rr, nil + } + return nil, errUnknownSite +} + +// urlForImportPath returns a partially-populated URL for the given Go import path. +// +// The URL leaves the Scheme field blank so that web.Get will try any scheme +// allowed by the selected security mode. 
+func urlForImportPath(importPath string) (*urlpkg.URL, error) { + slash := strings.Index(importPath, "/") + if slash < 0 { + slash = len(importPath) + } + host, path := importPath[:slash], importPath[slash:] + if !strings.Contains(host, ".") { + return nil, errors.New("import path does not begin with hostname") + } + if len(path) == 0 { + path = "/" + } + return &urlpkg.URL{Host: host, Path: path, RawQuery: "go-get=1"}, nil +} + +// repoRootForImportDynamic finds a *RepoRoot for a custom domain that's not +// statically known by repoRootForImportPathStatic. +// +// This handles custom import paths like "name.tld/pkg/foo" or just "name.tld". +func repoRootForImportDynamic(importPath string, mod ModuleMode, security web.SecurityMode) (*RepoRoot, error) { + url, err := urlForImportPath(importPath) + if err != nil { + return nil, err + } + resp, err := web.Get(security, url) + if err != nil { + msg := "https fetch: %v" + if security == web.Insecure { + msg = "http/" + msg + } + return nil, fmt.Errorf(msg, err) + } + body := resp.Body + defer body.Close() + imports, err := parseMetaGoImports(body, mod) + if len(imports) == 0 { + if respErr := resp.Err(); respErr != nil { + // If the server's status was not OK, prefer to report that instead of + // an XML parse error. + return nil, respErr + } + } + if err != nil { + return nil, fmt.Errorf("parsing %s: %v", importPath, err) + } + // Find the matched meta import. 
+ mmi, err := matchGoImport(imports, importPath) + if err != nil { + if _, ok := err.(ImportMismatchError); !ok { + return nil, fmt.Errorf("parse %s: %v", url, err) + } + return nil, fmt.Errorf("parse %s: no go-import meta tags (%s)", resp.URL, err) + } + if cfg.BuildV { + log.Printf("get %q: found meta tag %#v at %s", importPath, mmi, url) + } + // If the import was "uni.edu/bob/project", which said the + // prefix was "uni.edu" and the RepoRoot was "evilroot.com", + // make sure we don't trust Bob and check out evilroot.com to + // "uni.edu" yet (possibly overwriting/preempting another + // non-evil student). Instead, first verify the root and see + // if it matches Bob's claim. + if mmi.Prefix != importPath { + if cfg.BuildV { + log.Printf("get %q: verifying non-authoritative meta tag", importPath) + } + var imports []metaImport + url, imports, err = metaImportsForPrefix(mmi.Prefix, mod, security) + if err != nil { + return nil, err + } + metaImport2, err := matchGoImport(imports, importPath) + if err != nil || mmi != metaImport2 { + return nil, fmt.Errorf("%s and %s disagree about go-import for %s", resp.URL, url, mmi.Prefix) + } + } + + if err := validateRepoRoot(mmi.RepoRoot); err != nil { + return nil, fmt.Errorf("%s: invalid repo root %q: %v", resp.URL, mmi.RepoRoot, err) + } + var vcs *Cmd + if mmi.VCS == "mod" { + vcs = vcsMod + } else { + vcs = vcsByCmd(mmi.VCS) + if vcs == nil { + return nil, fmt.Errorf("%s: unknown vcs %q", resp.URL, mmi.VCS) + } + } + + rr := &RepoRoot{ + Repo: mmi.RepoRoot, + Root: mmi.Prefix, + IsCustom: true, + VCS: vcs, + } + return rr, nil +} + +// validateRepoRoot returns an error if repoRoot does not seem to be +// a valid URL with scheme. 
+func validateRepoRoot(repoRoot string) error { + url, err := urlpkg.Parse(repoRoot) + if err != nil { + return err + } + if url.Scheme == "" { + return errors.New("no scheme") + } + if url.Scheme == "file" { + return errors.New("file scheme disallowed") + } + return nil +} + +var fetchGroup singleflight.Group +var ( + fetchCacheMu sync.Mutex + fetchCache = map[string]fetchResult{} // key is metaImportsForPrefix's importPrefix +) + +// metaImportsForPrefix takes a package's root import path as declared in a tag +// and returns its HTML discovery URL and the parsed metaImport lines +// found on the page. +// +// The importPath is of the form "golang.org/x/tools". +// It is an error if no imports are found. +// url will still be valid if err != nil. +// The returned url will be of the form "https://golang.org/x/tools?go-get=1" +func metaImportsForPrefix(importPrefix string, mod ModuleMode, security web.SecurityMode) (*urlpkg.URL, []metaImport, error) { + setCache := func(res fetchResult) (fetchResult, error) { + fetchCacheMu.Lock() + defer fetchCacheMu.Unlock() + fetchCache[importPrefix] = res + return res, nil + } + + resi, _, _ := fetchGroup.Do(importPrefix, func() (resi interface{}, err error) { + fetchCacheMu.Lock() + if res, ok := fetchCache[importPrefix]; ok { + fetchCacheMu.Unlock() + return res, nil + } + fetchCacheMu.Unlock() + + url, err := urlForImportPath(importPrefix) + if err != nil { + return setCache(fetchResult{err: err}) + } + resp, err := web.Get(security, url) + if err != nil { + return setCache(fetchResult{url: url, err: fmt.Errorf("fetching %s: %v", importPrefix, err)}) + } + body := resp.Body + defer body.Close() + imports, err := parseMetaGoImports(body, mod) + if len(imports) == 0 { + if respErr := resp.Err(); respErr != nil { + // If the server's status was not OK, prefer to report that instead of + // an XML parse error. 
+ return setCache(fetchResult{url: url, err: respErr}) + } + } + if err != nil { + return setCache(fetchResult{url: url, err: fmt.Errorf("parsing %s: %v", resp.URL, err)}) + } + if len(imports) == 0 { + err = fmt.Errorf("fetching %s: no go-import meta tag found in %s", importPrefix, resp.URL) + } + return setCache(fetchResult{url: url, imports: imports, err: err}) + }) + res := resi.(fetchResult) + return res.url, res.imports, res.err +} + +type fetchResult struct { + url *urlpkg.URL + imports []metaImport + err error +} + +// metaImport represents the parsed tags from HTML files. +type metaImport struct { + Prefix, VCS, RepoRoot string +} + +// pathPrefix reports whether sub is a prefix of s, +// only considering entire path components. +func pathPrefix(s, sub string) bool { + // strings.HasPrefix is necessary but not sufficient. + if !strings.HasPrefix(s, sub) { + return false + } + // The remainder after the prefix must either be empty or start with a slash. + rem := s[len(sub):] + return rem == "" || rem[0] == '/' +} + +// A ImportMismatchError is returned where metaImport/s are present +// but none match our import path. +type ImportMismatchError struct { + importPath string + mismatches []string // the meta imports that were discarded for not matching our importPath +} + +func (m ImportMismatchError) Error() string { + formattedStrings := make([]string, len(m.mismatches)) + for i, pre := range m.mismatches { + formattedStrings[i] = fmt.Sprintf("meta tag %s did not match import path %s", pre, m.importPath) + } + return strings.Join(formattedStrings, ", ") +} + +// matchGoImport returns the metaImport from imports matching importPath. +// An error is returned if there are multiple matches. +// An ImportMismatchError is returned if none match. 
+func matchGoImport(imports []metaImport, importPath string) (metaImport, error) { + match := -1 + + errImportMismatch := ImportMismatchError{importPath: importPath} + for i, im := range imports { + if !pathPrefix(importPath, im.Prefix) { + errImportMismatch.mismatches = append(errImportMismatch.mismatches, im.Prefix) + continue + } + + if match >= 0 { + if imports[match].VCS == "mod" && im.VCS != "mod" { + // All the mod entries precede all the non-mod entries. + // We have a mod entry and don't care about the rest, + // matching or not. + break + } + return metaImport{}, fmt.Errorf("multiple meta tags match import path %q", importPath) + } + match = i + } + + if match == -1 { + return metaImport{}, errImportMismatch + } + return imports[match], nil +} + +// expand rewrites s to replace {k} with match[k] for each key k in match. +func expand(match map[string]string, s string) string { + // We want to replace each match exactly once, and the result of expansion + // must not depend on the iteration order through the map. + // A strings.Replacer has exactly the properties we're looking for. 
+ oldNew := make([]string, 0, 2*len(match)) + for k, v := range match { + oldNew = append(oldNew, "{"+k+"}", v) + } + return strings.NewReplacer(oldNew...).Replace(s) +} + +// vcsPaths defines the meaning of import paths referring to +// commonly-used VCS hosting sites (github.com/user/dir) +// and import paths referring to a fully-qualified importPath +// containing a VCS type (foo.com/repo.git/dir) +var vcsPaths = []*vcsPath{ + // Github + { + prefix: "github.com/", + regexp: lazyregexp.New(`^(?Pgithub\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`), + vcs: "git", + repo: "https://{root}", + check: noVCSSuffix, + }, + + // Bitbucket + { + prefix: "bitbucket.org/", + regexp: lazyregexp.New(`^(?Pbitbucket\.org/(?P[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`), + repo: "https://{root}", + check: bitbucketVCS, + }, + + // IBM DevOps Services (JazzHub) + { + prefix: "hub.jazz.net/git/", + regexp: lazyregexp.New(`^(?Phub\.jazz\.net/git/[a-z0-9]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`), + vcs: "git", + repo: "https://{root}", + check: noVCSSuffix, + }, + + // Git at Apache + { + prefix: "git.apache.org/", + regexp: lazyregexp.New(`^(?Pgit\.apache\.org/[a-z0-9_.\-]+\.git)(/[A-Za-z0-9_.\-]+)*$`), + vcs: "git", + repo: "https://{root}", + }, + + // Git at OpenStack + { + prefix: "git.openstack.org/", + regexp: lazyregexp.New(`^(?Pgit\.openstack\.org/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(\.git)?(/[A-Za-z0-9_.\-]+)*$`), + vcs: "git", + repo: "https://{root}", + }, + + // chiselapp.com for fossil + { + prefix: "chiselapp.com/", + regexp: lazyregexp.New(`^(?Pchiselapp\.com/user/[A-Za-z0-9]+/repository/[A-Za-z0-9_.\-]+)$`), + vcs: "fossil", + repo: "https://{root}", + }, + + // General syntax for any server. + // Must be last. 
+ { + regexp: lazyregexp.New(`(?P(?P([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?(/~?[A-Za-z0-9_.\-]+)+?)\.(?Pbzr|fossil|git|hg|svn))(/~?[A-Za-z0-9_.\-]+)*$`), + schemelessRepo: true, + }, +} + +// vcsPathsAfterDynamic gives additional vcsPaths entries +// to try after the dynamic HTML check. +// This gives those sites a chance to introduce tags +// as part of a graceful transition away from the hard-coded logic. +var vcsPathsAfterDynamic = []*vcsPath{ + // Launchpad. See golang.org/issue/11436. + { + prefix: "launchpad.net/", + regexp: lazyregexp.New(`^(?Plaunchpad\.net/((?P[A-Za-z0-9_.\-]+)(?P/[A-Za-z0-9_.\-]+)?|~[A-Za-z0-9_.\-]+/(\+junk|[A-Za-z0-9_.\-]+)/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`), + vcs: "bzr", + repo: "https://{root}", + check: launchpadVCS, + }, +} + +// noVCSSuffix checks that the repository name does not +// end in .foo for any version control system foo. +// The usual culprit is ".git". +func noVCSSuffix(match map[string]string) error { + repo := match["repo"] + for _, vcs := range vcsList { + if strings.HasSuffix(repo, "."+vcs.Cmd) { + return fmt.Errorf("invalid version control suffix in %s path", match["prefix"]) + } + } + return nil +} + +// bitbucketVCS determines the version control system for a +// Bitbucket repository, by using the Bitbucket API. +func bitbucketVCS(match map[string]string) error { + if err := noVCSSuffix(match); err != nil { + return err + } + + var resp struct { + SCM string `json:"scm"` + } + url := &urlpkg.URL{ + Scheme: "https", + Host: "api.bitbucket.org", + Path: expand(match, "/2.0/repositories/{bitname}"), + RawQuery: "fields=scm", + } + data, err := web.GetBytes(url) + if err != nil { + if httpErr, ok := err.(*web.HTTPError); ok && httpErr.StatusCode == 403 { + // this may be a private repository. If so, attempt to determine which + // VCS it uses. See issue 5375. 
+ root := match["root"] + for _, vcs := range []string{"git", "hg"} { + if vcsByCmd(vcs).Ping("https", root) == nil { + resp.SCM = vcs + break + } + } + } + + if resp.SCM == "" { + return err + } + } else { + if err := json.Unmarshal(data, &resp); err != nil { + return fmt.Errorf("decoding %s: %v", url, err) + } + } + + if vcsByCmd(resp.SCM) != nil { + match["vcs"] = resp.SCM + if resp.SCM == "git" { + match["repo"] += ".git" + } + return nil + } + + return fmt.Errorf("unable to detect version control system for bitbucket.org/ path") +} + +// launchpadVCS solves the ambiguity for "lp.net/project/foo". In this case, +// "foo" could be a series name registered in Launchpad with its own branch, +// and it could also be the name of a directory within the main project +// branch one level up. +func launchpadVCS(match map[string]string) error { + if match["project"] == "" || match["series"] == "" { + return nil + } + url := &urlpkg.URL{ + Scheme: "https", + Host: "code.launchpad.net", + Path: expand(match, "/{project}{series}/.bzr/branch-format"), + } + _, err := web.GetBytes(url) + if err != nil { + match["root"] = expand(match, "launchpad.net/{project}") + match["repo"] = expand(match, "https://{root}") + } + return nil +} diff --git a/src/cmd/go/internal/vcs/vcs_test.go b/src/cmd/go/internal/vcs/vcs_test.go new file mode 100644 index 0000000000..5b874204f1 --- /dev/null +++ b/src/cmd/go/internal/vcs/vcs_test.go @@ -0,0 +1,475 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcs + +import ( + "errors" + "internal/testenv" + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" + + "cmd/go/internal/web" +) + +// Test that RepoRootForImportPath determines the correct RepoRoot for a given importPath. +// TODO(cmang): Add tests for SVN and BZR. 
+func TestRepoRootForImportPath(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + tests := []struct { + path string + want *RepoRoot + }{ + { + "github.com/golang/groupcache", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://github.com/golang/groupcache", + }, + }, + // Unicode letters in directories are not valid. + { + "github.com/user/unicode/испытание", + nil, + }, + // IBM DevOps Services tests + { + "hub.jazz.net/git/user1/pkgname", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://hub.jazz.net/git/user1/pkgname", + }, + }, + { + "hub.jazz.net/git/user1/pkgname/submodule/submodule/submodule", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://hub.jazz.net/git/user1/pkgname", + }, + }, + { + "hub.jazz.net", + nil, + }, + { + "hubajazz.net", + nil, + }, + { + "hub2.jazz.net", + nil, + }, + { + "hub.jazz.net/someotherprefix", + nil, + }, + { + "hub.jazz.net/someotherprefix/user1/pkgname", + nil, + }, + // Spaces are not valid in user names or package names + { + "hub.jazz.net/git/User 1/pkgname", + nil, + }, + { + "hub.jazz.net/git/user1/pkg name", + nil, + }, + // Dots are not valid in user names + { + "hub.jazz.net/git/user.1/pkgname", + nil, + }, + { + "hub.jazz.net/git/user/pkg.name", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://hub.jazz.net/git/user/pkg.name", + }, + }, + // User names cannot have uppercase letters + { + "hub.jazz.net/git/USER/pkgname", + nil, + }, + // OpenStack tests + { + "git.openstack.org/openstack/swift", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://git.openstack.org/openstack/swift", + }, + }, + // Trailing .git is less preferred but included for + // compatibility purposes while the same source needs to + // be compilable on both old and new go + { + "git.openstack.org/openstack/swift.git", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://git.openstack.org/openstack/swift.git", + }, + }, + { + "git.openstack.org/openstack/swift/go/hummingbird", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://git.openstack.org/openstack/swift", + }, + }, + { + 
"git.openstack.org", + nil, + }, + { + "git.openstack.org/openstack", + nil, + }, + // Spaces are not valid in package name + { + "git.apache.org/package name/path/to/lib", + nil, + }, + // Should have ".git" suffix + { + "git.apache.org/package-name/path/to/lib", + nil, + }, + { + "gitbapache.org", + nil, + }, + { + "git.apache.org/package-name.git", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://git.apache.org/package-name.git", + }, + }, + { + "git.apache.org/package-name_2.x.git/path/to/lib", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://git.apache.org/package-name_2.x.git", + }, + }, + { + "chiselapp.com/user/kyle/repository/fossilgg", + &RepoRoot{ + VCS: vcsFossil, + Repo: "https://chiselapp.com/user/kyle/repository/fossilgg", + }, + }, + { + // must have a user/$name/repository/$repo path + "chiselapp.com/kyle/repository/fossilgg", + nil, + }, + { + "chiselapp.com/user/kyle/fossilgg", + nil, + }, + } + + for _, test := range tests { + got, err := RepoRootForImportPath(test.path, IgnoreMod, web.SecureOnly) + want := test.want + + if want == nil { + if err == nil { + t.Errorf("RepoRootForImportPath(%q): Error expected but not received", test.path) + } + continue + } + if err != nil { + t.Errorf("RepoRootForImportPath(%q): %v", test.path, err) + continue + } + if got.VCS.Name != want.VCS.Name || got.Repo != want.Repo { + t.Errorf("RepoRootForImportPath(%q) = VCS(%s) Repo(%s), want VCS(%s) Repo(%s)", test.path, got.VCS, got.Repo, want.VCS, want.Repo) + } + } +} + +// Test that vcsFromDir correctly inspects a given directory and returns the right VCS and root. 
+func TestFromDir(t *testing.T) { + tempDir, err := ioutil.TempDir("", "vcstest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + for j, vcs := range vcsList { + dir := filepath.Join(tempDir, "example.com", vcs.Name, "."+vcs.Cmd) + if j&1 == 0 { + err := os.MkdirAll(dir, 0755) + if err != nil { + t.Fatal(err) + } + } else { + err := os.MkdirAll(filepath.Dir(dir), 0755) + if err != nil { + t.Fatal(err) + } + f, err := os.Create(dir) + if err != nil { + t.Fatal(err) + } + f.Close() + } + + want := RepoRoot{ + VCS: vcs, + Root: path.Join("example.com", vcs.Name), + } + var got RepoRoot + got.VCS, got.Root, err = FromDir(dir, tempDir) + if err != nil { + t.Errorf("FromDir(%q, %q): %v", dir, tempDir, err) + continue + } + if got.VCS.Name != want.VCS.Name || got.Root != want.Root { + t.Errorf("FromDir(%q, %q) = VCS(%s) Root(%s), want VCS(%s) Root(%s)", dir, tempDir, got.VCS, got.Root, want.VCS, want.Root) + } + } +} + +func TestIsSecure(t *testing.T) { + tests := []struct { + vcs *Cmd + url string + secure bool + }{ + {vcsGit, "http://example.com/foo.git", false}, + {vcsGit, "https://example.com/foo.git", true}, + {vcsBzr, "http://example.com/foo.bzr", false}, + {vcsBzr, "https://example.com/foo.bzr", true}, + {vcsSvn, "http://example.com/svn", false}, + {vcsSvn, "https://example.com/svn", true}, + {vcsHg, "http://example.com/foo.hg", false}, + {vcsHg, "https://example.com/foo.hg", true}, + {vcsGit, "ssh://user@example.com/foo.git", true}, + {vcsGit, "user@server:path/to/repo.git", false}, + {vcsGit, "user@server:", false}, + {vcsGit, "server:repo.git", false}, + {vcsGit, "server:path/to/repo.git", false}, + {vcsGit, "example.com:path/to/repo.git", false}, + {vcsGit, "path/that/contains/a:colon/repo.git", false}, + {vcsHg, "ssh://user@example.com/path/to/repo.hg", true}, + {vcsFossil, "http://example.com/foo", false}, + {vcsFossil, "https://example.com/foo", true}, + } + + for _, test := range tests { + secure := test.vcs.IsSecure(test.url) + if 
secure != test.secure { + t.Errorf("%s isSecure(%q) = %t; want %t", test.vcs, test.url, secure, test.secure) + } + } +} + +func TestIsSecureGitAllowProtocol(t *testing.T) { + tests := []struct { + vcs *Cmd + url string + secure bool + }{ + // Same as TestIsSecure to verify same behavior. + {vcsGit, "http://example.com/foo.git", false}, + {vcsGit, "https://example.com/foo.git", true}, + {vcsBzr, "http://example.com/foo.bzr", false}, + {vcsBzr, "https://example.com/foo.bzr", true}, + {vcsSvn, "http://example.com/svn", false}, + {vcsSvn, "https://example.com/svn", true}, + {vcsHg, "http://example.com/foo.hg", false}, + {vcsHg, "https://example.com/foo.hg", true}, + {vcsGit, "user@server:path/to/repo.git", false}, + {vcsGit, "user@server:", false}, + {vcsGit, "server:repo.git", false}, + {vcsGit, "server:path/to/repo.git", false}, + {vcsGit, "example.com:path/to/repo.git", false}, + {vcsGit, "path/that/contains/a:colon/repo.git", false}, + {vcsHg, "ssh://user@example.com/path/to/repo.hg", true}, + // New behavior. 
+ {vcsGit, "ssh://user@example.com/foo.git", false}, + {vcsGit, "foo://example.com/bar.git", true}, + {vcsHg, "foo://example.com/bar.hg", false}, + {vcsSvn, "foo://example.com/svn", false}, + {vcsBzr, "foo://example.com/bar.bzr", false}, + } + + defer os.Unsetenv("GIT_ALLOW_PROTOCOL") + os.Setenv("GIT_ALLOW_PROTOCOL", "https:foo") + for _, test := range tests { + secure := test.vcs.IsSecure(test.url) + if secure != test.secure { + t.Errorf("%s isSecure(%q) = %t; want %t", test.vcs, test.url, secure, test.secure) + } + } +} + +func TestMatchGoImport(t *testing.T) { + tests := []struct { + imports []metaImport + path string + mi metaImport + err error + }{ + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/foo", + mi: metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/foo/", + mi: metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + {Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/foo", + mi: metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + {Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/fooa", + mi: metaImport{Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + { + imports: []metaImport{ + {Prefix: 
"example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/foo/bar", + err: errors.New("should not be allowed to create nested repo"), + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/foo/bar/baz", + err: errors.New("should not be allowed to create nested repo"), + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/foo/bar/baz/qux", + err: errors.New("should not be allowed to create nested repo"), + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/foo/bar/baz/", + err: errors.New("should not be allowed to create nested repo"), + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com", + err: errors.New("pathologically short path"), + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "different.example.com/user/foo", + err: errors.New("meta tags do not match import path"), + }, + { + imports: []metaImport{ + {Prefix: "myitcv.io/blah2", VCS: "mod", RepoRoot: "https://raw.githubusercontent.com/myitcv/pubx/master"}, + {Prefix: "myitcv.io", VCS: 
"git", RepoRoot: "https://github.com/myitcv/x"}, + }, + path: "myitcv.io/blah2/foo", + mi: metaImport{Prefix: "myitcv.io/blah2", VCS: "mod", RepoRoot: "https://raw.githubusercontent.com/myitcv/pubx/master"}, + }, + { + imports: []metaImport{ + {Prefix: "myitcv.io/blah2", VCS: "mod", RepoRoot: "https://raw.githubusercontent.com/myitcv/pubx/master"}, + {Prefix: "myitcv.io", VCS: "git", RepoRoot: "https://github.com/myitcv/x"}, + }, + path: "myitcv.io/other", + mi: metaImport{Prefix: "myitcv.io", VCS: "git", RepoRoot: "https://github.com/myitcv/x"}, + }, + } + + for _, test := range tests { + mi, err := matchGoImport(test.imports, test.path) + if mi != test.mi { + t.Errorf("unexpected metaImport; got %v, want %v", mi, test.mi) + } + + got := err + want := test.err + if (got == nil) != (want == nil) { + t.Errorf("unexpected error; got %v, want %v", got, want) + } + } +} + +func TestValidateRepoRoot(t *testing.T) { + tests := []struct { + root string + ok bool + }{ + { + root: "", + ok: false, + }, + { + root: "http://", + ok: true, + }, + { + root: "git+ssh://", + ok: true, + }, + { + root: "http#://", + ok: false, + }, + { + root: "-config", + ok: false, + }, + { + root: "-config://", + ok: false, + }, + } + + for _, test := range tests { + err := validateRepoRoot(test.root) + ok := err == nil + if ok != test.ok { + want := "error" + if test.ok { + want = "nil" + } + t.Errorf("validateRepoRoot(%q) = %q, want %s", test.root, err, want) + } + } +} -- cgit v1.2.3-54-g00ecf