Diffstat (limited to 'src/cmd/compile')
-rw-r--r-- src/cmd/compile/README.md | 49
-rw-r--r-- src/cmd/compile/default.pgo | bin 286041 -> 261857 bytes
-rw-r--r-- src/cmd/compile/internal/abi/abiutils.go | 13
-rw-r--r-- src/cmd/compile/internal/arm64/ssa.go | 63
-rw-r--r-- src/cmd/compile/internal/base/debug.go | 1
-rw-r--r-- src/cmd/compile/internal/base/flag.go | 10
-rw-r--r-- src/cmd/compile/internal/base/print.go | 5
-rw-r--r-- src/cmd/compile/internal/compare/compare.go | 7
-rw-r--r-- src/cmd/compile/internal/compare/compare_test.go | 2
-rw-r--r-- src/cmd/compile/internal/devirtualize/pgo_test.go | 3
-rw-r--r-- src/cmd/compile/internal/gc/compile.go | 12
-rw-r--r-- src/cmd/compile/internal/gc/main.go | 5
-rw-r--r-- src/cmd/compile/internal/importer/iimport.go | 3
-rw-r--r-- src/cmd/compile/internal/importer/support.go | 3
-rw-r--r-- src/cmd/compile/internal/importer/ureader.go | 18
-rw-r--r-- src/cmd/compile/internal/inline/inl.go | 114
-rw-r--r-- src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go | 5
-rw-r--r-- src/cmd/compile/internal/ir/func.go | 46
-rw-r--r-- src/cmd/compile/internal/ir/sizeof_test.go | 2
-rw-r--r-- src/cmd/compile/internal/liveness/mergelocals.go | 2
-rw-r--r-- src/cmd/compile/internal/liveness/plive.go | 3
-rw-r--r-- src/cmd/compile/internal/noder/irgen.go | 36
-rw-r--r-- src/cmd/compile/internal/noder/reader.go | 55
-rw-r--r-- src/cmd/compile/internal/noder/unified.go | 4
-rw-r--r-- src/cmd/compile/internal/noder/writer.go | 39
-rw-r--r-- src/cmd/compile/internal/ppc64/ssa.go | 21
-rw-r--r-- src/cmd/compile/internal/rangefunc/rangefunc_test.go | 892
-rw-r--r-- src/cmd/compile/internal/rangefunc/rewrite.go | 740
-rw-r--r-- src/cmd/compile/internal/ssa/_gen/ARM64.rules | 10
-rw-r--r-- src/cmd/compile/internal/ssa/_gen/ARM64Ops.go | 30
-rw-r--r-- src/cmd/compile/internal/ssa/_gen/PPC64.rules | 138
-rw-r--r-- src/cmd/compile/internal/ssa/_gen/PPC64Ops.go | 18
-rw-r--r-- src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules | 17
-rw-r--r-- src/cmd/compile/internal/ssa/_gen/generic.rules | 4
-rw-r--r-- src/cmd/compile/internal/ssa/_gen/genericOps.go | 24
-rw-r--r-- src/cmd/compile/internal/ssa/block.go | 19
-rw-r--r-- src/cmd/compile/internal/ssa/deadstore.go | 19
-rw-r--r-- src/cmd/compile/internal/ssa/deadstore_test.go | 45
-rw-r--r-- src/cmd/compile/internal/ssa/export_test.go | 3
-rw-r--r-- src/cmd/compile/internal/ssa/fmahash_test.go | 2
-rw-r--r-- src/cmd/compile/internal/ssa/func.go | 1
-rw-r--r-- src/cmd/compile/internal/ssa/looprotate.go | 13
-rw-r--r-- src/cmd/compile/internal/ssa/opGen.go | 160
-rw-r--r-- src/cmd/compile/internal/ssa/rewrite.go | 98
-rw-r--r-- src/cmd/compile/internal/ssa/rewriteARM64.go | 188
-rw-r--r-- src/cmd/compile/internal/ssa/rewritePPC64.go | 1814
-rw-r--r-- src/cmd/compile/internal/ssa/rewritePPC64latelower.go | 148
-rw-r--r-- src/cmd/compile/internal/ssa/value.go | 2
-rw-r--r-- src/cmd/compile/internal/ssagen/abi.go | 5
-rw-r--r-- src/cmd/compile/internal/ssagen/nowb.go | 8
-rw-r--r-- src/cmd/compile/internal/ssagen/pgen.go | 6
-rw-r--r-- src/cmd/compile/internal/ssagen/ssa.go | 125
-rw-r--r-- src/cmd/compile/internal/syntax/parser.go | 2
-rw-r--r-- src/cmd/compile/internal/syntax/pos.go | 12
-rw-r--r-- src/cmd/compile/internal/syntax/testdata/issue20789.go | 2
-rw-r--r-- src/cmd/compile/internal/syntax/testdata/issue47704.go | 2
-rw-r--r-- src/cmd/compile/internal/syntax/testdata/issue49205.go | 8
-rw-r--r-- src/cmd/compile/internal/syntax/testdata/issue52391.go | 2
-rw-r--r-- src/cmd/compile/internal/syntax/testdata/issue65790.go | 2
-rw-r--r-- src/cmd/compile/internal/syntax/type.go | 5
-rw-r--r-- src/cmd/compile/internal/typecheck/_builtin/runtime.go | 4
-rw-r--r-- src/cmd/compile/internal/typecheck/builtin.go | 403
-rw-r--r-- src/cmd/compile/internal/types2/alias.go | 58
-rw-r--r-- src/cmd/compile/internal/types2/api.go | 12
-rw-r--r-- src/cmd/compile/internal/types2/api_test.go | 27
-rw-r--r-- src/cmd/compile/internal/types2/builtins.go | 16
-rw-r--r-- src/cmd/compile/internal/types2/call.go | 2
-rw-r--r-- src/cmd/compile/internal/types2/check.go | 47
-rw-r--r-- src/cmd/compile/internal/types2/check_test.go | 8
-rw-r--r-- src/cmd/compile/internal/types2/compiler_internal.go | 50
-rw-r--r-- src/cmd/compile/internal/types2/conversions.go | 13
-rw-r--r-- src/cmd/compile/internal/types2/decl.go | 7
-rw-r--r-- src/cmd/compile/internal/types2/expr.go | 3
-rw-r--r-- src/cmd/compile/internal/types2/format.go | 33
-rw-r--r-- src/cmd/compile/internal/types2/infer.go | 11
-rw-r--r-- src/cmd/compile/internal/types2/instantiate.go | 62
-rw-r--r-- src/cmd/compile/internal/types2/issues_test.go | 12
-rw-r--r-- src/cmd/compile/internal/types2/labels.go | 12
-rw-r--r-- src/cmd/compile/internal/types2/named.go | 14
-rw-r--r-- src/cmd/compile/internal/types2/object.go | 2
-rw-r--r-- src/cmd/compile/internal/types2/operand.go | 8
-rw-r--r-- src/cmd/compile/internal/types2/predicates.go | 7
-rw-r--r-- src/cmd/compile/internal/types2/scope.go | 14
-rw-r--r-- src/cmd/compile/internal/types2/signature.go | 3
-rw-r--r-- src/cmd/compile/internal/types2/stdlib_test.go | 3
-rw-r--r-- src/cmd/compile/internal/types2/stmt.go | 63
-rw-r--r-- src/cmd/compile/internal/types2/subst.go | 75
-rw-r--r-- src/cmd/compile/internal/types2/typeparam.go | 4
-rw-r--r-- src/cmd/compile/internal/types2/typeset.go | 8
-rw-r--r-- src/cmd/compile/internal/types2/typestring.go | 11
-rw-r--r-- src/cmd/compile/internal/types2/typexpr.go | 34
-rw-r--r-- src/cmd/compile/internal/types2/under.go | 2
-rw-r--r-- src/cmd/compile/internal/types2/unify.go | 31
-rw-r--r-- src/cmd/compile/internal/types2/universe.go | 42
-rw-r--r-- src/cmd/compile/internal/types2/util.go | 2
-rw-r--r-- src/cmd/compile/internal/types2/version.go | 19
-rw-r--r-- src/cmd/compile/internal/walk/assign.go | 43
97 files changed, 3836 insertions(+), 2414 deletions(-)
diff --git a/src/cmd/compile/README.md b/src/cmd/compile/README.md
index 9b99a1b105..3fc7ca6ec6 100644
--- a/src/cmd/compile/README.md
+++ b/src/cmd/compile/README.md
@@ -140,6 +140,55 @@ a series of obj.Prog instructions. These are passed to the assembler
final object file. The object file will also contain reflect data, export data,
and debugging information.
+### 7a. Export
+
+In addition to writing a file of object code for the linker, the
+compiler also writes a file of "export data" for downstream
+compilation units. The export data file holds all the information
+computed during compilation of package P that may be needed when
+compiling a package Q that directly imports P. It includes type
+information for all exported declarations, IR for bodies of functions
+that are candidates for inlining, IR for bodies of generic functions
+that may be instantiated in another package, and a summary of the
+findings of escape analysis on function parameters.
+
+The format of the export data file has gone through a number of
+iterations. Its current form is called "unified", and it is a
+serialized representation of an object graph, with an index allowing
+lazy decoding of parts of the whole (since most imports are used to
+provide only a handful of symbols).
+
+The GOROOT repository contains a reader and a writer for the unified
+format; it encodes from/decodes to the compiler's IR.
+The golang.org/x/tools repository also provides a public API for an export
+data reader (using the go/types representation) that always supports the
+compiler's current file format and a small number of historic versions.
+(It is used by x/tools/go/packages in modes that require type information
+but not type-annotated syntax.)
+
+The x/tools repository also provides public APIs for reading and
+writing exported type information (but nothing more) using the older
+"indexed" format. (For example, gopls uses this version for its
+database of workspace information, which includes types.)
+
+Export data usually provides a "deep" summary, so that compilation of
+package Q need only read the export data files of its direct imports,
+and be assured that these provide all necessary information about
+declarations in indirect imports, such as the methods and struct
+fields of types referred to in P's public API. Deep export data is
+simpler for build systems, since only one file is needed per direct
+dependency. However, it does have a tendency to grow as one gets
+higher up the import graph of a big repository: if there is a set of
+very commonly used types with a large API, nearly every package's
+export data will include a copy. This problem motivated the "indexed"
+design, which allowed partial loading on demand.
+(gopls does less work than the compiler for each import and is thus
+more sensitive to export data overheads. For this reason, it uses
+"shallow" export data, in which indirect information is not recorded
+at all. This demands random access to the export data files of all
+dependencies, so is not suitable for distributed build systems.)
+
+
### 8. Tips
#### Getting Started
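
As a sketch of how a downstream tool consumes these files through the public
golang.org/x/tools/go/gcexportdata API mentioned in the README text above
(the way the export file is located, via go list -export, and the choice of
package are illustrative):

	// Usage: reader $(go list -export -f '{{.Export}}' fmt) fmt
	package main

	import (
		"fmt"
		"go/token"
		"go/types"
		"log"
		"os"

		"golang.org/x/tools/go/gcexportdata"
	)

	func main() {
		f, err := os.Open(os.Args[1])
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		r, err := gcexportdata.NewReader(f) // skip the archive/object file header
		if err != nil {
			log.Fatal(err)
		}
		fset := token.NewFileSet()
		imports := make(map[string]*types.Package)
		pkg, err := gcexportdata.Read(r, fset, imports, os.Args[2])
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(pkg.Path(), "exports", len(pkg.Scope().Names()), "names")
	}
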
diff --git a/src/cmd/compile/default.pgo b/src/cmd/compile/default.pgo
index 0f925ec69c..cc1498b7f7 100644
--- a/src/cmd/compile/default.pgo
+++ b/src/cmd/compile/default.pgo
Binary files differ
diff --git a/src/cmd/compile/internal/abi/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go
index 607d462493..e88a80d564 100644
--- a/src/cmd/compile/internal/abi/abiutils.go
+++ b/src/cmd/compile/internal/abi/abiutils.go
@@ -141,7 +141,7 @@ func (pa *ABIParamAssignment) RegisterTypesAndOffsets() ([]*types.Type, []int64)
}
typs := make([]*types.Type, 0, l)
offs := make([]int64, 0, l)
- offs, _ = appendParamOffsets(offs, 0, pa.Type)
+ offs, _ = appendParamOffsets(offs, 0, pa.Type) // 0 is aligned for everything.
return appendParamTypes(typs, pa.Type), offs
}
@@ -193,8 +193,8 @@ func appendParamTypes(rts []*types.Type, t *types.Type) []*types.Type {
// appendParamOffsets appends the offset(s) of type t, starting from "at",
// to input offsets, and returns the longer slice and the next unused offset.
+// at should already be aligned for t.
func appendParamOffsets(offsets []int64, at int64, t *types.Type) ([]int64, int64) {
- at = align(at, t)
w := t.Size()
if w == 0 {
return offsets, at
@@ -210,11 +210,15 @@ func appendParamOffsets(offsets []int64, at int64, t *types.Type) ([]int64, int6
typ := t.Kind()
switch typ {
case types.TARRAY:
+ te := t.Elem()
for i := int64(0); i < t.NumElem(); i++ {
- offsets, at = appendParamOffsets(offsets, at, t.Elem())
+ at = align(at, te)
+ offsets, at = appendParamOffsets(offsets, at, te)
}
case types.TSTRUCT:
+ at0 := at
for i, f := range t.Fields() {
+ at = at0 + f.Offset // Fields may be over-aligned, see wasm32.
offsets, at = appendParamOffsets(offsets, at, f.Type)
if f.Type.Size() == 0 && i == t.NumFields()-1 {
at++ // last field has zero width
@@ -668,12 +672,13 @@ func (pa *ABIParamAssignment) ComputePadding(storage []uint64) []uint64 {
if len(types) != nr {
panic("internal error")
}
+ offsets, _ := appendParamOffsets([]int64{}, 0, pa.Type)
off := int64(0)
for idx, t := range types {
ts := t.Size()
off += int64(ts)
if idx < len(types)-1 {
- noff := align(off, types[idx+1])
+ noff := offsets[idx+1]
if noff != off {
padding[idx] = uint64(noff - off)
}
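
The ComputePadding change above derives inter-piece padding from the
precomputed offsets rather than re-aligning on the fly. A standalone sketch of
that computation, with hypothetical names (this is not the compiler's helper):

	package main

	import "fmt"

	// paddingFromOffsets mirrors the offsets-based computation in
	// ComputePadding: padding[i] is the gap between the end of piece i
	// and the (already aligned) start of piece i+1.
	func paddingFromOffsets(sizes, offsets []int64) []int64 {
		padding := make([]int64, len(sizes))
		for i := 0; i+1 < len(sizes); i++ {
			if end := offsets[i] + sizes[i]; offsets[i+1] > end {
				padding[i] = offsets[i+1] - end
			}
		}
		return padding
	}

	func main() {
		// struct { a uint8; b uint64 }: offsets 0 and 8, so 7 bytes of
		// padding follow the uint8.
		fmt.Println(paddingFromOffsets([]int64{1, 8}, []int64{0, 8})) // [7 0]
	}
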
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 27b4e881c0..900e7016a3 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -781,23 +781,30 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p3.To.Type = obj.TYPE_REG
p3.To.Reg = out
- case ssa.OpARM64LoweredAtomicAnd8,
+ case ssa.OpARM64LoweredAtomicAnd64,
+ ssa.OpARM64LoweredAtomicOr64,
ssa.OpARM64LoweredAtomicAnd32,
- ssa.OpARM64LoweredAtomicOr8,
- ssa.OpARM64LoweredAtomicOr32:
- // LDAXRB/LDAXRW (Rarg0), Rout
- // AND/OR Rarg1, Rout
- // STLXRB/STLXRB Rout, (Rarg0), Rtmp
+ ssa.OpARM64LoweredAtomicOr32,
+ ssa.OpARM64LoweredAtomicAnd8,
+ ssa.OpARM64LoweredAtomicOr8:
+ // LDAXR[BW] (Rarg0), Rout
+ // AND/OR Rarg1, Rout, tmp1
+ // STLXR[BW] tmp1, (Rarg0), Rtmp
// CBNZ Rtmp, -3(PC)
- ld := arm64.ALDAXRB
- st := arm64.ASTLXRB
+ ld := arm64.ALDAXR
+ st := arm64.ASTLXR
if v.Op == ssa.OpARM64LoweredAtomicAnd32 || v.Op == ssa.OpARM64LoweredAtomicOr32 {
ld = arm64.ALDAXRW
st = arm64.ASTLXRW
}
+ if v.Op == ssa.OpARM64LoweredAtomicAnd8 || v.Op == ssa.OpARM64LoweredAtomicOr8 {
+ ld = arm64.ALDAXRB
+ st = arm64.ASTLXRB
+ }
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
out := v.Reg0()
+ tmp := v.RegTmp()
p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
@@ -806,11 +813,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p1 := s.Prog(v.Op.Asm())
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
+ p1.Reg = out
p1.To.Type = obj.TYPE_REG
- p1.To.Reg = out
+ p1.To.Reg = tmp
p2 := s.Prog(st)
p2.From.Type = obj.TYPE_REG
- p2.From.Reg = out
+ p2.From.Reg = tmp
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = r0
p2.RegTo2 = arm64.REGTMP
@@ -819,9 +827,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
p3.To.SetTarget(p)
+
case ssa.OpARM64LoweredAtomicAnd8Variant,
- ssa.OpARM64LoweredAtomicAnd32Variant:
- atomic_clear := arm64.ALDCLRALW
+ ssa.OpARM64LoweredAtomicAnd32Variant,
+ ssa.OpARM64LoweredAtomicAnd64Variant:
+ atomic_clear := arm64.ALDCLRALD
+ if v.Op == ssa.OpARM64LoweredAtomicAnd32Variant {
+ atomic_clear = arm64.ALDCLRALW
+ }
if v.Op == ssa.OpARM64LoweredAtomicAnd8Variant {
atomic_clear = arm64.ALDCLRALB
}
@@ -836,7 +849,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
- // LDCLRALW Rtemp, (Rarg0), Rout
+ // LDCLRAL[BDW] Rtemp, (Rarg0), Rout
p1 := s.Prog(atomic_clear)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = arm64.REGTMP
@@ -844,16 +857,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p1.To.Reg = r0
p1.RegTo2 = out
- // AND Rarg1, Rout
- p2 := s.Prog(arm64.AAND)
- p2.From.Type = obj.TYPE_REG
- p2.From.Reg = r1
- p2.To.Type = obj.TYPE_REG
- p2.To.Reg = out
-
case ssa.OpARM64LoweredAtomicOr8Variant,
- ssa.OpARM64LoweredAtomicOr32Variant:
- atomic_or := arm64.ALDORALW
+ ssa.OpARM64LoweredAtomicOr32Variant,
+ ssa.OpARM64LoweredAtomicOr64Variant:
+ atomic_or := arm64.ALDORALD
+ if v.Op == ssa.OpARM64LoweredAtomicOr32Variant {
+ atomic_or = arm64.ALDORALW
+ }
if v.Op == ssa.OpARM64LoweredAtomicOr8Variant {
atomic_or = arm64.ALDORALB
}
@@ -861,7 +871,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
r1 := v.Args[1].Reg()
out := v.Reg0()
- // LDORALW Rarg1, (Rarg0), Rout
+ // LDORAL[BDW] Rarg1, (Rarg0), Rout
p := s.Prog(atomic_or)
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
@@ -869,13 +879,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Reg = r0
p.RegTo2 = out
- // ORR Rarg1, Rout
- p2 := s.Prog(arm64.AORR)
- p2.From.Type = obj.TYPE_REG
- p2.From.Reg = r1
- p2.To.Type = obj.TYPE_REG
- p2.To.Reg = out
-
case ssa.OpARM64MOVBreg,
ssa.OpARM64MOVBUreg,
ssa.OpARM64MOVHreg,
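
These lowered ops back 64-bit atomic bitwise operations at the source level;
a minimal usage sketch, assuming the And/Or functions added to sync/atomic in
Go 1.23:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func main() {
		var flags uint64 = 0b1010
		old := atomic.OrUint64(&flags, 0b0101) // atomically set bits; returns the old value
		fmt.Printf("old=%04b new=%04b\n", old, flags)
		old = atomic.AndUint64(&flags, 0b0011) // atomically clear the high bits
		fmt.Printf("old=%04b new=%04b\n", old, flags)
	}

On arm64 these become a single LDORAL/LDCLRAL when LSE atomics are available
(the Variant ops above), and the LDAXR/STLXR loop otherwise.
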
diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go
index 672e3909e4..c1b62f27ca 100644
--- a/src/cmd/compile/internal/base/debug.go
+++ b/src/cmd/compile/internal/base/debug.go
@@ -16,6 +16,7 @@ var Debug DebugFlags
// The -d option takes a comma-separated list of settings.
// Each setting is name=value; for ints, name is short for name=1.
type DebugFlags struct {
+ AlignHot int `help:"enable hot block alignment (currently requires -pgo)" concurrent:"ok"`
Append int `help:"print information about append compilation"`
Checkptr int `help:"instrument unsafe pointer conversions\n0: instrumentation disabled\n1: conversions involving unsafe.Pointer are instrumented\n2: conversions to unsafe.Pointer force heap allocation" concurrent:"ok"`
Closure int `help:"print information about closure compilation"`
diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go
index 1ee3337088..8c17c5f27d 100644
--- a/src/cmd/compile/internal/base/flag.go
+++ b/src/cmd/compile/internal/base/flag.go
@@ -6,6 +6,7 @@ package base
import (
"cmd/internal/cov/covcmd"
+ "cmd/internal/telemetry"
"encoding/json"
"flag"
"fmt"
@@ -177,6 +178,7 @@ func ParseFlags() {
Debug.ConcurrentOk = true
Debug.MaxShapeLen = 500
+ Debug.AlignHot = 1
Debug.InlFuncsWithClosures = 1
Debug.InlStaticInit = 1
Debug.PGOInline = 1
@@ -193,6 +195,7 @@ func ParseFlags() {
objabi.AddVersionFlag() // -V
registerFlags()
objabi.Flagparse(usage)
+ telemetry.CountFlags("compile/flag:", *flag.CommandLine)
if gcd := os.Getenv("GOCOMPILEDEBUG"); gcd != "" {
// This will only override the flags set in gcd;
@@ -210,6 +213,8 @@ func ParseFlags() {
Flag.CompilingRuntime = true
}
+ Ctxt.Std = Flag.Std
+
// Three inputs govern loop iteration variable rewriting, hash, experiment, flag.
// The loop variable rewriting is:
// IF non-empty hash, then hash determines behavior (function+line match) (*)
@@ -358,6 +363,11 @@ func ParseFlags() {
// set via a -d flag
Ctxt.Debugpcln = Debug.PCTab
+
+ // https://golang.org/issue/67502
+ if buildcfg.GOOS == "plan9" && buildcfg.GOARCH == "386" {
+ Debug.AlignHot = 0
+ }
}
// registerFlags adds flag registrations for all the fields in Flag.
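
Since AlignHot is an ordinary -d setting, hot-block alignment can presumably
be toggled like any other compiler debug flag, e.g.:

	go build -gcflags=all=-d=alignhot=0 ./...

or via the GOCOMPILEDEBUG=alignhot=0 environment variable, which ParseFlags
merges into the -d settings above.
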
diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go
index cc36acec4b..15256186af 100644
--- a/src/cmd/compile/internal/base/print.go
+++ b/src/cmd/compile/internal/base/print.go
@@ -14,6 +14,7 @@ import (
"strings"
"cmd/internal/src"
+ "cmd/internal/telemetry"
)
// An errorMsg is a queued error message, waiting to be printed.
@@ -194,6 +195,8 @@ func Fatalf(format string, args ...interface{}) {
FatalfAt(Pos, format, args...)
}
+var bugStack = telemetry.NewStackCounter("compile/bug", 16) // 16 is arbitrary; used by gopls and crashmonitor
+
// FatalfAt reports a fatal error - an internal problem - at pos and exits.
// If other errors have already been printed, then FatalfAt just quietly exits.
// (The internal problem may have been caused by incomplete information
@@ -209,6 +212,8 @@ func Fatalf(format string, args ...interface{}) {
func FatalfAt(pos src.XPos, format string, args ...interface{}) {
FlushErrors()
+ bugStack.Inc()
+
if Debug.Panic != 0 || numErrors == 0 {
fmt.Printf("%v: internal compiler error: ", FmtPos(pos))
fmt.Printf(format, args...)
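
cmd/internal/telemetry is a thin internal wrapper; the same pattern is
available to other tools through the public golang.org/x/telemetry/counter
package (assumed here to be the underlying API). A sketch with illustrative
counter names:

	package main

	import "golang.org/x/telemetry/counter"

	// A stack counter records distinct call stacks, like compile/bug above;
	// the name and depth are illustrative.
	var bugs = counter.NewStack("mytool/bug", 16)

	func main() {
		counter.Open() // map the counter file; a no-op if telemetry is disabled
		counter.Inc("mytool/invocations")
		defer func() {
			if r := recover(); r != nil {
				bugs.Inc() // count the crash, then keep panicking
				panic(r)
			}
		}()
		// ... tool logic ...
	}
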
diff --git a/src/cmd/compile/internal/compare/compare.go b/src/cmd/compile/internal/compare/compare.go
index 2f137daf82..638eb37a62 100644
--- a/src/cmd/compile/internal/compare/compare.go
+++ b/src/cmd/compile/internal/compare/compare.go
@@ -147,7 +147,7 @@ func calculateCostForType(t *types.Type) int64 {
return EqStructCost(t)
case types.TSLICE:
// Slices are not comparable.
- base.Fatalf("eqStructFieldCost: unexpected slice type")
+ base.Fatalf("calculateCostForType: unexpected slice type")
case types.TARRAY:
elemCost := calculateCostForType(t.Elem())
cost = t.NumElem() * elemCost
@@ -370,6 +370,11 @@ func eqmem(p, q ir.Node, field int, size int64) ir.Node {
}
func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) {
+ if !base.Ctxt.Arch.CanMergeLoads && t.Alignment() < int64(base.Ctxt.Arch.Alignment) && t.Alignment() < t.Size() {
+ // We can't use larger comparisons if the value might not be aligned
+ // enough for the larger comparison. See issues 46283 and 67160.
+ size = 0
+ }
switch size {
case 1, 2, 4, 8, 16:
buf := fmt.Sprintf("memequal%d", int(size)*8)
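
For illustration (the type and its sizes are an assumed example), the new
guard matters for values whose alignment is smaller than their size:

	package p

	// pair has alignment 2 but size 4. On a strict-alignment architecture
	// where CanMergeLoads is false, calling memequal4 for x == y could do a
	// 4-byte load at a 2-byte-aligned address, so the guard above falls
	// back to the generic memequal instead (see issues 46283 and 67160).
	type pair [2]uint16

	func eq(x, y pair) bool { return x == y }
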
diff --git a/src/cmd/compile/internal/compare/compare_test.go b/src/cmd/compile/internal/compare/compare_test.go
index 2f76165509..4271effbdb 100644
--- a/src/cmd/compile/internal/compare/compare_test.go
+++ b/src/cmd/compile/internal/compare/compare_test.go
@@ -23,8 +23,8 @@ func init() {
types.PtrSize = 8
types.RegSize = 8
types.MaxWidth = 1 << 50
- typecheck.InitUniverse()
base.Ctxt = &obj.Link{Arch: &obj.LinkArch{Arch: &sys.Arch{Alignment: 1, CanMergeLoads: true}}}
+ typecheck.InitUniverse()
}
func TestEqStructCost(t *testing.T) {
diff --git a/src/cmd/compile/internal/devirtualize/pgo_test.go b/src/cmd/compile/internal/devirtualize/pgo_test.go
index cff4d63d51..6153b8c5ec 100644
--- a/src/cmd/compile/internal/devirtualize/pgo_test.go
+++ b/src/cmd/compile/internal/devirtualize/pgo_test.go
@@ -13,6 +13,7 @@ import (
"cmd/internal/obj"
"cmd/internal/pgo"
"cmd/internal/src"
+ "cmd/internal/sys"
"testing"
)
@@ -23,8 +24,8 @@ func init() {
types.PtrSize = 8
types.RegSize = 8
types.MaxWidth = 1 << 50
+ base.Ctxt = &obj.Link{Arch: &obj.LinkArch{Arch: &sys.Arch{Alignment: 1, CanMergeLoads: true}}}
typecheck.InitUniverse()
- base.Ctxt = &obj.Link{}
base.Debug.PGODebug = 3
}
diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go
index 0f57f8ca82..496daacb42 100644
--- a/src/cmd/compile/internal/gc/compile.go
+++ b/src/cmd/compile/internal/gc/compile.go
@@ -14,6 +14,7 @@ import (
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
"cmd/compile/internal/objw"
+ "cmd/compile/internal/pgoir"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/staticinit"
"cmd/compile/internal/types"
@@ -57,8 +58,13 @@ func enqueueFunc(fn *ir.Func) {
types.CalcSize(fn.Type())
a := ssagen.AbiForBodylessFuncStackMap(fn)
abiInfo := a.ABIAnalyzeFuncType(fn.Type()) // abiInfo has spill/home locations for wrapper
- liveness.WriteFuncMap(fn, abiInfo)
if fn.ABI == obj.ABI0 {
+ // The current args_stackmap generation assumes the function
+ // is ABI0, and only ABI0 assembly function can have a FUNCDATA
+ // reference to args_stackmap (see cmd/internal/obj/plist.go:Flushplist).
+ // So avoid introducing an args_stackmap if the func is not ABI0.
+ liveness.WriteFuncMap(fn, abiInfo)
+
x := ssagen.EmitArgInfo(fn, abiInfo)
objw.Global(x, int32(len(x.P)), obj.RODATA|obj.LOCAL)
}
@@ -112,7 +118,7 @@ func prepareFunc(fn *ir.Func) {
// compileFunctions compiles all functions in compilequeue.
// It fans out nBackendWorkers to do the work
// and waits for them to complete.
-func compileFunctions() {
+func compileFunctions(profile *pgoir.Profile) {
if race.Enabled {
// Randomize compilation order to try to shake out races.
tmp := make([]*ir.Func, len(compilequeue))
@@ -179,7 +185,7 @@ func compileFunctions() {
for _, fn := range fns {
fn := fn
queue(func(worker int) {
- ssagen.Compile(fn, worker)
+ ssagen.Compile(fn, worker, profile)
compile(fn.Closures)
wg.Done()
})
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 130feafb24..41f5e43ec6 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -30,6 +30,7 @@ import (
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
+ "cmd/internal/telemetry"
"flag"
"fmt"
"internal/buildcfg"
@@ -58,6 +59,8 @@ func handlePanic() {
// code, and finally writes the compiled package definition to disk.
func Main(archInit func(*ssagen.ArchInfo)) {
base.Timer.Start("fe", "init")
+ telemetry.Start()
+ telemetry.Inc("compile/invocations")
defer handlePanic()
@@ -300,7 +303,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
// as late as possible to maximize how much work we can batch and
// process concurrently.
if len(compilequeue) != 0 {
- compileFunctions()
+ compileFunctions(profile)
continue
}
diff --git a/src/cmd/compile/internal/importer/iimport.go b/src/cmd/compile/internal/importer/iimport.go
index 498134755d..4a7fece188 100644
--- a/src/cmd/compile/internal/importer/iimport.go
+++ b/src/cmd/compile/internal/importer/iimport.go
@@ -131,6 +131,9 @@ func ImportData(imports map[string]*types2.Package, data, path string) (pkg *typ
for i, pt := range predeclared {
p.typCache[uint64(i)] = pt
}
+ // Special handling for "any", whose representation may be changed by the
+ // gotypesalias GODEBUG variable.
+ p.typCache[uint64(len(predeclared))] = types2.Universe.Lookup("any").Type()
pkgList := make([]*types2.Package, r.uint64())
for i := range pkgList {
diff --git a/src/cmd/compile/internal/importer/support.go b/src/cmd/compile/internal/importer/support.go
index 5810f5e172..a443b4d862 100644
--- a/src/cmd/compile/internal/importer/support.go
+++ b/src/cmd/compile/internal/importer/support.go
@@ -130,8 +130,7 @@ var predeclared = []types2.Type{
// comparable
types2.Universe.Lookup("comparable").Type(),
- // any
- types2.Universe.Lookup("any").Type(),
+ // "any" has special handling: see usage of predeclared.
}
type anyType struct{}
diff --git a/src/cmd/compile/internal/importer/ureader.go b/src/cmd/compile/internal/importer/ureader.go
index 3488f13148..d3c7d4516f 100644
--- a/src/cmd/compile/internal/importer/ureader.go
+++ b/src/cmd/compile/internal/importer/ureader.go
@@ -9,15 +9,15 @@ import (
"cmd/compile/internal/syntax"
"cmd/compile/internal/types2"
"cmd/internal/src"
- "internal/godebug"
"internal/pkgbits"
)
type pkgReader struct {
pkgbits.PkgDecoder
- ctxt *types2.Context
- imports map[string]*types2.Package
+ ctxt *types2.Context
+ imports map[string]*types2.Package
+ enableAlias bool // whether to use aliases
posBases []*syntax.PosBase
pkgs []*types2.Package
@@ -30,6 +30,9 @@ func ReadPackage(ctxt *types2.Context, imports map[string]*types2.Package, input
ctxt: ctxt,
imports: imports,
+ // Currently, the compiler panics when using Alias types.
+ // TODO(gri) set to true once this is fixed (issue #66873)
+ enableAlias: false,
posBases: make([]*syntax.PosBase, input.NumElems(pkgbits.RelocPosBase)),
pkgs: make([]*types2.Package, input.NumElems(pkgbits.RelocPkg)),
@@ -410,7 +413,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types2.Package, string) {
case pkgbits.ObjAlias:
pos := r.pos()
typ := r.typ()
- return newAliasTypeName(pos, objPkg, objName, typ)
+ return newAliasTypeName(pr.enableAlias, pos, objPkg, objName, typ)
case pkgbits.ObjConst:
pos := r.pos()
@@ -536,16 +539,13 @@ func (r *reader) ident(marker pkgbits.SyncMarker) (*types2.Package, string) {
}
// newAliasTypeName returns a new TypeName, with a materialized *types2.Alias if supported.
-func newAliasTypeName(pos syntax.Pos, pkg *types2.Package, name string, rhs types2.Type) *types2.TypeName {
+func newAliasTypeName(aliases bool, pos syntax.Pos, pkg *types2.Package, name string, rhs types2.Type) *types2.TypeName {
// Copied from x/tools/internal/aliases.NewAlias via
// GOROOT/src/go/internal/gcimporter/ureader.go.
- if gotypesalias.Value() == "1" {
+ if aliases {
tname := types2.NewTypeName(pos, pkg, name, nil)
_ = types2.NewAlias(tname, rhs) // form TypeName -> Alias cycle
return tname
}
return types2.NewTypeName(pos, pkg, name, rhs)
}
-
-// gotypesalias controls the use of Alias types.
-var gotypesalias = godebug.New("#gotypesalias")
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index 3f65b2bbc9..1b438f9ef0 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -61,6 +61,9 @@ var (
// TODO(prattmic): Make this non-global.
candHotCalleeMap = make(map[*pgoir.IRNode]struct{})
+ // Set of functions that contain hot call sites.
+ hasHotCall = make(map[*ir.Func]struct{})
+
// List of all hot call sites. CallSiteInfo.Callee is always nil.
// TODO(prattmic): Make this non-global.
candHotEdgeMap = make(map[pgoir.CallSiteInfo]struct{})
@@ -78,6 +81,22 @@ var (
inlineHotMaxBudget int32 = 2000
)
+func IsPgoHotFunc(fn *ir.Func, profile *pgoir.Profile) bool {
+ if profile == nil {
+ return false
+ }
+ if n, ok := profile.WeightedCG.IRNodes[ir.LinkFuncName(fn)]; ok {
+ _, ok := candHotCalleeMap[n]
+ return ok
+ }
+ return false
+}
+
+func HasPgoHotInline(fn *ir.Func) bool {
+ _, has := hasHotCall[fn]
+ return has
+}
+
// PGOInlinePrologue records the hot callsites from ir-graph.
func PGOInlinePrologue(p *pgoir.Profile) {
if base.Debug.PGOInlineCDFThreshold != "" {
@@ -228,14 +247,10 @@ func GarbageCollectUnreferencedHiddenClosures() {
func inlineBudget(fn *ir.Func, profile *pgoir.Profile, relaxed bool, verbose bool) int32 {
// Update the budget for profile-guided inlining.
budget := int32(inlineMaxBudget)
- if profile != nil {
- if n, ok := profile.WeightedCG.IRNodes[ir.LinkFuncName(fn)]; ok {
- if _, ok := candHotCalleeMap[n]; ok {
- budget = inlineHotMaxBudget
- if verbose {
- fmt.Printf("hot-node enabled increased budget=%v for func=%v\n", budget, ir.PkgFuncName(fn))
- }
- }
+ if IsPgoHotFunc(fn, profile) {
+ budget = inlineHotMaxBudget
+ if verbose {
+ fmt.Printf("hot-node enabled increased budget=%v for func=%v\n", budget, ir.PkgFuncName(fn))
}
}
if relaxed {
@@ -493,7 +508,7 @@ opSwitch:
case "throw":
v.budget -= inlineExtraThrowCost
break opSwitch
- case "panicrangeexit":
+ case "panicrangestate":
cheap = true
}
// Special case for reflect.noescape. It does just type
@@ -545,6 +560,28 @@ opSwitch:
}
}
}
+
+ if n.Fun.Op() == ir.ONAME {
+ name := n.Fun.(*ir.Name)
+ if name.Class == ir.PFUNC {
+ // Special case: on architectures that can do unaligned loads,
+ // explicitly mark internal/byteorder methods as cheap,
+ // because in practice they are, even though our inlining
+ // budgeting system does not see that. See issue 42958.
+ if base.Ctxt.Arch.CanMergeLoads && name.Sym().Pkg.Path == "internal/byteorder" {
+ switch name.Sym().Name {
+ case "LeUint64", "LeUint32", "LeUint16",
+ "BeUint64", "BeUint32", "BeUint16",
+ "LePutUint64", "LePutUint32", "LePutUint16",
+ "BePutUint64", "BePutUint32", "BePutUint16",
+ "LeAppendUint64", "LeAppendUint32", "LeAppendUint16",
+ "BeAppendUint64", "BeAppendUint32", "BeAppendUint16":
+ cheap = true
+ }
+ }
+ }
+ }
+
if cheap {
break // treat like any other node, that is, cost of 1
}
@@ -558,7 +595,7 @@ opSwitch:
// Check whether we'd actually inline this call. Set
// log == false since we aren't actually doing inlining
// yet.
- if ok, _ := canInlineCallExpr(v.curFunc, n, callee, v.isBigFunc, false); ok {
+ if ok, _, _ := canInlineCallExpr(v.curFunc, n, callee, v.isBigFunc, false); ok {
// mkinlcall would inline this call [1], so use
// the cost of the inline body as the cost of
// the call, as that is what will actually
@@ -851,10 +888,11 @@ var InlineCall = func(callerfn *ir.Func, call *ir.CallExpr, fn *ir.Func, inlInde
// inlineCostOK returns true if call n from caller to callee is cheap enough to
// inline. bigCaller indicates that caller is a big function.
//
-// In addition to the "cost OK" boolean, it also returns the "max
-// cost" limit used to make the decision (which may differ depending
-// on func size), and the score assigned to this specific callsite.
-func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool, int32, int32) {
+// In addition to the "cost OK" boolean, it also returns
+// - the "max cost" limit used to make the decision (which may differ depending on func size)
+// - the score assigned to this specific callsite
+// - whether the inlined function is "hot" according to PGO.
+func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool, int32, int32, bool) {
maxCost := int32(inlineMaxBudget)
if bigCaller {
// We use this to restrict inlining into very big functions.
@@ -870,19 +908,21 @@ func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool
}
}
+ lineOffset := pgoir.NodeLineOffset(n, caller)
+ csi := pgoir.CallSiteInfo{LineOffset: lineOffset, Caller: caller}
+ _, hot := candHotEdgeMap[csi]
+
if metric <= maxCost {
// Simple case. Function is already cheap enough.
- return true, 0, metric
+ return true, 0, metric, hot
}
// We'll also allow inlining of hot functions below inlineHotMaxBudget,
// but only in small functions.
- lineOffset := pgoir.NodeLineOffset(n, caller)
- csi := pgoir.CallSiteInfo{LineOffset: lineOffset, Caller: caller}
- if _, ok := candHotEdgeMap[csi]; !ok {
+ if !hot {
// Cold
- return false, maxCost, metric
+ return false, maxCost, metric, false
}
// Hot
@@ -891,49 +931,50 @@ func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool
if base.Debug.PGODebug > 0 {
fmt.Printf("hot-big check disallows inlining for call %s (cost %d) at %v in big function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller))
}
- return false, maxCost, metric
+ return false, maxCost, metric, false
}
if metric > inlineHotMaxBudget {
- return false, inlineHotMaxBudget, metric
+ return false, inlineHotMaxBudget, metric, false
}
if !base.PGOHash.MatchPosWithInfo(n.Pos(), "inline", nil) {
// De-selected by PGO Hash.
- return false, maxCost, metric
+ return false, maxCost, metric, false
}
if base.Debug.PGODebug > 0 {
fmt.Printf("hot-budget check allows inlining for call %s (cost %d) at %v in function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller))
}
- return true, 0, metric
+ return true, 0, metric, hot
}
// canInlineCallExpr returns true if the call n from caller to callee
-// can be inlined, plus the score computed for the call expr in
-// question. bigCaller indicates that caller is a big function. log
+// can be inlined, plus the score computed for the call expr in question,
+// and whether the callee is hot according to PGO.
+// bigCaller indicates that caller is a big function. log
// indicates that the 'cannot inline' reason should be logged.
//
// Preconditions: CanInline(callee) has already been called.
-func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCaller bool, log bool) (bool, int32) {
+func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCaller bool, log bool) (bool, int32, bool) {
if callee.Inl == nil {
// callee is never inlinable.
if log && logopt.Enabled() {
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(callee)))
}
- return false, 0
+ return false, 0, false
}
- ok, maxCost, callSiteScore := inlineCostOK(n, callerfn, callee, bigCaller)
+ ok, maxCost, callSiteScore, hot := inlineCostOK(n, callerfn, callee, bigCaller)
if !ok {
// callee cost too high for this call site.
if log && logopt.Enabled() {
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
fmt.Sprintf("cost %d of %s exceeds max caller cost %d", callee.Inl.Cost, ir.PkgFuncName(callee), maxCost))
}
- return false, 0
+ return false, 0, false
}
if callee == callerfn {
@@ -941,7 +982,7 @@ func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCa
if log && logopt.Enabled() {
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(callerfn)))
}
- return false, 0
+ return false, 0, false
}
if base.Flag.Cfg.Instrumenting && types.IsNoInstrumentPkg(callee.Sym().Pkg) {
@@ -955,7 +996,7 @@ func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCa
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
fmt.Sprintf("call to runtime function %s in instrumented build", ir.PkgFuncName(callee)))
}
- return false, 0
+ return false, 0, false
}
if base.Flag.Race && types.IsNoRacePkg(callee.Sym().Pkg) {
@@ -963,7 +1004,7 @@ func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCa
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
fmt.Sprintf(`call to into "no-race" package function %s in race build`, ir.PkgFuncName(callee)))
}
- return false, 0
+ return false, 0, false
}
// Check if we've already inlined this function at this particular
@@ -986,11 +1027,11 @@ func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCa
fmt.Sprintf("repeated recursive cycle to %s", ir.PkgFuncName(callee)))
}
}
- return false, 0
+ return false, 0, false
}
}
- return true, callSiteScore
+ return true, callSiteScore, hot
}
// mkinlcall returns an OINLCALL node that can replace OCALLFUNC n, or
@@ -1001,10 +1042,13 @@ func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCa
//
// n.Left = mkinlcall(n.Left, fn, isddd)
func mkinlcall(callerfn *ir.Func, n *ir.CallExpr, fn *ir.Func, bigCaller bool) *ir.InlinedCallExpr {
- ok, score := canInlineCallExpr(callerfn, n, fn, bigCaller, true)
+ ok, score, hot := canInlineCallExpr(callerfn, n, fn, bigCaller, true)
if !ok {
return nil
}
+ if hot {
+ hasHotCall[callerfn] = struct{}{}
+ }
typecheck.AssertFixedCall(n)
parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
diff --git a/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go b/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go
index 587eab03fc..b1cbb2bc0e 100644
--- a/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go
+++ b/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go
@@ -5,10 +5,13 @@
package inlheur
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
+ "cmd/internal/obj"
"cmd/internal/src"
+ "cmd/internal/sys"
"go/constant"
"testing"
)
@@ -21,6 +24,8 @@ func init() {
types.PtrSize = 8
types.RegSize = 8
types.MaxWidth = 1 << 50
+ base.Ctxt = &obj.Link{Arch: &obj.LinkArch{Arch: &sys.Arch{Alignment: 1, CanMergeLoads: true}}}
+
typecheck.InitUniverse()
local = types.NewPkg("", "")
fsym := &types.Sym{
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
index d20836e006..d0c8ee359b 100644
--- a/src/cmd/compile/internal/ir/func.go
+++ b/src/cmd/compile/internal/ir/func.go
@@ -90,15 +90,19 @@ type Func struct {
Inl *Inline
- // funcLitGen and goDeferGen track how many closures have been
- // created in this function for function literals and go/defer
- // wrappers, respectively. Used by closureName for creating unique
- // function names.
- //
+ // RangeParent, if non-nil, is the first non-range body function containing
+ // the closure for the body of a range function.
+ RangeParent *Func
+
+ // funcLitGen, rangeLitGen and goDeferGen track how many closures have been
+ // created in this function for function literals, range-over-func loops,
+ // and go/defer wrappers, respectively. Used by closureName for creating
+ // unique function names.
// Tracking goDeferGen separately avoids wrappers throwing off
// function literal numbering (e.g., runtime/trace_test.TestTraceSymbolize.func11).
- funcLitGen int32
- goDeferGen int32
+ funcLitGen int32
+ rangeLitGen int32
+ goDeferGen int32
Label int32 // largest auto-generated label in this function
@@ -417,20 +421,25 @@ var globClosgen int32
// closureName generates a new unique name for a closure within outerfn at pos.
func closureName(outerfn *Func, pos src.XPos, why Op) *types.Sym {
+ if outerfn != nil && outerfn.OClosure != nil && outerfn.OClosure.Func.RangeParent != nil {
+ outerfn = outerfn.OClosure.Func.RangeParent
+ }
pkg := types.LocalPkg
outer := "glob."
- var prefix string
+ var suffix string = "."
switch why {
default:
base.FatalfAt(pos, "closureName: bad Op: %v", why)
case OCLOSURE:
if outerfn == nil || outerfn.OClosure == nil {
- prefix = "func"
+ suffix = ".func"
}
+ case ORANGE:
+ suffix = "-range"
case OGO:
- prefix = "gowrap"
+ suffix = ".gowrap"
case ODEFER:
- prefix = "deferwrap"
+ suffix = ".deferwrap"
}
gen := &globClosgen
@@ -441,9 +450,12 @@ func closureName(outerfn *Func, pos src.XPos, why Op) *types.Sym {
pkg = outerfn.Sym().Pkg
outer = FuncName(outerfn)
- if why == OCLOSURE {
+ switch why {
+ case OCLOSURE:
gen = &outerfn.funcLitGen
- } else {
+ case ORANGE:
+ gen = &outerfn.rangeLitGen
+ default:
gen = &outerfn.goDeferGen
}
}
@@ -460,7 +472,7 @@ func closureName(outerfn *Func, pos src.XPos, why Op) *types.Sym {
}
*gen++
- return pkg.Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
+ return pkg.Lookup(fmt.Sprintf("%s%s%d", outer, suffix, *gen))
}
// NewClosureFunc creates a new Func to represent a function literal
@@ -490,6 +502,12 @@ func NewClosureFunc(fpos, cpos src.XPos, why Op, typ *types.Type, outerfn *Func,
clo.pos = cpos
clo.SetType(typ)
clo.SetTypecheck(1)
+ if why == ORANGE {
+ clo.Func.RangeParent = outerfn
+ if outerfn.OClosure != nil && outerfn.OClosure.Func.RangeParent != nil {
+ clo.Func.RangeParent = outerfn.OClosure.Func.RangeParent
+ }
+ }
fn.OClosure = clo
fn.Nname.Defn = fn
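
Under this scheme, the generated symbol names for a package-level function F
would look roughly like the comments below (an illustration of the naming
convention, not actual compiler output):

	package main

	func F(seq func(yield func(int) bool)) {
		for x := range seq { // loop body compiles to a closure named main.F-range1
			_ = x
		}
		lit := func() {} // function literal becomes main.F.func1
		lit()
	}
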
diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go
index 3b6823895c..68d2865595 100644
--- a/src/cmd/compile/internal/ir/sizeof_test.go
+++ b/src/cmd/compile/internal/ir/sizeof_test.go
@@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {Func{}, 168, 288},
+ {Func{}, 176, 296},
{Name{}, 96, 168},
}
diff --git a/src/cmd/compile/internal/liveness/mergelocals.go b/src/cmd/compile/internal/liveness/mergelocals.go
index 1e65d6c1d1..017c4d1dbb 100644
--- a/src/cmd/compile/internal/liveness/mergelocals.go
+++ b/src/cmd/compile/internal/liveness/mergelocals.go
@@ -448,7 +448,7 @@ func (cs *cstate) setupHashBisection(cands []*ir.Name) {
//
// It is possible to have situations where a given ir.Name is
// non-address-taken at the source level, but whose address is
-// materialized in order to accomodate the needs of
+// materialized in order to accommodate the needs of
// architecture-dependent operations of one sort or another (examples
// include things like LoweredZero/DuffZero, etc). The issue here is
// that the SymAddr op will show up as touching a variable of
diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go
index dd48d10bc5..708f0f2023 100644
--- a/src/cmd/compile/internal/liveness/plive.go
+++ b/src/cmd/compile/internal/liveness/plive.go
@@ -1536,7 +1536,7 @@ func isfat(t *types.Type) bool {
// inputs and outputs as the value of symbol <fn>.args_stackmap.
// If fn has outputs, two bitmaps are written, otherwise just one.
func WriteFuncMap(fn *ir.Func, abiInfo *abi.ABIParamResultInfo) {
- if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" {
+ if ir.FuncName(fn) == "_" {
return
}
nptr := int(abiInfo.ArgWidth() / int64(types.PtrSize))
@@ -1551,6 +1551,7 @@ func WriteFuncMap(fn *ir.Func, abiInfo *abi.ABIParamResultInfo) {
nbitmap = 2
}
lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap")
+ lsym.Set(obj.AttrLinkname, true) // allow args_stackmap referenced from assembly
off := objw.Uint32(lsym, 0, uint32(nbitmap))
off = objw.Uint32(lsym, off, uint32(bv.N))
off = objw.BitVec(lsym, off, bv)
diff --git a/src/cmd/compile/internal/noder/irgen.go b/src/cmd/compile/internal/noder/irgen.go
index 34201545b5..a95fa03e17 100644
--- a/src/cmd/compile/internal/noder/irgen.go
+++ b/src/cmd/compile/internal/noder/irgen.go
@@ -22,19 +22,26 @@ var versionErrorRx = regexp.MustCompile(`requires go[0-9]+\.[0-9]+ or later`)
// checkFiles configures and runs the types2 checker on the given
// parsed source files and then returns the result.
-func checkFiles(m posMap, noders []*noder) (*types2.Package, *types2.Info) {
+// The map result value indicates which closures are generated from the bodies of range function loops.
+func checkFiles(m posMap, noders []*noder) (*types2.Package, *types2.Info, map[*syntax.FuncLit]bool) {
if base.SyntaxErrors() != 0 {
base.ErrorExit()
}
// setup and syntax error reporting
files := make([]*syntax.File, len(noders))
- // posBaseMap maps all file pos bases back to *syntax.File
+ // fileBaseMap maps all file pos bases back to *syntax.File
// for checking Go version mismatches.
- posBaseMap := make(map[*syntax.PosBase]*syntax.File)
+ fileBaseMap := make(map[*syntax.PosBase]*syntax.File)
for i, p := range noders {
files[i] = p.file
- posBaseMap[p.file.Pos().Base()] = p.file
+ // The file.Pos() is the position of the package clause.
+ // If there's a //line directive before that, file.Pos().Base()
+ // refers to that directive, not the file itself.
+ // Make sure to consistently map back to file base, here and
+ // when we look for a file in the conf.Error handler below,
+ // otherwise the file may not be found (was go.dev/issue/67141).
+ fileBaseMap[p.file.Pos().FileBase()] = p.file
}
// typechecking
@@ -49,9 +56,7 @@ func checkFiles(m posMap, noders []*noder) (*types2.Package, *types2.Info) {
IgnoreBranchErrors: true, // parser already checked via syntax.CheckBranches mode
Importer: &importer,
Sizes: types2.SizesFor("gc", buildcfg.GOARCH),
- // Currently, the compiler panics when using Alias types.
- // TODO(gri) set to true once this is fixed (issue #66873)
- EnableAlias: false,
+ EnableAlias: true,
}
if base.Flag.ErrorURL {
conf.ErrorURL = " [go.dev/e/%s]"
@@ -71,13 +76,12 @@ func checkFiles(m posMap, noders []*noder) (*types2.Package, *types2.Info) {
terr := err.(types2.Error)
msg := terr.Msg
if versionErrorRx.MatchString(msg) {
- posBase := terr.Pos.Base()
- for !posBase.IsFileBase() { // line directive base
- posBase = posBase.Pos().Base()
- }
- fileVersion := info.FileVersions[posBase]
- file := posBaseMap[posBase]
- if file.GoVersion == fileVersion {
+ fileBase := terr.Pos.FileBase()
+ fileVersion := info.FileVersions[fileBase]
+ file := fileBaseMap[fileBase]
+ if file == nil {
+ // This should never happen, but be careful and don't crash.
+ } else if file.GoVersion == fileVersion {
// If we have a version error caused by //go:build, report it.
msg = fmt.Sprintf("%s (file declares //go:build %s)", msg, fileVersion)
} else {
@@ -147,9 +151,9 @@ func checkFiles(m posMap, noders []*noder) (*types2.Package, *types2.Info) {
// If we do the rewrite in the back end, like between typecheck and walk,
// then the new implicit closure will not have a unified IR inline body,
// and bodyReaderFor will fail.
- rangefunc.Rewrite(pkg, info, files)
+ rangeInfo := rangefunc.Rewrite(pkg, info, files)
- return pkg, info
+ return pkg, info, rangeInfo
}
// A cycleFinder detects anonymous interface cycles (go.dev/issue/56103).
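
The go.dev/issue/67141 scenario is a generated file whose //line directive
precedes the package clause, e.g.:

	//line parser.y:10
	package p

For such a file, file.Pos().Base() is the directive's base, while FileBase()
walks back to the file's own base, so the FileVersions and fileBaseMap lookups
now agree.
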
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
index c33e5226f3..97865bbfb1 100644
--- a/src/cmd/compile/internal/noder/reader.go
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -427,7 +427,9 @@ func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict, wrapped bool) *type
r.dict = dict
typ := r.doTyp()
- assert(typ != nil)
+ if typ == nil {
+ base.Fatalf("doTyp returned nil for info=%v", info)
+ }
// For recursive type declarations involving interfaces and aliases,
// above r.doTyp() call may have already set pr.typs[idx], so just
@@ -741,7 +743,26 @@ func (pr *pkgReader) objIdxMayFail(idx pkgbits.Index, implicits, explicits []*ty
case pkgbits.ObjAlias:
name := do(ir.OTYPE, false)
- setType(name, r.typ())
+
+ // Clumsy dance: the r.typ() call here might recursively find this
+ // type alias name, before we've set its type (#66873). So we
+ // temporarily clear sym.Def and then restore it later, if still
+ // unset.
+ hack := sym.Def == name
+ if hack {
+ sym.Def = nil
+ }
+ typ := r.typ()
+ if hack {
+ if sym.Def != nil {
+ name = sym.Def.(*ir.Name)
+ assert(name.Type() == typ)
+ return name, nil
+ }
+ sym.Def = name
+ }
+
+ setType(name, typ)
name.SetAlias(true)
return name, nil
@@ -1176,7 +1197,18 @@ func (r *reader) linkname(name *ir.Name) {
lsym.SymIdx = int32(idx)
lsym.Set(obj.AttrIndexed, true)
} else {
- name.Sym().Linkname = r.String()
+ linkname := r.String()
+ sym := name.Sym()
+ sym.Linkname = linkname
+ if sym.Pkg == types.LocalPkg && linkname != "" {
+ // Mark linkname in the current package. We don't mark the
+ // ones that are imported and propagated (e.g. through
+ // inlining or instantiation, which are marked in their
+ // corresponding packages). So we can tell in which package
+ // the linkname is used (pulled), and the linker can
+ // make a decision for allowing or disallowing it.
+ sym.Linksym().Set(obj.AttrLinkname, true)
+ }
}
}
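
The pull side being marked here is the classic two-line linkname pattern,
with runtime.nanotime as an assumed example target:

	package fast

	import _ "unsafe" // required for go:linkname

	//go:linkname nanotime runtime.nanotime
	func nanotime() int64

Compiling package fast now sets AttrLinkname on the local symbol, so the
linker can tell the pull originated there.
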
@@ -2672,7 +2704,7 @@ func (r *reader) syntheticClosure(origPos src.XPos, typ *types.Type, ifaceHack b
return false
}
- fn := r.inlClosureFunc(origPos, typ)
+ fn := r.inlClosureFunc(origPos, typ, ir.OCLOSURE)
fn.SetWrapper(true)
clo := fn.OClosure
@@ -3003,8 +3035,12 @@ func (r *reader) funcLit() ir.Node {
origPos := r.pos()
sig := r.signature(nil)
r.suppressInlPos--
+ why := ir.OCLOSURE
+ if r.Bool() {
+ why = ir.ORANGE
+ }
- fn := r.inlClosureFunc(origPos, sig)
+ fn := r.inlClosureFunc(origPos, sig, why)
fn.ClosureVars = make([]*ir.Name, 0, r.Len())
for len(fn.ClosureVars) < cap(fn.ClosureVars) {
@@ -3030,14 +3066,14 @@ func (r *reader) funcLit() ir.Node {
// inlClosureFunc constructs a new closure function, but correctly
// handles inlining.
-func (r *reader) inlClosureFunc(origPos src.XPos, sig *types.Type) *ir.Func {
+func (r *reader) inlClosureFunc(origPos src.XPos, sig *types.Type, why ir.Op) *ir.Func {
curfn := r.inlCaller
if curfn == nil {
curfn = r.curfn
}
// TODO(mdempsky): Remove hard-coding of typecheck.Target.
- return ir.NewClosureFunc(origPos, r.inlPos(origPos), ir.OCLOSURE, sig, curfn, typecheck.Target)
+ return ir.NewClosureFunc(origPos, r.inlPos(origPos), why, sig, curfn, typecheck.Target)
}
func (r *reader) exprList() []ir.Node {
@@ -3684,10 +3720,13 @@ func (r *reader) needWrapper(typ *types.Type) {
return
}
+ // Special case: runtime must define error even if imported packages mention it (#29304).
+ forceNeed := typ == types.ErrorType && base.Ctxt.Pkgpath == "runtime"
+
// If a type was found in an imported package, then we can assume
// that package (or one of its transitive dependencies) already
// generated method wrappers for it.
- if r.importedDef() {
+ if r.importedDef() && !forceNeed {
haveWrapperTypes = append(haveWrapperTypes, typ)
} else {
needWrapperTypes = append(needWrapperTypes, typ)
diff --git a/src/cmd/compile/internal/noder/unified.go b/src/cmd/compile/internal/noder/unified.go
index 2391b2f34d..a1a90cd6b5 100644
--- a/src/cmd/compile/internal/noder/unified.go
+++ b/src/cmd/compile/internal/noder/unified.go
@@ -304,9 +304,9 @@ func readBodies(target *ir.Package, duringInlining bool) {
// writes an export data package stub representing them,
// and returns the result.
func writePkgStub(m posMap, noders []*noder) string {
- pkg, info := checkFiles(m, noders)
+ pkg, info, otherInfo := checkFiles(m, noders)
- pw := newPkgWriter(m, pkg, info)
+ pw := newPkgWriter(m, pkg, info, otherInfo)
pw.collectDecls(noders)
diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go
index 785176b3b5..9b33fb7c6d 100644
--- a/src/cmd/compile/internal/noder/writer.go
+++ b/src/cmd/compile/internal/noder/writer.go
@@ -12,6 +12,7 @@ import (
"internal/buildcfg"
"internal/pkgbits"
"os"
+ "strings"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
@@ -62,9 +63,10 @@ import (
type pkgWriter struct {
pkgbits.PkgEncoder
- m posMap
- curpkg *types2.Package
- info *types2.Info
+ m posMap
+ curpkg *types2.Package
+ info *types2.Info
+	rangeFuncBodyClosures map[*syntax.FuncLit]bool // which function literals are closures created for range function loop bodies (non-public information)
// Indices for previously written syntax and types2 things.
@@ -89,13 +91,14 @@ type pkgWriter struct {
// newPkgWriter returns an initialized pkgWriter for the specified
// package.
-func newPkgWriter(m posMap, pkg *types2.Package, info *types2.Info) *pkgWriter {
+func newPkgWriter(m posMap, pkg *types2.Package, info *types2.Info, otherInfo map[*syntax.FuncLit]bool) *pkgWriter {
return &pkgWriter{
PkgEncoder: pkgbits.NewPkgEncoder(base.Debug.SyncFrames),
- m: m,
- curpkg: pkg,
- info: info,
+ m: m,
+ curpkg: pkg,
+ info: info,
+ rangeFuncBodyClosures: otherInfo,
pkgsIdx: make(map[*types2.Package]pkgbits.Index),
objsIdx: make(map[types2.Object]pkgbits.Index),
@@ -488,6 +491,18 @@ func (w *writer) typInfo(info typeInfo) {
// typIdx also reports whether typ is a derived type; that is, whether
// its identity depends on type parameters.
func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo {
+ // Strip non-global aliases, because they only appear in inline
+ // bodies anyway. Otherwise, they can cause types.Sym collisions
+ // (e.g., "main.C" for both of the local type aliases in
+ // test/fixedbugs/issue50190.go).
+ for {
+ if alias, ok := typ.(*types2.Alias); ok && !isGlobal(alias.Obj()) {
+ typ = alias.Rhs()
+ } else {
+ break
+ }
+ }
+
if idx, ok := pw.typsIdx[typ]; ok {
return typeInfo{idx: idx, derived: false}
}
@@ -569,7 +584,10 @@ func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo {
case *types2.Interface:
// Handle "any" as reference to its TypeName.
- if typ == anyTypeName.Type() {
+ // The underlying "any" interface is canonical, so this logic handles both
+ // GODEBUG=gotypesalias=1 (when any is represented as a types2.Alias), and
+ // gotypesalias=0.
+ if types2.Unalias(typ) == types2.Unalias(anyTypeName.Type()) {
w.Code(pkgbits.TypeNamed)
w.obj(anyTypeName, nil)
break
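
The Unalias comparison can be observed through go/types, the exported mirror
of types2 (a sketch; the concrete types printed assume the gotypesalias=1
default):

	package main

	import (
		"fmt"
		"go/types"
	)

	func main() {
		anyT := types.Universe.Lookup("any").Type()
		// With gotypesalias=1, anyT is a *types.Alias; Unalias yields the
		// canonical empty interface either way, which is what the
		// comparison above relies on.
		fmt.Printf("%T %T\n", anyT, types.Unalias(anyT))
	}
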
@@ -2320,6 +2338,7 @@ func (w *writer) funcLit(expr *syntax.FuncLit) {
w.Sync(pkgbits.SyncFuncLit)
w.pos(expr)
w.signature(sig)
+ w.Bool(w.p.rangeFuncBodyClosures[expr])
w.Len(len(closureVars))
for _, cv := range closureVars {
@@ -2595,6 +2614,10 @@ func (pw *pkgWriter) collectDecls(noders []*noder) {
pw.errorf(l.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
continue
}
+ if strings.Contains(l.remote, "[") && strings.Contains(l.remote, "]") {
+ pw.errorf(l.pos, "//go:linkname reference of an instantiation is not allowed")
+ continue
+ }
switch obj := pw.curpkg.Scope().Lookup(l.local).(type) {
case *types2.Func, *types2.Var:
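
The new collectDecls check rejects a remote name that looks like an
instantiation, e.g. (a hypothetical directive):

	package p

	import _ "unsafe"

	//go:linkname g example.com/m.Generic[int]
	func g() // error: //go:linkname reference of an instantiation is not allowed
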
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index db420b7cb4..367fd2f6b0 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -629,18 +629,18 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.AddRestSourceArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}})
// Auxint holds mask
- case ssa.OpPPC64RLDICL, ssa.OpPPC64RLDICR:
+ case ssa.OpPPC64RLDICL, ssa.OpPPC64RLDICLCC, ssa.OpPPC64RLDICR:
sh, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
p := s.Prog(v.Op.Asm())
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: sh}
switch v.Op {
- case ssa.OpPPC64RLDICL:
+ case ssa.OpPPC64RLDICL, ssa.OpPPC64RLDICLCC:
p.AddRestSourceConst(mb)
case ssa.OpPPC64RLDICR:
p.AddRestSourceConst(me)
}
p.Reg = v.Args[0].Reg()
- p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.ResultReg()}
case ssa.OpPPC64RLWNM:
_, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
@@ -691,7 +691,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
case ssa.OpPPC64ADDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst,
ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst,
- ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst, ssa.OpPPC64MULLWconst, ssa.OpPPC64MULLDconst:
+ ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst, ssa.OpPPC64MULLWconst, ssa.OpPPC64MULLDconst,
+ ssa.OpPPC64ANDconst:
p := s.Prog(v.Op.Asm())
p.Reg = v.Args[0].Reg()
p.From.Type = obj.TYPE_CONST
@@ -1916,17 +1917,17 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
case ssa.OpPPC64LoweredNilCheck:
if buildcfg.GOOS == "aix" {
- // CMP Rarg0, R0
+ // CMP Rarg0, $0
// BNE 2(PC)
// STW R0, 0(R0)
// NOP (so the BNE has somewhere to land)
- // CMP Rarg0, R0
+ // CMP Rarg0, $0
p := s.Prog(ppc64.ACMP)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
- p.To.Type = obj.TYPE_REG
- p.To.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = 0
// BNE 2(PC)
p2 := s.Prog(ppc64.ABNE)
@@ -2004,8 +2005,8 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
p := s.Prog(ppc64.ACMP)
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REG_R3
- p.To.Type = obj.TYPE_REG
- p.To.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = 0
p = s.Prog(ppc64.ABNE)
p.To.Type = obj.TYPE_BRANCH
diff --git a/src/cmd/compile/internal/rangefunc/rangefunc_test.go b/src/cmd/compile/internal/rangefunc/rangefunc_test.go
index 16856c648c..97ab254395 100644
--- a/src/cmd/compile/internal/rangefunc/rangefunc_test.go
+++ b/src/cmd/compile/internal/rangefunc/rangefunc_test.go
@@ -2,18 +2,19 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.rangefunc
-
package rangefunc_test
import (
+ "fmt"
+ "regexp"
"slices"
"testing"
)
+type Seq[T any] func(yield func(T) bool)
type Seq2[T1, T2 any] func(yield func(T1, T2) bool)
-// OfSliceIndex returns a Seq over the elements of s. It is equivalent
+// OfSliceIndex returns a Seq2 over the elements of s. It is equivalent
// to range s.
func OfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
return func(yield func(int, T) bool) {
@@ -54,6 +55,39 @@ func VeryBadOfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
}
}
+// SwallowPanicOfSliceIndex hides panics and converts them to a normal return.
+func SwallowPanicOfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
+ return func(yield func(int, T) bool) {
+ for i, v := range s {
+ done := false
+ func() {
+ defer func() {
+ if r := recover(); r != nil {
+ done = true
+ }
+ }()
+ done = !yield(i, v)
+ }()
+ if done {
+ return
+ }
+ }
+ return
+ }
+}
+
+// PanickyOfSliceIndex iterates the slice but panics if it exits the loop early
+func PanickyOfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
+ return func(yield func(int, T) bool) {
+ for i, v := range s {
+ if !yield(i, v) {
+ panic(fmt.Errorf("Panicky iterator panicking"))
+ }
+ }
+ return
+ }
+}
+
// CooperativeBadOfSliceIndex calls the loop body from a goroutine after
// a ping on a channel, and returns recover() on that same channel.
func CooperativeBadOfSliceIndex[T any, S ~[]T](s S, proceed chan any) Seq2[int, T] {
@@ -82,6 +116,22 @@ type TrickyIterator struct {
yield func(int, int) bool
}
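+// iterEcho saves yield for future abuse; if a yield was saved by an earlier
+// loop, it also echoes each element through that stale yield.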
+func (ti *TrickyIterator) iterEcho(s []int) Seq2[int, int] {
+ return func(yield func(int, int) bool) {
+ for i, v := range s {
+ if !yield(i, v) {
+ ti.yield = yield
+ return
+ }
+ if ti.yield != nil && !ti.yield(i, v) {
+ return
+ }
+ }
+ ti.yield = yield
+ return
+ }
+}
+
func (ti *TrickyIterator) iterAll(s []int) Seq2[int, int] {
return func(yield func(int, int) bool) {
ti.yield = yield // Save yield for future abuse
@@ -118,36 +168,137 @@ func (ti *TrickyIterator) fail() {
}
}
-// Check wraps the function body passed to iterator forall
+const DONE = 0 // body of loop has exited in a non-panic way
+const READY = 1 // body of loop has not exited yet, is not running
+const PANIC = 2 // body of loop is either currently running, or has panicked
+const EXHAUSTED = 3 // iterator function returned, i.e., the sequence is "exhausted"
+
+const MISSING_PANIC = 4 // not a state; an extra index into fail for reporting a swallowed panic
+
+// Check2 wraps the function body passed to iterator forall
// in code that ensures that it cannot (successfully) be called
// either after the body returns false (control flow out of the loop) or
// after forall itself returns (the iteration is now done).
//
// Note that this can catch errors before the inserted checks.
-func Check[U, V any](forall Seq2[U, V]) Seq2[U, V] {
+func Check2[U, V any](forall Seq2[U, V]) Seq2[U, V] {
return func(body func(U, V) bool) {
- ret := true
+ state := READY
forall(func(u U, v V) bool {
- if !ret {
- panic("Checked iterator access after exit")
+ if state != READY {
+ panic(fail[state])
+ }
+ state = PANIC
+ ret := body(u, v)
+ if ret {
+ state = READY
+ } else {
+ state = DONE
+ }
+ return ret
+ })
+ if state == PANIC {
+ panic(fail[MISSING_PANIC])
+ }
+ state = EXHAUSTED
+ }
+}
+
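+// Check is the Seq (single-value) analogue of Check2.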
+func Check[U any](forall Seq[U]) Seq[U] {
+ return func(body func(U) bool) {
+ state := READY
+ forall(func(u U) bool {
+ if state != READY {
+ panic(fail[state])
+ }
+ state = PANIC
+ ret := body(u)
+ if ret {
+ state = READY
+ } else {
+ state = DONE
}
- ret = body(u, v)
return ret
})
- ret = false
+ if state == PANIC {
+ panic(fail[MISSING_PANIC])
+ }
+ state = EXHAUSTED
+ }
+}
+
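+// matchError reports whether the recovered value r matches the expectation x:
+// a checked error (errorString) must equal x exactly, any other error is
+// matched against x as a regular expression, and an empty x matches any
+// non-nil r.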
+func matchError(r any, x string) bool {
+ if r == nil {
+ return false
+ }
+ if x == "" {
+ return true
}
+ if p, ok := r.(errorString); ok {
+ return p.Error() == x
+ }
+ if p, ok := r.(error); ok {
+ e, err := regexp.Compile(x)
+ if err != nil {
+ panic(fmt.Errorf("Bad regexp '%s' passed to matchError", x))
+ }
+ return e.MatchString(p.Error())
+ }
+ return false
+}
+
+func matchErrorHelper(t *testing.T, r any, x string) {
+ if matchError(r, x) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v', expected '%s'", r, x)
+ }
+}
+
+// An errorString represents a runtime error described by a single string.
+type errorString string
+
+func (e errorString) Error() string {
+ return string(e)
+}
+
+const (
+	// RERR_ is for runtime errors; the values may be matched as regexps/substrings, to simplify use of tests with tools
+ RERR_DONE = "runtime error: range function continued iteration after function for loop body returned false"
+ RERR_PANIC = "runtime error: range function continued iteration after loop body panic"
+ RERR_EXHAUSTED = "runtime error: range function continued iteration after whole loop exit"
+ RERR_MISSING = "runtime error: range function recovered a loop body panic and did not resume panicking"
+
+ // CERR_ is for checked errors in the Check combinator defined above, and should be literal strings
+ CERR_PFX = "checked rangefunc error: "
+ CERR_DONE = CERR_PFX + "loop iteration after body done"
+ CERR_PANIC = CERR_PFX + "loop iteration after panic"
+ CERR_EXHAUSTED = CERR_PFX + "loop iteration after iterator exit"
+ CERR_MISSING = CERR_PFX + "loop iterator swallowed panic"
+)
+
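+// fail is indexed by the state constants above (DONE, READY, PANIC,
+// EXHAUSTED, MISSING_PANIC); the READY slot should never be reported.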
+var fail []error = []error{
+ errorString(CERR_DONE),
+ errorString(CERR_PFX + "loop iterator, unexpected error"),
+ errorString(CERR_PANIC),
+ errorString(CERR_EXHAUSTED),
+ errorString(CERR_MISSING),
}
func TestCheck(t *testing.T) {
i := 0
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, CERR_DONE) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
} else {
t.Error("Wanted to see a failure")
}
}()
- for _, x := range Check(BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})) {
+ for _, x := range Check2(BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})) {
i += x
if i > 4*9 {
break
@@ -166,7 +317,11 @@ func TestCooperativeBadOfSliceIndex(t *testing.T) {
}
proceed <- true
if r := <-proceed; r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, RERR_EXHAUSTED) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
} else {
t.Error("Wanted to see a failure")
}
@@ -177,10 +332,10 @@ func TestCooperativeBadOfSliceIndex(t *testing.T) {
}
}
-func TestCheckCooperativeBadOfSliceIndex(t *testing.T) {
+func TestCooperativeBadOfSliceIndexCheck(t *testing.T) {
i := 0
proceed := make(chan any)
- for _, x := range Check(CooperativeBadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, proceed)) {
+ for _, x := range Check2(CooperativeBadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, proceed)) {
i += x
if i >= 36 {
break
@@ -188,7 +343,12 @@ func TestCheckCooperativeBadOfSliceIndex(t *testing.T) {
}
proceed <- true
if r := <-proceed; r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, CERR_EXHAUSTED) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
} else {
t.Error("Wanted to see a failure")
}
@@ -217,7 +377,11 @@ func TestTrickyIterAll(t *testing.T) {
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, RERR_EXHAUSTED) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
} else {
t.Error("Wanted to see a failure")
}
@@ -241,7 +405,11 @@ func TestTrickyIterOne(t *testing.T) {
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, RERR_EXHAUSTED) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
} else {
t.Error("Wanted to see a failure")
}
@@ -265,7 +433,11 @@ func TestTrickyIterZero(t *testing.T) {
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, RERR_EXHAUSTED) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
} else {
t.Error("Wanted to see a failure")
}
@@ -274,10 +446,10 @@ func TestTrickyIterZero(t *testing.T) {
trickItZero.fail()
}
-func TestCheckTrickyIterZero(t *testing.T) {
+func TestTrickyIterZeroCheck(t *testing.T) {
trickItZero := TrickyIterator{}
i := 0
- for _, x := range Check(trickItZero.iterZero([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})) {
+ for _, x := range Check2(trickItZero.iterZero([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})) {
i += x
if i >= 36 {
break
@@ -289,7 +461,11 @@ func TestCheckTrickyIterZero(t *testing.T) {
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, CERR_EXHAUSTED) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
} else {
t.Error("Wanted to see a failure")
}
@@ -298,6 +474,78 @@ func TestCheckTrickyIterZero(t *testing.T) {
trickItZero.fail()
}
+func TestTrickyIterEcho(t *testing.T) {
+ trickItAll := TrickyIterator{}
+ i := 0
+ for _, x := range trickItAll.iterAll([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ t.Logf("first loop i=%d", i)
+ i += x
+ if i >= 10 {
+ break
+ }
+ }
+
+ if i != 10 {
+ t.Errorf("Expected i == 10, saw %d instead", i)
+ } else {
+ t.Logf("i = %d", i)
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ if matchError(r, RERR_EXHAUSTED) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ i = 0
+ for _, x := range trickItAll.iterEcho([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ t.Logf("second loop i=%d", i)
+ if x >= 5 {
+ break
+ }
+ }
+
+}
+
+func TestTrickyIterEcho2(t *testing.T) {
+ trickItAll := TrickyIterator{}
+ var i int
+
+ defer func() {
+ if r := recover(); r != nil {
+ if matchError(r, RERR_EXHAUSTED) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ for k := range 2 {
+ i = 0
+ for _, x := range trickItAll.iterEcho([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ t.Logf("k,x,i=%d,%d,%d", k, x, i)
+ i += x
+ if i >= 10 {
+ break
+ }
+ }
+ t.Logf("i = %d", i)
+
+ if i != 10 {
+ t.Errorf("Expected i == 10, saw %d instead", i)
+ }
+ }
+}
+
// TestBreak1 should just work, with well-behaved iterators.
// (The misbehaving iterator detector should not trigger.)
func TestBreak1(t *testing.T) {
@@ -412,7 +660,11 @@ func TestBreak1BadA(t *testing.T) {
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
if !slices.Equal(expect, result) {
t.Errorf("Expected %v, got %v", expect, result)
}
@@ -443,7 +695,11 @@ func TestBreak1BadB(t *testing.T) {
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
if !slices.Equal(expect, result) {
t.Errorf("Expected %v, got %v", expect, result)
}
@@ -507,7 +763,11 @@ func TestMultiCont1(t *testing.T) {
var expect = []int{1000, 10, 2, 4}
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
if !slices.Equal(expect, result) {
t.Errorf("Expected %v, got %v", expect, result)
}
@@ -551,7 +811,11 @@ func TestMultiCont2(t *testing.T) {
var expect = []int{1000, 10, 2, 4}
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
if !slices.Equal(expect, result) {
t.Errorf("Expected %v, got %v", expect, result)
}
@@ -595,7 +859,11 @@ func TestMultiCont3(t *testing.T) {
var expect = []int{1000, 10, 2, 4}
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
if !slices.Equal(expect, result) {
t.Errorf("Expected %v, got %v", expect, result)
}
@@ -639,7 +907,11 @@ func TestMultiBreak0(t *testing.T) {
var expect = []int{1000, 10, 2, 4}
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
if !slices.Equal(expect, result) {
t.Errorf("Expected %v, got %v", expect, result)
}
@@ -683,7 +955,11 @@ func TestMultiBreak1(t *testing.T) {
var expect = []int{1000, 10, 2, 4}
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
if !slices.Equal(expect, result) {
t.Errorf("Expected %v, got %v", expect, result)
}
@@ -727,7 +1003,11 @@ func TestMultiBreak2(t *testing.T) {
var expect = []int{1000, 10, 2, 4}
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
if !slices.Equal(expect, result) {
t.Errorf("Expected %v, got %v", expect, result)
}
@@ -771,7 +1051,11 @@ func TestMultiBreak3(t *testing.T) {
var expect = []int{1000, 10, 2, 4}
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
if !slices.Equal(expect, result) {
t.Errorf("Expected %v, got %v", expect, result)
}
@@ -808,6 +1092,229 @@ W:
}
}
+func TestPanickyIterator1(t *testing.T) {
+ var result []int
+ var expect = []int{1, 2, 3, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ if matchError(r, "Panicky iterator panicking") {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ }()
+ for _, z := range PanickyOfSliceIndex([]int{1, 2, 3, 4}) {
+ result = append(result, z)
+ if z == 4 {
+ break
+ }
+ }
+}
+
+func TestPanickyIterator1Check(t *testing.T) {
+ var result []int
+ var expect = []int{1, 2, 3, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ if matchError(r, "Panicky iterator panicking") {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+ for _, z := range Check2(PanickyOfSliceIndex([]int{1, 2, 3, 4})) {
+ result = append(result, z)
+ if z == 4 {
+ break
+ }
+ }
+}
+
+func TestPanickyIterator2(t *testing.T) {
+ var result []int
+ var expect = []int{100, 10, 1, 2}
+ defer func() {
+ if r := recover(); r != nil {
+ if matchError(r, RERR_MISSING) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ }()
+ for _, x := range OfSliceIndex([]int{100, 200}) {
+ result = append(result, x)
+ Y:
+	// swallows panics and iterates to end, BUT `break Y` disables the body, so --> 10, 1, 2
+ for _, y := range VeryBadOfSliceIndex([]int{10, 20}) {
+ result = append(result, y)
+
+ // converts early exit into a panic --> 1, 2
+ for k, z := range PanickyOfSliceIndex([]int{1, 2}) { // iterator panics
+ result = append(result, z)
+ if k == 1 {
+ break Y
+ }
+ }
+ }
+ }
+}
+
+func TestPanickyIterator2Check(t *testing.T) {
+ var result []int
+ var expect = []int{100, 10, 1, 2}
+ defer func() {
+ if r := recover(); r != nil {
+ if matchError(r, CERR_MISSING) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ }()
+ for _, x := range Check2(OfSliceIndex([]int{100, 200})) {
+ result = append(result, x)
+ Y:
+	// swallows panics and iterates to end, BUT `break Y` disables the body, so --> 10, 1, 2
+ for _, y := range Check2(VeryBadOfSliceIndex([]int{10, 20})) {
+ result = append(result, y)
+
+ // converts early exit into a panic --> 1, 2
+ for k, z := range Check2(PanickyOfSliceIndex([]int{1, 2})) { // iterator panics
+ result = append(result, z)
+ if k == 1 {
+ break Y
+ }
+ }
+ }
+ }
+}
+
+func TestPanickyIterator3(t *testing.T) {
+ var result []int
+ var expect = []int{100, 10, 1, 2, 200, 10, 1, 2}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Errorf("Unexpected panic '%v'", r)
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ }()
+ for _, x := range OfSliceIndex([]int{100, 200}) {
+ result = append(result, x)
+ Y:
+	// swallows panics and iterates to end, BUT `break Y` disables the body, so --> 10, 1, 2
+ // This is cross-checked against the checked iterator below; the combinator should behave the same.
+ for _, y := range VeryBadOfSliceIndex([]int{10, 20}) {
+ result = append(result, y)
+
+ for k, z := range OfSliceIndex([]int{1, 2}) { // iterator does not panic
+ result = append(result, z)
+ if k == 1 {
+ break Y
+ }
+ }
+ }
+ }
+}
+
+func TestPanickyIterator3Check(t *testing.T) {
+ var result []int
+ var expect = []int{100, 10, 1, 2, 200, 10, 1, 2}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Errorf("Unexpected panic '%v'", r)
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ }()
+ for _, x := range Check2(OfSliceIndex([]int{100, 200})) {
+ result = append(result, x)
+ Y:
+	// swallows panics and iterates to end, BUT `break Y` disables the body, so --> 10, 1, 2
+ for _, y := range Check2(VeryBadOfSliceIndex([]int{10, 20})) {
+ result = append(result, y)
+
+ for k, z := range Check2(OfSliceIndex([]int{1, 2})) { // iterator does not panic
+ result = append(result, z)
+ if k == 1 {
+ break Y
+ }
+ }
+ }
+ }
+}
+
+func TestPanickyIterator4(t *testing.T) {
+ var result []int
+ var expect = []int{1, 2, 3}
+ defer func() {
+ if r := recover(); r != nil {
+ if matchError(r, RERR_MISSING) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ }()
+ for _, x := range SwallowPanicOfSliceIndex([]int{1, 2, 3, 4}) {
+ result = append(result, x)
+ if x == 3 {
+ panic("x is 3")
+ }
+ }
+
+}
+
+func TestPanickyIterator4Check(t *testing.T) {
+ var result []int
+ var expect = []int{1, 2, 3}
+ defer func() {
+ if r := recover(); r != nil {
+ if matchError(r, CERR_MISSING) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ }()
+ for _, x := range Check2(SwallowPanicOfSliceIndex([]int{1, 2, 3, 4})) {
+ result = append(result, x)
+ if x == 3 {
+ panic("x is 3")
+ }
+ }
+
+}
+
// veryBad tests that a loop nest behaves sensibly in the face of a
// "very bad" iterator. In this case, "sensibly" means that the
// break out of X still occurs after the very bad iterator finally
@@ -833,17 +1340,17 @@ X:
return result
}
-// checkVeryBad wraps a "very bad" iterator with Check,
+// veryBadCheck wraps a "very bad" iterator with Check,
// demonstrating that the very bad iterator also hides panics
// thrown by Check.
-func checkVeryBad(s []int) []int {
+func veryBadCheck(s []int) []int {
var result []int
X:
for _, x := range OfSliceIndex([]int{1, 2, 3}) {
result = append(result, x)
- for _, y := range Check(VeryBadOfSliceIndex(s)) {
+ for _, y := range Check2(VeryBadOfSliceIndex(s)) {
result = append(result, y)
break X
}
@@ -900,10 +1407,10 @@ func TestVeryBad2(t *testing.T) {
}
}
-// TestCheckVeryBad checks the behavior of an extremely poorly behaved iterator,
+// TestVeryBadCheck checks the behavior of an extremely poorly behaved iterator,
// which also suppresses the exceptions from "Check"
-func TestCheckVeryBad(t *testing.T) {
- result := checkVeryBad([]int{10, 20, 30, 40}) // even length
+func TestVeryBadCheck(t *testing.T) {
+ result := veryBadCheck([]int{10, 20, 30, 40}) // even length
expect := []int{1, 10}
if !slices.Equal(expect, result) {
@@ -929,7 +1436,11 @@ func testBreak1BadDefer(t *testing.T) (result []int) {
defer func() {
if r := recover(); r != nil {
- t.Logf("Saw expected panic '%v'", r)
+ if matchError(r, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
if !slices.Equal(expect, result) {
t.Errorf("(Inner) Expected %v, got %v", expect, result)
}
@@ -1036,11 +1547,40 @@ func testReturn3(t *testing.T) (result []int, err any) {
return
}
+// testReturn4 has no bad iterators, but exercises return variable rewriting.
+// It differs from testReturn1 because the deferred append to "result" does not
+// change the return value in this case.
+func testReturn4(t *testing.T) (_ []int, _ []int, err any) {
+ var result []int
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ return result, result, nil
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+ return
+}
+
// TestReturns checks that returns through bad iterators behave properly,
// for inner and outer bad iterators.
func TestReturns(t *testing.T) {
var result []int
+ var result2 []int
var expect = []int{-1, 1, 2, -10}
+ var expect2 = []int{-1, 1, 2}
var err any
result, err = testReturn1(t)
@@ -1058,7 +1598,11 @@ func TestReturns(t *testing.T) {
if err == nil {
t.Errorf("Missing expected error")
} else {
- t.Logf("Saw expected panic '%v'", err)
+ if matchError(err, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", err)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", err)
+ }
}
result, err = testReturn3(t)
@@ -1068,9 +1612,23 @@ func TestReturns(t *testing.T) {
if err == nil {
t.Errorf("Missing expected error")
} else {
- t.Logf("Saw expected panic '%v'", err)
+ if matchError(err, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", err)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", err)
+ }
}
+ result, result2, err = testReturn4(t)
+ if !slices.Equal(expect2, result) {
+ t.Errorf("Expected %v, got %v", expect2, result)
+ }
+ if !slices.Equal(expect2, result2) {
+ t.Errorf("Expected %v, got %v", expect2, result2)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
}
// testGotoA1 tests loop-nest-internal goto, no bad iterators.
@@ -1169,7 +1727,11 @@ func TestGotoA(t *testing.T) {
if err == nil {
t.Errorf("Missing expected error")
} else {
- t.Logf("Saw expected panic '%v'", err)
+ if matchError(err, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", err)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", err)
+ }
}
result, err = testGotoA3(t)
@@ -1179,7 +1741,11 @@ func TestGotoA(t *testing.T) {
if err == nil {
t.Errorf("Missing expected error")
} else {
- t.Logf("Saw expected panic '%v'", err)
+ if matchError(err, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", err)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", err)
+ }
}
}
@@ -1282,7 +1848,11 @@ func TestGotoB(t *testing.T) {
if err == nil {
t.Errorf("Missing expected error")
} else {
- t.Logf("Saw expected panic '%v'", err)
+ if matchError(err, RERR_DONE) {
+ t.Logf("Saw expected panic '%v'", err)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", err)
+ }
}
result, err = testGotoB3(t)
@@ -1292,6 +1862,240 @@ func TestGotoB(t *testing.T) {
if err == nil {
t.Errorf("Missing expected error")
} else {
- t.Logf("Saw expected panic '%v'", err)
+ matchErrorHelper(t, err, RERR_DONE)
+ }
+}
+
+// once returns an iterator that runs its loop body once with the supplied value
+func once[T any](x T) Seq[T] {
+ return func(yield func(T) bool) {
+ yield(x)
+ }
+}
+
+// terrify converts an iterator into one that panics with the supplied string
+// if/when the loop body terminates early (i.e., yield returns false because of
+// a break, goto, outer continue, or return).
+func terrify[T any](s string, forall Seq[T]) Seq[T] {
+ return func(yield func(T) bool) {
+ forall(func(v T) bool {
+ if !yield(v) {
+ panic(s)
+ }
+ return true
+ })
+ }
+}
+
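+// use does nothing; it marks its argument as used so that shadowing
+// declarations in the functions below compile.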
+func use[T any](T) {
+}
+
+// f runs a non-rangefunc iterator that recovers from a panic that follows execution of a return.
+// What does f return?
+func f() string {
+ defer func() { recover() }()
+ defer panic("f panic")
+ for _, s := range []string{"f return"} {
+ return s
+ }
+ return "f not reached"
+}
+
+// g runs a rangefunc iterator that recovers from a panic that follows execution of a return.
+// What does g return?
+func g() string {
+ defer func() { recover() }()
+ for s := range terrify("g panic", once("g return")) {
+ return s
+ }
+ return "g not reached"
+}
+
+// h runs a rangefunc iterator that recovers from a panic that follows execution of a return.
+// The panic occurs in the rangefunc iterator itself.
+// What does h return?
+func h() (hashS string) {
+ defer func() { recover() }()
+ for s := range terrify("h panic", once("h return")) {
+ hashS := s
+ use(hashS)
+ return s
+ }
+ return "h not reached"
+}
+
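+// j is like h, but it assigns the named result directly and uses a bare return.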
+func j() (hashS string) {
+ defer func() { recover() }()
+ for s := range terrify("j panic", once("j return")) {
+ hashS = s
+ return
+ }
+ return "j not reached"
+}
+
+// k runs a rangefunc iterator that recovers from a panic that follows execution of a return.
+// The panic occurs in the rangefunc iterator itself.
+// k includes an additional mechanism for making the return happen.
+// What does k return?
+func k() (hashS string) {
+ _return := func(s string) { hashS = s }
+
+ defer func() { recover() }()
+ for s := range terrify("k panic", once("k return")) {
+ _return(s)
+ return
+ }
+ return "k not reached"
+}
+
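+// m is like k, but the assignment to the named result happens in a defer,
+// replacing the value returned from inside the loop.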
+func m() (hashS string) {
+ _return := func(s string) { hashS = s }
+
+ defer func() { recover() }()
+ for s := range terrify("m panic", once("m return")) {
+ defer _return(s)
+ return s + ", but should be replaced in a defer"
+ }
+ return "m not reached"
+}
+
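+// n builds its result from a rangefunc return plus the result of a nested
+// closure that itself returns from a rangefunc loop.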
+func n() string {
+ defer func() { recover() }()
+ for s := range terrify("n panic", once("n return")) {
+ return s + func(s string) string {
+ defer func() { recover() }()
+ for s := range terrify("n closure panic", once(s)) {
+ return s
+ }
+ return "n closure not reached"
+ }(" and n closure return")
+ }
+ return "n not reached"
+}
+
+type terrifyTestCase struct {
+ f func() string
+ e string
+}
+
+func TestPanicReturns(t *testing.T) {
+ tcs := []terrifyTestCase{
+ {f, "f return"},
+ {g, "g return"},
+ {h, "h return"},
+ {k, "k return"},
+ {j, "j return"},
+ {m, "m return"},
+ {n, "n return and n closure return"},
+ }
+
+ for _, tc := range tcs {
+ got := tc.f()
+ if got != tc.e {
+ t.Errorf("Got %s expected %s", got, tc.e)
+ } else {
+ t.Logf("Got expected %s", got)
+ }
+ }
+}
+
+// twice calls yield twice, the first time defer-recover-saving any panic,
+// for re-panicking later if the second call to yield does not also panic.
+// If the first call panicked, the second call ought to also panic because
+// it was called after a panic-termination of the loop body.
+func twice[T any](x, y T) Seq[T] {
+ return func(yield func(T) bool) {
+ var p any
+ done := false
+ func() {
+ defer func() {
+ p = recover()
+ }()
+ done = !yield(x)
+ }()
+ if done {
+ return
+ }
+ yield(y)
+ if p != nil {
+ // do not swallow the panic
+ panic(p)
+ }
+ }
+}
+
+func TestRunBodyAfterPanic(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ if matchError(r, RERR_PANIC) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
+ } else {
+			t.Errorf("Wanted to see a failure")
+ }
+ }()
+ for x := range twice(0, 1) {
+ if x == 0 {
+ panic("x is zero")
+ }
+ }
+}
+
+func TestRunBodyAfterPanicCheck(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ if matchError(r, CERR_PANIC) {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Errorf("Saw wrong panic '%v'", r)
+ }
+ } else {
+			t.Errorf("Wanted to see a failure")
+ }
+ }()
+ for x := range Check(twice(0, 1)) {
+ if x == 0 {
+ panic("x is zero")
+ }
+ }
+}
+
+func TestTwoLevelReturn(t *testing.T) {
+ f := func() int {
+ for a := range twice(0, 1) {
+ for b := range twice(0, 2) {
+ x := a + b
+ t.Logf("x=%d", x)
+ if x == 3 {
+ return x
+ }
+ }
+ }
+ return -1
+ }
+ y := f()
+ if y != 3 {
+ t.Errorf("Expected y=3, got y=%d\n", y)
+ }
+}
+
+func TestTwoLevelReturnCheck(t *testing.T) {
+ f := func() int {
+ for a := range Check(twice(0, 1)) {
+ for b := range Check(twice(0, 2)) {
+ x := a + b
+ t.Logf("a=%d, b=%d, x=%d", a, b, x)
+ if x == 3 {
+ return x
+ }
+ }
+ }
+ return -1
+ }
+ y := f()
+ if y != 3 {
+ t.Errorf("Expected y=3, got y=%d\n", y)
}
}
diff --git a/src/cmd/compile/internal/rangefunc/rewrite.go b/src/cmd/compile/internal/rangefunc/rewrite.go
index d439412ea8..2dcdc3f018 100644
--- a/src/cmd/compile/internal/rangefunc/rewrite.go
+++ b/src/cmd/compile/internal/rangefunc/rewrite.go
@@ -99,47 +99,31 @@ The return false breaks the loop. Then when f returns, the "check
which causes the return we want.
-Return with arguments is more involved. We need somewhere to store the
-arguments while we break out of f, so we add them to the var
-declaration, like:
-
- {
- var (
- #next int
- #r1 type1
- #r2 type2
- )
- f(func(x T1) bool {
- ...
- {
- // return a, b
- #r1, #r2 = a, b
- #next = -2
- return false
- }
- ...
- return true
- })
- if #next == -2 { return #r1, #r2 }
- }
-
-TODO: What about:
-
- func f() (x bool) {
- for range g(&x) {
- return true
- }
- }
-
- func g(p *bool) func(func() bool) {
- return func(yield func() bool) {
- yield()
- // Is *p true or false here?
+Return with arguments is more involved, and has to deal with
+corner cases involving panic, defer, and recover. The results
+of the enclosing function or closure are rewritten to give them
+names if they don't have them already, and the names are assigned
+at the return site.
+
+ func foo() (#rv1 A, #rv2 B) {
+
+ {
+ var (
+ #next int
+ )
+ f(func(x T1) bool {
+ ...
+ {
+ // return a, b
+ #rv1, #rv2 = a, b
+ #next = -1
+ return false
+ }
+ ...
+ return true
+ })
+ if #next == -1 { return }
}
- }
-
-With this rewrite the "return true" is not visible after yield returns,
-but maybe it should be?
# Checking
@@ -147,8 +131,44 @@ To permit checking that an iterator is well-behaved -- that is, that
it does not call the loop body again after it has returned false or
after the entire loop has exited (it might retain a copy of the body
function, or pass it to another goroutine) -- each generated loop has
-its own #exitK flag that is checked before each iteration, and set both
-at any early exit and after the iteration completes.
+its own #stateK variable that is used to check that calls to the
+yield function for a loop body follow the permitted pattern.
+
+The state values are:
+
+abi.RF_DONE = 0 // body of loop has exited in a non-panic way
+abi.RF_READY = 1 // body of loop has not exited yet, is not running
+abi.RF_PANIC = 2 // body of loop is either currently running, or has panicked
+abi.RF_EXHAUSTED = 3 // iterator function call, e.g. f(func(x t){...}), returned, so the sequence is "exhausted".
+
+abi.RF_MISSING_PANIC = 4 // used to report errors.
+
+The value of #stateK transitions at these points:
+(1) before calling the iterator function,
+
+ var #stateN = abi.RF_READY
+
+(2) after the iterator function call returns,
+
+ if #stateN == abi.RF_PANIC {
+ panic(runtime.panicrangestate(abi.RF_MISSING_PANIC))
+ }
+ #stateN = abi.RF_EXHAUSTED
+
+(3) at the beginning of the iteration of the loop body,
+
+ if #stateN != abi.RF_READY { runtime.panicrangestate(#stateN) }
+ #stateN = abi.RF_PANIC
+
+(4) when loop iteration continues,
+
+ #stateN = abi.RF_READY
+ [return true]
+
+(5) when control flow exits the loop body.
+
+ #stateN = abi.RF_DONE
+ [return false]
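+
+Thus an iterator that calls yield again after its loop has exited finds
+#stateN != abi.RF_READY at check (3) and panics with one of the
+"range function continued iteration ..." runtime errors.
+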
For example:
@@ -160,17 +180,23 @@ For example:
becomes
- {
- var #exit1 bool
- f(func(x T1) bool {
- if #exit1 { runtime.panicrangeexit() }
- ...
- if ... { #exit1 = true ; return false }
- ...
- return true
- })
- #exit1 = true
- }
+ {
+ var #state1 = abi.RF_READY
+ f(func(x T1) bool {
+ if #state1 != abi.RF_READY { runtime.panicrangestate(#state1) }
+ #state1 = abi.RF_PANIC
+ ...
+ if ... { #state1 = abi.RF_DONE ; return false }
+ ...
+ #state1 = abi.RF_READY
+ return true
+ })
+ if #state1 == abi.RF_PANIC {
+ // the code for the loop body did not return normally
+ panic(runtime.panicrangestate(abi.RF_MISSING_PANIC))
+ }
+ #state1 = abi.RF_EXHAUSTED
+ }
# Nested Loops
@@ -203,65 +229,83 @@ becomes
{
var (
#next int
- #r1 type1
- #r2 type2
)
- var #exit1 bool
- f(func() {
- if #exit1 { runtime.panicrangeexit() }
- var #exit2 bool
- g(func() {
- if #exit2 { runtime.panicrangeexit() }
+ var #state1 = abi.RF_READY
+ f(func() bool {
+ if #state1 != abi.RF_READY { runtime.panicrangestate(#state1) }
+ #state1 = abi.RF_PANIC
+ var #state2 = abi.RF_READY
+ g(func() bool {
+ if #state2 != abi.RF_READY { runtime.panicrangestate(#state2) }
...
{
// return a, b
- #r1, #r2 = a, b
- #next = -2
- #exit1, #exit2 = true, true
+ #rv1, #rv2 = a, b
+ #next = -1
+ #state2 = abi.RF_DONE
return false
}
...
+ #state2 = abi.RF_READY
return true
})
- #exit2 = true
+ if #state2 == abi.RF_PANIC {
+ panic(runtime.panicrangestate(abi.RF_MISSING_PANIC))
+ }
+ #state2 = abi.RF_EXHAUSTED
if #next < 0 {
+ #state1 = abi.RF_DONE
return false
}
+ #state1 = abi.RF_READY
return true
})
- #exit1 = true
- if #next == -2 {
- return #r1, #r2
+ if #state1 == abi.RF_PANIC {
+ panic(runtime.panicrangestate(abi.RF_MISSING_PANIC))
+ }
+ #state1 = abi.RF_EXHAUSTED
+ if #next == -1 {
+ return
}
}
-Note that the #next < 0 after the inner loop handles both kinds of
-return with a single check.
-
# Labeled break/continue of range-over-func loops
For a labeled break or continue of an outer range-over-func, we
-use positive #next values. Any such labeled break or continue
+use positive #next values.
+
+Any such labeled break or continue
really means "do N breaks" or "do N breaks and 1 continue".
-We encode that as perLoopStep*N or perLoopStep*N+1 respectively.
+
+The positive #next value encodes which level of loop N to target
+with a break or continue, where perLoopStep*N means break out of
+level N and perLoopStep*N-1 means continue the loop at level N.
+The outermost loop has level 1, so #next == perLoopStep means
+to break from the outermost loop, and #next == perLoopStep-1 means
+to continue the outermost loop.
Loops that might need to propagate a labeled break or continue
add one or both of these to the #next checks:
- if #next >= 2 {
- #next -= 2
- return false
- }
-
- if #next == 1 {
- #next = 0
- return true
- }
+ // N == depth of this loop, one less than the one just exited.
+ if #next != 0 {
+ if #next >= perLoopStep*N-1 { // break or continue this loop
+ if #next >= perLoopStep*N+1 { // error checking
+ // TODO reason about what exactly can appear
+ // here given full or partial checking.
+ runtime.panicrangestate(abi.RF_DONE)
+ }
+ rv := #next & 1 == 1 // code generates into #next&1
+ #next = 0
+ return rv
+ }
+ return false // or handle returns and gotos
+ }
-For example
+For example (with perLoopStep == 2):
- F: for range f {
- for range g {
+ F: for range f { // 1, 2
+ for range g { // 3, 4
for range h {
...
break F
@@ -278,52 +322,68 @@ becomes
{
var #next int
- var #exit1 bool
- f(func() {
- if #exit1 { runtime.panicrangeexit() }
- var #exit2 bool
- g(func() {
- if #exit2 { runtime.panicrangeexit() }
- var #exit3 bool
- h(func() {
- if #exit3 { runtime.panicrangeexit() }
+ var #state1 = abi.RF_READY
+ f(func() { // 1,2
+ if #state1 != abi.RF_READY { runtime.panicrangestate(#state1) }
+ #state1 = abi.RF_PANIC
+ var #state2 = abi.RF_READY
+ g(func() { // 3,4
+ if #state2 != abi.RF_READY { runtime.panicrangestate(#state2) }
+ #state2 = abi.RF_PANIC
+ var #state3 = abi.RF_READY
+ h(func() { // 5,6
+ if #state3 != abi.RF_READY { runtime.panicrangestate(#state3) }
+ #state3 = abi.RF_PANIC
...
{
// break F
- #next = 4
- #exit1, #exit2, #exit3 = true, true, true
+ #next = 2
+ #state3 = abi.RF_DONE
return false
}
...
{
// continue F
- #next = 3
- #exit2, #exit3 = true, true
+ #next = 1
+ #state3 = abi.RF_DONE
return false
}
...
+ #state3 = abi.RF_READY
return true
})
- #exit3 = true
- if #next >= 2 {
- #next -= 2
+ if #state3 == abi.RF_PANIC {
+ panic(runtime.panicrangestate(abi.RF_MISSING_PANIC))
+ }
+ #state3 = abi.RF_EXHAUSTED
+ if #next != 0 {
+			// no breaks or continues target this loop, so a pending #next propagates outward
+ #state2 = abi.RF_DONE
return false
}
return true
})
- #exit2 = true
- if #next >= 2 {
- #next -= 2
+ if #state2 == abi.RF_PANIC {
+ panic(runtime.panicrangestate(abi.RF_MISSING_PANIC))
+ }
+ #state2 = abi.RF_EXHAUSTED
+ if #next != 0 { // just exited g, test for break/continue applied to f/F
+ if #next >= 1 {
+ if #next >= 3 { runtime.panicrangestate(abi.RF_DONE) } // error
+ rv := #next&1 == 1
+ #next = 0
+ return rv
+ }
+ #state1 = abi.RF_DONE
return false
}
- if #next == 1 {
- #next = 0
- return true
- }
...
return true
})
- #exit1 = true
+ if #state1 == abi.RF_PANIC {
+ panic(runtime.panicrangestate(abi.RF_MISSING_PANIC))
+ }
+ #state1 = abi.RF_EXHAUSTED
}
Note that the post-h checks only consider a break,
@@ -332,13 +392,13 @@ since no generated code tries to continue g.
# Gotos and other labeled break/continue
The final control flow translations are goto and break/continue of a
-non-range-over-func statement. In both cases, we may need to break out
-of one or more range-over-func loops before we can do the actual
+non-range-over-func statement. In both cases, we may need to break
+out of one or more range-over-func loops before we can do the actual
control flow statement. Each such break/continue/goto L statement is
-assigned a unique negative #next value (below -2, since -1 and -2 are
-for the two kinds of return). Then the post-checks for a given loop
-test for the specific codes that refer to labels directly targetable
-from that block. Otherwise, the generic
+assigned a unique negative #next value (since -1 is return). Then
+the post-checks for a given loop test for the specific codes that
+refer to labels directly targetable from that block. Otherwise, the
+generic
if #next < 0 { return false }
@@ -363,39 +423,50 @@ becomes
Top: print("start\n")
{
var #next int
- var #exit1 bool
+ var #state1 = abi.RF_READY
f(func() {
- if #exit1 { runtime.panicrangeexit() }
- var #exit2 bool
+		if #state1 != abi.RF_READY { runtime.panicrangestate(#state1) }
+ #state1 = abi.RF_PANIC
+ var #state2 = abi.RF_READY
g(func() {
- if #exit2 { runtime.panicrangeexit() }
+ if #state2 != abi.RF_READY { runtime.panicrangestate(#state2) }
+ #state2 = abi.RF_PANIC
...
- var #exit3 bool
+			var #state3 = abi.RF_READY
h(func() {
- if #exit3 { runtime.panicrangeexit() }
+ if #state3 != abi.RF_READY { runtime.panicrangestate(#state3) }
+ #state3 = abi.RF_PANIC
...
{
// goto Top
#next = -3
- #exit1, #exit2, #exit3 = true, true, true
+ #state3 = abi.RF_DONE
return false
}
...
+ #state3 = abi.RF_READY
return true
})
- #exit3 = true
+			if #state3 == abi.RF_PANIC { runtime.panicrangestate(abi.RF_MISSING_PANIC) }
+ #state3 = abi.RF_EXHAUSTED
if #next < 0 {
+ #state2 = abi.RF_DONE
return false
}
+ #state2 = abi.RF_READY
return true
})
- #exit2 = true
+		if #state2 == abi.RF_PANIC { runtime.panicrangestate(abi.RF_MISSING_PANIC) }
+ #state2 = abi.RF_EXHAUSTED
if #next < 0 {
+ #state1 = abi.RF_DONE
return false
}
+ #state1 = abi.RF_READY
return true
})
- #exit1 = true
+	if #state1 == abi.RF_PANIC { runtime.panicrangestate(abi.RF_MISSING_PANIC) }
+ #state1 = abi.RF_EXHAUSTED
if #next == -3 {
#next = 0
goto Top
@@ -462,6 +533,7 @@ import (
"cmd/compile/internal/types2"
"fmt"
"go/constant"
+ "internal/abi"
"os"
)
@@ -472,6 +544,7 @@ var nopos syntax.Pos
type rewriter struct {
pkg *types2.Package
info *types2.Info
+ sig *types2.Signature
outer *syntax.FuncType
body *syntax.BlockStmt
@@ -493,11 +566,13 @@ type rewriter struct {
rewritten map[*syntax.ForStmt]syntax.Stmt
// Declared variables in generated code for outermost loop.
- declStmt *syntax.DeclStmt
- nextVar types2.Object
- retVars []types2.Object
- defers types2.Object
- exitVarCount int // exitvars are referenced from their respective loops
+ declStmt *syntax.DeclStmt
+ nextVar types2.Object
+ retVars []types2.Object
+ defers types2.Object
+ stateVarCount int // stateVars are referenced from their respective loops
+
+ rangefuncBodyClosures map[*syntax.FuncLit]bool
}
// A branch is a single labeled branch.
@@ -509,44 +584,58 @@ type branch struct {
// A forLoop describes a single range-over-func loop being processed.
type forLoop struct {
nfor *syntax.ForStmt // actual syntax
- exitFlag *types2.Var // #exit variable for this loop
- exitFlagDecl *syntax.VarDecl
+ stateVar *types2.Var // #state variable for this loop
+ stateVarDecl *syntax.VarDecl
+ depth int // outermost loop has depth 1, otherwise depth = depth(parent)+1
checkRet bool // add check for "return" after loop
- checkRetArgs bool // add check for "return args" after loop
checkBreak bool // add check for "break" after loop
checkContinue bool // add check for "continue" after loop
checkBranch []branch // add check for labeled branch after loop
}
+type State int
+
// Rewrite rewrites all the range-over-funcs in the files.
-func Rewrite(pkg *types2.Package, info *types2.Info, files []*syntax.File) {
+// It returns the set of function literals generated from rangefunc loop bodies.
+// This allows rangefunc loop bodies to be distinguished by debuggers.
+func Rewrite(pkg *types2.Package, info *types2.Info, files []*syntax.File) map[*syntax.FuncLit]bool {
+ ri := make(map[*syntax.FuncLit]bool)
for _, file := range files {
syntax.Inspect(file, func(n syntax.Node) bool {
switch n := n.(type) {
case *syntax.FuncDecl:
- rewriteFunc(pkg, info, n.Type, n.Body)
+ sig, _ := info.Defs[n.Name].Type().(*types2.Signature)
+ rewriteFunc(pkg, info, n.Type, n.Body, sig, ri)
return false
case *syntax.FuncLit:
- rewriteFunc(pkg, info, n.Type, n.Body)
+ sig, _ := info.Types[n].Type.(*types2.Signature)
+ if sig == nil {
+ tv := n.GetTypeInfo()
+ sig = tv.Type.(*types2.Signature)
+ }
+ rewriteFunc(pkg, info, n.Type, n.Body, sig, ri)
return false
}
return true
})
}
+ return ri
}
// rewriteFunc rewrites all the range-over-funcs in a single function (a top-level func or a func literal).
// The typ and body are the function's type and body.
-func rewriteFunc(pkg *types2.Package, info *types2.Info, typ *syntax.FuncType, body *syntax.BlockStmt) {
+func rewriteFunc(pkg *types2.Package, info *types2.Info, typ *syntax.FuncType, body *syntax.BlockStmt, sig *types2.Signature, ri map[*syntax.FuncLit]bool) {
if body == nil {
return
}
r := &rewriter{
- pkg: pkg,
- info: info,
- outer: typ,
- body: body,
+ pkg: pkg,
+ info: info,
+ outer: typ,
+ body: body,
+ sig: sig,
+ rangefuncBodyClosures: ri,
}
syntax.Inspect(body, r.inspect)
if (base.Flag.W != 0) && r.forStack != nil {
@@ -566,14 +655,19 @@ func (r *rewriter) checkFuncMisuse() bool {
func (r *rewriter) inspect(n syntax.Node) bool {
switch n := n.(type) {
case *syntax.FuncLit:
- rewriteFunc(r.pkg, r.info, n.Type, n.Body)
+ sig, _ := r.info.Types[n].Type.(*types2.Signature)
+ if sig == nil {
+ tv := n.GetTypeInfo()
+ sig = tv.Type.(*types2.Signature)
+ }
+ rewriteFunc(r.pkg, r.info, n.Type, n.Body, sig, r.rangefuncBodyClosures)
return false
default:
// Push n onto stack.
r.stack = append(r.stack, n)
if nfor, ok := forRangeFunc(n); ok {
- loop := &forLoop{nfor: nfor}
+ loop := &forLoop{nfor: nfor, depth: 1 + len(r.forStack)}
r.forStack = append(r.forStack, loop)
r.startLoop(loop)
}
@@ -627,8 +721,8 @@ func (r *rewriter) startLoop(loop *forLoop) {
r.rewritten = make(map[*syntax.ForStmt]syntax.Stmt)
}
if r.checkFuncMisuse() {
- // declare the exit flag for this loop's body
- loop.exitFlag, loop.exitFlagDecl = r.exitVar(loop.nfor.Pos())
+ // declare the state flag for this loop's body
+ loop.stateVar, loop.stateVarDecl = r.stateVar(loop.nfor.Pos())
}
}
@@ -674,61 +768,63 @@ func (r *rewriter) editDefer(x *syntax.CallStmt) syntax.Stmt {
}
// Attach the token as an "extra" argument to the defer.
- x.DeferAt = r.useVar(r.defers)
+ x.DeferAt = r.useObj(r.defers)
setPos(x.DeferAt, x.Pos())
return x
}
-func (r *rewriter) exitVar(pos syntax.Pos) (*types2.Var, *syntax.VarDecl) {
- r.exitVarCount++
+func (r *rewriter) stateVar(pos syntax.Pos) (*types2.Var, *syntax.VarDecl) {
+ r.stateVarCount++
- name := fmt.Sprintf("#exit%d", r.exitVarCount)
- typ := r.bool.Type()
+ name := fmt.Sprintf("#state%d", r.stateVarCount)
+ typ := r.int.Type()
obj := types2.NewVar(pos, r.pkg, name, typ)
n := syntax.NewName(pos, name)
setValueType(n, typ)
r.info.Defs[n] = obj
- return obj, &syntax.VarDecl{NameList: []*syntax.Name{n}}
+ return obj, &syntax.VarDecl{NameList: []*syntax.Name{n}, Values: r.stateConst(abi.RF_READY)}
}
// editReturn returns the replacement for the return statement x.
// See the "Return" section in the package doc comment above for more context.
func (r *rewriter) editReturn(x *syntax.ReturnStmt) syntax.Stmt {
- // #next = -1 is return with no arguments; -2 is return with arguments.
- var next int
- if x.Results == nil {
- next = -1
- r.forStack[0].checkRet = true
- } else {
- next = -2
- r.forStack[0].checkRetArgs = true
- }
-
- // Tell the loops along the way to check for a return.
- for _, loop := range r.forStack[1:] {
- loop.checkRet = true
- }
-
- // Assign results, set #next, and return false.
bl := &syntax.BlockStmt{}
+
if x.Results != nil {
- if r.retVars == nil {
+ // rewrite "return val" into "assign to named result; return"
+ if len(r.outer.ResultList) > 0 {
+ // Make sure that result parameters all have names
for i, a := range r.outer.ResultList {
- obj := r.declVar(fmt.Sprintf("#r%d", i+1), a.Type.GetTypeInfo().Type, nil)
- r.retVars = append(r.retVars, obj)
+ if a.Name == nil || a.Name.Value == "_" {
+ r.generateParamName(r.outer.ResultList, i) // updates a.Name
+ }
}
}
- bl.List = append(bl.List, &syntax.AssignStmt{Lhs: r.useList(r.retVars), Rhs: x.Results})
+ // Assign to named results
+ results := []types2.Object{}
+ for _, a := range r.outer.ResultList {
+ results = append(results, r.info.Defs[a.Name])
+ }
+ bl.List = append(bl.List, &syntax.AssignStmt{Lhs: r.useList(results), Rhs: x.Results})
+ x.Results = nil
}
+
+ next := -1 // return
+
+ // Tell the loops along the way to check for a return.
+ for _, loop := range r.forStack {
+ loop.checkRet = true
+ }
+
+ // Set #next, and return false.
+
bl.List = append(bl.List, &syntax.AssignStmt{Lhs: r.next(), Rhs: r.intConst(next)})
if r.checkFuncMisuse() {
- // mark all enclosing loop bodies as exited
- for i := 0; i < len(r.forStack); i++ {
- bl.List = append(bl.List, r.setExitedAt(i))
- }
+		// mark this loop as exited; the others (which will be exited if iterators do not interfere) have not exited yet.
+ bl.List = append(bl.List, r.setState(abi.RF_DONE, x.Pos()))
}
- bl.List = append(bl.List, &syntax.ReturnStmt{Results: r.useVar(r.false)})
+ bl.List = append(bl.List, &syntax.ReturnStmt{Results: r.useObj(r.false)})
setPos(bl, x.Pos())
return bl
}
@@ -769,7 +865,7 @@ func (r *rewriter) editBranch(x *syntax.BranchStmt) syntax.Stmt {
var ret *syntax.ReturnStmt
if x.Tok == syntax.Goto || i < 0 {
// goto Label
- // or break/continue of labeled non-range-over-func loop.
+ // or break/continue of labeled non-range-over-func loop (x.Label != nil).
// We may be able to leave it alone, or we may have to break
// out of one or more nested loops and then use #next to signal
// to complete the break/continue/goto.
@@ -794,37 +890,34 @@ func (r *rewriter) editBranch(x *syntax.BranchStmt) syntax.Stmt {
exitFrom = i + 1
// Mark loop we exit to get to targ to check for that branch.
- // When i==-1 that's the outermost func body
- top := r.forStack[i+1]
+ // When i==-1 / exitFrom == 0 that's the outermost func body.
+ top := r.forStack[exitFrom]
top.checkBranch = append(top.checkBranch, branch{x.Tok, label})
// Mark loops along the way to check for a plain return, so they break.
- for j := i + 2; j < len(r.forStack); j++ {
+ for j := exitFrom + 1; j < len(r.forStack); j++ {
r.forStack[j].checkRet = true
}
// In the innermost loop, use a plain "return false".
- ret = &syntax.ReturnStmt{Results: r.useVar(r.false)}
+ ret = &syntax.ReturnStmt{Results: r.useObj(r.false)}
} else {
// break/continue of labeled range-over-func loop.
- depth := len(r.forStack) - 1 - i
-
- // For continue of innermost loop, use "return true".
- // Otherwise we are breaking the innermost loop, so "return false".
-
- if depth == 0 && x.Tok == syntax.Continue {
- ret = &syntax.ReturnStmt{Results: r.useVar(r.true)}
- setPos(ret, x.Pos())
- return ret
- }
- ret = &syntax.ReturnStmt{Results: r.useVar(r.false)}
-
- // If this is a simple break, mark this loop as exited and return false.
- // No adjustments to #next.
- if depth == 0 {
+ if exitFrom == len(r.forStack) {
+ // Simple break or continue.
+			// Continue returns true and break returns false; both may adjust the
+			// state, and neither modifies #next.
+ var state abi.RF_State
+ if x.Tok == syntax.Continue {
+ ret = &syntax.ReturnStmt{Results: r.useObj(r.true)}
+ state = abi.RF_READY
+ } else {
+ ret = &syntax.ReturnStmt{Results: r.useObj(r.false)}
+ state = abi.RF_DONE
+ }
var stmts []syntax.Stmt
if r.checkFuncMisuse() {
- stmts = []syntax.Stmt{r.setExited(), ret}
+ stmts = []syntax.Stmt{r.setState(state, x.Pos()), ret}
} else {
stmts = []syntax.Stmt{ret}
}
@@ -835,12 +928,14 @@ func (r *rewriter) editBranch(x *syntax.BranchStmt) syntax.Stmt {
return bl
}
+ ret = &syntax.ReturnStmt{Results: r.useObj(r.false)}
+
// The loop inside the one we are break/continue-ing
// needs to make that happen when we break out of it.
if x.Tok == syntax.Continue {
r.forStack[exitFrom].checkContinue = true
} else {
- exitFrom = i
+ exitFrom = i // exitFrom--
r.forStack[exitFrom].checkBreak = true
}
@@ -851,7 +946,7 @@ func (r *rewriter) editBranch(x *syntax.BranchStmt) syntax.Stmt {
// Set next to break the appropriate number of times;
// the final time may be a continue, not a break.
- next = perLoopStep * depth
+ next = perLoopStep * (i + 1)
if x.Tok == syntax.Continue {
next--
}
@@ -864,10 +959,9 @@ func (r *rewriter) editBranch(x *syntax.BranchStmt) syntax.Stmt {
}
if r.checkFuncMisuse() {
- // Set #exitK for this loop and those exited by the control flow.
- for i := exitFrom; i < len(r.forStack); i++ {
- bl.List = append(bl.List, r.setExitedAt(i))
- }
+ // Set #stateK for this loop.
+ // The exterior loops have not exited yet, and the iterator might interfere.
+ bl.List = append(bl.List, r.setState(abi.RF_DONE, x.Pos()))
}
bl.List = append(bl.List, ret)
@@ -912,7 +1006,7 @@ func (r *rewriter) computeBranchNext() {
})
// Assign numbers to all the labels we observed.
- used := -2
+ used := -1 // returns use -1
for _, l := range labels {
used -= 3
r.branchNext[branch{syntax.Break, l}] = used
@@ -971,18 +1065,27 @@ func (r *rewriter) endLoop(loop *forLoop) {
block.List = append(block.List, r.declStmt)
}
- // declare the exitFlag here so it has proper scope and zeroing
+ // declare the state variable here so it has proper scope and initialization
if r.checkFuncMisuse() {
- exitFlagDecl := &syntax.DeclStmt{DeclList: []syntax.Decl{loop.exitFlagDecl}}
- block.List = append(block.List, exitFlagDecl)
+ stateVarDecl := &syntax.DeclStmt{DeclList: []syntax.Decl{loop.stateVarDecl}}
+ setPos(stateVarDecl, start)
+ block.List = append(block.List, stateVarDecl)
}
// iteratorFunc(bodyFunc)
block.List = append(block.List, call)
if r.checkFuncMisuse() {
- // iteratorFunc has exited, mark the exit flag for the body
- block.List = append(block.List, r.setExited())
+ // iteratorFunc has exited, check for swallowed panic, and set body state to abi.RF_EXHAUSTED
+ nif := &syntax.IfStmt{
+ Cond: r.cond(syntax.Eql, r.useObj(loop.stateVar), r.stateConst(abi.RF_PANIC)),
+ Then: &syntax.BlockStmt{
+ List: []syntax.Stmt{r.callPanic(start, r.stateConst(abi.RF_MISSING_PANIC))},
+ },
+ }
+ setPos(nif, end)
+ block.List = append(block.List, nif)
+ block.List = append(block.List, r.setState(abi.RF_EXHAUSTED, end))
}
block.List = append(block.List, checks...)
@@ -996,15 +1099,25 @@ func (r *rewriter) endLoop(loop *forLoop) {
r.rewritten[nfor] = block
}
-func (r *rewriter) setExited() *syntax.AssignStmt {
- return r.setExitedAt(len(r.forStack) - 1)
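+// cond returns a new bool-typed syntax.Operation comparing x and y with op.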
+func (r *rewriter) cond(op syntax.Operator, x, y syntax.Expr) *syntax.Operation {
+ cond := &syntax.Operation{Op: op, X: x, Y: y}
+ tv := syntax.TypeAndValue{Type: r.bool.Type()}
+ tv.SetIsValue()
+ cond.SetTypeInfo(tv)
+ return cond
+}
+
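+// setState returns the assignment of val to the innermost loop's #state
+// variable, positioned at pos.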
+func (r *rewriter) setState(val abi.RF_State, pos syntax.Pos) *syntax.AssignStmt {
+ ss := r.setStateAt(len(r.forStack)-1, val)
+ setPos(ss, pos)
+ return ss
}
-func (r *rewriter) setExitedAt(index int) *syntax.AssignStmt {
+func (r *rewriter) setStateAt(index int, stateVal abi.RF_State) *syntax.AssignStmt {
loop := r.forStack[index]
return &syntax.AssignStmt{
- Lhs: r.useVar(loop.exitFlag),
- Rhs: r.useVar(r.true),
+ Lhs: r.useObj(loop.stateVar),
+ Rhs: r.stateConst(stateVal),
}
}
@@ -1028,6 +1141,7 @@ func (r *rewriter) bodyFunc(body []syntax.Stmt, lhs []syntax.Expr, def bool, fty
Rbrace: end,
},
}
+ r.rangefuncBodyClosures[bodyFunc] = true
setPos(bodyFunc, start)
for i := 0; i < ftyp.Params().Len(); i++ {
@@ -1042,7 +1156,7 @@ func (r *rewriter) bodyFunc(body []syntax.Stmt, lhs []syntax.Expr, def bool, fty
paramVar = types2.NewVar(start, r.pkg, fmt.Sprintf("#p%d", 1+i), typ)
if i < len(lhs) {
x := lhs[i]
- as := &syntax.AssignStmt{Lhs: x, Rhs: r.useVar(paramVar)}
+ as := &syntax.AssignStmt{Lhs: x, Rhs: r.useObj(paramVar)}
as.SetPos(x.Pos())
setPos(as.Rhs, x.Pos())
bodyFunc.Body.List = append(bodyFunc.Body.List, as)
@@ -1063,14 +1177,18 @@ func (r *rewriter) bodyFunc(body []syntax.Stmt, lhs []syntax.Expr, def bool, fty
loop := r.forStack[len(r.forStack)-1]
if r.checkFuncMisuse() {
- bodyFunc.Body.List = append(bodyFunc.Body.List, r.assertNotExited(start, loop))
+ bodyFunc.Body.List = append(bodyFunc.Body.List, r.assertReady(start, loop))
+ bodyFunc.Body.List = append(bodyFunc.Body.List, r.setState(abi.RF_PANIC, start))
}
// Original loop body (already rewritten by editStmt during inspect).
bodyFunc.Body.List = append(bodyFunc.Body.List, body...)
- // return true to continue at end of loop body
- ret := &syntax.ReturnStmt{Results: r.useVar(r.true)}
+ // end of loop body, set state to abi.RF_READY and return true to continue iteration
+ if r.checkFuncMisuse() {
+ bodyFunc.Body.List = append(bodyFunc.Body.List, r.setState(abi.RF_READY, end))
+ }
+ ret := &syntax.ReturnStmt{Results: r.useObj(r.true)}
ret.SetPos(end)
bodyFunc.Body.List = append(bodyFunc.Body.List, ret)
@@ -1088,27 +1206,75 @@ func (r *rewriter) checks(loop *forLoop, pos syntax.Pos) []syntax.Stmt {
}
did[br] = true
doBranch := &syntax.BranchStmt{Tok: br.tok, Label: &syntax.Name{Value: br.label}}
- list = append(list, r.ifNext(syntax.Eql, r.branchNext[br], doBranch))
+ list = append(list, r.ifNext(syntax.Eql, r.branchNext[br], true, doBranch))
}
}
+
+ curLoop := loop.depth - 1
+ curLoopIndex := curLoop - 1
+
if len(r.forStack) == 1 {
- if loop.checkRetArgs {
- list = append(list, r.ifNext(syntax.Eql, -2, retStmt(r.useList(r.retVars))))
- }
if loop.checkRet {
- list = append(list, r.ifNext(syntax.Eql, -1, retStmt(nil)))
+ list = append(list, r.ifNext(syntax.Eql, -1, false, retStmt(nil)))
}
} else {
- if loop.checkRetArgs || loop.checkRet {
+
+ // Idealized check, implemented more simply for now.
+
+ // // N == depth of this loop, one less than the one just exited.
+ // if #next != 0 {
+ // if #next >= perLoopStep*N-1 { // this loop
+ // if #next >= perLoopStep*N+1 { // error checking
+ // runtime.panicrangestate(abi.RF_DONE)
+ // }
+ // rv := #next & 1 == 1 // code generates into #next&1
+ // #next = 0
+ // return rv
+ // }
+ // return false // or handle returns and gotos
+ // }
+
+ if loop.checkRet {
// Note: next < 0 also handles gotos handled by outer loops.
// We set checkRet in that case to trigger this check.
- list = append(list, r.ifNext(syntax.Lss, 0, retStmt(r.useVar(r.false))))
+ if r.checkFuncMisuse() {
+ list = append(list, r.ifNext(syntax.Lss, 0, false, r.setStateAt(curLoopIndex, abi.RF_DONE), retStmt(r.useObj(r.false))))
+ } else {
+ list = append(list, r.ifNext(syntax.Lss, 0, false, retStmt(r.useObj(r.false))))
+ }
}
- if loop.checkBreak {
- list = append(list, r.ifNext(syntax.Geq, perLoopStep, retStmt(r.useVar(r.false))))
+
+ depthStep := perLoopStep * (curLoop)
+
+ if r.checkFuncMisuse() {
+ list = append(list, r.ifNext(syntax.Gtr, depthStep, false, r.callPanic(pos, r.stateConst(abi.RF_DONE))))
+ } else {
+ list = append(list, r.ifNext(syntax.Gtr, depthStep, true))
}
- if loop.checkContinue {
- list = append(list, r.ifNext(syntax.Eql, perLoopStep-1, retStmt(r.useVar(r.true))))
+
+ if r.checkFuncMisuse() {
+ if loop.checkContinue {
+ list = append(list, r.ifNext(syntax.Eql, depthStep-1, true, r.setStateAt(curLoopIndex, abi.RF_READY), retStmt(r.useObj(r.true))))
+ }
+
+ if loop.checkBreak {
+ list = append(list, r.ifNext(syntax.Eql, depthStep, true, r.setStateAt(curLoopIndex, abi.RF_DONE), retStmt(r.useObj(r.false))))
+ }
+
+ if loop.checkContinue || loop.checkBreak {
+ list = append(list, r.ifNext(syntax.Gtr, 0, false, r.setStateAt(curLoopIndex, abi.RF_DONE), retStmt(r.useObj(r.false))))
+ }
+
+ } else {
+ if loop.checkContinue {
+ list = append(list, r.ifNext(syntax.Eql, depthStep-1, true, retStmt(r.useObj(r.true))))
+ }
+ if loop.checkBreak {
+ list = append(list, r.ifNext(syntax.Eql, depthStep, true, retStmt(r.useObj(r.false))))
+ }
+ if loop.checkContinue || loop.checkBreak {
+ list = append(list, r.ifNext(syntax.Gtr, 0, false, retStmt(r.useObj(r.false))))
+ }
}
}
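Spelled out, the #next encoding these checks decode is the following (a sketch; perLoopStep is 2 and N is the depth of this loop, as in the idealized comment above):

	// #next == -1         return from the enclosing function (no results)
	// #next <  0          a return already being handled by an outer loop
	// #next == 2*N - 1    continue the loop at depth N (return true)
	// #next == 2*N        break the loop at depth N (return false)
	// #next >  2*N        targets a loop deeper than any live one: misuse,
	//                     so panicrangestate(abi.RF_DONE) when checks are on
	// other #next > 0     targets an outer loop: return false to unwind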
@@ -1125,38 +1291,25 @@ func retStmt(results syntax.Expr) *syntax.ReturnStmt {
// ifNext returns the statement:
//
-// if #next op c { adjust; then }
-//
-// When op is >=, adjust is #next -= c.
-// When op is == and c is not -1 or -2, adjust is #next = 0.
-// Otherwise adjust is omitted.
-func (r *rewriter) ifNext(op syntax.Operator, c int, then syntax.Stmt) syntax.Stmt {
- nif := &syntax.IfStmt{
- Cond: &syntax.Operation{Op: op, X: r.next(), Y: r.intConst(c)},
- Then: &syntax.BlockStmt{
- List: []syntax.Stmt{then},
- },
- }
- tv := syntax.TypeAndValue{Type: r.bool.Type()}
- tv.SetIsValue()
- nif.Cond.SetTypeInfo(tv)
-
- if op == syntax.Geq {
- sub := &syntax.AssignStmt{
- Op: syntax.Sub,
- Lhs: r.next(),
- Rhs: r.intConst(c),
- }
- nif.Then.List = []syntax.Stmt{sub, then}
- }
- if op == syntax.Eql && c != -1 && c != -2 {
+// if #next op c { [#next = 0;] thens... }
+func (r *rewriter) ifNext(op syntax.Operator, c int, zeroNext bool, thens ...syntax.Stmt) syntax.Stmt {
+ var thenList []syntax.Stmt
+ if zeroNext {
clr := &syntax.AssignStmt{
Lhs: r.next(),
Rhs: r.intConst(0),
}
- nif.Then.List = []syntax.Stmt{clr, then}
+ thenList = append(thenList, clr)
+ }
+ thenList = append(thenList, thens...)
+ nif := &syntax.IfStmt{
+ Cond: r.cond(op, r.next(), r.intConst(c)),
+ Then: &syntax.BlockStmt{
+ List: thenList,
+ },
}
-
return nif
}
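As a concrete sketch of the new signature: a call like r.ifNext(syntax.Eql, 5, true, stmt) now produces

	if #next == 5 {
		#next = 0
		stmt
	}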
@@ -1167,35 +1320,37 @@ func setValueType(x syntax.Expr, typ syntax.Type) {
x.SetTypeInfo(tv)
}
-// assertNotExited returns the statement:
+// assertReady returns the statement:
//
-// if #exitK { runtime.panicrangeexit() }
+// if #stateK != abi.RF_READY { runtime.panicrangestate(#stateK) }
//
-// where #exitK is the exit guard for loop.
-func (r *rewriter) assertNotExited(start syntax.Pos, loop *forLoop) syntax.Stmt {
- callPanicExpr := &syntax.CallExpr{
- Fun: runtimeSym(r.info, "panicrangeexit"),
- }
- setValueType(callPanicExpr, nil) // no result type
-
- callPanic := &syntax.ExprStmt{X: callPanicExpr}
-
+// where #stateK is the state variable for loop.
+func (r *rewriter) assertReady(start syntax.Pos, loop *forLoop) syntax.Stmt {
nif := &syntax.IfStmt{
- Cond: r.useVar(loop.exitFlag),
+ Cond: r.cond(syntax.Neq, r.useObj(loop.stateVar), r.stateConst(abi.RF_READY)),
Then: &syntax.BlockStmt{
- List: []syntax.Stmt{callPanic},
+ List: []syntax.Stmt{r.callPanic(start, r.useObj(loop.stateVar))},
},
}
setPos(nif, start)
return nif
}
+func (r *rewriter) callPanic(start syntax.Pos, arg syntax.Expr) syntax.Stmt {
+ callPanicExpr := &syntax.CallExpr{
+ Fun: runtimeSym(r.info, "panicrangestate"),
+ ArgList: []syntax.Expr{arg},
+ }
+ setValueType(callPanicExpr, nil) // no result type
+ return &syntax.ExprStmt{X: callPanicExpr}
+}
+
// next returns a reference to the #next variable.
func (r *rewriter) next() *syntax.Name {
if r.nextVar == nil {
r.nextVar = r.declVar("#next", r.int.Type(), nil)
}
- return r.useVar(r.nextVar)
+ return r.useObj(r.nextVar)
}
// forRangeFunc checks whether n is a range-over-func.
@@ -1229,8 +1384,12 @@ func (r *rewriter) intConst(c int) *syntax.BasicLit {
return lit
}
-// useVar returns syntax for a reference to decl, which should be its declaration.
-func (r *rewriter) useVar(obj types2.Object) *syntax.Name {
+// stateConst returns syntax for an integer constant carrying the range-func state s.
+func (r *rewriter) stateConst(s abi.RF_State) *syntax.BasicLit {
+ return r.intConst(int(s))
+}
+
+// useObj returns syntax for a reference to the declared object obj.
+func (r *rewriter) useObj(obj types2.Object) *syntax.Name {
n := syntax.NewName(nopos, obj.Name())
tv := syntax.TypeAndValue{Type: obj.Type()}
tv.SetIsValue()
@@ -1243,7 +1402,7 @@ func (r *rewriter) useVar(obj types2.Object) *syntax.Name {
func (r *rewriter) useList(vars []types2.Object) syntax.Expr {
var new []syntax.Expr
for _, obj := range vars {
- new = append(new, r.useVar(obj))
+ new = append(new, r.useObj(obj))
}
if len(new) == 1 {
return new[0]
@@ -1251,18 +1410,28 @@ func (r *rewriter) useList(vars []types2.Object) syntax.Expr {
return &syntax.ListExpr{ElemList: new}
}
+// makeVarName creates a new variable with the given position, name, and type,
+// records its definition, and returns both the variable and a Name node for it.
+func (r *rewriter) makeVarName(pos syntax.Pos, name string, typ types2.Type) (*types2.Var, *syntax.Name) {
+ obj := types2.NewVar(pos, r.pkg, name, typ)
+ n := syntax.NewName(pos, name)
+ tv := syntax.TypeAndValue{Type: typ}
+ tv.SetIsValue()
+ n.SetTypeInfo(tv)
+ r.info.Defs[n] = obj
+ return obj, n
+}
+
+// generateParamName assigns a generated name to the i'th result in results
+// and records the new name's definition in the type info.
+func (r *rewriter) generateParamName(results []*syntax.Field, i int) {
+ obj, n := r.sig.RenameResult(results, i)
+ r.info.Defs[n] = obj
+}
+
// declVar declares a variable with the given name, type, and initializer value.
func (r *rewriter) declVar(name string, typ types2.Type, init syntax.Expr) *types2.Var {
if r.declStmt == nil {
r.declStmt = &syntax.DeclStmt{}
}
stmt := r.declStmt
- obj := types2.NewVar(stmt.Pos(), r.pkg, name, typ)
- n := syntax.NewName(stmt.Pos(), name)
- tv := syntax.TypeAndValue{Type: typ}
- tv.SetIsValue()
- n.SetTypeInfo(tv)
- r.info.Defs[n] = obj
+ obj, n := r.makeVarName(stmt.Pos(), name, typ)
stmt.DeclList = append(stmt.DeclList, &syntax.VarDecl{
NameList: []*syntax.Name{n},
// Note: Type is ignored
@@ -1271,26 +1440,19 @@ func (r *rewriter) declVar(name string, typ types2.Type, init syntax.Expr) *type
return obj
}
-// declType declares a type with the given name and type.
-// This is more like "type name = typ" than "type name typ".
-func declType(pos syntax.Pos, name string, typ types2.Type) *syntax.Name {
- n := syntax.NewName(pos, name)
- n.SetTypeInfo(syntax.TypeAndValue{Type: typ})
- return n
-}
-
// runtimePkg is a fake runtime package that contains what we need to refer to in package runtime.
var runtimePkg = func() *types2.Package {
var nopos syntax.Pos
pkg := types2.NewPackage("runtime", "runtime")
anyType := types2.Universe.Lookup("any").Type()
+ intType := types2.Universe.Lookup("int").Type()
// func deferrangefunc() unsafe.Pointer
obj := types2.NewFunc(nopos, pkg, "deferrangefunc", types2.NewSignatureType(nil, nil, nil, nil, types2.NewTuple(types2.NewParam(nopos, pkg, "extra", anyType)), false))
pkg.Scope().Insert(obj)
- // func panicrangeexit()
- obj = types2.NewFunc(nopos, pkg, "panicrangeexit", types2.NewSignatureType(nil, nil, nil, nil, nil, false))
+ // func panicrangestate(state int)
+ obj = types2.NewFunc(nopos, pkg, "panicrangestate", types2.NewSignatureType(nil, nil, nil, types2.NewTuple(types2.NewParam(nopos, pkg, "state", intType)), nil, false))
pkg.Scope().Insert(obj)
return pkg
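At the source level, these runtime hooks catch iterator misuse; a sketch (runnable under Go 1.23 range-over-func; the exact panic message comes from runtime.panicrangestate):

	package main

	import "fmt"

	func leaky(yield func(int) bool) {
		yield(1)
		yield(2) // if the loop body returned false for 1, this call panics
	}

	func main() {
		for x := range leaky {
			fmt.Println(x)
			break // the state becomes abi.RF_DONE; leaky's second yield panics
		}
	}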
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64.rules b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
index 18a6586fb0..1b588edb04 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
@@ -579,11 +579,11 @@
(AtomicExchange(32|64)Variant ...) => (LoweredAtomicExchange(32|64)Variant ...)
(AtomicCompareAndSwap(32|64)Variant ...) => (LoweredAtomicCas(32|64)Variant ...)
-// Currently the updated value is not used, but we need a register to temporarily hold it.
-(AtomicAnd(8|32) ptr val mem) => (Select1 (LoweredAtomicAnd(8|32) ptr val mem))
-(AtomicOr(8|32) ptr val mem) => (Select1 (LoweredAtomicOr(8|32) ptr val mem))
-(AtomicAnd(8|32)Variant ptr val mem) => (Select1 (LoweredAtomicAnd(8|32)Variant ptr val mem))
-(AtomicOr(8|32)Variant ptr val mem) => (Select1 (LoweredAtomicOr(8|32)Variant ptr val mem))
+// Return old contents.
+(AtomicAnd(64|32|8) ...) => (LoweredAtomicAnd(64|32|8) ...)
+(AtomicOr(64|32|8) ...) => (LoweredAtomicOr(64|32|8) ...)
+(AtomicAnd(64|32|8)Variant ...) => (LoweredAtomicAnd(64|32|8)Variant ...)
+(AtomicOr(64|32|8)Variant ...) => (LoweredAtomicOr(64|32|8)Variant ...)
// Write barrier.
(WB ...) => (LoweredWB ...)
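These lowerings back atomic And/Or operations that now report the old value; a usage sketch, assuming the And/Or methods added to sync/atomic in Go 1.23:

	var v atomic.Uint32
	v.Store(0b1100)
	old := v.And(0b1010) // old == 0b1100, v now holds 0b1000
	old = v.Or(0b0001)   // old == 0b1000, v now holds 0b1001
	_ = old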
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
index 5a98aa0c54..fa18b674cc 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
@@ -707,29 +707,31 @@ func init() {
{name: "LoweredAtomicCas32Variant", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
// atomic and/or.
- // *arg0 &= (|=) arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
+ // *arg0 &= (|=) arg1. arg2=mem. returns <old content of *arg0, memory>. auxint must be zero.
// LDAXR (Rarg0), Rout
- // AND/OR Rarg1, Rout
- // STLXR Rout, (Rarg0), Rtmp
+ // AND/OR Rarg1, Rout, tempReg
+ // STLXR tempReg, (Rarg0), Rtmp
// CBNZ Rtmp, -3(PC)
- {name: "LoweredAtomicAnd8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
- {name: "LoweredAtomicAnd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
- {name: "LoweredAtomicOr8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
- {name: "LoweredAtomicOr32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAnd8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true, needIntTemp: true},
+ {name: "LoweredAtomicOr8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true, needIntTemp: true},
+ {name: "LoweredAtomicAnd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true, needIntTemp: true},
+ {name: "LoweredAtomicOr64", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true, needIntTemp: true},
+ {name: "LoweredAtomicAnd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true, needIntTemp: true},
+ {name: "LoweredAtomicOr32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true, needIntTemp: true},
// atomic and/or variant.
- // *arg0 &= (|=) arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
+ // *arg0 &= (|=) arg1. arg2=mem. returns <old content of *arg0, memory>. auxint must be zero.
// AND:
// MNV Rarg1, Rtemp
// LDANDALB Rtemp, (Rarg0), Rout
- // AND Rarg1, Rout
// OR:
// LDORALB Rarg1, (Rarg0), Rout
- // ORR Rarg1, Rout
- {name: "LoweredAtomicAnd8Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
- {name: "LoweredAtomicAnd32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
- {name: "LoweredAtomicOr8Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true},
- {name: "LoweredAtomicOr32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAnd8Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr8Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAnd64Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr64Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAnd32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
// LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed
// It saves all GP registers if necessary,
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64.rules b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
index 7518119147..d89cc59714 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
@@ -137,27 +137,28 @@
(ROTL x (MOVDconst [c])) => (ROTLconst x [c&63])
// Combine rotate and mask operations
-(Select0 (ANDCCconst [m] (ROTLWconst [r] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+(ANDconst [m] (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
(AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
-(Select0 (ANDCCconst [m] (ROTLW x r))) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+(ANDconst [m] (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
(AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
// Note, any rotated word bitmask is still a valid word bitmask.
(ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
-(ROTLWconst [r] (Select0 (ANDCCconst [m] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+(ROTLWconst [r] (ANDconst [m] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
-(Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
-(Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
+(ANDconst [m] (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
+(ANDconst [m] (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
-(SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
-(SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+(SRWconst (ANDconst [m] x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
+(SRWconst (ANDconst [m] x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
// Merge shift right + shift left and clear left (e.g for a table lookup)
(CLRLSLDI [c] (SRWconst [s] x)) && mergePPC64ClrlsldiSrw(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
+(CLRLSLDI [c] (SRDconst [s] x)) && mergePPC64ClrlsldiSrd(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrd(int64(c),s)] x)
(SLDconst [l] (SRWconst [r] x)) && mergePPC64SldiSrw(l,r) != 0 => (RLWINM [mergePPC64SldiSrw(l,r)] x)
// The following reduction shows up frequently too, e.g. b[(x>>14)&0xFF]
(CLRLSLDI [c] i:(RLWINM [s] x)) && mergePPC64ClrlsldiRlwinm(c,s) != 0 => (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
@@ -200,38 +201,38 @@
((Rsh64U|Lsh64)x64 <t> x y) => (ISEL [0] (S(R|L)D <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
((Rsh64U|Lsh64)x32 <t> x y) => (ISEL [0] (S(R|L)D <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
-((Rsh64U|Lsh64)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
-((Rsh64U|Lsh64)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+((Rsh64U|Lsh64)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFC0] y)))
+((Rsh64U|Lsh64)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00C0] y)))
(Rsh64x(64|32) <t> x y) => (ISEL [0] (SRAD <t> x y) (SRADconst <t> x [63]) (CMP(U|WU)const y [64]))
-(Rsh64x16 <t> x y) => (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
-(Rsh64x8 <t> x y) => (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+(Rsh64x16 <t> x y) => (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (CMPconst [0] (ANDconst [0xFFC0] y)))
+(Rsh64x8 <t> x y) => (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (CMPconst [0] (ANDconst [0x00C0] y)))
((Rsh32U|Lsh32)x64 <t> x y) => (ISEL [0] (S(R|L)W <t> x y) (MOVDconst [0]) (CMPUconst y [32]))
((Rsh32U|Lsh32)x32 <t> x y) => (ISEL [0] (S(R|L)W <t> x y) (MOVDconst [0]) (CMPWUconst y [32]))
-((Rsh32U|Lsh32)x16 <t> x y) => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
-((Rsh32U|Lsh32)x8 <t> x y) => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+((Rsh32U|Lsh32)x16 <t> x y) => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFE0] y)))
+((Rsh32U|Lsh32)x8 <t> x y) => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00E0] y)))
(Rsh32x(64|32) <t> x y) => (ISEL [0] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMP(U|WU)const y [32]))
-(Rsh32x16 <t> x y) => (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
-(Rsh32x8 <t> x y) => (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+(Rsh32x16 <t> x y) => (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMPconst [0] (ANDconst [0xFFE0] y)))
+(Rsh32x8 <t> x y) => (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMPconst [0] (ANDconst [0x00E0] y)))
((Rsh16U|Lsh16)x64 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [16]))
((Rsh16U|Lsh16)x32 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [16]))
-((Rsh16U|Lsh16)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
-((Rsh16U|Lsh16)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+((Rsh16U|Lsh16)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF0] y)))
+((Rsh16U|Lsh16)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F0] y)))
(Rsh16x(64|32) <t> x y) => (ISEL [0] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMP(U|WU)const y [16]))
-(Rsh16x16 <t> x y) => (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
-(Rsh16x8 <t> x y) => (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+(Rsh16x16 <t> x y) => (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0xFFF0] y)))
+(Rsh16x8 <t> x y) => (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0x00F0] y)))
((Rsh8U|Lsh8)x64 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [8]))
((Rsh8U|Lsh8)x32 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [8]))
-((Rsh8U|Lsh8)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
-((Rsh8U|Lsh8)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+((Rsh8U|Lsh8)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF8] y)))
+((Rsh8U|Lsh8)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F8] y)))
(Rsh8x(64|32) <t> x y) => (ISEL [0] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMP(U|WU)const y [8]))
-(Rsh8x16 <t> x y) => (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
-(Rsh8x8 <t> x y) => (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+(Rsh8x16 <t> x y) => (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0xFFF8] y)))
+(Rsh8x8 <t> x y) => (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0x00F8] y)))
// Catch bounded shifts in situations like foo<<uint(shift&63) which might not be caught by the prove pass.
-(CMP(U|WU)const [d] (Select0 (ANDCCconst z [c]))) && uint64(d) > uint64(c) => (FlagLT)
+(CMP(U|WU)const [d] (ANDconst z [c])) && uint64(d) > uint64(c) => (FlagLT)
(ORN x (MOVDconst [-1])) => x
@@ -281,7 +282,7 @@
(OR x (NOR y y)) => (ORN x y)
// Lowering comparisons
-(EqB x y) => (Select0 <typ.Int> (ANDCCconst [1] (EQV x y)))
+(EqB x y) => (ANDconst [1] (EQV x y))
// Sign extension dependence on operand sign sets up for sign/zero-extension elision later
(Eq(8|16) x y) && x.Type.IsSigned() && y.Type.IsSigned() => (Equal (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
(Eq(8|16) x y) => (Equal (CMPW (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
@@ -321,11 +322,11 @@
(If (FGreaterThan cc) yes no) => (FGT cc yes no)
(If (FGreaterEqual cc) yes no) => (FGE cc yes no)
-(If cond yes no) => (NE (CMPWconst [0] (Select0 <typ.UInt32> (ANDCCconst [1] cond))) yes no)
+(If cond yes no) => (NE (CMPconst [0] (ANDconst [1] cond)) yes no)
// Absorb boolean tests into block
-(NE (CMPWconst [0] (Select0 (ANDCCconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc)))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no)
-(NE (CMPWconst [0] (Select0 (ANDCCconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc)))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no)
+(NE (CMPconst [0] (ANDconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no)
+(NE (CMPconst [0] (ANDconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no)
// absorb flag constants into branches
(EQ (FlagEQ) yes no) => (First yes no)
@@ -407,8 +408,6 @@
// Elide compares of bit tests
-((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> z) yes no)
-((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> z) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ORCC x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (XORCC x y)) yes no)
@@ -416,9 +415,9 @@
(CondSelect x y (SETBC [a] cmp)) => (ISEL [a] x y cmp)
(CondSelect x y (SETBCR [a] cmp)) => (ISEL [a+4] x y cmp)
// Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably.
-(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
+(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (CMPconst [0] (ANDconst [1] bool)))
// Fold any CR -> GPR -> CR transfers when applying the above rule.
-(ISEL [6] x y (Select1 (ANDCCconst [1] (SETBC [c] cmp)))) => (ISEL [c] x y cmp)
+(ISEL [6] x y (CMPconst [0] (ANDconst [1] (SETBC [c] cmp)))) => (ISEL [c] x y cmp)
(ISEL [6] x y ((CMP|CMPW)const [0] (SETBC [c] cmp))) => (ISEL [c] x y cmp)
(ISEL [6] x y ((CMP|CMPW)const [0] (SETBCR [c] cmp))) => (ISEL [c+4] x y cmp)
@@ -562,45 +561,44 @@
// Discover consts
(AND x (MOVDconst [-1])) => x
-(AND x (MOVDconst [c])) && isU16Bit(c) => (Select0 (ANDCCconst [c] x))
+(AND x (MOVDconst [c])) && isU16Bit(c) => (ANDconst [c] x)
(XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x)
(OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x)
// Simplify consts
-(ANDCCconst [c] (Select0 (ANDCCconst [d] x))) => (ANDCCconst [c&d] x)
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
-(Select0 (ANDCCconst [-1] x)) => x
-(Select0 (ANDCCconst [0] _)) => (MOVDconst [0])
-(Select1 (ANDCCconst [0] _)) => (FlagEQ)
+(ANDconst [-1] x) => x
+(ANDconst [0] _) => (MOVDconst [0])
(XORconst [0] x) => x
(ORconst [-1] _) => (MOVDconst [-1])
(ORconst [0] x) => x
// zero-extend of small and => small and
-(MOVBZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFF => y
-(MOVHZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y
-(MOVWZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFFFFFF => y
+(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF => y
+(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y
+(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF => y
(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF => y
// sign extend of small-positive and => small-positive-and
-(MOVBreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7F => y
-(MOVHreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7FFF => y
-(MOVWreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y // 0xFFFF is largest immediate constant, when regarded as 32-bit is > 0
+(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F => y
+(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF => y
+(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y // 0xFFFF is the largest immediate constant; regarded as a 32-bit value it is > 0
(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF => y
// small and of zero-extend => either zero-extend or small and
-(Select0 (ANDCCconst [c] y:(MOVBZreg _))) && c&0xFF == 0xFF => y
-(Select0 (ANDCCconst [0xFF] (MOVBreg x))) => (MOVBZreg x)
-(Select0 (ANDCCconst [c] y:(MOVHZreg _))) && c&0xFFFF == 0xFFFF => y
-(Select0 (ANDCCconst [0xFFFF] (MOVHreg x))) => (MOVHZreg x)
+(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF => y
+(ANDconst [0xFF] (MOVBreg x)) => (MOVBZreg x)
+(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF => y
+(ANDconst [0xFFFF] (MOVHreg x)) => (MOVHZreg x)
(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF => y
(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x)
// normal case
-(Select0 (ANDCCconst [c] (MOVBZreg x))) => (Select0 (ANDCCconst [c&0xFF] x))
-(Select0 (ANDCCconst [c] (MOVHZreg x))) => (Select0 (ANDCCconst [c&0xFFFF] x))
-(Select0 (ANDCCconst [c] (MOVWZreg x))) => (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
+(ANDconst [c] (MOVBZreg x)) => (ANDconst [c&0xFF] x)
+(ANDconst [c] (MOVHZreg x)) => (ANDconst [c&0xFFFF] x)
+(ANDconst [c] (MOVWZreg x)) => (ANDconst [c&0xFFFFFFFF] x)
// Eliminate unnecessary sign/zero extend following right shift
(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x))
@@ -644,6 +642,19 @@
(MOVBreg (MOVBZreg x)) => (MOVBreg x)
(MOVBZreg (MOVBreg x)) => (MOVBZreg x)
+// Catch any remaining rotate+shift cases
+(MOVBZreg (SRWconst x [s])) && mergePPC64AndSrwi(0xFF,s) != 0 => (RLWINM [mergePPC64AndSrwi(0xFF,s)] x)
+(MOVBZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFF,r)] y)
+(MOVHZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFFFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFFFF,r)] y)
+(MOVWZreg (RLWINM [r] y)) && mergePPC64MovwzregRlwinm(r) != 0 => (RLWINM [mergePPC64MovwzregRlwinm(r)] y)
+(ANDconst [m] (RLWINM [r] y)) && mergePPC64AndRlwinm(uint32(m),r) != 0 => (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y)
+(SLDconst [s] (RLWINM [r] y)) && mergePPC64SldiRlwinm(s,r) != 0 => (RLWINM [mergePPC64SldiRlwinm(s,r)] y)
+(RLWINM [r] (MOVHZreg u)) && mergePPC64RlwinmAnd(r,0xFFFF) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,0xFFFF)] u)
+(RLWINM [r] (ANDconst [a] u)) && mergePPC64RlwinmAnd(r,uint32(a)) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,uint32(a))] u)
+// SLWconst is a special case of RLWNM which always zero-extends the result.
+(SLWconst [s] (MOVWZreg w)) => (SLWconst [s] w)
+(MOVWZreg w:(SLWconst u)) => w
+
// H - there are more combinations than these
(MOVHZreg y:(MOV(H|B)Zreg _)) => y // repeat
@@ -668,10 +679,10 @@
(MOVBZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
(MOVBZreg ((OR|XOR|AND) <t> x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
-(MOV(B|H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) => z
+(MOV(B|H|W)Zreg z:(ANDconst [c] (MOVBZload ptr x))) => z
(MOV(B|H|W)Zreg z:(AND y (MOV(B|H|W)Zload ptr x))) => z
-(MOV(H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x)))) => z
-(MOVWZreg z:(Select0 (ANDCCconst [c] (MOVWZload ptr x)))) => z
+(MOV(H|W)Zreg z:(ANDconst [c] (MOVHZload ptr x))) => z
+(MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) => z
// Arithmetic constant ops
@@ -804,7 +815,7 @@
(AtomicOr(8|32) ...) => (LoweredAtomicOr(8|32) ...)
(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
-(Select0 (ANDCCconst [1] z:(SRADconst [63] x))) && z.Uses == 1 => (SRDconst [63] x)
+(ANDconst [1] z:(SRADconst [63] x)) && z.Uses == 1 => (SRDconst [63] x)
// Note that MOV??reg returns a 64-bit int, x is not necessarily that wide
// This may interact with other patterns in the future. (Compare with arm64)
@@ -840,11 +851,11 @@
(SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
(SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)
-(SLDconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+(SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
(SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
(SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
(SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
-(SLWconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
(SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
// special case for power9
(SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && buildcfg.GOPPC64 >= 9 => (EXTSWSLconst [c] x)
@@ -880,6 +891,10 @@
// Canonicalize the order of arguments to comparisons - helps with CSE.
((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
+// n is always a zero-extended uint16 value, so n & z is always a non-negative 32- or 64-bit value.
+// Rewrite to a CMPconst against int64(0) so the latelower pass can lower it into ANDCCconst.
+(CMP(W|U|WU)const [0] a:(ANDconst [n] z)) => (CMPconst [0] a)
+
// SETBC auxInt values 0=LT 1=GT 2=EQ Crbit==1 ? 1 : 0
// SETBCR auxInt values 0=LT 1=GT 2=EQ Crbit==1 ? 0 : 1
(Equal cmp) => (SETBC [2] cmp)
@@ -936,8 +951,6 @@
(ISEL [4] x _ (Flag(EQ|GT))) => x
(ISEL [4] _ y (FlagLT)) => y
-(ISEL [2] x y ((CMP|CMPW)const [0] (Select0 (ANDCCconst [n] z)))) => (ISEL [2] x y (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
-(ISEL [6] x y ((CMP|CMPW)const [0] (Select0 (ANDCCconst [n] z)))) => (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
(SETBC [n] (InvertFlags bool)) => (SETBCR [n] bool)
(SETBCR [n] (InvertFlags bool)) => (SETBC [n] bool)
@@ -947,11 +960,8 @@
(XORconst [1] (SETBCR [n] cmp)) => (SETBC [n] cmp)
(XORconst [1] (SETBC [n] cmp)) => (SETBCR [n] cmp)
-(SETBC [2] ((CMP|CMPW)const [0] (Select0 (ANDCCconst [1] z)))) => (XORconst [1] (Select0 <typ.UInt64> (ANDCCconst [1] z )))
-(SETBCR [2] ((CMP|CMPW)const [0] (Select0 (ANDCCconst [1] z)))) => (Select0 <typ.UInt64> (ANDCCconst [1] z ))
-
-(SETBC [2] (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) => (SETBC [2] (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
-(SETBCR [2] (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) => (SETBCR [2] (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
+(SETBC [2] (CMPconst [0] a:(ANDconst [1] _))) => (XORconst [1] a)
+(SETBCR [2] (CMPconst [0] a:(ANDconst [1] _))) => a
// Only CMPconst for these in case AND|OR|XOR result is > 32 bits
(SETBC [2] (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (SETBC [2] (Select1 <types.TypeFlags> (ANDCC y z )))
@@ -964,7 +974,7 @@
(SETBCR [2] (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (SETBCR [2] (Select1 <types.TypeFlags> (XORCC y z )))
// A particular pattern seen in cgo code:
-(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (Select0 (ANDCCconst [c&0xFF] x))
+(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (ANDconst [c&0xFF] x)
// floating point negative abs
(FNEG (F(ABS|NABS) x)) => (F(NABS|ABS) x)
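A sketch of the source pattern the rotate-and-mask rules above target (the b[(x>>14)&0xFF] idiom noted in the rules):

	// On ppc64, the SRWconst followed by ANDconst folds into one RLWINM
	// rotate-and-mask instruction.
	func lookup(b *[256]byte, x uint32) byte {
		return b[(x>>14)&0xFF]
	}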
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
index 7f0ee9ab91..799881a8cd 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
@@ -248,11 +248,12 @@ func init() {
{name: "ROTLWconst", argLength: 1, reg: gp11, asm: "ROTLW", aux: "Int64"}, // uint32(arg0) rotate left by auxInt bits
{name: "EXTSWSLconst", argLength: 1, reg: gp11, asm: "EXTSWSLI", aux: "Int64"},
- {name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux
- {name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux
- {name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above
- {name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int64"}, // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63.
- {name: "RLDICR", argLength: 1, reg: gp11, asm: "RLDICR", aux: "Int64"}, // Likewise, but only ME and SH are valid. MB is always 0.
+ {name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux
+ {name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux
+ {name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above
+ {name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int64"}, // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63.
+ {name: "RLDICLCC", argLength: 1, reg: gp11, asm: "RLDICLCC", aux: "Int64", typ: "(Int, Flags)"}, // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63. Sets CC.
+ {name: "RLDICR", argLength: 1, reg: gp11, asm: "RLDICR", aux: "Int64"}, // Likewise, but only ME and SH are valid. MB is always 0.
{name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD"}, // count leading zeros
{name: "CNTLZDCC", argLength: 1, reg: gp11, asm: "CNTLZDCC", typ: "(Int, Flags)"}, // count leading zeros, sets CC
@@ -323,9 +324,10 @@ func init() {
{name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"}, // -abs(arg0), float64
{name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"}, // copysign arg0 -> arg1, float64
- {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux
- {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux
- {name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always.
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux
+ {name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always.
+ {name: "ANDconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, clobberFlags: true, asm: "ANDCC", aux: "Int64", typ: "Int"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always.
{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"}, // sign extend int8 to int64
{name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules b/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules
index 2eecf94300..7aa8f41e78 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules
@@ -18,11 +18,8 @@
(SETBC [1] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [1] (MOVDconst [1]) cmp)
(SETBCR [1] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [5] (MOVDconst [1]) cmp)
-// Avoid using ANDCCconst if the value for CR0 is not needed, since ANDCCconst
-// always sets it.
-(Select0 z:(ANDCCconst [m] x)) && z.Uses == 1 && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] x)
// The upper bits of smaller-than-register values are undefined. Take advantage of that.
-(AND <t> x:(MOVDconst [m]) n) && t.Size() <= 2 => (Select0 (ANDCCconst [int64(int16(m))] n))
+(AND <t> x:(MOVDconst [m]) n) && t.Size() <= 2 => (ANDconst [int64(int16(m))] n)
// Convert simple bit masks to an equivalent rldic[lr] if possible.
(AND x:(MOVDconst [m]) n) && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] n)
@@ -47,9 +44,17 @@
// Note: to minimize potentially expensive regeneration of CC opcodes during the flagalloc pass, only rewrite if
// both ops are in the same block.
(CMPconst [0] z:((ADD|AND|ANDN|OR|SUB|NOR|XOR) x y)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
-(CMPconst [0] z:((NEG|CNTLZD) x)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
+(CMPconst [0] z:((NEG|CNTLZD|RLDICL) x)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
// Note: ADDCCconst only assembles to 1 instruction for int16 constants.
(CMPconst [0] z:(ADDconst [c] x)) && int64(int16(c)) == c && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
+(CMPconst [0] z:(ANDconst [c] x)) && int64(uint16(c)) == c && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
// And finally, fixup the flag user.
(CMPconst <t> [0] (Select0 z:((ADD|AND|ANDN|OR|SUB|NOR|XOR)CC x y))) => (Select1 <t> z)
-(CMPconst <t> [0] (Select0 z:((ADDCCconst|NEGCC|CNTLZDCC) y))) => (Select1 <t> z)
+(CMPconst <t> [0] (Select0 z:((ADDCCconst|ANDCCconst|NEGCC|CNTLZDCC|RLDICLCC) y))) => (Select1 <t> z)
+
+// After trying to convert ANDconst to ANDCCconst above, if the CC result is not needed, try to avoid using
+// ANDconst, which clobbers CC.
+(ANDconst [m] x) && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] x)
+
+// Likewise, try converting RLDICLCC back to ANDCCconst, as it is faster.
+(RLDICLCC [a] x) && convertPPC64RldiclAndccconst(a) != 0 => (ANDCCconst [convertPPC64RldiclAndccconst(a)] x)
diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules
index 70bac217fa..61a2602af8 100644
--- a/src/cmd/compile/internal/ssa/_gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/_gen/generic.rules
@@ -734,8 +734,8 @@
=> x
// Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
- (Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) => (Const64F [math.Float64frombits(uint64(x))])
- (Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) => (Const32F [math.Float32frombits(uint32(x))])
+(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) => (Const64F [math.Float64frombits(uint64(x))])
+(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) => (Const32F [math.Float32frombits(uint32(x))])
(Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) => (Const64 [int64(math.Float64bits(x))])
(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) => (Const32 [int32(math.Float32bits(x))])
diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go
index 637e7b617c..6805408b46 100644
--- a/src/cmd/compile/internal/ssa/_gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go
@@ -609,25 +609,31 @@ var genericOps = []opData{
{name: "AtomicCompareAndSwap32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
{name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
{name: "AtomicCompareAndSwapRel32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Lock release, reports whether store happens and new memory.
- {name: "AtomicAnd8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
- {name: "AtomicAnd32", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
- {name: "AtomicOr8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
- {name: "AtomicOr32", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+ {name: "AtomicAnd8", argLength: 3, typ: "(Uint8, Mem)", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicOr8", argLength: 3, typ: "(Uint8, Mem)", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicAnd64", argLength: 3, typ: "(Uint64, Mem)", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicAnd32", argLength: 3, typ: "(Uint32, Mem)", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicOr64", argLength: 3, typ: "(Uint64, Mem)", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicOr32", argLength: 3, typ: "(Uint32, Mem)", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns old contents of *arg0 and new memory.
// Atomic operation variants
// These variants have the same semantics as above atomic operations.
// But they are used for generating more efficient code on certain modern machines, with run-time CPU feature detection.
- // Currently, they are used on ARM64 only.
+ // On ARM64, these are used when the LSE hardware feature is available (either known at compile time or detected at runtime). If LSE is not available,
+ // then the basic atomic operations are used instead.
+ // These are not currently used on any other platform.
{name: "AtomicAdd32Variant", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
{name: "AtomicAdd64Variant", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
{name: "AtomicExchange32Variant", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
{name: "AtomicExchange64Variant", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
{name: "AtomicCompareAndSwap32Variant", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
{name: "AtomicCompareAndSwap64Variant", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
- {name: "AtomicAnd8Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
- {name: "AtomicAnd32Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
- {name: "AtomicOr8Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
- {name: "AtomicOr32Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+ {name: "AtomicAnd8Variant", argLength: 3, typ: "(Uint8, Mem)", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicOr8Variant", argLength: 3, typ: "(Uint8, Mem)", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicAnd64Variant", argLength: 3, typ: "(Uint64, Mem)", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicOr64Variant", argLength: 3, typ: "(Uint64, Mem)", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicAnd32Variant", argLength: 3, typ: "(Uint32, Mem)", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicOr32Variant", argLength: 3, typ: "(Uint32, Mem)", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns old contents of *arg0 and new memory.
// Publication barrier
{name: "PubBarrier", argLength: 1, hasSideEffects: true}, // Do data barrier. arg0=memory.
diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go
index 26af10b59c..02733eaf16 100644
--- a/src/cmd/compile/internal/ssa/block.go
+++ b/src/cmd/compile/internal/ssa/block.go
@@ -31,6 +31,9 @@ type Block struct {
// After flagalloc, records whether flags are live at the end of the block.
FlagsLiveAtEnd bool
+ // Hotness records whether this block would be good to align (according to the optimizer's guesses).
+ Hotness Hotness
+
// Subsequent blocks, if any. The number and order depend on the block kind.
Succs []Edge
@@ -112,7 +115,7 @@ func (e Edge) String() string {
}
// BlockKind is the kind of SSA block.
-type BlockKind int16
+type BlockKind uint8
// short form print
func (b *Block) String() string {
@@ -426,3 +429,17 @@ const (
BranchUnknown = BranchPrediction(0)
BranchLikely = BranchPrediction(+1)
)
+
+type Hotness int8 // Could use negative numbers for specifically non-hot blocks, but don't, yet.
+const (
+ // These values are arranged in what seems to be order of increasing alignment importance.
+ // Currently only a few are relevant. Implicitly, they are all in a loop.
+ HotNotFlowIn Hotness = 1 << iota // This block is only reached by branches
+ HotInitial // In the block order, the first one for a given loop. Not necessarily topological header.
+ HotPgo // By PGO-based heuristics, this block occurs in a hot loop
+
+ HotNot = 0
+ HotInitialNotFlowIn = HotInitial | HotNotFlowIn // typically the first block of a rotated loop; the loop is entered with a branch (not to this block). No PGO.
+ HotPgoInitial = HotPgo | HotInitial // special case: a single-block loop whose initial block is the header block and has a flow-in entry, but PGO says it is hot
+ HotPgoInitialNotFlowIn = HotPgo | HotInitial | HotNotFlowIn // PGO says it is hot, and the loop is rotated so flow enters the loop with a branch
+)
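A sketch of how a later layout/alignment pass might consult these flags (a hypothetical consumer; the bits compose as shown above):

	if b.Hotness&HotPgo != 0 && b.Hotness&HotNotFlowIn != 0 {
		// PGO-hot loop entered only by branches: an alignment candidate
	}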
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
index cb3427103c..ce04cb3a24 100644
--- a/src/cmd/compile/internal/ssa/deadstore.go
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -21,12 +21,18 @@ func dse(f *Func) {
defer f.retSparseSet(storeUse)
shadowed := f.newSparseMap(f.NumValues())
defer f.retSparseMap(shadowed)
+ // localAddrs maps a local variable (the Aux field of a LocalAddr value) to a LocalAddr value for that variable in the current block.
+ localAddrs := map[any]*Value{}
for _, b := range f.Blocks {
// Find all the stores in this block. Categorize their uses:
// loadUse contains stores which are used by a subsequent load.
// storeUse contains stores which are used by a subsequent store.
loadUse.clear()
storeUse.clear()
+ // TODO(deparker): use the 'clear' builtin once compiler bootstrap minimum version is raised to 1.21.
+ for k := range localAddrs {
+ delete(localAddrs, k)
+ }
stores = stores[:0]
for _, v := range b.Values {
if v.Op == OpPhi {
@@ -46,6 +52,13 @@ func dse(f *Func) {
}
}
} else {
+ if v.Op == OpLocalAddr {
+ if _, ok := localAddrs[v.Aux]; !ok {
+ localAddrs[v.Aux] = v
+ } else {
+ continue
+ }
+ }
for _, a := range v.Args {
if a.Block == b && a.Type.IsMemory() {
loadUse.add(a.ID)
@@ -100,6 +113,11 @@ func dse(f *Func) {
} else { // OpZero
sz = v.AuxInt
}
+ if ptr.Op == OpLocalAddr {
+ if la, ok := localAddrs[ptr.Aux]; ok {
+ ptr = la
+ }
+ }
sr := shadowRange(shadowed.get(ptr.ID))
if sr.contains(off, off+sz) {
// Modify the store/zero into a copy of the memory state,
@@ -146,6 +164,7 @@ type shadowRange int32
func (sr shadowRange) lo() int64 {
return int64(sr & 0xffff)
}
+
func (sr shadowRange) hi() int64 {
return int64((sr >> 16) & 0xffff)
}
diff --git a/src/cmd/compile/internal/ssa/deadstore_test.go b/src/cmd/compile/internal/ssa/deadstore_test.go
index 33cb4b9755..4ccd6b8e91 100644
--- a/src/cmd/compile/internal/ssa/deadstore_test.go
+++ b/src/cmd/compile/internal/ssa/deadstore_test.go
@@ -6,6 +6,7 @@ package ssa
import (
"cmd/compile/internal/types"
+ "cmd/internal/src"
"testing"
)
@@ -44,6 +45,7 @@ func TestDeadStore(t *testing.T) {
t.Errorf("dead store (zero) not removed")
}
}
+
func TestDeadStorePhi(t *testing.T) {
// make sure we don't get into an infinite loop with phi values.
c := testConfig(t)
@@ -127,3 +129,46 @@ func TestDeadStoreUnsafe(t *testing.T) {
t.Errorf("store %s incorrectly removed", v)
}
}
+
+func TestDeadStoreSmallStructInit(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ typ := types.NewStruct([]*types.Field{
+ types.NewField(src.NoXPos, &types.Sym{Name: "A"}, c.config.Types.Int),
+ types.NewField(src.NoXPos, &types.Sym{Name: "B"}, c.config.Types.Int),
+ })
+ name := c.Temp(typ)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("zero", OpConst64, c.config.Types.Int, 0, nil),
+ Valu("v6", OpLocalAddr, ptrType, 0, name, "sp", "start"),
+ Valu("v3", OpOffPtr, ptrType, 8, nil, "v6"),
+ Valu("v22", OpOffPtr, ptrType, 0, nil, "v6"),
+ Valu("zerostore1", OpStore, types.TypeMem, 0, c.config.Types.Int, "v22", "zero", "start"),
+ Valu("zerostore2", OpStore, types.TypeMem, 0, c.config.Types.Int, "v3", "zero", "zerostore1"),
+ Valu("v8", OpLocalAddr, ptrType, 0, name, "sp", "zerostore2"),
+ Valu("v23", OpOffPtr, ptrType, 8, nil, "v8"),
+ Valu("v25", OpOffPtr, ptrType, 0, nil, "v8"),
+ Valu("zerostore3", OpStore, types.TypeMem, 0, c.config.Types.Int, "v25", "zero", "zerostore2"),
+ Valu("zerostore4", OpStore, types.TypeMem, 0, c.config.Types.Int, "v23", "zero", "zerostore3"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("zerostore4")))
+
+ fun.f.Name = "smallstructinit"
+ CheckFunc(fun.f)
+ cse(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+
+ v1 := fun.values["zerostore1"]
+ if v1.Op != OpCopy {
+ t.Errorf("dead store not removed")
+ }
+ v2 := fun.values["zerostore2"]
+ if v2.Op != OpCopy {
+ t.Errorf("dead store not removed")
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index b2c4b1997f..c33c77f891 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -7,6 +7,7 @@ package ssa
import (
"testing"
+ "cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
@@ -15,6 +16,7 @@ import (
"cmd/internal/obj/s390x"
"cmd/internal/obj/x86"
"cmd/internal/src"
+ "cmd/internal/sys"
)
var CheckFunc = checkFunc
@@ -115,6 +117,7 @@ func init() {
types.RegSize = 8
types.MaxWidth = 1 << 50
+ base.Ctxt = &obj.Link{Arch: &obj.LinkArch{Arch: &sys.Arch{Alignment: 1, CanMergeLoads: true}}}
typecheck.InitUniverse()
testTypes.SetTypPtrs()
}
diff --git a/src/cmd/compile/internal/ssa/fmahash_test.go b/src/cmd/compile/internal/ssa/fmahash_test.go
index dfa1aa1147..c563d5b8d9 100644
--- a/src/cmd/compile/internal/ssa/fmahash_test.go
+++ b/src/cmd/compile/internal/ssa/fmahash_test.go
@@ -41,7 +41,7 @@ func TestFmaHash(t *testing.T) {
t.Logf("%v", cmd.Env)
b, e := cmd.CombinedOutput()
if e != nil {
- t.Error(e)
+ t.Errorf("build failed: %v\n%s", e, b)
}
s := string(b) // Looking for "GOFMAHASH triggered main.main:24"
re := "fmahash(0?) triggered .*fma.go:29:..;.*fma.go:18:.."
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index 38b459a2ff..2bb34a41cb 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -45,6 +45,7 @@ type Func struct {
laidout bool // Blocks are ordered
NoSplit bool // true if function is marked as nosplit. Used by schedule check pass.
dumpFileSeq uint8 // the sequence numbers of dump file. (%s_%02d__%s.dump", funcname, dumpFileSeq, phaseName)
+ IsPgoHot bool
// when register allocation is done, maps value ids to locations
RegAlloc []Location
diff --git a/src/cmd/compile/internal/ssa/looprotate.go b/src/cmd/compile/internal/ssa/looprotate.go
index 844a8f7124..f32125576f 100644
--- a/src/cmd/compile/internal/ssa/looprotate.go
+++ b/src/cmd/compile/internal/ssa/looprotate.go
@@ -56,9 +56,20 @@ func loopRotate(f *Func) {
}
p = e.b
}
- if p == nil || p == b {
+ if p == nil {
continue
}
+ p.Hotness |= HotInitial
+ if f.IsPgoHot {
+ p.Hotness |= HotPgo
+ }
+ // Blocks will be arranged so that p is ordered first, if it isn't already.
+ if p == b { // p is the header and already first (it is also the only block in the loop)
+ continue
+ }
+ p.Hotness |= HotNotFlowIn
+
+ // the loop header b follows p
after[p.ID] = []*Block{b}
for {
nextIdx := idToIdx[b.ID] + 1
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 429c214395..847d62c0a5 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -1717,12 +1717,16 @@ const (
OpARM64LoweredAtomicCas64Variant
OpARM64LoweredAtomicCas32Variant
OpARM64LoweredAtomicAnd8
- OpARM64LoweredAtomicAnd32
OpARM64LoweredAtomicOr8
+ OpARM64LoweredAtomicAnd64
+ OpARM64LoweredAtomicOr64
+ OpARM64LoweredAtomicAnd32
OpARM64LoweredAtomicOr32
OpARM64LoweredAtomicAnd8Variant
- OpARM64LoweredAtomicAnd32Variant
OpARM64LoweredAtomicOr8Variant
+ OpARM64LoweredAtomicAnd64Variant
+ OpARM64LoweredAtomicOr64Variant
+ OpARM64LoweredAtomicAnd32Variant
OpARM64LoweredAtomicOr32Variant
OpARM64LoweredWB
OpARM64LoweredPanicBoundsA
@@ -2165,6 +2169,7 @@ const (
OpPPC64RLWNM
OpPPC64RLWMI
OpPPC64RLDICL
+ OpPPC64RLDICLCC
OpPPC64RLDICR
OpPPC64CNTLZD
OpPPC64CNTLZDCC
@@ -2221,6 +2226,7 @@ const (
OpPPC64ORconst
OpPPC64XORconst
OpPPC64ANDCCconst
+ OpPPC64ANDconst
OpPPC64MOVBreg
OpPPC64MOVBZreg
OpPPC64MOVHreg
@@ -3224,8 +3230,10 @@ const (
OpAtomicCompareAndSwap64
OpAtomicCompareAndSwapRel32
OpAtomicAnd8
- OpAtomicAnd32
OpAtomicOr8
+ OpAtomicAnd64
+ OpAtomicAnd32
+ OpAtomicOr64
OpAtomicOr32
OpAtomicAdd32Variant
OpAtomicAdd64Variant
@@ -3234,8 +3242,10 @@ const (
OpAtomicCompareAndSwap32Variant
OpAtomicCompareAndSwap64Variant
OpAtomicAnd8Variant
- OpAtomicAnd32Variant
OpAtomicOr8Variant
+ OpAtomicAnd64Variant
+ OpAtomicOr64Variant
+ OpAtomicAnd32Variant
OpAtomicOr32Variant
OpPubBarrier
OpClobber
@@ -22998,6 +23008,7 @@ var opcodeTable = [...]opInfo{
name: "LoweredAtomicAnd8",
argLen: 3,
resultNotInArgs: true,
+ needIntTemp: true,
faultOnNilArg0: true,
hasSideEffects: true,
unsafePoint: true,
@@ -23013,9 +23024,29 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "LoweredAtomicAnd32",
+ name: "LoweredAtomicOr8",
+ argLen: 3,
+ resultNotInArgs: true,
+ needIntTemp: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd64",
argLen: 3,
resultNotInArgs: true,
+ needIntTemp: true,
faultOnNilArg0: true,
hasSideEffects: true,
unsafePoint: true,
@@ -23031,9 +23062,10 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "LoweredAtomicOr8",
+ name: "LoweredAtomicOr64",
argLen: 3,
resultNotInArgs: true,
+ needIntTemp: true,
faultOnNilArg0: true,
hasSideEffects: true,
unsafePoint: true,
@@ -23049,9 +23081,29 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "LoweredAtomicAnd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ needIntTemp: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "LoweredAtomicOr32",
argLen: 3,
resultNotInArgs: true,
+ needIntTemp: true,
faultOnNilArg0: true,
hasSideEffects: true,
unsafePoint: true,
@@ -23084,7 +23136,23 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "LoweredAtomicAnd32Variant",
+ name: "LoweredAtomicOr8Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd64Variant",
argLen: 3,
resultNotInArgs: true,
faultOnNilArg0: true,
@@ -23101,7 +23169,7 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "LoweredAtomicOr8Variant",
+ name: "LoweredAtomicOr64Variant",
argLen: 3,
resultNotInArgs: true,
faultOnNilArg0: true,
@@ -23117,6 +23185,23 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "LoweredAtomicAnd32Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "LoweredAtomicOr32Variant",
argLen: 3,
resultNotInArgs: true,
@@ -29122,6 +29207,20 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "RLDICLCC",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ARLDICLCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
name: "RLDICR",
auxType: auxInt64,
argLen: 1,
@@ -29886,6 +29985,21 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ asm: ppc64.AANDCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
name: "MOVBreg",
argLen: 1,
asm: ppc64.AMOVB,
@@ -40619,13 +40733,25 @@ var opcodeTable = [...]opInfo{
generic: true,
},
{
+ name: "AtomicOr8",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAnd64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
name: "AtomicAnd32",
argLen: 3,
hasSideEffects: true,
generic: true,
},
{
- name: "AtomicOr8",
+ name: "AtomicOr64",
argLen: 3,
hasSideEffects: true,
generic: true,
@@ -40679,13 +40805,25 @@ var opcodeTable = [...]opInfo{
generic: true,
},
{
- name: "AtomicAnd32Variant",
+ name: "AtomicOr8Variant",
argLen: 3,
hasSideEffects: true,
generic: true,
},
{
- name: "AtomicOr8Variant",
+ name: "AtomicAnd64Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicOr64Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAnd32Variant",
argLen: 3,
hasSideEffects: true,
generic: true,
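
These generated table entries track the new generic AtomicAnd64/AtomicOr64 ops (and their Variant forms). They plausibly back the 64-bit flavors of the atomic bitwise And/Or APIs; a usage sketch, assuming a Go 1.23+ sync/atomic that exposes them:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var flags atomic.Uint64
	flags.Or(0b1010)         // atomically set bits 1 and 3; returns the old value
	old := flags.And(0b0010) // atomically keep only bit 1
	fmt.Printf("old=%04b now=%04b\n", old, flags.Load())
}
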
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index 9961b540b7..aeec2b3768 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -1589,7 +1589,7 @@ func mergePPC64AndSrwi(m, s int64) int64 {
return encodePPC64RotateMask((32-s)&31, mask, 32)
}
-// Test if a shift right feeding into a CLRLSLDI can be merged into RLWINM.
+// Test if a word shift right feeding into a CLRLSLDI can be merged into RLWINM.
// Return the encoded RLWINM constant, or 0 if they cannot be merged.
func mergePPC64ClrlsldiSrw(sld, srw int64) int64 {
mask_1 := uint64(0xFFFFFFFF >> uint(srw))
@@ -1609,6 +1609,31 @@ func mergePPC64ClrlsldiSrw(sld, srw int64) int64 {
return encodePPC64RotateMask(int64(r_3), int64(mask_3), 32)
}
+// Test if a doubleword shift right feeding into a CLRLSLDI can be merged into RLWINM.
+// Return the encoded RLWINM constant, or 0 if they cannot be merged.
+func mergePPC64ClrlsldiSrd(sld, srd int64) int64 {
+ mask_1 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(srd)
+ // For CLRLSLDI, it's more convenient to think of it as masking the left bits, then rotating left.
+ mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
+
+ // Rewrite mask to apply after the final left shift.
+ mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(sld))
+
+ r_1 := 64 - srd
+ r_2 := GetPPC64Shiftsh(sld)
+ r_3 := (r_1 + r_2) & 63 // This can wrap.
+
+ if uint64(uint32(mask_3)) != mask_3 || mask_3 == 0 {
+ return 0
+ }
+ // This combine only works when selecting and shifting the lower 32 bits.
+ v1 := bits.RotateLeft64(0xFFFFFFFF00000000, int(r_3))
+ if v1&mask_3 != 0 {
+ return 0
+ }
+ return encodePPC64RotateMask(int64(r_3&31), int64(mask_3), 32)
+}
+
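
A standalone re-derivation of the conditions above for one concrete case, treating CLRLSLDI as "clear the leftmost mb bits, then shift left by sh" (mb and sh are assumed stand-ins for GetPPC64Shiftmb/GetPPC64Shiftsh):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	const srd = 16       // SRDconst shift amount
	const mb, sh = 56, 4 // CLRLSLDI: keep the low byte, shift left by 4

	mask1 := ^uint64(0) >> srd
	mask2 := ^uint64(0) >> mb
	mask3 := (mask1 & mask2) << sh // mask to apply after the final shift

	r3 := (64 - srd + sh) & 63 // combined rotate amount; this can wrap

	fits32 := uint64(uint32(mask3)) == mask3 && mask3 != 0
	// No selected bit may originate in the upper 32 bits of the rotated
	// doubleword, or a 32-bit RLWINM rotate would pick up the wrong bits.
	low32Only := bits.RotateLeft64(0xFFFFFFFF00000000, r3)&mask3 == 0

	fmt.Printf("mask3=%#x r3=%d (RLWINM rotate %d) mergeable=%v\n",
		mask3, r3, r3&31, fits32 && low32Only)
}
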
// Test if a RLWINM feeding into a CLRLSLDI can be merged into RLWINM. Return
// the encoded RLWINM constant, or 0 if they cannot be merged.
func mergePPC64ClrlsldiRlwinm(sld int32, rlw int64) int64 {
@@ -1628,6 +1653,66 @@ func mergePPC64ClrlsldiRlwinm(sld int32, rlw int64) int64 {
return encodePPC64RotateMask(r_3, int64(mask_3), 32)
}
+// Test if RLWINM feeding into an ANDconst can be merged. Return the encoded RLWINM constant,
+// or 0 if they cannot be merged.
+func mergePPC64AndRlwinm(mask uint32, rlw int64) int64 {
+ r, _, _, mask_rlw := DecodePPC64RotateMask(rlw)
+ mask_out := (mask_rlw & uint64(mask))
+
+ // Verify the result is still a valid bitmask of <= 32 bits.
+ if !isPPC64WordRotateMask(int64(mask_out)) {
+ return 0
+ }
+ return encodePPC64RotateMask(r, int64(mask_out), 32)
+}
+
+// Test if RLWINM opcode rlw clears the upper 32 bits of the
+// result. Return rlw if it does, 0 otherwise.
+func mergePPC64MovwzregRlwinm(rlw int64) int64 {
+ _, mb, me, _ := DecodePPC64RotateMask(rlw)
+ if mb > me {
+ return 0
+ }
+ return rlw
+}
+
+// Test if an AND feeding into a RLWINM can be merged. Return the encoded RLWINM constant,
+// or 0 if they cannot be merged.
+func mergePPC64RlwinmAnd(rlw int64, mask uint32) int64 {
+ r, _, _, mask_rlw := DecodePPC64RotateMask(rlw)
+
+ // Rotate the input mask, combine with the rlwinm mask, and test if it is still a valid rlwinm mask.
+ r_mask := bits.RotateLeft32(mask, int(r))
+
+ mask_out := (mask_rlw & uint64(r_mask))
+
+ // Verify the result is still a valid bitmask of <= 32 bits.
+ if !isPPC64WordRotateMask(int64(mask_out)) {
+ return 0
+ }
+ return encodePPC64RotateMask(r, int64(mask_out), 32)
+}
+
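
The mask rotation is justified by rotation distributing over AND: masking first and then rotating equals rotating first and masking with the rotated mask. A quick check of the identity this function leans on:

package main

import (
	"fmt"
	"math/bits"
)

// Confirms RotateLeft32(u&a, r) == RotateLeft32(u, r) & RotateLeft32(a, r)
// for a few sample values.
func main() {
	const r = 13
	const a = 0x00FF00FF
	for _, u := range []uint32{0, 1, 0xDEADBEEF, 0xFFFFFFFF} {
		lhs := bits.RotateLeft32(u&a, r)
		rhs := bits.RotateLeft32(u, r) & bits.RotateLeft32(a, r)
		fmt.Printf("u=%#08x lhs=%#08x rhs=%#08x equal=%v\n", u, lhs, rhs, lhs == rhs)
	}
}
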
+// Test if RLWINM feeding into SLDconst can be merged. Return the encoded RLWINM constant,
+// or 0 if they cannot be merged.
+func mergePPC64SldiRlwinm(sldi, rlw int64) int64 {
+ r_1, mb, me, mask_1 := DecodePPC64RotateMask(rlw)
+ if mb > me || mb < sldi {
+ // Wrapping masks cannot be merged as the upper 32 bits are effectively undefined in this case.
+ // Likewise, if mb is less than the shift amount, it cannot be merged.
+ return 0
+ }
+ // Combine the masks, and adjust for the final left shift.
+ mask_3 := mask_1 << sldi
+ r_3 := (r_1 + sldi) & 31 // This can wrap.
+
+ // Verify the result is still a valid bitmask of <= 32 bits.
+ if uint64(uint32(mask_3)) != mask_3 {
+ return 0
+ }
+ return encodePPC64RotateMask(r_3, int64(mask_3), 32)
+}
+
// Compute the encoded RLWINM constant from combining (SLDconst [sld] (SRWconst [srw] x)),
// or return 0 if they cannot be combined.
func mergePPC64SldiSrw(sld, srw int64) int64 {
@@ -1670,9 +1755,11 @@ func convertPPC64OpToOpCC(op *Value) *Value {
OpPPC64ADD: OpPPC64ADDCC,
OpPPC64ADDconst: OpPPC64ADDCCconst,
OpPPC64AND: OpPPC64ANDCC,
+ OpPPC64ANDconst: OpPPC64ANDCCconst,
OpPPC64ANDN: OpPPC64ANDNCC,
OpPPC64CNTLZD: OpPPC64CNTLZDCC,
OpPPC64OR: OpPPC64ORCC,
+ OpPPC64RLDICL: OpPPC64RLDICLCC,
OpPPC64SUB: OpPPC64SUBCC,
OpPPC64NEG: OpPPC64NEGCC,
OpPPC64NOR: OpPPC64NORCC,
@@ -1686,6 +1773,15 @@ func convertPPC64OpToOpCC(op *Value) *Value {
return op
}
+// Try converting a RLDICL to ANDCCconst. If successful, return the mask; otherwise return 0.
+func convertPPC64RldiclAndccconst(sauxint int64) int64 {
+ r, _, _, mask := DecodePPC64RotateMask(sauxint)
+ if r != 0 || mask&0xFFFF != mask {
+ return 0
+ }
+ return int64(mask)
+}
+
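
A sketch of what this test encodes: RLDICL rotates left and then keeps bits mb..63, so with a zero rotate it degenerates to an AND with a right-justified mask, and an ANDI.-style immediate can encode that mask only when it fits in 16 bits. A minimal model, with rldicl here an assumed simplification of the real instruction:

package main

import (
	"fmt"
	"math/bits"
)

// rldicl rotates x left by sh, then keeps bits mb..63.
func rldicl(x uint64, sh int, mb uint) uint64 {
	return bits.RotateLeft64(x, sh) & (^uint64(0) >> mb)
}

func main() {
	x := uint64(0x1234_5678_9ABC_DEF0)
	for _, mb := range []uint{48, 52, 40} {
		mask := ^uint64(0) >> mb
		fmt.Printf("mb=%d mask=%#x fits16=%v equalToAnd=%v\n",
			mb, mask, mask&0xFFFF == mask, rldicl(x, 0, mb) == x&mask)
	}
}
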
// Convenience function to rotate a 32 bit constant value by another constant.
func rotateLeft32(v, rotate int64) int64 {
return int64(bits.RotateLeft32(uint32(v), int(rotate)))
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 8f60f023b1..a548f6bd97 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -470,13 +470,23 @@ func rewriteValueARM64(v *Value) bool {
v.Op = OpARM64LoweredAtomicAdd64Variant
return true
case OpAtomicAnd32:
- return rewriteValueARM64_OpAtomicAnd32(v)
+ v.Op = OpARM64LoweredAtomicAnd32
+ return true
case OpAtomicAnd32Variant:
- return rewriteValueARM64_OpAtomicAnd32Variant(v)
+ v.Op = OpARM64LoweredAtomicAnd32Variant
+ return true
+ case OpAtomicAnd64:
+ v.Op = OpARM64LoweredAtomicAnd64
+ return true
+ case OpAtomicAnd64Variant:
+ v.Op = OpARM64LoweredAtomicAnd64Variant
+ return true
case OpAtomicAnd8:
- return rewriteValueARM64_OpAtomicAnd8(v)
+ v.Op = OpARM64LoweredAtomicAnd8
+ return true
case OpAtomicAnd8Variant:
- return rewriteValueARM64_OpAtomicAnd8Variant(v)
+ v.Op = OpARM64LoweredAtomicAnd8Variant
+ return true
case OpAtomicCompareAndSwap32:
v.Op = OpARM64LoweredAtomicCas32
return true
@@ -514,13 +524,23 @@ func rewriteValueARM64(v *Value) bool {
v.Op = OpARM64LDAR
return true
case OpAtomicOr32:
- return rewriteValueARM64_OpAtomicOr32(v)
+ v.Op = OpARM64LoweredAtomicOr32
+ return true
case OpAtomicOr32Variant:
- return rewriteValueARM64_OpAtomicOr32Variant(v)
+ v.Op = OpARM64LoweredAtomicOr32Variant
+ return true
+ case OpAtomicOr64:
+ v.Op = OpARM64LoweredAtomicOr64
+ return true
+ case OpAtomicOr64Variant:
+ v.Op = OpARM64LoweredAtomicOr64Variant
+ return true
case OpAtomicOr8:
- return rewriteValueARM64_OpAtomicOr8(v)
+ v.Op = OpARM64LoweredAtomicOr8
+ return true
case OpAtomicOr8Variant:
- return rewriteValueARM64_OpAtomicOr8Variant(v)
+ v.Op = OpARM64LoweredAtomicOr8Variant
+ return true
case OpAtomicStore32:
v.Op = OpARM64STLRW
return true
@@ -17783,158 +17803,6 @@ func rewriteValueARM64_OpAddr(v *Value) bool {
return true
}
}
-func rewriteValueARM64_OpAtomicAnd32(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
- // match: (AtomicAnd32 ptr val mem)
- // result: (Select1 (LoweredAtomicAnd32 ptr val mem))
- for {
- ptr := v_0
- val := v_1
- mem := v_2
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd32, types.NewTuple(typ.UInt32, types.TypeMem))
- v0.AddArg3(ptr, val, mem)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM64_OpAtomicAnd32Variant(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
- // match: (AtomicAnd32Variant ptr val mem)
- // result: (Select1 (LoweredAtomicAnd32Variant ptr val mem))
- for {
- ptr := v_0
- val := v_1
- mem := v_2
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd32Variant, types.NewTuple(typ.UInt32, types.TypeMem))
- v0.AddArg3(ptr, val, mem)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM64_OpAtomicAnd8(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
- // match: (AtomicAnd8 ptr val mem)
- // result: (Select1 (LoweredAtomicAnd8 ptr val mem))
- for {
- ptr := v_0
- val := v_1
- mem := v_2
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd8, types.NewTuple(typ.UInt8, types.TypeMem))
- v0.AddArg3(ptr, val, mem)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM64_OpAtomicAnd8Variant(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
- // match: (AtomicAnd8Variant ptr val mem)
- // result: (Select1 (LoweredAtomicAnd8Variant ptr val mem))
- for {
- ptr := v_0
- val := v_1
- mem := v_2
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd8Variant, types.NewTuple(typ.UInt8, types.TypeMem))
- v0.AddArg3(ptr, val, mem)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM64_OpAtomicOr32(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
- // match: (AtomicOr32 ptr val mem)
- // result: (Select1 (LoweredAtomicOr32 ptr val mem))
- for {
- ptr := v_0
- val := v_1
- mem := v_2
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr32, types.NewTuple(typ.UInt32, types.TypeMem))
- v0.AddArg3(ptr, val, mem)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM64_OpAtomicOr32Variant(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
- // match: (AtomicOr32Variant ptr val mem)
- // result: (Select1 (LoweredAtomicOr32Variant ptr val mem))
- for {
- ptr := v_0
- val := v_1
- mem := v_2
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr32Variant, types.NewTuple(typ.UInt32, types.TypeMem))
- v0.AddArg3(ptr, val, mem)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM64_OpAtomicOr8(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
- // match: (AtomicOr8 ptr val mem)
- // result: (Select1 (LoweredAtomicOr8 ptr val mem))
- for {
- ptr := v_0
- val := v_1
- mem := v_2
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8, types.NewTuple(typ.UInt8, types.TypeMem))
- v0.AddArg3(ptr, val, mem)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM64_OpAtomicOr8Variant(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
- // match: (AtomicOr8Variant ptr val mem)
- // result: (Select1 (LoweredAtomicOr8Variant ptr val mem))
- for {
- ptr := v_0
- val := v_1
- mem := v_2
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8Variant, types.NewTuple(typ.UInt8, types.TypeMem))
- v0.AddArg3(ptr, val, mem)
- v.AddArg(v0)
- return true
- }
-}
func rewriteValueARM64_OpAvg64u(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 4ac5eec073..b45770995e 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -449,10 +449,10 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64ADDconst(v)
case OpPPC64AND:
return rewriteValuePPC64_OpPPC64AND(v)
- case OpPPC64ANDCCconst:
- return rewriteValuePPC64_OpPPC64ANDCCconst(v)
case OpPPC64ANDN:
return rewriteValuePPC64_OpPPC64ANDN(v)
+ case OpPPC64ANDconst:
+ return rewriteValuePPC64_OpPPC64ANDconst(v)
case OpPPC64BRD:
return rewriteValuePPC64_OpPPC64BRD(v)
case OpPPC64BRH:
@@ -611,6 +611,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64ORN(v)
case OpPPC64ORconst:
return rewriteValuePPC64_OpPPC64ORconst(v)
+ case OpPPC64RLWINM:
+ return rewriteValuePPC64_OpPPC64RLWINM(v)
case OpPPC64ROTL:
return rewriteValuePPC64_OpPPC64ROTL(v)
case OpPPC64ROTLW:
@@ -1395,7 +1397,7 @@ func rewriteValuePPC64_OpCondSelect(v *Value) bool {
}
// match: (CondSelect x y bool)
// cond: flagArg(bool) == nil
- // result: (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
+ // result: (ISEL [6] x y (CMPconst [0] (ANDconst [1] bool)))
for {
x := v_0
y := v_1
@@ -1405,8 +1407,9 @@ func rewriteValuePPC64_OpCondSelect(v *Value) bool {
}
v.reset(OpPPC64ISEL)
v.AuxInt = int32ToAuxInt(6)
- v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v1.AuxInt = int64ToAuxInt(1)
v1.AddArg(bool)
v0.AddArg(v1)
@@ -1985,17 +1988,14 @@ func rewriteValuePPC64_OpEqB(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (EqB x y)
- // result: (Select0 <typ.Int> (ANDCCconst [1] (EQV x y)))
+ // result: (ANDconst [1] (EQV x y))
for {
x := v_0
y := v_1
- v.reset(OpSelect0)
- v.Type = typ.Int
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(1)
- v1 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64)
- v1.AddArg2(x, y)
- v0.AddArg(v1)
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
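
The EqB rewrite is sound because EQV is xor-with-complement: for operands confined to 0 and 1, the low bit of ^(x^y) is exactly x == y, so the flags half of the old ANDCCconst tuple was never needed here. A truth-table check:

package main

import "fmt"

// For x, y in {0, 1}: the low bit of ^(x^y) is 1 exactly when x == y.
func main() {
	for _, x := range []uint64{0, 1} {
		for _, y := range []uint64{0, 1} {
			fmt.Printf("x=%d y=%d EQV&1=%d x==y:%v\n", x, y, ^(x^y)&1, x == y)
		}
	}
}
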
@@ -2631,7 +2631,7 @@ func rewriteValuePPC64_OpLsh16x16(v *Value) bool {
return true
}
// match: (Lsh16x16 <t> x y)
- // result: (ISEL [2] (SLD <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
+ // result: (ISEL [2] (SLD <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF0] y)))
for {
t := v.Type
x := v_0
@@ -2644,8 +2644,9 @@ func rewriteValuePPC64_OpLsh16x16(v *Value) bool {
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0xFFF0)
v4.AddArg(y)
v3.AddArg(v4)
@@ -2767,7 +2768,7 @@ func rewriteValuePPC64_OpLsh16x8(v *Value) bool {
return true
}
// match: (Lsh16x8 <t> x y)
- // result: (ISEL [2] (SLD <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+ // result: (ISEL [2] (SLD <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F0] y)))
for {
t := v.Type
x := v_0
@@ -2780,8 +2781,9 @@ func rewriteValuePPC64_OpLsh16x8(v *Value) bool {
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0x00F0)
v4.AddArg(y)
v3.AddArg(v4)
@@ -2808,7 +2810,7 @@ func rewriteValuePPC64_OpLsh32x16(v *Value) bool {
return true
}
// match: (Lsh32x16 <t> x y)
- // result: (ISEL [2] (SLW <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
+ // result: (ISEL [2] (SLW <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFE0] y)))
for {
t := v.Type
x := v_0
@@ -2819,8 +2821,9 @@ func rewriteValuePPC64_OpLsh32x16(v *Value) bool {
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0xFFE0)
v3.AddArg(y)
v2.AddArg(v3)
@@ -2938,7 +2941,7 @@ func rewriteValuePPC64_OpLsh32x8(v *Value) bool {
return true
}
// match: (Lsh32x8 <t> x y)
- // result: (ISEL [2] (SLW <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+ // result: (ISEL [2] (SLW <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00E0] y)))
for {
t := v.Type
x := v_0
@@ -2949,8 +2952,9 @@ func rewriteValuePPC64_OpLsh32x8(v *Value) bool {
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0x00E0)
v3.AddArg(y)
v2.AddArg(v3)
@@ -2977,7 +2981,7 @@ func rewriteValuePPC64_OpLsh64x16(v *Value) bool {
return true
}
// match: (Lsh64x16 <t> x y)
- // result: (ISEL [2] (SLD <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
+ // result: (ISEL [2] (SLD <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFC0] y)))
for {
t := v.Type
x := v_0
@@ -2988,8 +2992,9 @@ func rewriteValuePPC64_OpLsh64x16(v *Value) bool {
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0xFFC0)
v3.AddArg(y)
v2.AddArg(v3)
@@ -3107,7 +3112,7 @@ func rewriteValuePPC64_OpLsh64x8(v *Value) bool {
return true
}
// match: (Lsh64x8 <t> x y)
- // result: (ISEL [2] (SLD <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+ // result: (ISEL [2] (SLD <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00C0] y)))
for {
t := v.Type
x := v_0
@@ -3118,8 +3123,9 @@ func rewriteValuePPC64_OpLsh64x8(v *Value) bool {
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0x00C0)
v3.AddArg(y)
v2.AddArg(v3)
@@ -3146,7 +3152,7 @@ func rewriteValuePPC64_OpLsh8x16(v *Value) bool {
return true
}
// match: (Lsh8x16 <t> x y)
- // result: (ISEL [2] (SLD <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
+ // result: (ISEL [2] (SLD <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF8] y)))
for {
t := v.Type
x := v_0
@@ -3159,8 +3165,9 @@ func rewriteValuePPC64_OpLsh8x16(v *Value) bool {
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0xFFF8)
v4.AddArg(y)
v3.AddArg(v4)
@@ -3282,7 +3289,7 @@ func rewriteValuePPC64_OpLsh8x8(v *Value) bool {
return true
}
// match: (Lsh8x8 <t> x y)
- // result: (ISEL [2] (SLD <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+ // result: (ISEL [2] (SLD <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F8] y)))
for {
t := v.Type
x := v_0
@@ -3295,8 +3302,9 @@ func rewriteValuePPC64_OpLsh8x8(v *Value) bool {
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0x00F8)
v4.AddArg(y)
v3.AddArg(v4)
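
Each of these shift lowerings keeps the same guard, now spelled CMPconst-of-ANDconst: the mask (0x00F8 in the Lsh8x8 rule above) is nonzero exactly when the count reaches the operand width, and the ISEL then substitutes zero, which is what Go's shift semantics require. A sketch of the logic in plain Go:

package main

import "fmt"

// Go requires x << s == 0 once s reaches the operand width; the lowering
// tests the count against a mask and selects 0 when any of those bits is set.
func lsh8(x, s uint8) uint8 {
	if uint64(s)&0x00F8 != 0 { // count is 8 or more, so the result must be 0
		return 0
	}
	return x << s
}

func main() {
	for _, s := range []uint8{1, 7, 8, 200} {
		fmt.Printf("0xFF<<%d = %#02x (native %#02x)\n", s, lsh8(0xFF, s), uint8(0xFF)<<s)
	}
}
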
@@ -4187,8 +4195,6 @@ func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool {
func rewriteValuePPC64_OpPPC64AND(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
// match: (AND (MOVDconst [m]) (ROTLWconst [r] x))
// cond: isPPC64WordRotateMask(m)
// result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
@@ -4334,7 +4340,7 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool {
}
// match: (AND x (MOVDconst [c]))
// cond: isU16Bit(c)
- // result: (Select0 (ANDCCconst [c] x))
+ // result: (ANDconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -4345,11 +4351,9 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool {
if !(isU16Bit(c)) {
continue
}
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(c)
- v0.AddArg(x)
- v.AddArg(v0)
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
return true
}
break
@@ -4391,7 +4395,7 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool {
break
}
// match: (AND (MOVDconst [c]) x:(MOVBZload _ _))
- // result: (Select0 (ANDCCconst [c&0xFF] x))
+ // result: (ANDconst [c&0xFF] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64MOVDconst {
@@ -4402,55 +4406,262 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool {
if x.Op != OpPPC64MOVBZload {
continue
}
- v.reset(OpSelect0)
- v0 := b.NewValue0(x.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(c & 0xFF)
- v0.AddArg(x)
- v.AddArg(v0)
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFF)
+ v.AddArg(x)
return true
}
break
}
return false
}
-func rewriteValuePPC64_OpPPC64ANDCCconst(v *Value) bool {
+func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool {
+ v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (ANDCCconst [c] (Select0 (ANDCCconst [d] x)))
- // result: (ANDCCconst [c&d] x)
+ // match: (ANDN (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c&^d])
for {
- c := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpSelect0 {
+ if v_0.Op != OpPPC64MOVDconst {
break
}
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst {
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
break
}
- d := auxIntToInt64(v_0_0.AuxInt)
- x := v_0_0.Args[0]
- v.reset(OpPPC64ANDCCconst)
- v.AuxInt = int64ToAuxInt(c & d)
- v.AddArg(x)
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c &^ d)
return true
}
return false
}
-func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool {
- v_1 := v.Args[1]
+func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (ANDN (MOVDconst [c]) (MOVDconst [d]))
- // result: (MOVDconst [c&^d])
+ // match: (ANDconst [m] (ROTLWconst [r] x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
for {
- if v_0.Op != OpPPC64MOVDconst {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ROTLWconst {
break
}
- c := auxIntToInt64(v_0.AuxInt)
- if v_1.Op != OpPPC64MOVDconst {
+ r := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [m] (ROTLW x r))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ROTLW {
+ break
+ }
+ r := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWNM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
+ v.AddArg2(x, r)
+ return true
+ }
+ // match: (ANDconst [m] (SRWconst x [s]))
+ // cond: mergePPC64RShiftMask(m,s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ if !(mergePPC64RShiftMask(m, s, 32) == 0) {
break
}
- d := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = int64ToAuxInt(c &^ d)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [m] (SRWconst x [s]))
+ // cond: mergePPC64AndSrwi(m,s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m,s)] x)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64AndSrwi(m, s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [0] _)
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [c] y:(MOVBZreg _))
+ // cond: c&0xFF == 0xFF
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ANDconst [0xFF] (MOVBreg x))
+ // result: (MOVBZreg x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0xFF || v_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] y:(MOVHZreg _))
+ // cond: c&0xFFFF == 0xFFFF
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ANDconst [0xFFFF] (MOVHreg x))
+ // result: (MOVHZreg x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0xFFFF || v_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVBZreg x))
+ // result: (ANDconst [c&0xFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVHZreg x))
+ // result: (ANDconst [c&0xFFFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFFFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWZreg x))
+ // result: (ANDconst [c&0xFFFFFFFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [m] (RLWINM [r] y))
+ // cond: mergePPC64AndRlwinm(uint32(m),r) != 0
+ // result: (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64RLWINM {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if !(mergePPC64AndRlwinm(uint32(m), r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndRlwinm(uint32(m), r))
+ v.AddArg(y)
+ return true
+ }
+ // match: (ANDconst [1] z:(SRADconst [63] x))
+ // cond: z.Uses == 1
+ // result: (SRDconst [63] x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64SRADconst || auxIntToInt64(z.AuxInt) != 63 {
+ break
+ }
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v.AddArg(x)
return true
}
return false
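
The last rule above is sign-bit extraction: an arithmetic shift by 63 smears the sign across the register, and masking with 1 keeps only that bit, which a logical shift by 63 produces directly:

package main

import "fmt"

// For any int64 x, (x >> 63) & 1 (arithmetic shift) equals uint64(x) >> 63
// (logical shift): the arithmetic shift smears the sign bit, the mask keeps it.
func main() {
	for _, x := range []int64{0, 1, -1, -1 << 62} {
		arith := uint64(x>>63) & 1
		logical := uint64(x) >> 63
		fmt.Printf("x=%-20d arith=%d logical=%d\n", x, arith, logical)
	}
}
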
@@ -4628,6 +4839,24 @@ func rewriteValuePPC64_OpPPC64CLRLSLDI(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (CLRLSLDI [c] (SRDconst [s] x))
+ // cond: mergePPC64ClrlsldiSrd(int64(c),s) != 0
+ // result: (RLWINM [mergePPC64ClrlsldiSrd(int64(c),s)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64ClrlsldiSrd(int64(c), s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64ClrlsldiSrd(int64(c), s))
+ v.AddArg(x)
+ return true
+ }
// match: (CLRLSLDI [c] i:(RLWINM [s] x))
// cond: mergePPC64ClrlsldiRlwinm(c,s) != 0
// result: (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
@@ -4765,19 +4994,15 @@ func rewriteValuePPC64_OpPPC64CMPU(v *Value) bool {
}
func rewriteValuePPC64_OpPPC64CMPUconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (CMPUconst [d] (Select0 (ANDCCconst z [c])))
+ // match: (CMPUconst [d] (ANDconst z [c]))
// cond: uint64(d) > uint64(c)
// result: (FlagLT)
for {
d := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpSelect0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst {
+ if v_0.Op != OpPPC64ANDconst {
break
}
- c := auxIntToInt64(v_0_0.AuxInt)
+ c := auxIntToInt64(v_0.AuxInt)
if !(uint64(d) > uint64(c)) {
break
}
@@ -4829,6 +5054,21 @@ func rewriteValuePPC64_OpPPC64CMPUconst(v *Value) bool {
v.reset(OpPPC64FlagGT)
return true
}
+ // match: (CMPUconst [0] a:(ANDconst [n] z))
+ // result: (CMPconst [0] a)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpPPC64ANDconst {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(a)
+ return true
+ }
return false
}
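
This CMPUconst [0] rule (and the CMPWUconst/CMPWconst siblings below) is sound because an ANDconst result always has a clear sign bit (the immediate is at most 16 bits), so signed and unsigned tests against zero agree:

package main

import "fmt"

// An ANDconst result is non-negative, so comparing it against zero gives the
// same classification whether the comparison is signed or unsigned.
func main() {
	for _, z := range []int64{-9, 0, 9} {
		a := z & 0xFFF0 // models ANDconst [0xFFF0] z
		fmt.Printf("a=%#x unsignedEQ0=%v signedEQ0=%v agree=%v\n",
			a, uint64(a) == 0, a == 0, (uint64(a) == 0) == (a == 0))
	}
}
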
func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool {
@@ -4995,19 +5235,15 @@ func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool {
}
func rewriteValuePPC64_OpPPC64CMPWUconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (CMPWUconst [d] (Select0 (ANDCCconst z [c])))
+ // match: (CMPWUconst [d] (ANDconst z [c]))
// cond: uint64(d) > uint64(c)
// result: (FlagLT)
for {
d := auxIntToInt32(v.AuxInt)
- if v_0.Op != OpSelect0 {
+ if v_0.Op != OpPPC64ANDconst {
break
}
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0_0.AuxInt)
+ c := auxIntToInt64(v_0.AuxInt)
if !(uint64(d) > uint64(c)) {
break
}
@@ -5059,6 +5295,21 @@ func rewriteValuePPC64_OpPPC64CMPWUconst(v *Value) bool {
v.reset(OpPPC64FlagGT)
return true
}
+ // match: (CMPWUconst [0] a:(ANDconst [n] z))
+ // result: (CMPconst [0] a)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpPPC64ANDconst {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(a)
+ return true
+ }
return false
}
func rewriteValuePPC64_OpPPC64CMPWconst(v *Value) bool {
@@ -5108,6 +5359,21 @@ func rewriteValuePPC64_OpPPC64CMPWconst(v *Value) bool {
v.reset(OpPPC64FlagGT)
return true
}
+ // match: (CMPWconst [0] a:(ANDconst [n] z))
+ // result: (CMPconst [0] a)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpPPC64ANDconst {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(a)
+ return true
+ }
return false
}
func rewriteValuePPC64_OpPPC64CMPconst(v *Value) bool {
@@ -5841,9 +6107,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
- // match: (ISEL [6] x y (Select1 (ANDCCconst [1] (SETBC [c] cmp))))
+ // match: (ISEL [6] x y (CMPconst [0] (ANDconst [1] (SETBC [c] cmp))))
// result: (ISEL [c] x y cmp)
for {
if auxIntToInt32(v.AuxInt) != 6 {
@@ -5851,11 +6115,11 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool {
}
x := v_0
y := v_1
- if v_2.Op != OpSelect1 {
+ if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 {
break
}
v_2_0 := v_2.Args[0]
- if v_2_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0.AuxInt) != 1 {
+ if v_2_0.Op != OpPPC64ANDconst || auxIntToInt64(v_2_0.AuxInt) != 1 {
break
}
v_2_0_0 := v_2_0.Args[0]
@@ -6191,130 +6455,6 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool {
v.copyOf(y)
return true
}
- // match: (ISEL [2] x y (CMPconst [0] (Select0 (ANDCCconst [n] z))))
- // result: (ISEL [2] x y (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
- for {
- if auxIntToInt32(v.AuxInt) != 2 {
- break
- }
- x := v_0
- y := v_1
- if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 {
- break
- }
- v_2_0 := v_2.Args[0]
- if v_2_0.Op != OpSelect0 {
- break
- }
- v_2_0_0 := v_2_0.Args[0]
- if v_2_0_0.Op != OpPPC64ANDCCconst {
- break
- }
- n := auxIntToInt64(v_2_0_0.AuxInt)
- z := v_2_0_0.Args[0]
- v.reset(OpPPC64ISEL)
- v.AuxInt = int32ToAuxInt(2)
- v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(n)
- v1.AddArg(z)
- v0.AddArg(v1)
- v.AddArg3(x, y, v0)
- return true
- }
- // match: (ISEL [2] x y (CMPWconst [0] (Select0 (ANDCCconst [n] z))))
- // result: (ISEL [2] x y (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
- for {
- if auxIntToInt32(v.AuxInt) != 2 {
- break
- }
- x := v_0
- y := v_1
- if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
- break
- }
- v_2_0 := v_2.Args[0]
- if v_2_0.Op != OpSelect0 {
- break
- }
- v_2_0_0 := v_2_0.Args[0]
- if v_2_0_0.Op != OpPPC64ANDCCconst {
- break
- }
- n := auxIntToInt64(v_2_0_0.AuxInt)
- z := v_2_0_0.Args[0]
- v.reset(OpPPC64ISEL)
- v.AuxInt = int32ToAuxInt(2)
- v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(n)
- v1.AddArg(z)
- v0.AddArg(v1)
- v.AddArg3(x, y, v0)
- return true
- }
- // match: (ISEL [6] x y (CMPconst [0] (Select0 (ANDCCconst [n] z))))
- // result: (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
- for {
- if auxIntToInt32(v.AuxInt) != 6 {
- break
- }
- x := v_0
- y := v_1
- if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 {
- break
- }
- v_2_0 := v_2.Args[0]
- if v_2_0.Op != OpSelect0 {
- break
- }
- v_2_0_0 := v_2_0.Args[0]
- if v_2_0_0.Op != OpPPC64ANDCCconst {
- break
- }
- n := auxIntToInt64(v_2_0_0.AuxInt)
- z := v_2_0_0.Args[0]
- v.reset(OpPPC64ISEL)
- v.AuxInt = int32ToAuxInt(6)
- v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(n)
- v1.AddArg(z)
- v0.AddArg(v1)
- v.AddArg3(x, y, v0)
- return true
- }
- // match: (ISEL [6] x y (CMPWconst [0] (Select0 (ANDCCconst [n] z))))
- // result: (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
- for {
- if auxIntToInt32(v.AuxInt) != 6 {
- break
- }
- x := v_0
- y := v_1
- if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
- break
- }
- v_2_0 := v_2.Args[0]
- if v_2_0.Op != OpSelect0 {
- break
- }
- v_2_0_0 := v_2_0.Args[0]
- if v_2_0_0.Op != OpPPC64ANDCCconst {
- break
- }
- n := auxIntToInt64(v_2_0_0.AuxInt)
- z := v_2_0_0.Args[0]
- v.reset(OpPPC64ISEL)
- v.AuxInt = int32ToAuxInt(6)
- v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(n)
- v1.AddArg(z)
- v0.AddArg(v1)
- v.AddArg3(x, y, v0)
- return true
- }
// match: (ISEL [n] x y (InvertFlags bool))
// cond: n%4 == 0
// result: (ISEL [n+1] x y bool)
@@ -6637,19 +6777,15 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (MOVBZreg y:(Select0 (ANDCCconst [c] _)))
+ // match: (MOVBZreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFF
// result: y
for {
y := v_0
- if y.Op != OpSelect0 {
- break
- }
- y_0 := y.Args[0]
- if y_0.Op != OpPPC64ANDCCconst {
+ if y.Op != OpPPC64ANDconst {
break
}
- c := auxIntToInt64(y_0.AuxInt)
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0xFF) {
break
}
@@ -6747,6 +6883,40 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (MOVBZreg (SRWconst x [s]))
+ // cond: mergePPC64AndSrwi(0xFF,s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(0xFF,s)] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64AndSrwi(0xFF, s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(0xFF, s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (RLWINM [r] y))
+ // cond: mergePPC64AndRlwinm(0xFF,r) != 0
+ // result: (RLWINM [mergePPC64AndRlwinm(0xFF,r)] y)
+ for {
+ if v_0.Op != OpPPC64RLWINM {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if !(mergePPC64AndRlwinm(0xFF, r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndRlwinm(0xFF, r))
+ v.AddArg(y)
+ return true
+ }
// match: (MOVBZreg (OR <t> x (MOVWZreg y)))
// result: (MOVBZreg (OR <t> x y))
for {
@@ -6963,19 +7133,15 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool {
}
break
}
- // match: (MOVBZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x))))
+ // match: (MOVBZreg z:(ANDconst [c] (MOVBZload ptr x)))
// result: z
for {
z := v_0
- if z.Op != OpSelect0 {
+ if z.Op != OpPPC64ANDconst {
break
}
z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
- break
- }
- z_0_0 := z_0.Args[0]
- if z_0_0.Op != OpPPC64MOVBZload {
+ if z_0.Op != OpPPC64MOVBZload {
break
}
v.copyOf(z)
@@ -7066,19 +7232,15 @@ func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (MOVBreg y:(Select0 (ANDCCconst [c] _)))
+ // match: (MOVBreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0x7F
// result: y
for {
y := v_0
- if y.Op != OpSelect0 {
+ if y.Op != OpPPC64ANDconst {
break
}
- y_0 := y.Args[0]
- if y_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(y_0.AuxInt)
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0x7F) {
break
}
@@ -8484,19 +8646,15 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (MOVHZreg y:(Select0 (ANDCCconst [c] _)))
+ // match: (MOVHZreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFFFF
// result: y
for {
y := v_0
- if y.Op != OpSelect0 {
- break
- }
- y_0 := y.Args[0]
- if y_0.Op != OpPPC64ANDCCconst {
+ if y.Op != OpPPC64ANDconst {
break
}
- c := auxIntToInt64(y_0.AuxInt)
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0xFFFF) {
break
}
@@ -8592,6 +8750,23 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (MOVHZreg (RLWINM [r] y))
+ // cond: mergePPC64AndRlwinm(0xFFFF,r) != 0
+ // result: (RLWINM [mergePPC64AndRlwinm(0xFFFF,r)] y)
+ for {
+ if v_0.Op != OpPPC64RLWINM {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if !(mergePPC64AndRlwinm(0xFFFF, r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndRlwinm(0xFFFF, r))
+ v.AddArg(y)
+ return true
+ }
// match: (MOVHZreg y:(MOVHZreg _))
// result: y
for {
@@ -8778,19 +8953,15 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool {
}
break
}
- // match: (MOVHZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x))))
+ // match: (MOVHZreg z:(ANDconst [c] (MOVBZload ptr x)))
// result: z
for {
z := v_0
- if z.Op != OpSelect0 {
+ if z.Op != OpPPC64ANDconst {
break
}
z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
- break
- }
- z_0_0 := z_0.Args[0]
- if z_0_0.Op != OpPPC64MOVBZload {
+ if z_0.Op != OpPPC64MOVBZload {
break
}
v.copyOf(z)
@@ -8815,19 +8986,15 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool {
}
break
}
- // match: (MOVHZreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x))))
+ // match: (MOVHZreg z:(ANDconst [c] (MOVHZload ptr x)))
// result: z
for {
z := v_0
- if z.Op != OpSelect0 {
+ if z.Op != OpPPC64ANDconst {
break
}
z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
- break
- }
- z_0_0 := z_0.Args[0]
- if z_0_0.Op != OpPPC64MOVHZload {
+ if z_0.Op != OpPPC64MOVHZload {
break
}
v.copyOf(z)
@@ -9018,19 +9185,15 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (MOVHreg y:(Select0 (ANDCCconst [c] _)))
+ // match: (MOVHreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0x7FFF
// result: y
for {
y := v_0
- if y.Op != OpSelect0 {
- break
- }
- y_0 := y.Args[0]
- if y_0.Op != OpPPC64ANDCCconst {
+ if y.Op != OpPPC64ANDconst {
break
}
- c := auxIntToInt64(y_0.AuxInt)
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0x7FFF) {
break
}
@@ -9809,19 +9972,15 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (MOVWZreg y:(Select0 (ANDCCconst [c] _)))
+ // match: (MOVWZreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFFFFFFFF
// result: y
for {
y := v_0
- if y.Op != OpSelect0 {
+ if y.Op != OpPPC64ANDconst {
break
}
- y_0 := y.Args[0]
- if y_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(y_0.AuxInt)
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0xFFFFFFFF) {
break
}
@@ -9942,6 +10101,33 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (MOVWZreg (RLWINM [r] y))
+ // cond: mergePPC64MovwzregRlwinm(r) != 0
+ // result: (RLWINM [mergePPC64MovwzregRlwinm(r)] y)
+ for {
+ if v_0.Op != OpPPC64RLWINM {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if !(mergePPC64MovwzregRlwinm(r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64MovwzregRlwinm(r))
+ v.AddArg(y)
+ return true
+ }
+ // match: (MOVWZreg w:(SLWconst u))
+ // result: w
+ for {
+ w := v_0
+ if w.Op != OpPPC64SLWconst {
+ break
+ }
+ v.copyOf(w)
+ return true
+ }
// match: (MOVWZreg y:(MOVWZreg _))
// result: y
for {
@@ -10076,19 +10262,15 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool {
}
break
}
- // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x))))
+ // match: (MOVWZreg z:(ANDconst [c] (MOVBZload ptr x)))
// result: z
for {
z := v_0
- if z.Op != OpSelect0 {
+ if z.Op != OpPPC64ANDconst {
break
}
z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
- break
- }
- z_0_0 := z_0.Args[0]
- if z_0_0.Op != OpPPC64MOVBZload {
+ if z_0.Op != OpPPC64MOVBZload {
break
}
v.copyOf(z)
@@ -10113,37 +10295,29 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool {
}
break
}
- // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x))))
+ // match: (MOVWZreg z:(ANDconst [c] (MOVHZload ptr x)))
// result: z
for {
z := v_0
- if z.Op != OpSelect0 {
+ if z.Op != OpPPC64ANDconst {
break
}
z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
- break
- }
- z_0_0 := z_0.Args[0]
- if z_0_0.Op != OpPPC64MOVHZload {
+ if z_0.Op != OpPPC64MOVHZload {
break
}
v.copyOf(z)
return true
}
- // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVWZload ptr x))))
+ // match: (MOVWZreg z:(ANDconst [c] (MOVWZload ptr x)))
// result: z
for {
z := v_0
- if z.Op != OpSelect0 {
+ if z.Op != OpPPC64ANDconst {
break
}
z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
- break
- }
- z_0_0 := z_0.Args[0]
- if z_0_0.Op != OpPPC64MOVWZload {
+ if z_0.Op != OpPPC64MOVWZload {
break
}
v.copyOf(z)
@@ -10368,19 +10542,15 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (MOVWreg y:(Select0 (ANDCCconst [c] _)))
+ // match: (MOVWreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFFFF
// result: y
for {
y := v_0
- if y.Op != OpSelect0 {
- break
- }
- y_0 := y.Args[0]
- if y_0.Op != OpPPC64ANDCCconst {
+ if y.Op != OpPPC64ANDconst {
break
}
- c := auxIntToInt64(y_0.AuxInt)
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0xFFFF) {
break
}
@@ -11328,6 +11498,45 @@ func rewriteValuePPC64_OpPPC64ORconst(v *Value) bool {
}
return false
}
+func rewriteValuePPC64_OpPPC64RLWINM(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RLWINM [r] (MOVHZreg u))
+ // cond: mergePPC64RlwinmAnd(r,0xFFFF) != 0
+ // result: (RLWINM [mergePPC64RlwinmAnd(r,0xFFFF)] u)
+ for {
+ r := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ u := v_0.Args[0]
+ if !(mergePPC64RlwinmAnd(r, 0xFFFF) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64RlwinmAnd(r, 0xFFFF))
+ v.AddArg(u)
+ return true
+ }
+ // match: (RLWINM [r] (ANDconst [a] u))
+ // cond: mergePPC64RlwinmAnd(r,uint32(a)) != 0
+ // result: (RLWINM [mergePPC64RlwinmAnd(r,uint32(a))] u)
+ for {
+ r := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ a := auxIntToInt64(v_0.AuxInt)
+ u := v_0.Args[0]
+ if !(mergePPC64RlwinmAnd(r, uint32(a)) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64RlwinmAnd(r, uint32(a)))
+ v.AddArg(u)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64ROTL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -11393,20 +11602,16 @@ func rewriteValuePPC64_OpPPC64ROTLWconst(v *Value) bool {
}
break
}
- // match: (ROTLWconst [r] (Select0 (ANDCCconst [m] x)))
+ // match: (ROTLWconst [r] (ANDconst [m] x))
// cond: isPPC64WordRotateMask(m)
// result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
for {
r := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpSelect0 {
+ if v_0.Op != OpPPC64ANDconst {
break
}
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst {
- break
- }
- m := auxIntToInt64(v_0_0.AuxInt)
- x := v_0_0.Args[0]
+ m := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
if !(isPPC64WordRotateMask(m)) {
break
}
@@ -11560,80 +11765,19 @@ func rewriteValuePPC64_OpPPC64SETBC(v *Value) bool {
v.AddArg(bool)
return true
}
- // match: (SETBC [2] (CMPconst [0] (Select0 (ANDCCconst [1] z))))
- // result: (XORconst [1] (Select0 <typ.UInt64> (ANDCCconst [1] z )))
+ // match: (SETBC [2] (CMPconst [0] a:(ANDconst [1] _)))
+ // result: (XORconst [1] a)
for {
if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
break
}
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
- break
- }
- z := v_0_0_0.Args[0]
- v.reset(OpPPC64XORconst)
- v.AuxInt = int64ToAuxInt(1)
- v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
- v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(1)
- v1.AddArg(z)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
- // match: (SETBC [2] (CMPWconst [0] (Select0 (ANDCCconst [1] z))))
- // result: (XORconst [1] (Select0 <typ.UInt64> (ANDCCconst [1] z )))
- for {
- if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ a := v_0.Args[0]
+ if a.Op != OpPPC64ANDconst || auxIntToInt64(a.AuxInt) != 1 {
break
}
- z := v_0_0_0.Args[0]
v.reset(OpPPC64XORconst)
v.AuxInt = int64ToAuxInt(1)
- v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
- v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(1)
- v1.AddArg(z)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
- // match: (SETBC [2] (CMPWconst [0] (Select0 (ANDCCconst [n] z))))
- // result: (SETBC [2] (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
- for {
- if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
- break
- }
- n := auxIntToInt64(v_0_0_0.AuxInt)
- z := v_0_0_0.Args[0]
- v.reset(OpPPC64SETBC)
- v.AuxInt = int32ToAuxInt(2)
- v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(n)
- v1.AddArg(z)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v.AddArg(a)
return true
}
// match: (SETBC [2] (CMPconst [0] a:(AND y z)))
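
The surviving SETBC [2] rule is a boolean NOT in disguise: SETBC [2] yields 1 when the compared value is zero, so applied to a&1 it equals (a&1)^1 and the comparison disappears. A check:

package main

import "fmt"

// SETBC [2] tests "equal to zero"; on a & 1 that is the NOT of the low bit,
// which (a & 1) ^ 1 computes with no comparison at all.
func main() {
	for _, a := range []uint64{0, 1, 2, 3} {
		setbc := uint64(0)
		if a&1 == 0 {
			setbc = 1
		}
		xored := (a & 1) ^ 1
		fmt.Printf("a=%d setbc=%d xor=%d\n", a, setbc, xored)
	}
}
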
@@ -11856,76 +12000,17 @@ func rewriteValuePPC64_OpPPC64SETBCR(v *Value) bool {
v.AddArg(bool)
return true
}
- // match: (SETBCR [2] (CMPconst [0] (Select0 (ANDCCconst [1] z))))
- // result: (Select0 <typ.UInt64> (ANDCCconst [1] z ))
+ // match: (SETBCR [2] (CMPconst [0] a:(ANDconst [1] _)))
+ // result: a
for {
if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
break
}
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
- break
- }
- z := v_0_0_0.Args[0]
- v.reset(OpSelect0)
- v.Type = typ.UInt64
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(1)
- v0.AddArg(z)
- v.AddArg(v0)
- return true
- }
- // match: (SETBCR [2] (CMPWconst [0] (Select0 (ANDCCconst [1] z))))
- // result: (Select0 <typ.UInt64> (ANDCCconst [1] z ))
- for {
- if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
- break
- }
- z := v_0_0_0.Args[0]
- v.reset(OpSelect0)
- v.Type = typ.UInt64
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(1)
- v0.AddArg(z)
- v.AddArg(v0)
- return true
- }
- // match: (SETBCR [2] (CMPWconst [0] (Select0 (ANDCCconst [n] z))))
- // result: (SETBCR [2] (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
- for {
- if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
+ a := v_0.Args[0]
+ if a.Op != OpPPC64ANDconst || auxIntToInt64(a.AuxInt) != 1 {
break
}
- n := auxIntToInt64(v_0_0_0.AuxInt)
- z := v_0_0_0.Args[0]
- v.reset(OpPPC64SETBCR)
- v.AuxInt = int32ToAuxInt(2)
- v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(n)
- v1.AddArg(z)
- v0.AddArg(v1)
- v.AddArg(v0)
+ v.copyOf(a)
return true
}
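// Editor's note — a hedged illustration, not part of the generated rules: the
// SETBCR rule above recognizes that x&1 is already the 0/1 value the
// instruction would materialize from the EQ bit, so the AND result is reused
// directly. A Go-level shape that may lower this way:
func lowBitExample(x uint64) uint64 {
	if x&1 != 0 { // may become SETBCR [2] (CMPconst [0] (ANDconst [1] x))
		return 1
	}
	return 0
}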
// match: (SETBCR [2] (CMPconst [0] a:(AND y z)))
@@ -12043,6 +12128,24 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (SLDconst [s] (RLWINM [r] y))
+ // cond: mergePPC64SldiRlwinm(s,r) != 0
+ // result: (RLWINM [mergePPC64SldiRlwinm(s,r)] y)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64RLWINM {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if !(mergePPC64SldiRlwinm(s, r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64SldiRlwinm(s, r))
+ v.AddArg(y)
+ return true
+ }
// match: (SLDconst [c] z:(MOVBZreg x))
// cond: c < 8 && z.Uses == 1
// result: (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
@@ -12097,21 +12200,17 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (SLDconst [c] z:(Select0 (ANDCCconst [d] x)))
+ // match: (SLDconst [c] z:(ANDconst [d] x))
// cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))
// result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
for {
c := auxIntToInt64(v.AuxInt)
z := v_0
- if z.Op != OpSelect0 {
+ if z.Op != OpPPC64ANDconst {
break
}
- z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
- break
- }
- d := auxIntToInt64(z_0.AuxInt)
- x := z_0.Args[0]
+ d := auxIntToInt64(z.AuxInt)
+ x := z.Args[0]
if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) {
break
}
@@ -12188,6 +12287,19 @@ func rewriteValuePPC64_OpPPC64SLW(v *Value) bool {
}
func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool {
v_0 := v.Args[0]
+ // match: (SLWconst [s] (MOVWZreg w))
+ // result: (SLWconst [s] w)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ w := v_0.Args[0]
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg(w)
+ return true
+ }
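// Editor's note — illustrative sketch only: the new rule drops a
// zero-extension feeding a word shift, since SLWconst reads just the low
// 32 bits of its operand and MOVWZreg cannot change those bits. In Go terms:
func slwExample(w uint64) uint32 {
	return uint32(w) << 3 // the conversion needs no separate extend first
}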
// match: (SLWconst [c] z:(MOVBZreg x))
// cond: z.Uses == 1 && c < 8
// result: (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
@@ -12224,21 +12336,17 @@ func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (SLWconst [c] z:(Select0 (ANDCCconst [d] x)))
+ // match: (SLWconst [c] z:(ANDconst [d] x))
// cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d))
// result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
for {
c := auxIntToInt64(v.AuxInt)
z := v_0
- if z.Op != OpSelect0 {
- break
- }
- z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
+ if z.Op != OpPPC64ANDconst {
break
}
- d := auxIntToInt64(z_0.AuxInt)
- x := z_0.Args[0]
+ d := auxIntToInt64(z.AuxInt)
+ x := z.Args[0]
if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) {
break
}
@@ -12369,19 +12477,15 @@ func rewriteValuePPC64_OpPPC64SRW(v *Value) bool {
}
func rewriteValuePPC64_OpPPC64SRWconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (SRWconst (Select0 (ANDCCconst [m] x)) [s])
+ // match: (SRWconst (ANDconst [m] x) [s])
// cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0
// result: (MOVDconst [0])
for {
s := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpSelect0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst {
+ if v_0.Op != OpPPC64ANDconst {
break
}
- m := auxIntToInt64(v_0_0.AuxInt)
+ m := auxIntToInt64(v_0.AuxInt)
if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) {
break
}
@@ -12389,20 +12493,16 @@ func rewriteValuePPC64_OpPPC64SRWconst(v *Value) bool {
v.AuxInt = int64ToAuxInt(0)
return true
}
- // match: (SRWconst (Select0 (ANDCCconst [m] x)) [s])
+ // match: (SRWconst (ANDconst [m] x) [s])
// cond: mergePPC64AndSrwi(m>>uint(s),s) != 0
// result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
for {
s := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpSelect0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst {
+ if v_0.Op != OpPPC64ANDconst {
break
}
- m := auxIntToInt64(v_0_0.AuxInt)
- x := v_0_0.Args[0]
+ m := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) {
break
}
@@ -12879,7 +12979,7 @@ func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool {
return true
}
// match: (Rsh16Ux16 <t> x y)
- // result: (ISEL [2] (SRD <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
+ // result: (ISEL [2] (SRD <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF0] y)))
for {
t := v.Type
x := v_0
@@ -12892,8 +12992,9 @@ func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool {
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0xFFF0)
v4.AddArg(y)
v3.AddArg(v4)
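// Editor's note — a hedged model (ours) of the guard built above: the ISEL
// selects 0 whenever y has any bit at or above bit 4 set, i.e. y >= 16,
// implementing Go's unsigned shift semantics without a branch:
func rsh16Ux16Model(x, y uint16) uint16 {
	if y&0xFFF0 != 0 { // some high bit set => shift count >= 16
		return 0
	}
	return x >> y
}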
@@ -13023,7 +13124,7 @@ func rewriteValuePPC64_OpRsh16Ux8(v *Value) bool {
return true
}
// match: (Rsh16Ux8 <t> x y)
- // result: (ISEL [2] (SRD <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+ // result: (ISEL [2] (SRD <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F0] y)))
for {
t := v.Type
x := v_0
@@ -13036,8 +13137,9 @@ func rewriteValuePPC64_OpRsh16Ux8(v *Value) bool {
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0x00F0)
v4.AddArg(y)
v3.AddArg(v4)
@@ -13066,7 +13168,7 @@ func rewriteValuePPC64_OpRsh16x16(v *Value) bool {
return true
}
// match: (Rsh16x16 <t> x y)
- // result: (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
+ // result: (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0xFFF0] y)))
for {
t := v.Type
x := v_0
@@ -13080,8 +13182,9 @@ func rewriteValuePPC64_OpRsh16x16(v *Value) bool {
v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
v2.AuxInt = int64ToAuxInt(15)
v2.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0xFFF0)
v4.AddArg(y)
v3.AddArg(v4)
@@ -13232,7 +13335,7 @@ func rewriteValuePPC64_OpRsh16x8(v *Value) bool {
return true
}
// match: (Rsh16x8 <t> x y)
- // result: (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+ // result: (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0x00F0] y)))
for {
t := v.Type
x := v_0
@@ -13246,8 +13349,9 @@ func rewriteValuePPC64_OpRsh16x8(v *Value) bool {
v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
v2.AuxInt = int64ToAuxInt(15)
v2.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0x00F0)
v4.AddArg(y)
v3.AddArg(v4)
@@ -13274,7 +13378,7 @@ func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool {
return true
}
// match: (Rsh32Ux16 <t> x y)
- // result: (ISEL [2] (SRW <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
+ // result: (ISEL [2] (SRW <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFE0] y)))
for {
t := v.Type
x := v_0
@@ -13285,8 +13389,9 @@ func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool {
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0xFFE0)
v3.AddArg(y)
v2.AddArg(v3)
@@ -13404,7 +13509,7 @@ func rewriteValuePPC64_OpRsh32Ux8(v *Value) bool {
return true
}
// match: (Rsh32Ux8 <t> x y)
- // result: (ISEL [2] (SRW <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+ // result: (ISEL [2] (SRW <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00E0] y)))
for {
t := v.Type
x := v_0
@@ -13415,8 +13520,9 @@ func rewriteValuePPC64_OpRsh32Ux8(v *Value) bool {
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0x00E0)
v3.AddArg(y)
v2.AddArg(v3)
@@ -13443,7 +13549,7 @@ func rewriteValuePPC64_OpRsh32x16(v *Value) bool {
return true
}
// match: (Rsh32x16 <t> x y)
- // result: (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
+ // result: (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMPconst [0] (ANDconst [0xFFE0] y)))
for {
t := v.Type
x := v_0
@@ -13455,8 +13561,9 @@ func rewriteValuePPC64_OpRsh32x16(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpPPC64SRAWconst, t)
v1.AuxInt = int64ToAuxInt(31)
v1.AddArg(x)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0xFFE0)
v3.AddArg(y)
v2.AddArg(v3)
@@ -13591,7 +13698,7 @@ func rewriteValuePPC64_OpRsh32x8(v *Value) bool {
return true
}
// match: (Rsh32x8 <t> x y)
- // result: (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+ // result: (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMPconst [0] (ANDconst [0x00E0] y)))
for {
t := v.Type
x := v_0
@@ -13603,8 +13710,9 @@ func rewriteValuePPC64_OpRsh32x8(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpPPC64SRAWconst, t)
v1.AuxInt = int64ToAuxInt(31)
v1.AddArg(x)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0x00E0)
v3.AddArg(y)
v2.AddArg(v3)
@@ -13631,7 +13739,7 @@ func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool {
return true
}
// match: (Rsh64Ux16 <t> x y)
- // result: (ISEL [2] (SRD <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
+ // result: (ISEL [2] (SRD <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFC0] y)))
for {
t := v.Type
x := v_0
@@ -13642,8 +13750,9 @@ func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool {
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0xFFC0)
v3.AddArg(y)
v2.AddArg(v3)
@@ -13761,7 +13870,7 @@ func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool {
return true
}
// match: (Rsh64Ux8 <t> x y)
- // result: (ISEL [2] (SRD <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+ // result: (ISEL [2] (SRD <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00C0] y)))
for {
t := v.Type
x := v_0
@@ -13772,8 +13881,9 @@ func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool {
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0x00C0)
v3.AddArg(y)
v2.AddArg(v3)
@@ -13800,7 +13910,7 @@ func rewriteValuePPC64_OpRsh64x16(v *Value) bool {
return true
}
// match: (Rsh64x16 <t> x y)
- // result: (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
+ // result: (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (CMPconst [0] (ANDconst [0xFFC0] y)))
for {
t := v.Type
x := v_0
@@ -13812,8 +13922,9 @@ func rewriteValuePPC64_OpRsh64x16(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
v1.AuxInt = int64ToAuxInt(63)
v1.AddArg(x)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0xFFC0)
v3.AddArg(y)
v2.AddArg(v3)
@@ -13948,7 +14059,7 @@ func rewriteValuePPC64_OpRsh64x8(v *Value) bool {
return true
}
// match: (Rsh64x8 <t> x y)
- // result: (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+ // result: (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (CMPconst [0] (ANDconst [0x00C0] y)))
for {
t := v.Type
x := v_0
@@ -13960,8 +14071,9 @@ func rewriteValuePPC64_OpRsh64x8(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
v1.AuxInt = int64ToAuxInt(63)
v1.AddArg(x)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0x00C0)
v3.AddArg(y)
v2.AddArg(v3)
@@ -13990,7 +14102,7 @@ func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool {
return true
}
// match: (Rsh8Ux16 <t> x y)
- // result: (ISEL [2] (SRD <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
+ // result: (ISEL [2] (SRD <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF8] y)))
for {
t := v.Type
x := v_0
@@ -14003,8 +14115,9 @@ func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool {
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0xFFF8)
v4.AddArg(y)
v3.AddArg(v4)
@@ -14134,7 +14247,7 @@ func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool {
return true
}
// match: (Rsh8Ux8 <t> x y)
- // result: (ISEL [2] (SRD <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+ // result: (ISEL [2] (SRD <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F8] y)))
for {
t := v.Type
x := v_0
@@ -14147,8 +14260,9 @@ func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool {
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0x00F8)
v4.AddArg(y)
v3.AddArg(v4)
@@ -14177,7 +14291,7 @@ func rewriteValuePPC64_OpRsh8x16(v *Value) bool {
return true
}
// match: (Rsh8x16 <t> x y)
- // result: (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
+ // result: (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0xFFF8] y)))
for {
t := v.Type
x := v_0
@@ -14191,8 +14305,9 @@ func rewriteValuePPC64_OpRsh8x16(v *Value) bool {
v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
v2.AuxInt = int64ToAuxInt(7)
v2.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0xFFF8)
v4.AddArg(y)
v3.AddArg(v4)
@@ -14343,7 +14458,7 @@ func rewriteValuePPC64_OpRsh8x8(v *Value) bool {
return true
}
// match: (Rsh8x8 <t> x y)
- // result: (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+ // result: (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0x00F8] y)))
for {
t := v.Type
x := v_0
@@ -14357,8 +14472,9 @@ func rewriteValuePPC64_OpRsh8x8(v *Value) bool {
v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
v2.AuxInt = int64ToAuxInt(7)
v2.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0x00F8)
v4.AddArg(y)
v3.AddArg(v4)
@@ -14424,249 +14540,6 @@ func rewriteValuePPC64_OpSelect0(v *Value) bool {
v.AddArg(v0)
return true
}
- // match: (Select0 (ANDCCconst [m] (ROTLWconst [r] x)))
- // cond: isPPC64WordRotateMask(m)
- // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- m := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ROTLWconst {
- break
- }
- r := auxIntToInt64(v_0_0.AuxInt)
- x := v_0_0.Args[0]
- if !(isPPC64WordRotateMask(m)) {
- break
- }
- v.reset(OpPPC64RLWINM)
- v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32))
- v.AddArg(x)
- return true
- }
- // match: (Select0 (ANDCCconst [m] (ROTLW x r)))
- // cond: isPPC64WordRotateMask(m)
- // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- m := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ROTLW {
- break
- }
- r := v_0_0.Args[1]
- x := v_0_0.Args[0]
- if !(isPPC64WordRotateMask(m)) {
- break
- }
- v.reset(OpPPC64RLWNM)
- v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
- v.AddArg2(x, r)
- return true
- }
- // match: (Select0 (ANDCCconst [m] (SRWconst x [s])))
- // cond: mergePPC64RShiftMask(m,s,32) == 0
- // result: (MOVDconst [0])
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- m := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64SRWconst {
- break
- }
- s := auxIntToInt64(v_0_0.AuxInt)
- if !(mergePPC64RShiftMask(m, s, 32) == 0) {
- break
- }
- v.reset(OpPPC64MOVDconst)
- v.AuxInt = int64ToAuxInt(0)
- return true
- }
- // match: (Select0 (ANDCCconst [m] (SRWconst x [s])))
- // cond: mergePPC64AndSrwi(m,s) != 0
- // result: (RLWINM [mergePPC64AndSrwi(m,s)] x)
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- m := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64SRWconst {
- break
- }
- s := auxIntToInt64(v_0_0.AuxInt)
- x := v_0_0.Args[0]
- if !(mergePPC64AndSrwi(m, s) != 0) {
- break
- }
- v.reset(OpPPC64RLWINM)
- v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s))
- v.AddArg(x)
- return true
- }
- // match: (Select0 (ANDCCconst [-1] x))
- // result: x
- for {
- if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != -1 {
- break
- }
- x := v_0.Args[0]
- v.copyOf(x)
- return true
- }
- // match: (Select0 (ANDCCconst [0] _))
- // result: (MOVDconst [0])
- for {
- if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v.reset(OpPPC64MOVDconst)
- v.AuxInt = int64ToAuxInt(0)
- return true
- }
- // match: (Select0 (ANDCCconst [c] y:(MOVBZreg _)))
- // cond: c&0xFF == 0xFF
- // result: y
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0.AuxInt)
- y := v_0.Args[0]
- if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) {
- break
- }
- v.copyOf(y)
- return true
- }
- // match: (Select0 (ANDCCconst [0xFF] (MOVBreg x)))
- // result: (MOVBZreg x)
- for {
- if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFF {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64MOVBreg {
- break
- }
- x := v_0_0.Args[0]
- v.reset(OpPPC64MOVBZreg)
- v.AddArg(x)
- return true
- }
- // match: (Select0 (ANDCCconst [c] y:(MOVHZreg _)))
- // cond: c&0xFFFF == 0xFFFF
- // result: y
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0.AuxInt)
- y := v_0.Args[0]
- if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) {
- break
- }
- v.copyOf(y)
- return true
- }
- // match: (Select0 (ANDCCconst [0xFFFF] (MOVHreg x)))
- // result: (MOVHZreg x)
- for {
- if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFFFF {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64MOVHreg {
- break
- }
- x := v_0_0.Args[0]
- v.reset(OpPPC64MOVHZreg)
- v.AddArg(x)
- return true
- }
- // match: (Select0 (ANDCCconst [c] (MOVBZreg x)))
- // result: (Select0 (ANDCCconst [c&0xFF] x))
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64MOVBZreg {
- break
- }
- x := v_0_0.Args[0]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(c & 0xFF)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
- // match: (Select0 (ANDCCconst [c] (MOVHZreg x)))
- // result: (Select0 (ANDCCconst [c&0xFFFF] x))
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64MOVHZreg {
- break
- }
- x := v_0_0.Args[0]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(c & 0xFFFF)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
- // match: (Select0 (ANDCCconst [c] (MOVWZreg x)))
- // result: (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64MOVWZreg {
- break
- }
- x := v_0_0.Args[0]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
- // match: (Select0 (ANDCCconst [1] z:(SRADconst [63] x)))
- // cond: z.Uses == 1
- // result: (SRDconst [63] x)
- for {
- if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 1 {
- break
- }
- z := v_0.Args[0]
- if z.Op != OpPPC64SRADconst || auxIntToInt64(z.AuxInt) != 63 {
- break
- }
- x := z.Args[0]
- if !(z.Uses == 1) {
- break
- }
- v.reset(OpPPC64SRDconst)
- v.AuxInt = int64ToAuxInt(63)
- v.AddArg(x)
- return true
- }
return false
}
func rewriteValuePPC64_OpSelect1(v *Value) bool {
@@ -14771,15 +14644,6 @@ func rewriteValuePPC64_OpSelect1(v *Value) bool {
v.copyOf(x)
return true
}
- // match: (Select1 (ANDCCconst [0] _))
- // result: (FlagEQ)
- for {
- if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v.reset(OpPPC64FlagEQ)
- return true
- }
return false
}
func rewriteValuePPC64_OpSelectN(v *Value) bool {
@@ -15406,46 +15270,6 @@ func rewriteBlockPPC64(b *Block) bool {
b.resetWithControl(BlockPPC64EQ, cmp)
return true
}
- // match: (EQ (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (EQ (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPconst {
- v_0 := b.Controls[0]
- if auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64EQ, v0)
- return true
- }
- // match: (EQ (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (EQ (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
- v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64EQ, v0)
- return true
- }
// match: (EQ (CMPconst [0] z:(AND x y)) yes no)
// cond: z.Uses == 1
// result: (EQ (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
@@ -15564,46 +15388,6 @@ func rewriteBlockPPC64(b *Block) bool {
b.resetWithControl(BlockPPC64LE, cmp)
return true
}
- // match: (GE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (GE (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPconst {
- v_0 := b.Controls[0]
- if auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64GE, v0)
- return true
- }
- // match: (GE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (GE (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
- v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64GE, v0)
- return true
- }
// match: (GE (CMPconst [0] z:(AND x y)) yes no)
// cond: z.Uses == 1
// result: (GE (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
@@ -15723,46 +15507,6 @@ func rewriteBlockPPC64(b *Block) bool {
b.resetWithControl(BlockPPC64LT, cmp)
return true
}
- // match: (GT (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (GT (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPconst {
- v_0 := b.Controls[0]
- if auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64GT, v0)
- return true
- }
- // match: (GT (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (GT (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
- v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64GT, v0)
- return true
- }
// match: (GT (CMPconst [0] z:(AND x y)) yes no)
// cond: z.Uses == 1
// result: (GT (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
@@ -15935,16 +15679,14 @@ func rewriteBlockPPC64(b *Block) bool {
return true
}
// match: (If cond yes no)
- // result: (NE (CMPWconst [0] (Select0 <typ.UInt32> (ANDCCconst [1] cond))) yes no)
+ // result: (NE (CMPconst [0] (ANDconst [1] cond)) yes no)
for {
cond := b.Controls[0]
- v0 := b.NewValue0(cond.Pos, OpPPC64CMPWconst, types.TypeFlags)
- v0.AuxInt = int32ToAuxInt(0)
- v1 := b.NewValue0(cond.Pos, OpSelect0, typ.UInt32)
- v2 := b.NewValue0(cond.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v2.AuxInt = int64ToAuxInt(1)
- v2.AddArg(cond)
- v1.AddArg(v2)
+ v0 := b.NewValue0(cond.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(cond.Pos, OpPPC64ANDconst, typ.Int)
+ v1.AuxInt = int64ToAuxInt(1)
+ v1.AddArg(cond)
v0.AddArg(v1)
b.resetWithControl(BlockPPC64NE, v0)
return true
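// Editor's note — illustrative only: boolean controls are now normalized
// through a doubleword compare of the low bit, so a plain conditional like
// the following may produce the NE/CMPconst/ANDconst shape above:
func branchExample(cond bool) int {
	if cond {
		return 1
	}
	return 0
}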
@@ -15977,46 +15719,6 @@ func rewriteBlockPPC64(b *Block) bool {
b.resetWithControl(BlockPPC64GE, cmp)
return true
}
- // match: (LE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (LE (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPconst {
- v_0 := b.Controls[0]
- if auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64LE, v0)
- return true
- }
- // match: (LE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (LE (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
- v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64LE, v0)
- return true
- }
// match: (LE (CMPconst [0] z:(AND x y)) yes no)
// cond: z.Uses == 1
// result: (LE (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
@@ -16136,46 +15838,6 @@ func rewriteBlockPPC64(b *Block) bool {
b.resetWithControl(BlockPPC64GT, cmp)
return true
}
- // match: (LT (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (LT (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPconst {
- v_0 := b.Controls[0]
- if auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64LT, v0)
- return true
- }
- // match: (LT (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (LT (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
- v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64LT, v0)
- return true
- }
// match: (LT (CMPconst [0] z:(AND x y)) yes no)
// cond: z.Uses == 1
// result: (LT (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
@@ -16267,233 +15929,193 @@ func rewriteBlockPPC64(b *Block) bool {
break
}
case BlockPPC64NE:
- // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (Equal cc)))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (Equal cc))) yes no)
// result: (EQ cc yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
- break
- }
- v_0_0_0_0 := v_0_0_0.Args[0]
- if v_0_0_0_0.Op != OpPPC64Equal {
+ if v_0_0_0.Op != OpPPC64Equal {
break
}
- cc := v_0_0_0_0.Args[0]
+ cc := v_0_0_0.Args[0]
b.resetWithControl(BlockPPC64EQ, cc)
return true
}
- // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (NotEqual cc)))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (NotEqual cc))) yes no)
// result: (NE cc yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
- break
- }
- v_0_0_0_0 := v_0_0_0.Args[0]
- if v_0_0_0_0.Op != OpPPC64NotEqual {
+ if v_0_0_0.Op != OpPPC64NotEqual {
break
}
- cc := v_0_0_0_0.Args[0]
+ cc := v_0_0_0.Args[0]
b.resetWithControl(BlockPPC64NE, cc)
return true
}
- // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (LessThan cc)))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (LessThan cc))) yes no)
// result: (LT cc yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
- break
- }
- v_0_0_0_0 := v_0_0_0.Args[0]
- if v_0_0_0_0.Op != OpPPC64LessThan {
+ if v_0_0_0.Op != OpPPC64LessThan {
break
}
- cc := v_0_0_0_0.Args[0]
+ cc := v_0_0_0.Args[0]
b.resetWithControl(BlockPPC64LT, cc)
return true
}
- // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (LessEqual cc)))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (LessEqual cc))) yes no)
// result: (LE cc yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
- break
- }
- v_0_0_0_0 := v_0_0_0.Args[0]
- if v_0_0_0_0.Op != OpPPC64LessEqual {
+ if v_0_0_0.Op != OpPPC64LessEqual {
break
}
- cc := v_0_0_0_0.Args[0]
+ cc := v_0_0_0.Args[0]
b.resetWithControl(BlockPPC64LE, cc)
return true
}
- // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (GreaterThan cc)))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (GreaterThan cc))) yes no)
// result: (GT cc yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
- break
- }
- v_0_0_0_0 := v_0_0_0.Args[0]
- if v_0_0_0_0.Op != OpPPC64GreaterThan {
+ if v_0_0_0.Op != OpPPC64GreaterThan {
break
}
- cc := v_0_0_0_0.Args[0]
+ cc := v_0_0_0.Args[0]
b.resetWithControl(BlockPPC64GT, cc)
return true
}
- // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (GreaterEqual cc)))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (GreaterEqual cc))) yes no)
// result: (GE cc yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
- break
- }
- v_0_0_0_0 := v_0_0_0.Args[0]
- if v_0_0_0_0.Op != OpPPC64GreaterEqual {
+ if v_0_0_0.Op != OpPPC64GreaterEqual {
break
}
- cc := v_0_0_0_0.Args[0]
+ cc := v_0_0_0.Args[0]
b.resetWithControl(BlockPPC64GE, cc)
return true
}
- // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FLessThan cc)))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (FLessThan cc))) yes no)
// result: (FLT cc yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ if v_0_0_0.Op != OpPPC64FLessThan {
break
}
- v_0_0_0_0 := v_0_0_0.Args[0]
- if v_0_0_0_0.Op != OpPPC64FLessThan {
- break
- }
- cc := v_0_0_0_0.Args[0]
+ cc := v_0_0_0.Args[0]
b.resetWithControl(BlockPPC64FLT, cc)
return true
}
- // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FLessEqual cc)))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (FLessEqual cc))) yes no)
// result: (FLE cc yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ if v_0_0_0.Op != OpPPC64FLessEqual {
break
}
- v_0_0_0_0 := v_0_0_0.Args[0]
- if v_0_0_0_0.Op != OpPPC64FLessEqual {
- break
- }
- cc := v_0_0_0_0.Args[0]
+ cc := v_0_0_0.Args[0]
b.resetWithControl(BlockPPC64FLE, cc)
return true
}
- // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FGreaterThan cc)))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (FGreaterThan cc))) yes no)
// result: (FGT cc yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ if v_0_0_0.Op != OpPPC64FGreaterThan {
break
}
- v_0_0_0_0 := v_0_0_0.Args[0]
- if v_0_0_0_0.Op != OpPPC64FGreaterThan {
- break
- }
- cc := v_0_0_0_0.Args[0]
+ cc := v_0_0_0.Args[0]
b.resetWithControl(BlockPPC64FGT, cc)
return true
}
- // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FGreaterEqual cc)))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (FGreaterEqual cc))) yes no)
// result: (FGE cc yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ if v_0_0_0.Op != OpPPC64FGreaterEqual {
break
}
- v_0_0_0_0 := v_0_0_0.Args[0]
- if v_0_0_0_0.Op != OpPPC64FGreaterEqual {
- break
- }
- cc := v_0_0_0_0.Args[0]
+ cc := v_0_0_0.Args[0]
b.resetWithControl(BlockPPC64FGE, cc)
return true
}
@@ -16524,46 +16146,6 @@ func rewriteBlockPPC64(b *Block) bool {
b.resetWithControl(BlockPPC64NE, cmp)
return true
}
- // match: (NE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (NE (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPconst {
- v_0 := b.Controls[0]
- if auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64NE, v0)
- return true
- }
- // match: (NE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (NE (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
- v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64NE, v0)
- return true
- }
// match: (NE (CMPconst [0] z:(AND x y)) yes no)
// cond: z.Uses == 1
// result: (NE (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64latelower.go b/src/cmd/compile/internal/ssa/rewritePPC64latelower.go
index 771dd6aaa2..23d8601fb4 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64latelower.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64latelower.go
@@ -3,7 +3,6 @@
package ssa
import "internal/buildcfg"
-import "cmd/compile/internal/types"
func rewriteValuePPC64latelower(v *Value) bool {
switch v.Op {
@@ -11,18 +10,20 @@ func rewriteValuePPC64latelower(v *Value) bool {
return rewriteValuePPC64latelower_OpPPC64ADD(v)
case OpPPC64AND:
return rewriteValuePPC64latelower_OpPPC64AND(v)
+ case OpPPC64ANDconst:
+ return rewriteValuePPC64latelower_OpPPC64ANDconst(v)
case OpPPC64CMPconst:
return rewriteValuePPC64latelower_OpPPC64CMPconst(v)
case OpPPC64ISEL:
return rewriteValuePPC64latelower_OpPPC64ISEL(v)
case OpPPC64RLDICL:
return rewriteValuePPC64latelower_OpPPC64RLDICL(v)
+ case OpPPC64RLDICLCC:
+ return rewriteValuePPC64latelower_OpPPC64RLDICLCC(v)
case OpPPC64SETBC:
return rewriteValuePPC64latelower_OpPPC64SETBC(v)
case OpPPC64SETBCR:
return rewriteValuePPC64latelower_OpPPC64SETBCR(v)
- case OpSelect0:
- return rewriteValuePPC64latelower_OpSelect0(v)
}
return false
}
@@ -54,11 +55,9 @@ func rewriteValuePPC64latelower_OpPPC64ADD(v *Value) bool {
func rewriteValuePPC64latelower_OpPPC64AND(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
// match: (AND <t> x:(MOVDconst [m]) n)
// cond: t.Size() <= 2
- // result: (Select0 (ANDCCconst [int64(int16(m))] n))
+ // result: (ANDconst [int64(int16(m))] n)
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -71,11 +70,9 @@ func rewriteValuePPC64latelower_OpPPC64AND(v *Value) bool {
if !(t.Size() <= 2) {
continue
}
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(int64(int16(m)))
- v0.AddArg(n)
- v.AddArg(v0)
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(m)))
+ v.AddArg(n)
return true
}
break
@@ -146,6 +143,24 @@ func rewriteValuePPC64latelower_OpPPC64AND(v *Value) bool {
}
return false
}
+func rewriteValuePPC64latelower_OpPPC64ANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [m] x)
+ // cond: isPPC64ValidShiftMask(m)
+ // result: (RLDICL [encodePPC64RotateMask(0,m,64)] x)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if !(isPPC64ValidShiftMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLDICL)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
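// Editor's note — a minimal model (helper name ours) of the mask shape
// isPPC64ValidShiftMask accepts: a nonzero run of 1 bits starting at bit 0,
// which RLDICL realizes as a rotate of 0 plus a clear of the high bits.
func isLowBitsMask(m uint64) bool {
	return m != 0 && m&(m+1) == 0 // 0b000...000111...111
}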
func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool {
v_0 := v.Args[0]
// match: (CMPconst [0] z:(ADD x y))
@@ -319,6 +334,25 @@ func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool {
v.AddArg(convertPPC64OpToOpCC(z))
return true
}
+ // match: (CMPconst [0] z:(RLDICL x))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64RLDICL {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
// match: (CMPconst [0] z:(ADDconst [c] x))
// cond: int64(int16(c)) == c && v.Block == z.Block
// result: (CMPconst [0] convertPPC64OpToOpCC(z))
@@ -339,6 +373,26 @@ func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool {
v.AddArg(convertPPC64OpToOpCC(z))
return true
}
+ // match: (CMPconst [0] z:(ANDconst [c] x))
+ // cond: int64(uint16(c)) == c && v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if !(int64(uint16(c)) == c && v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
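// Editor's note — a sketch of the guard's intent (helper name ours): andi.
// carries a 16-bit immediate zero-extended to 64 bits, so only constants of
// that shape can switch to the record (CC) form directly.
func fitsANDIImmediate(c int64) bool {
	return int64(uint16(c)) == c
}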
// match: (CMPconst <t> [0] (Select0 z:(ADDCC x y)))
// result: (Select1 <t> z)
for {
@@ -467,6 +521,22 @@ func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool {
v.AddArg(z)
return true
}
+ // match: (CMPconst <t> [0] (Select0 z:(ANDCCconst y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
// match: (CMPconst <t> [0] (Select0 z:(NEGCC y)))
// result: (Select1 <t> z)
for {
@@ -499,6 +569,22 @@ func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool {
v.AddArg(z)
return true
}
+ // match: (CMPconst <t> [0] (Select0 z:(RLDICLCC y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64RLDICLCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
return false
}
func rewriteValuePPC64latelower_OpPPC64ISEL(v *Value) bool {
@@ -558,6 +644,24 @@ func rewriteValuePPC64latelower_OpPPC64RLDICL(v *Value) bool {
}
return false
}
+func rewriteValuePPC64latelower_OpPPC64RLDICLCC(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RLDICLCC [a] x)
+ // cond: convertPPC64RldiclAndccconst(a) != 0
+ // result: (ANDCCconst [convertPPC64RldiclAndccconst(a)] x)
+ for {
+ a := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if !(convertPPC64RldiclAndccconst(a) != 0) {
+ break
+ }
+ v.reset(OpPPC64ANDCCconst)
+ v.AuxInt = int64ToAuxInt(convertPPC64RldiclAndccconst(a))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64latelower_OpPPC64SETBC(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
@@ -678,28 +782,6 @@ func rewriteValuePPC64latelower_OpPPC64SETBCR(v *Value) bool {
}
return false
}
-func rewriteValuePPC64latelower_OpSelect0(v *Value) bool {
- v_0 := v.Args[0]
- // match: (Select0 z:(ANDCCconst [m] x))
- // cond: z.Uses == 1 && isPPC64ValidShiftMask(m)
- // result: (RLDICL [encodePPC64RotateMask(0,m,64)] x)
- for {
- z := v_0
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- m := auxIntToInt64(z.AuxInt)
- x := z.Args[0]
- if !(z.Uses == 1 && isPPC64ValidShiftMask(m)) {
- break
- }
- v.reset(OpPPC64RLDICL)
- v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64))
- v.AddArg(x)
- return true
- }
- return false
-}
func rewriteBlockPPC64latelower(b *Block) bool {
return false
}
diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go
index a9f4f102af..d08059c9d5 100644
--- a/src/cmd/compile/internal/ssa/value.go
+++ b/src/cmd/compile/internal/ssa/value.go
@@ -139,7 +139,7 @@ func (v *Value) AuxValAndOff() ValAndOff {
func (v *Value) AuxArm64BitField() arm64BitField {
if opcodeTable[v.Op].auxType != auxARM64BitField {
- v.Fatalf("op %s doesn't have a ValAndOff aux field", v.Op)
+ v.Fatalf("op %s doesn't have a ARM64BitField aux field", v.Op)
}
return arm64BitField(v.AuxInt)
}
diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go
index 5c4a8aff69..d5ae3b1793 100644
--- a/src/cmd/compile/internal/ssagen/abi.go
+++ b/src/cmd/compile/internal/ssagen/abi.go
@@ -148,6 +148,11 @@ func (s *SymABIs) GenABIWrappers() {
// offsets to dispatch arguments, which currently use ABI0
// frame layout. Pin it to ABI0.
fn.ABI = obj.ABI0
+ // Propagate linkname attribute, which was set on the ABIInternal
+ // symbol.
+ if sym.Linksym().IsLinkname() {
+ sym.LinksymABI(fn.ABI).Set(obj.AttrLinkname, true)
+ }
}
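// Editor's note — an illustrative case the new lines handle (names are
// examples only; a real use needs `import _ "unsafe"`): a body-less
// declaration bound to a runtime symbol via linkname. The attribute set on
// the ABIInternal symbol must be mirrored onto the wrapper's ABI0 symbol,
// or the linkname reference would fail to link.
//
//go:linkname fastrand runtime.fastrand
func fastrand() uint32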
// If cgo-exported, add the definition ABI to the cgo
diff --git a/src/cmd/compile/internal/ssagen/nowb.go b/src/cmd/compile/internal/ssagen/nowb.go
index b8756eea61..8e776695e3 100644
--- a/src/cmd/compile/internal/ssagen/nowb.go
+++ b/src/cmd/compile/internal/ssagen/nowb.go
@@ -174,6 +174,14 @@ func (c *nowritebarrierrecChecker) check() {
fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Nname)
call = funcs[call.target]
}
+ // Seeing this error in a failed CI run? It indicates that
+ // a function in the runtime package marked nowritebarrierrec
+ // (the outermost stack element) was found, by a static
+ // reachability analysis over the fully lowered optimized code,
+ // to call a function (fn) that involves a write barrier.
+ //
+ // Even if the call path is infeasible,
+ // you will need to reorganize the code to avoid it.
base.ErrorfAt(fn.WBPos, 0, "write barrier prohibited by caller; %v%s", fn.Nname, err.String())
continue
}
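// Editor's note — a hypothetical runtime-package shape that would trip the
// error above (types and names invented for illustration):
type siginfo struct{ code int }

var lastSignal *siginfo

//go:nowritebarrierrec
func sighandler(info *siginfo) { record(info) }

func record(info *siginfo) {
	lastSignal = info // global pointer store => write barrier => diagnosed
}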
diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go
index 5b57c8a825..e666c22a7d 100644
--- a/src/cmd/compile/internal/ssagen/pgen.go
+++ b/src/cmd/compile/internal/ssagen/pgen.go
@@ -12,9 +12,11 @@ import (
"sync"
"cmd/compile/internal/base"
+ "cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
"cmd/compile/internal/objw"
+ "cmd/compile/internal/pgoir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
@@ -296,8 +298,8 @@ const maxStackSize = 1 << 30
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
-func Compile(fn *ir.Func, worker int) {
- f := buildssa(fn, worker)
+func Compile(fn *ir.Func, worker int, profile *pgoir.Profile) {
+ f := buildssa(fn, worker, inline.IsPgoHotFunc(fn, profile) || inline.HasPgoHotInline(fn))
// Note: check arg size to fix issue 25507.
if f.Frontend().(*ssafn).stksize >= maxStackSize || f.OwnAux.ArgWidth() >= maxStackSize {
largeStackFramesMu.Lock()
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 9e384fe016..0d2693ea33 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -291,7 +291,7 @@ func (s *state) emitOpenDeferInfo() {
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
-func buildssa(fn *ir.Func, worker int) *ssa.Func {
+func buildssa(fn *ir.Func, worker int, isPgoHot bool) *ssa.Func {
name := ir.FuncName(fn)
abiSelf := abiForFunc(fn, ssaConfig.ABI0, ssaConfig.ABI1)
@@ -373,6 +373,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
// Allocate starting block
s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
s.f.Entry.Pos = fn.Pos()
+ s.f.IsPgoHot = isPgoHot
if printssa {
ssaDF := ssaDumpFile
@@ -514,6 +515,21 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
// Populate closure variables.
if fn.Needctxt() {
clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
+ if fn.RangeParent != nil {
+ // For a range body closure, keep its closure pointer live on the
+ // stack with a special name, so the debugger can look for it and
+ // find the parent frame.
+ sym := &types.Sym{Name: ".closureptr", Pkg: types.LocalPkg}
+ cloSlot := s.curfn.NewLocal(src.NoXPos, sym, s.f.Config.Types.BytePtr)
+ cloSlot.SetUsed(true)
+ cloSlot.SetEsc(ir.EscNever)
+ cloSlot.SetAddrtaken(true)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, cloSlot, s.mem(), false)
+ addr := s.addr(cloSlot)
+ s.store(s.f.Config.Types.BytePtr, addr, clo)
+ // Keep it from being dead-store eliminated.
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, cloSlot, s.mem(), false)
+ }
csiter := typecheck.NewClosureStructIter(fn.ClosureVars)
for {
n, typ, offset := csiter.Next()
@@ -1460,7 +1476,11 @@ func (s *state) stmt(n ir.Node) {
s.callResult(n, callNormal)
if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.ONAME && n.Fun.(*ir.Name).Class == ir.PFUNC {
if fn := n.Fun.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
- n.Fun.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap" || fn == "panicunsafeslicelen" || fn == "panicunsafeslicenilptr" || fn == "panicunsafestringlen" || fn == "panicunsafestringnilptr") {
+ n.Fun.Sym().Pkg == ir.Pkgs.Runtime &&
+ (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" ||
+ fn == "panicmakeslicelen" || fn == "panicmakeslicecap" || fn == "panicunsafeslicelen" ||
+ fn == "panicunsafeslicenilptr" || fn == "panicunsafestringlen" || fn == "panicunsafestringnilptr" ||
+ fn == "panicrangestate") {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
@@ -4391,7 +4411,7 @@ func InitTables() {
type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind)
- makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder {
+ makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ types.Kind, emit atomicOpEmitter) intrinsicBuilder {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if buildcfg.GOARM64.LSE {
@@ -4423,24 +4443,24 @@ func InitTables() {
// Merge results.
s.startBlock(bEnd)
}
- if rtyp == types.TNIL {
+ if typ == types.TNIL {
return nil
} else {
- return s.variable(n, types.Types[rtyp])
+ return s.variable(n, types.Types[typ])
}
}
}
- atomicXchgXaddEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
+ atomicEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("internal/runtime/atomic", "Xchg",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, atomicEmitterARM64),
sys.ARM64)
addF("internal/runtime/atomic", "Xchg64",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, atomicEmitterARM64),
sys.ARM64)
addF("internal/runtime/atomic", "Xadd",
@@ -4459,10 +4479,10 @@ func InitTables() {
sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("internal/runtime/atomic", "Xadd",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, atomicEmitterARM64),
sys.ARM64)
addF("internal/runtime/atomic", "Xadd64",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, atomicEmitterARM64),
sys.ARM64)
addF("internal/runtime/atomic", "Cas",
@@ -4494,10 +4514,10 @@ func InitTables() {
}
addF("internal/runtime/atomic", "Cas",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("internal/runtime/atomic", "Cas64",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("internal/runtime/atomic", "And8",
@@ -4525,21 +4545,29 @@ func InitTables() {
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
- atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
- s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
- }
-
addF("internal/runtime/atomic", "And8",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TUINT8, atomicEmitterARM64),
+ sys.ARM64)
+ addF("internal/runtime/atomic", "Or8",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TUINT8, atomicEmitterARM64),
+ sys.ARM64)
+ addF("internal/runtime/atomic", "And64",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd64, ssa.OpAtomicAnd64Variant, types.TUINT64, atomicEmitterARM64),
+ sys.ARM64)
+ addF("internal/runtime/atomic", "And32",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TUINT32, atomicEmitterARM64),
sys.ARM64)
addF("internal/runtime/atomic", "And",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TUINT32, atomicEmitterARM64),
sys.ARM64)
- addF("internal/runtime/atomic", "Or8",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+ addF("internal/runtime/atomic", "Or64",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr64, ssa.OpAtomicOr64Variant, types.TUINT64, atomicEmitterARM64),
+ sys.ARM64)
+ addF("internal/runtime/atomic", "Or32",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TUINT32, atomicEmitterARM64),
sys.ARM64)
addF("internal/runtime/atomic", "Or",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TUINT32, atomicEmitterARM64),
sys.ARM64)
// Aliases for atomic load operations
@@ -4589,6 +4617,10 @@ func InitTables() {
alias("internal/runtime/atomic", "Casp1", "internal/runtime/atomic", "Cas64", p8...)
alias("internal/runtime/atomic", "CasRel", "internal/runtime/atomic", "Cas", lwatomics...)
+ // Aliases for atomic And/Or operations
+ alias("internal/runtime/atomic", "Anduintptr", "internal/runtime/atomic", "And64", sys.ArchARM64)
+ alias("internal/runtime/atomic", "Oruintptr", "internal/runtime/atomic", "Or64", sys.ArchARM64)
+
/******** math ********/
addF("math", "sqrt",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
@@ -5065,6 +5097,17 @@ func InitTables() {
alias("sync/atomic", "AddUintptr", "internal/runtime/atomic", "Xadd", p4...)
alias("sync/atomic", "AddUintptr", "internal/runtime/atomic", "Xadd64", p8...)
+ alias("sync/atomic", "AndInt32", "internal/runtime/atomic", "And32", sys.ArchARM64)
+ alias("sync/atomic", "AndUint32", "internal/runtime/atomic", "And32", sys.ArchARM64)
+ alias("sync/atomic", "AndInt64", "internal/runtime/atomic", "And64", sys.ArchARM64)
+ alias("sync/atomic", "AndUint64", "internal/runtime/atomic", "And64", sys.ArchARM64)
+ alias("sync/atomic", "AndUintptr", "internal/runtime/atomic", "And64", sys.ArchARM64)
+ alias("sync/atomic", "OrInt32", "internal/runtime/atomic", "Or32", sys.ArchARM64)
+ alias("sync/atomic", "OrUint32", "internal/runtime/atomic", "Or32", sys.ArchARM64)
+ alias("sync/atomic", "OrInt64", "internal/runtime/atomic", "Or64", sys.ArchARM64)
+ alias("sync/atomic", "OrUint64", "internal/runtime/atomic", "Or64", sys.ArchARM64)
+ alias("sync/atomic", "OrUintptr", "internal/runtime/atomic", "Or64", sys.ArchARM64)
+
/******** math/big ********/
alias("math/big", "mulWW", "math/bits", "Mul64", p8...)
}
@@ -7302,12 +7345,47 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
var argLiveIdx int = -1 // argument liveness info index
+ // These control cache line alignment; if the required portion of
+ // a cache line is not available, then pad to obtain cache line
+ // alignment. This is not implemented on all architectures, and
+ // may not be useful on all of them.
+ var hotAlign, hotRequire int64
+
+ if base.Debug.AlignHot > 0 {
+ switch base.Ctxt.Arch.Name {
+ // Enable this on a case-by-case basis, with benchmarking.
+ // Currently shown:
+ //   good for amd64
+ //   not helpful for Apple Silicon
+ //
+ case "amd64", "386":
+ // Aligning to 64 when 31 or fewer bytes remain in a cache line
+ // benchmarks a little better than always aligning, and also
+ // adds slightly less to the (PGO-compiled) binary size.
+ hotAlign = 64
+ hotRequire = 31
+ }
+ }
+
// Emit basic blocks
for i, b := range f.Blocks {
- s.bstart[b.ID] = s.pp.Next
+
s.lineRunStart = nil
s.SetPos(s.pp.Pos.WithNotStmt()) // It needs a non-empty Pos, but cannot be a statement boundary (yet).
+ if hotAlign > 0 && b.Hotness&ssa.HotPgoInitial == ssa.HotPgoInitial {
+ // So far this has only been shown profitable for PGO-hot loop headers.
+ // The Hotness value allows distinctions between initial blocks that are "hot" or not, and "flow-in" or not.
+ // Currently only the initial blocks of loops are tagged in this way;
+ // there are no blocks tagged "pgo-hot" that are not also tagged "initial".
+ // TODO more heuristics, more architectures.
+ p := s.pp.Prog(obj.APCALIGNMAX)
+ p.From.SetConst(hotAlign)
+ p.To.SetConst(hotRequire)
+ }
+
+ s.bstart[b.ID] = s.pp.Next
+
if idx, ok := argLiveBlockMap[b.ID]; ok && idx != argLiveIdx {
argLiveIdx = idx
p := s.pp.Prog(obj.APCDATA)
@@ -7466,7 +7544,8 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
// going to emit anyway, and use those instructions instead of the
// inline marks.
for p := s.pp.Text; p != nil; p = p.Link {
- if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm {
+ if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT ||
+ p.As == obj.APCALIGN || p.As == obj.APCALIGNMAX || Arch.LinkArch.Family == sys.Wasm {
// Don't use 0-sized instructions as inline marks, because we need
// to identify inline mark instructions by pc offset.
// (Some of these instructions are sometimes zero-sized, sometimes not.
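To make the new pseudo-op concrete: a minimal sketch of the padding decision APCALIGNMAX requests, assuming its two operands mean "align to hotAlign bytes, but only when at most hotRequire bytes of padding are needed" (the function and names below are illustrative, not the assembler's implementation):

    package main

    import "fmt"

    // alignMaxPad returns how many padding bytes to emit before a block:
    // pad to an align-byte boundary only when the gap to that boundary is
    // at most require bytes, i.e. when too little of the cache line
    // remains to be worth using.
    func alignMaxPad(offset, align, require int64) int64 {
        pad := (align - offset%align) % align
        if pad > 0 && pad <= require {
            return pad // emit pad bytes of NOPs to reach the boundary
        }
        return 0 // enough of the cache line remains; don't pad
    }

    func main() {
        fmt.Println(alignMaxPad(40, 64, 31)) // 24: push to the next 64-byte boundary
        fmt.Println(alignMaxPad(16, 64, 31)) // 0: 48 bytes remain, no padding
    }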
diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go
index f75f86587a..66570fe92a 100644
--- a/src/cmd/compile/internal/syntax/parser.go
+++ b/src/cmd/compile/internal/syntax/parser.go
@@ -268,7 +268,7 @@ func (p *parser) syntaxErrorAt(pos Pos, msg string) {
var tok string
switch p.tok {
case _Name:
- tok = "`" + p.lit + "'"
+ tok = "name " + p.lit
case _Semi:
tok = p.lit
case _Literal:
diff --git a/src/cmd/compile/internal/syntax/pos.go b/src/cmd/compile/internal/syntax/pos.go
index dd25d4f249..5ea9f5304a 100644
--- a/src/cmd/compile/internal/syntax/pos.go
+++ b/src/cmd/compile/internal/syntax/pos.go
@@ -34,6 +34,18 @@ func (pos Pos) Base() *PosBase { return pos.base }
func (pos Pos) Line() uint { return uint(pos.line) }
func (pos Pos) Col() uint { return uint(pos.col) }
+// FileBase returns the PosBase of the file containing pos,
+// skipping over intermediate PosBases from //line directives.
+// The result is nil if pos doesn't have a file base.
+func (pos Pos) FileBase() *PosBase {
+ b := pos.base
+ for b != nil && b != b.pos.base {
+ b = b.pos.base
+ }
+ // b == nil || b == b.pos.base
+ return b
+}
+
func (pos Pos) RelFilename() string { return pos.base.Filename() }
func (pos Pos) RelLine() uint {
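FileBase chases the chain of PosBases that //line directives introduce until it reaches a base that is the base of its own position, which is how file bases are represented (hence the commented invariant b == nil || b == b.pos.base). The idiom in isolation, on a stand-in type rather than the real syntax.PosBase:

    package main

    import "fmt"

    // posBase stands in for syntax.PosBase: a file base is its own
    // parent; a //line-directive base points back at the base of the
    // position where the directive appeared.
    type posBase struct {
        filename string
        parent   *posBase
    }

    // fileBase mirrors Pos.FileBase: follow parents until reaching a
    // self-parented (file) base; nil stays nil.
    func fileBase(b *posBase) *posBase {
        for b != nil && b != b.parent {
            b = b.parent
        }
        return b
    }

    func main() {
        f := &posBase{filename: "f.go"}
        f.parent = f                                 // file base: self-parented
        l := &posBase{filename: "gen.go", parent: f} // introduced by a //line directive in f.go
        fmt.Println(fileBase(l).filename)            // f.go
    }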
diff --git a/src/cmd/compile/internal/syntax/testdata/issue20789.go b/src/cmd/compile/internal/syntax/testdata/issue20789.go
index 8a6db6d2ee..846da83694 100644
--- a/src/cmd/compile/internal/syntax/testdata/issue20789.go
+++ b/src/cmd/compile/internal/syntax/testdata/issue20789.go
@@ -6,4 +6,4 @@
// Line 9 must end in EOF for this test (no newline).
package e
-func([<-chan<-[func /* ERROR unexpected `u' */ u){go \ No newline at end of file
+func([<-chan<-[func /* ERROR unexpected name u */ u){go \ No newline at end of file
diff --git a/src/cmd/compile/internal/syntax/testdata/issue47704.go b/src/cmd/compile/internal/syntax/testdata/issue47704.go
index aab3790560..e490130db0 100644
--- a/src/cmd/compile/internal/syntax/testdata/issue47704.go
+++ b/src/cmd/compile/internal/syntax/testdata/issue47704.go
@@ -7,7 +7,7 @@ package p
func _() {
_ = m[] // ERROR expected operand
_ = m[x,]
- _ = m[x /* ERROR unexpected `a' */ a b c d]
+ _ = m[x /* ERROR unexpected name a */ a b c d]
}
// test case from the issue
diff --git a/src/cmd/compile/internal/syntax/testdata/issue49205.go b/src/cmd/compile/internal/syntax/testdata/issue49205.go
index 9b6c769703..891a6eeb83 100644
--- a/src/cmd/compile/internal/syntax/testdata/issue49205.go
+++ b/src/cmd/compile/internal/syntax/testdata/issue49205.go
@@ -7,7 +7,7 @@ package p
// test case from issue
type _ interface{
- m /* ERROR unexpected `int' in interface type; possibly missing semicolon or newline or } */ int
+ m /* ERROR unexpected name int in interface type; possibly missing semicolon or newline or } */ int
}
// other cases where the fix for this issue affects the error message
@@ -16,12 +16,12 @@ const (
x int = 10 /* ERROR unexpected literal "foo" in grouped declaration; possibly missing semicolon or newline or \) */ "foo"
)
-var _ = []int{1, 2, 3 /* ERROR unexpected `int' in composite literal; possibly missing comma or } */ int }
+var _ = []int{1, 2, 3 /* ERROR unexpected name int in composite literal; possibly missing comma or } */ int }
type _ struct {
x y /* ERROR syntax error: unexpected comma in struct type; possibly missing semicolon or newline or } */ ,
}
-func f(a, b c /* ERROR unexpected `d' in parameter list; possibly missing comma or \) */ d) {
- f(a, b, c /* ERROR unexpected `d' in argument list; possibly missing comma or \) */ d)
+func f(a, b c /* ERROR unexpected name d in parameter list; possibly missing comma or \) */ d) {
+ f(a, b, c /* ERROR unexpected name d in argument list; possibly missing comma or \) */ d)
}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue52391.go b/src/cmd/compile/internal/syntax/testdata/issue52391.go
index 42b71cc83a..85c080c9e6 100644
--- a/src/cmd/compile/internal/syntax/testdata/issue52391.go
+++ b/src/cmd/compile/internal/syntax/testdata/issue52391.go
@@ -13,5 +13,5 @@ type _ interface {
(int) | (string)
(int) | ~(string)
(/* ERROR unexpected ~ */ ~int)
- (int /* ERROR unexpected \| */ | /* ERROR unexpected `string' */ string /* ERROR unexpected \) */ )
+ (int /* ERROR unexpected \| */ | /* ERROR unexpected name string */ string /* ERROR unexpected \) */ )
}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue65790.go b/src/cmd/compile/internal/syntax/testdata/issue65790.go
index 07ffd12678..9e079423ab 100644
--- a/src/cmd/compile/internal/syntax/testdata/issue65790.go
+++ b/src/cmd/compile/internal/syntax/testdata/issue65790.go
@@ -9,6 +9,6 @@ import (
)
func f() {
- int status // ERROR syntax error: unexpected `status' at end of statement
+ int status // ERROR syntax error: unexpected name status at end of statement
fmt.Println(status)
}
diff --git a/src/cmd/compile/internal/syntax/type.go b/src/cmd/compile/internal/syntax/type.go
index 53132a442d..0be7e250ee 100644
--- a/src/cmd/compile/internal/syntax/type.go
+++ b/src/cmd/compile/internal/syntax/type.go
@@ -10,9 +10,12 @@ import "go/constant"
// All types implement the Type interface.
// (This type originally lived in types2. We moved it here
// so we could depend on it from other packages without
-// introducing a circularity.)
+// introducing an import cycle.)
type Type interface {
// Underlying returns the underlying type of a type.
+ // Underlying types are never Named, TypeParam, or Alias types.
+ //
+ // See https://go.dev/ref/spec#Underlying_types.
Underlying() Type
// String returns a string representation of a type.
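A quick illustration of the documented guarantee, using go/types, whose Underlying carries the same contract (illustrative user-level code, not part of this change):

    package main

    import (
        "fmt"
        "go/types"
    )

    func main() {
        // type Celsius float64: Underlying bottoms out at an unnamed
        // type (*types.Basic here), never a Named, TypeParam, or Alias.
        obj := types.NewTypeName(0, nil, "Celsius", nil)
        named := types.NewNamed(obj, types.Typ[types.Float64], nil)
        fmt.Println(named.Underlying()) // float64, not Celsius
    }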
diff --git a/src/cmd/compile/internal/typecheck/_builtin/runtime.go b/src/cmd/compile/internal/typecheck/_builtin/runtime.go
index 3fee023afb..3a5d3576be 100644
--- a/src/cmd/compile/internal/typecheck/_builtin/runtime.go
+++ b/src/cmd/compile/internal/typecheck/_builtin/runtime.go
@@ -117,8 +117,8 @@ func interfaceSwitch(s *byte, t *byte) (int, *byte)
func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
-// panic for iteration after exit in range func
-func panicrangeexit()
+// panic for various rangefunc iterator errors
+func panicrangestate(state int)
// defer in range over func
func deferrangefunc() interface{}
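The single integer argument is what lets the runtime distinguish which misuse occurred, where panicrangeexit had one message for everything. A toy model of the checked yield that the rangefunc rewrite conceptually wraps around a loop body; the constants and shapes below are illustrative, not the rewrite's actual output:

    package main

    import "fmt"

    // Illustrative states only; the real rewrite defines its own set.
    const (
        stateReady = iota // loop body may run
        stateDone         // loop exited; further yields are invalid
    )

    // checkedYield guards re-entry with a state word; an out-of-order
    // call reports the offending state, which is the role of
    // runtime.panicrangestate(state int).
    func checkedYield(state *int, body func() bool) func() bool {
        return func() bool {
            if *state != stateReady {
                panic(fmt.Sprintf("range function continued iteration after exit (state %d)", *state))
            }
            if !body() {
                *state = stateDone
                return false
            }
            return true
        }
    }

    func main() {
        state := stateReady
        yield := checkedYield(&state, func() bool { return false })
        yield() // body runs once and exits the loop
        defer func() { fmt.Println("recovered:", recover()) }()
        yield() // a second call after exit panics with the state
    }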
diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go
index e3ef360a03..2f43b1d01c 100644
--- a/src/cmd/compile/internal/typecheck/builtin.go
+++ b/src/cmd/compile/internal/typecheck/builtin.go
@@ -102,142 +102,142 @@ var runtimeDecls = [...]struct {
{"interfaceSwitch", funcTag, 70},
{"ifaceeq", funcTag, 72},
{"efaceeq", funcTag, 72},
- {"panicrangeexit", funcTag, 9},
- {"deferrangefunc", funcTag, 73},
- {"rand32", funcTag, 74},
- {"makemap64", funcTag, 76},
- {"makemap", funcTag, 77},
- {"makemap_small", funcTag, 78},
- {"mapaccess1", funcTag, 79},
- {"mapaccess1_fast32", funcTag, 80},
- {"mapaccess1_fast64", funcTag, 81},
- {"mapaccess1_faststr", funcTag, 82},
- {"mapaccess1_fat", funcTag, 83},
- {"mapaccess2", funcTag, 84},
- {"mapaccess2_fast32", funcTag, 85},
- {"mapaccess2_fast64", funcTag, 86},
- {"mapaccess2_faststr", funcTag, 87},
- {"mapaccess2_fat", funcTag, 88},
- {"mapassign", funcTag, 79},
- {"mapassign_fast32", funcTag, 80},
- {"mapassign_fast32ptr", funcTag, 89},
- {"mapassign_fast64", funcTag, 81},
- {"mapassign_fast64ptr", funcTag, 89},
- {"mapassign_faststr", funcTag, 82},
- {"mapiterinit", funcTag, 90},
- {"mapdelete", funcTag, 90},
- {"mapdelete_fast32", funcTag, 91},
- {"mapdelete_fast64", funcTag, 92},
- {"mapdelete_faststr", funcTag, 93},
- {"mapiternext", funcTag, 94},
- {"mapclear", funcTag, 95},
- {"makechan64", funcTag, 97},
- {"makechan", funcTag, 98},
- {"chanrecv1", funcTag, 100},
- {"chanrecv2", funcTag, 101},
- {"chansend1", funcTag, 103},
- {"closechan", funcTag, 104},
- {"chanlen", funcTag, 105},
- {"chancap", funcTag, 105},
- {"writeBarrier", varTag, 107},
- {"typedmemmove", funcTag, 108},
- {"typedmemclr", funcTag, 109},
- {"typedslicecopy", funcTag, 110},
- {"selectnbsend", funcTag, 111},
- {"selectnbrecv", funcTag, 112},
- {"selectsetpc", funcTag, 113},
- {"selectgo", funcTag, 114},
+ {"panicrangestate", funcTag, 73},
+ {"deferrangefunc", funcTag, 74},
+ {"rand32", funcTag, 75},
+ {"makemap64", funcTag, 77},
+ {"makemap", funcTag, 78},
+ {"makemap_small", funcTag, 79},
+ {"mapaccess1", funcTag, 80},
+ {"mapaccess1_fast32", funcTag, 81},
+ {"mapaccess1_fast64", funcTag, 82},
+ {"mapaccess1_faststr", funcTag, 83},
+ {"mapaccess1_fat", funcTag, 84},
+ {"mapaccess2", funcTag, 85},
+ {"mapaccess2_fast32", funcTag, 86},
+ {"mapaccess2_fast64", funcTag, 87},
+ {"mapaccess2_faststr", funcTag, 88},
+ {"mapaccess2_fat", funcTag, 89},
+ {"mapassign", funcTag, 80},
+ {"mapassign_fast32", funcTag, 81},
+ {"mapassign_fast32ptr", funcTag, 90},
+ {"mapassign_fast64", funcTag, 82},
+ {"mapassign_fast64ptr", funcTag, 90},
+ {"mapassign_faststr", funcTag, 83},
+ {"mapiterinit", funcTag, 91},
+ {"mapdelete", funcTag, 91},
+ {"mapdelete_fast32", funcTag, 92},
+ {"mapdelete_fast64", funcTag, 93},
+ {"mapdelete_faststr", funcTag, 94},
+ {"mapiternext", funcTag, 95},
+ {"mapclear", funcTag, 96},
+ {"makechan64", funcTag, 98},
+ {"makechan", funcTag, 99},
+ {"chanrecv1", funcTag, 101},
+ {"chanrecv2", funcTag, 102},
+ {"chansend1", funcTag, 104},
+ {"closechan", funcTag, 105},
+ {"chanlen", funcTag, 106},
+ {"chancap", funcTag, 106},
+ {"writeBarrier", varTag, 108},
+ {"typedmemmove", funcTag, 109},
+ {"typedmemclr", funcTag, 110},
+ {"typedslicecopy", funcTag, 111},
+ {"selectnbsend", funcTag, 112},
+ {"selectnbrecv", funcTag, 113},
+ {"selectsetpc", funcTag, 114},
+ {"selectgo", funcTag, 115},
{"block", funcTag, 9},
- {"makeslice", funcTag, 115},
- {"makeslice64", funcTag, 116},
- {"makeslicecopy", funcTag, 117},
- {"growslice", funcTag, 119},
- {"unsafeslicecheckptr", funcTag, 120},
+ {"makeslice", funcTag, 116},
+ {"makeslice64", funcTag, 117},
+ {"makeslicecopy", funcTag, 118},
+ {"growslice", funcTag, 120},
+ {"unsafeslicecheckptr", funcTag, 121},
{"panicunsafeslicelen", funcTag, 9},
{"panicunsafeslicenilptr", funcTag, 9},
- {"unsafestringcheckptr", funcTag, 121},
+ {"unsafestringcheckptr", funcTag, 122},
{"panicunsafestringlen", funcTag, 9},
{"panicunsafestringnilptr", funcTag, 9},
- {"memmove", funcTag, 122},
- {"memclrNoHeapPointers", funcTag, 123},
- {"memclrHasPointers", funcTag, 123},
- {"memequal", funcTag, 124},
- {"memequal0", funcTag, 125},
- {"memequal8", funcTag, 125},
- {"memequal16", funcTag, 125},
- {"memequal32", funcTag, 125},
- {"memequal64", funcTag, 125},
- {"memequal128", funcTag, 125},
- {"f32equal", funcTag, 126},
- {"f64equal", funcTag, 126},
- {"c64equal", funcTag, 126},
- {"c128equal", funcTag, 126},
- {"strequal", funcTag, 126},
- {"interequal", funcTag, 126},
- {"nilinterequal", funcTag, 126},
- {"memhash", funcTag, 127},
- {"memhash0", funcTag, 128},
- {"memhash8", funcTag, 128},
- {"memhash16", funcTag, 128},
- {"memhash32", funcTag, 128},
- {"memhash64", funcTag, 128},
- {"memhash128", funcTag, 128},
- {"f32hash", funcTag, 129},
- {"f64hash", funcTag, 129},
- {"c64hash", funcTag, 129},
- {"c128hash", funcTag, 129},
- {"strhash", funcTag, 129},
- {"interhash", funcTag, 129},
- {"nilinterhash", funcTag, 129},
- {"int64div", funcTag, 130},
- {"uint64div", funcTag, 131},
- {"int64mod", funcTag, 130},
- {"uint64mod", funcTag, 131},
- {"float64toint64", funcTag, 132},
- {"float64touint64", funcTag, 133},
- {"float64touint32", funcTag, 134},
- {"int64tofloat64", funcTag, 135},
- {"int64tofloat32", funcTag, 137},
- {"uint64tofloat64", funcTag, 138},
- {"uint64tofloat32", funcTag, 139},
- {"uint32tofloat64", funcTag, 140},
- {"complex128div", funcTag, 141},
- {"getcallerpc", funcTag, 142},
- {"getcallersp", funcTag, 142},
+ {"memmove", funcTag, 123},
+ {"memclrNoHeapPointers", funcTag, 124},
+ {"memclrHasPointers", funcTag, 124},
+ {"memequal", funcTag, 125},
+ {"memequal0", funcTag, 126},
+ {"memequal8", funcTag, 126},
+ {"memequal16", funcTag, 126},
+ {"memequal32", funcTag, 126},
+ {"memequal64", funcTag, 126},
+ {"memequal128", funcTag, 126},
+ {"f32equal", funcTag, 127},
+ {"f64equal", funcTag, 127},
+ {"c64equal", funcTag, 127},
+ {"c128equal", funcTag, 127},
+ {"strequal", funcTag, 127},
+ {"interequal", funcTag, 127},
+ {"nilinterequal", funcTag, 127},
+ {"memhash", funcTag, 128},
+ {"memhash0", funcTag, 129},
+ {"memhash8", funcTag, 129},
+ {"memhash16", funcTag, 129},
+ {"memhash32", funcTag, 129},
+ {"memhash64", funcTag, 129},
+ {"memhash128", funcTag, 129},
+ {"f32hash", funcTag, 130},
+ {"f64hash", funcTag, 130},
+ {"c64hash", funcTag, 130},
+ {"c128hash", funcTag, 130},
+ {"strhash", funcTag, 130},
+ {"interhash", funcTag, 130},
+ {"nilinterhash", funcTag, 130},
+ {"int64div", funcTag, 131},
+ {"uint64div", funcTag, 132},
+ {"int64mod", funcTag, 131},
+ {"uint64mod", funcTag, 132},
+ {"float64toint64", funcTag, 133},
+ {"float64touint64", funcTag, 134},
+ {"float64touint32", funcTag, 135},
+ {"int64tofloat64", funcTag, 136},
+ {"int64tofloat32", funcTag, 138},
+ {"uint64tofloat64", funcTag, 139},
+ {"uint64tofloat32", funcTag, 140},
+ {"uint32tofloat64", funcTag, 141},
+ {"complex128div", funcTag, 142},
+ {"getcallerpc", funcTag, 143},
+ {"getcallersp", funcTag, 143},
{"racefuncenter", funcTag, 31},
{"racefuncexit", funcTag, 9},
{"raceread", funcTag, 31},
{"racewrite", funcTag, 31},
- {"racereadrange", funcTag, 143},
- {"racewriterange", funcTag, 143},
- {"msanread", funcTag, 143},
- {"msanwrite", funcTag, 143},
- {"msanmove", funcTag, 144},
- {"asanread", funcTag, 143},
- {"asanwrite", funcTag, 143},
- {"checkptrAlignment", funcTag, 145},
- {"checkptrArithmetic", funcTag, 147},
- {"libfuzzerTraceCmp1", funcTag, 148},
- {"libfuzzerTraceCmp2", funcTag, 149},
- {"libfuzzerTraceCmp4", funcTag, 150},
- {"libfuzzerTraceCmp8", funcTag, 151},
- {"libfuzzerTraceConstCmp1", funcTag, 148},
- {"libfuzzerTraceConstCmp2", funcTag, 149},
- {"libfuzzerTraceConstCmp4", funcTag, 150},
- {"libfuzzerTraceConstCmp8", funcTag, 151},
- {"libfuzzerHookStrCmp", funcTag, 152},
- {"libfuzzerHookEqualFold", funcTag, 152},
- {"addCovMeta", funcTag, 154},
+ {"racereadrange", funcTag, 144},
+ {"racewriterange", funcTag, 144},
+ {"msanread", funcTag, 144},
+ {"msanwrite", funcTag, 144},
+ {"msanmove", funcTag, 145},
+ {"asanread", funcTag, 144},
+ {"asanwrite", funcTag, 144},
+ {"checkptrAlignment", funcTag, 146},
+ {"checkptrArithmetic", funcTag, 148},
+ {"libfuzzerTraceCmp1", funcTag, 149},
+ {"libfuzzerTraceCmp2", funcTag, 150},
+ {"libfuzzerTraceCmp4", funcTag, 151},
+ {"libfuzzerTraceCmp8", funcTag, 152},
+ {"libfuzzerTraceConstCmp1", funcTag, 149},
+ {"libfuzzerTraceConstCmp2", funcTag, 150},
+ {"libfuzzerTraceConstCmp4", funcTag, 151},
+ {"libfuzzerTraceConstCmp8", funcTag, 152},
+ {"libfuzzerHookStrCmp", funcTag, 153},
+ {"libfuzzerHookEqualFold", funcTag, 153},
+ {"addCovMeta", funcTag, 155},
{"x86HasPOPCNT", varTag, 6},
{"x86HasSSE41", varTag, 6},
{"x86HasFMA", varTag, 6},
{"armHasVFPv4", varTag, 6},
{"arm64HasATOMICS", varTag, 6},
- {"asanregisterglobals", funcTag, 123},
+ {"asanregisterglobals", funcTag, 124},
}
func runtimeTypes() []*types.Type {
- var typs [155]*types.Type
+ var typs [156]*types.Type
typs[0] = types.ByteType
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[types.TANY]
@@ -311,88 +311,89 @@ func runtimeTypes() []*types.Type {
typs[70] = newSig(params(typs[1], typs[1]), params(typs[15], typs[1]))
typs[71] = types.NewPtr(typs[5])
typs[72] = newSig(params(typs[71], typs[7], typs[7]), params(typs[6]))
- typs[73] = newSig(nil, params(typs[10]))
- typs[74] = newSig(nil, params(typs[60]))
- typs[75] = types.NewMap(typs[2], typs[2])
- typs[76] = newSig(params(typs[1], typs[22], typs[3]), params(typs[75]))
- typs[77] = newSig(params(typs[1], typs[15], typs[3]), params(typs[75]))
- typs[78] = newSig(nil, params(typs[75]))
- typs[79] = newSig(params(typs[1], typs[75], typs[3]), params(typs[3]))
- typs[80] = newSig(params(typs[1], typs[75], typs[60]), params(typs[3]))
- typs[81] = newSig(params(typs[1], typs[75], typs[24]), params(typs[3]))
- typs[82] = newSig(params(typs[1], typs[75], typs[28]), params(typs[3]))
- typs[83] = newSig(params(typs[1], typs[75], typs[3], typs[1]), params(typs[3]))
- typs[84] = newSig(params(typs[1], typs[75], typs[3]), params(typs[3], typs[6]))
- typs[85] = newSig(params(typs[1], typs[75], typs[60]), params(typs[3], typs[6]))
- typs[86] = newSig(params(typs[1], typs[75], typs[24]), params(typs[3], typs[6]))
- typs[87] = newSig(params(typs[1], typs[75], typs[28]), params(typs[3], typs[6]))
- typs[88] = newSig(params(typs[1], typs[75], typs[3], typs[1]), params(typs[3], typs[6]))
- typs[89] = newSig(params(typs[1], typs[75], typs[7]), params(typs[3]))
- typs[90] = newSig(params(typs[1], typs[75], typs[3]), nil)
- typs[91] = newSig(params(typs[1], typs[75], typs[60]), nil)
- typs[92] = newSig(params(typs[1], typs[75], typs[24]), nil)
- typs[93] = newSig(params(typs[1], typs[75], typs[28]), nil)
- typs[94] = newSig(params(typs[3]), nil)
- typs[95] = newSig(params(typs[1], typs[75]), nil)
- typs[96] = types.NewChan(typs[2], types.Cboth)
- typs[97] = newSig(params(typs[1], typs[22]), params(typs[96]))
- typs[98] = newSig(params(typs[1], typs[15]), params(typs[96]))
- typs[99] = types.NewChan(typs[2], types.Crecv)
- typs[100] = newSig(params(typs[99], typs[3]), nil)
- typs[101] = newSig(params(typs[99], typs[3]), params(typs[6]))
- typs[102] = types.NewChan(typs[2], types.Csend)
- typs[103] = newSig(params(typs[102], typs[3]), nil)
- typs[104] = newSig(params(typs[102]), nil)
- typs[105] = newSig(params(typs[2]), params(typs[15]))
- typs[106] = types.NewArray(typs[0], 3)
- typs[107] = types.NewStruct([]*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[106]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])})
- typs[108] = newSig(params(typs[1], typs[3], typs[3]), nil)
- typs[109] = newSig(params(typs[1], typs[3]), nil)
- typs[110] = newSig(params(typs[1], typs[3], typs[15], typs[3], typs[15]), params(typs[15]))
- typs[111] = newSig(params(typs[102], typs[3]), params(typs[6]))
- typs[112] = newSig(params(typs[3], typs[99]), params(typs[6], typs[6]))
- typs[113] = newSig(params(typs[71]), nil)
- typs[114] = newSig(params(typs[1], typs[1], typs[71], typs[15], typs[15], typs[6]), params(typs[15], typs[6]))
- typs[115] = newSig(params(typs[1], typs[15], typs[15]), params(typs[7]))
- typs[116] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7]))
- typs[117] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7]))
- typs[118] = types.NewSlice(typs[2])
- typs[119] = newSig(params(typs[3], typs[15], typs[15], typs[15], typs[1]), params(typs[118]))
- typs[120] = newSig(params(typs[1], typs[7], typs[22]), nil)
- typs[121] = newSig(params(typs[7], typs[22]), nil)
- typs[122] = newSig(params(typs[3], typs[3], typs[5]), nil)
- typs[123] = newSig(params(typs[7], typs[5]), nil)
- typs[124] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6]))
- typs[125] = newSig(params(typs[3], typs[3]), params(typs[6]))
- typs[126] = newSig(params(typs[7], typs[7]), params(typs[6]))
- typs[127] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5]))
- typs[128] = newSig(params(typs[7], typs[5]), params(typs[5]))
- typs[129] = newSig(params(typs[3], typs[5]), params(typs[5]))
- typs[130] = newSig(params(typs[22], typs[22]), params(typs[22]))
- typs[131] = newSig(params(typs[24], typs[24]), params(typs[24]))
- typs[132] = newSig(params(typs[20]), params(typs[22]))
- typs[133] = newSig(params(typs[20]), params(typs[24]))
- typs[134] = newSig(params(typs[20]), params(typs[60]))
- typs[135] = newSig(params(typs[22]), params(typs[20]))
- typs[136] = types.Types[types.TFLOAT32]
- typs[137] = newSig(params(typs[22]), params(typs[136]))
- typs[138] = newSig(params(typs[24]), params(typs[20]))
- typs[139] = newSig(params(typs[24]), params(typs[136]))
- typs[140] = newSig(params(typs[60]), params(typs[20]))
- typs[141] = newSig(params(typs[26], typs[26]), params(typs[26]))
- typs[142] = newSig(nil, params(typs[5]))
- typs[143] = newSig(params(typs[5], typs[5]), nil)
- typs[144] = newSig(params(typs[5], typs[5], typs[5]), nil)
- typs[145] = newSig(params(typs[7], typs[1], typs[5]), nil)
- typs[146] = types.NewSlice(typs[7])
- typs[147] = newSig(params(typs[7], typs[146]), nil)
- typs[148] = newSig(params(typs[64], typs[64], typs[17]), nil)
- typs[149] = newSig(params(typs[58], typs[58], typs[17]), nil)
- typs[150] = newSig(params(typs[60], typs[60], typs[17]), nil)
- typs[151] = newSig(params(typs[24], typs[24], typs[17]), nil)
- typs[152] = newSig(params(typs[28], typs[28], typs[17]), nil)
- typs[153] = types.NewArray(typs[0], 16)
- typs[154] = newSig(params(typs[7], typs[60], typs[153], typs[28], typs[15], typs[64], typs[64]), params(typs[60]))
+ typs[73] = newSig(params(typs[15]), nil)
+ typs[74] = newSig(nil, params(typs[10]))
+ typs[75] = newSig(nil, params(typs[60]))
+ typs[76] = types.NewMap(typs[2], typs[2])
+ typs[77] = newSig(params(typs[1], typs[22], typs[3]), params(typs[76]))
+ typs[78] = newSig(params(typs[1], typs[15], typs[3]), params(typs[76]))
+ typs[79] = newSig(nil, params(typs[76]))
+ typs[80] = newSig(params(typs[1], typs[76], typs[3]), params(typs[3]))
+ typs[81] = newSig(params(typs[1], typs[76], typs[60]), params(typs[3]))
+ typs[82] = newSig(params(typs[1], typs[76], typs[24]), params(typs[3]))
+ typs[83] = newSig(params(typs[1], typs[76], typs[28]), params(typs[3]))
+ typs[84] = newSig(params(typs[1], typs[76], typs[3], typs[1]), params(typs[3]))
+ typs[85] = newSig(params(typs[1], typs[76], typs[3]), params(typs[3], typs[6]))
+ typs[86] = newSig(params(typs[1], typs[76], typs[60]), params(typs[3], typs[6]))
+ typs[87] = newSig(params(typs[1], typs[76], typs[24]), params(typs[3], typs[6]))
+ typs[88] = newSig(params(typs[1], typs[76], typs[28]), params(typs[3], typs[6]))
+ typs[89] = newSig(params(typs[1], typs[76], typs[3], typs[1]), params(typs[3], typs[6]))
+ typs[90] = newSig(params(typs[1], typs[76], typs[7]), params(typs[3]))
+ typs[91] = newSig(params(typs[1], typs[76], typs[3]), nil)
+ typs[92] = newSig(params(typs[1], typs[76], typs[60]), nil)
+ typs[93] = newSig(params(typs[1], typs[76], typs[24]), nil)
+ typs[94] = newSig(params(typs[1], typs[76], typs[28]), nil)
+ typs[95] = newSig(params(typs[3]), nil)
+ typs[96] = newSig(params(typs[1], typs[76]), nil)
+ typs[97] = types.NewChan(typs[2], types.Cboth)
+ typs[98] = newSig(params(typs[1], typs[22]), params(typs[97]))
+ typs[99] = newSig(params(typs[1], typs[15]), params(typs[97]))
+ typs[100] = types.NewChan(typs[2], types.Crecv)
+ typs[101] = newSig(params(typs[100], typs[3]), nil)
+ typs[102] = newSig(params(typs[100], typs[3]), params(typs[6]))
+ typs[103] = types.NewChan(typs[2], types.Csend)
+ typs[104] = newSig(params(typs[103], typs[3]), nil)
+ typs[105] = newSig(params(typs[103]), nil)
+ typs[106] = newSig(params(typs[2]), params(typs[15]))
+ typs[107] = types.NewArray(typs[0], 3)
+ typs[108] = types.NewStruct([]*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[107]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])})
+ typs[109] = newSig(params(typs[1], typs[3], typs[3]), nil)
+ typs[110] = newSig(params(typs[1], typs[3]), nil)
+ typs[111] = newSig(params(typs[1], typs[3], typs[15], typs[3], typs[15]), params(typs[15]))
+ typs[112] = newSig(params(typs[103], typs[3]), params(typs[6]))
+ typs[113] = newSig(params(typs[3], typs[100]), params(typs[6], typs[6]))
+ typs[114] = newSig(params(typs[71]), nil)
+ typs[115] = newSig(params(typs[1], typs[1], typs[71], typs[15], typs[15], typs[6]), params(typs[15], typs[6]))
+ typs[116] = newSig(params(typs[1], typs[15], typs[15]), params(typs[7]))
+ typs[117] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7]))
+ typs[118] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7]))
+ typs[119] = types.NewSlice(typs[2])
+ typs[120] = newSig(params(typs[3], typs[15], typs[15], typs[15], typs[1]), params(typs[119]))
+ typs[121] = newSig(params(typs[1], typs[7], typs[22]), nil)
+ typs[122] = newSig(params(typs[7], typs[22]), nil)
+ typs[123] = newSig(params(typs[3], typs[3], typs[5]), nil)
+ typs[124] = newSig(params(typs[7], typs[5]), nil)
+ typs[125] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6]))
+ typs[126] = newSig(params(typs[3], typs[3]), params(typs[6]))
+ typs[127] = newSig(params(typs[7], typs[7]), params(typs[6]))
+ typs[128] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5]))
+ typs[129] = newSig(params(typs[7], typs[5]), params(typs[5]))
+ typs[130] = newSig(params(typs[3], typs[5]), params(typs[5]))
+ typs[131] = newSig(params(typs[22], typs[22]), params(typs[22]))
+ typs[132] = newSig(params(typs[24], typs[24]), params(typs[24]))
+ typs[133] = newSig(params(typs[20]), params(typs[22]))
+ typs[134] = newSig(params(typs[20]), params(typs[24]))
+ typs[135] = newSig(params(typs[20]), params(typs[60]))
+ typs[136] = newSig(params(typs[22]), params(typs[20]))
+ typs[137] = types.Types[types.TFLOAT32]
+ typs[138] = newSig(params(typs[22]), params(typs[137]))
+ typs[139] = newSig(params(typs[24]), params(typs[20]))
+ typs[140] = newSig(params(typs[24]), params(typs[137]))
+ typs[141] = newSig(params(typs[60]), params(typs[20]))
+ typs[142] = newSig(params(typs[26], typs[26]), params(typs[26]))
+ typs[143] = newSig(nil, params(typs[5]))
+ typs[144] = newSig(params(typs[5], typs[5]), nil)
+ typs[145] = newSig(params(typs[5], typs[5], typs[5]), nil)
+ typs[146] = newSig(params(typs[7], typs[1], typs[5]), nil)
+ typs[147] = types.NewSlice(typs[7])
+ typs[148] = newSig(params(typs[7], typs[147]), nil)
+ typs[149] = newSig(params(typs[64], typs[64], typs[17]), nil)
+ typs[150] = newSig(params(typs[58], typs[58], typs[17]), nil)
+ typs[151] = newSig(params(typs[60], typs[60], typs[17]), nil)
+ typs[152] = newSig(params(typs[24], typs[24], typs[17]), nil)
+ typs[153] = newSig(params(typs[28], typs[28], typs[17]), nil)
+ typs[154] = types.NewArray(typs[0], 16)
+ typs[155] = newSig(params(typs[7], typs[60], typs[154], typs[28], typs[15], typs[64], typs[64]), params(typs[60]))
return typs[:]
}
diff --git a/src/cmd/compile/internal/types2/alias.go b/src/cmd/compile/internal/types2/alias.go
index 9b7a13f81e..5148d5db03 100644
--- a/src/cmd/compile/internal/types2/alias.go
+++ b/src/cmd/compile/internal/types2/alias.go
@@ -4,7 +4,10 @@
package types2
-import "fmt"
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+)
// An Alias represents an alias type.
// Whether or not Alias types are created is controlled by the
@@ -14,7 +17,9 @@ import "fmt"
// which points directly to the actual (aliased) type.
type Alias struct {
obj *TypeName // corresponding declared alias object
+ orig *Alias // original, uninstantiated alias
tparams *TypeParamList // type parameters, or nil
+ targs *TypeList // type arguments, or nil
fromRHS Type // RHS of type alias declaration; may be an alias
actual Type // actual (aliased) type; never an alias
}
@@ -28,9 +33,37 @@ func NewAlias(obj *TypeName, rhs Type) *Alias {
return alias
}
-func (a *Alias) Obj() *TypeName { return a.obj }
+// Obj returns the type name for the declaration defining the alias type a.
+// For instantiated types, this is the same as the type name of the origin type.
+func (a *Alias) Obj() *TypeName { return a.orig.obj }
+
+func (a *Alias) String() string { return TypeString(a, nil) }
+
+// Underlying returns the [underlying type] of the alias type a, which is the
+// underlying type of the aliased type. Underlying types are never Named,
+// TypeParam, or Alias types.
+//
+// [underlying type]: https://go.dev/ref/spec#Underlying_types.
func (a *Alias) Underlying() Type { return unalias(a).Underlying() }
-func (a *Alias) String() string { return TypeString(a, nil) }
+
+// Origin returns the generic Alias type of which a is an instance.
+// If a is not an instance of a generic alias, Origin returns a.
+func (a *Alias) Origin() *Alias { return a.orig }
+
+// TypeParams returns the type parameters of the alias type a, or nil.
+// A generic Alias and its instances have the same type parameters.
+func (a *Alias) TypeParams() *TypeParamList { return a.tparams }
+
+// SetTypeParams sets the type parameters of the alias type a.
+// The alias a must not have type arguments.
+func (a *Alias) SetTypeParams(tparams []*TypeParam) {
+ assert(a.targs == nil)
+ a.tparams = bindTParams(tparams)
+}
+
+// TypeArgs returns the type arguments used to instantiate the Alias type.
+// If a is not an instance of a generic alias, the result is nil.
+func (a *Alias) TypeArgs() *TypeList { return a.targs }
// Rhs returns the type R on the right-hand side of an alias
// declaration "type A = R", which may be another alias.
@@ -82,7 +115,10 @@ func asNamed(t Type) *Named {
// rhs must not be nil.
func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias {
assert(rhs != nil)
- a := &Alias{obj, nil, rhs, nil}
+ a := new(Alias)
+ a.obj = obj
+ a.orig = a
+ a.fromRHS = rhs
if obj.typ == nil {
obj.typ = a
}
@@ -95,6 +131,20 @@ func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias {
return a
}
+// newAliasInstance creates a new alias instance for the given origin and type
+// arguments, recording pos as the position of its synthetic object (for error
+// reporting).
+func (check *Checker) newAliasInstance(pos syntax.Pos, orig *Alias, targs []Type, ctxt *Context) *Alias {
+ assert(len(targs) > 0)
+ obj := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil)
+ rhs := check.subst(pos, orig.fromRHS, makeSubstMap(orig.TypeParams().list(), targs), nil, ctxt)
+ res := check.newAlias(obj, rhs)
+ res.orig = orig
+ res.tparams = orig.tparams
+ res.targs = newTypeList(targs)
+ return res
+}
+
func (a *Alias) cleanup() {
// Ensure a.actual is set before types are published,
// so Unalias is a pure "getter", not a "setter".
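The new orig, tparams, and targs fields are what make a parameterized alias and its instances representable. In user code, gated behind GOEXPERIMENT=aliastypeparams per the decl.go change further down, this corresponds to declarations like the following (illustrative, not part of this change):

    package p

    // A generic alias: tparams is [K], fromRHS is map[K]struct{}.
    type Set[K comparable] = map[K]struct{}

    // An instance: its Alias has Origin() == the Alias for Set and
    // TypeArgs() == [string]; Unalias and Underlying reach map[string]struct{}.
    var Names Set[string]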
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
index 029d105e2e..b9ec874d45 100644
--- a/src/cmd/compile/internal/types2/api.go
+++ b/src/cmd/compile/internal/types2/api.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package types declares the data types and implements
+// Package types2 declares the data types and implements
// the algorithms for type-checking of Go packages. Use
// Config.Check to invoke the type checker for a package.
// Alternatively, create a new type checker with NewChecker
@@ -176,9 +176,13 @@ type Config struct {
// exactly one "%s" format, e.g. "[go.dev/e/%s]".
ErrorURL string
- // If EnableAlias is set, alias declarations produce an Alias type.
- // Otherwise the alias information is only in the type name, which
- // points directly to the actual (aliased) type.
+ // If EnableAlias is set, alias declarations produce an Alias type. Otherwise
+ // the alias information is only in the type name, which points directly to
+ // the actual (aliased) type.
+ //
+ // This setting must not differ among concurrent type-checking operations,
+ // since it affects the behavior of Universe.Lookup("any").
+ //
// This flag will eventually be removed (with Go 1.24 at the earliest).
EnableAlias bool
}
diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go
index cf3c105f6c..5126ac5111 100644
--- a/src/cmd/compile/internal/types2/api_test.go
+++ b/src/cmd/compile/internal/types2/api_test.go
@@ -1867,7 +1867,10 @@ func sameSlice(a, b []int) bool {
// the correct result at various positions within the source.
func TestScopeLookupParent(t *testing.T) {
imports := make(testImporter)
- conf := Config{Importer: imports}
+ conf := Config{
+ Importer: imports,
+ EnableAlias: true, // must match default Universe.Lookup behavior
+ }
var info Info
makePkg := func(path, src string) {
var err error
@@ -3022,3 +3025,25 @@ type C = int
t.Errorf("A.Rhs = %s, want %s", got, want)
}
}
+
+// Test the hijacking of "any" described in golang/go#66921, for
+// (concurrent) type checking.
+func TestAnyHijacking_Check(t *testing.T) {
+ for _, enableAlias := range []bool{false, true} {
+ t.Run(fmt.Sprintf("EnableAlias=%t", enableAlias), func(t *testing.T) {
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ pkg := mustTypecheck("package p; var x any", &Config{EnableAlias: enableAlias}, nil)
+ x := pkg.Scope().Lookup("x")
+ if _, gotAlias := x.Type().(*Alias); gotAlias != enableAlias {
+ t.Errorf(`Lookup("x").Type() is %T: got Alias: %t, want %t`, x.Type(), gotAlias, enableAlias)
+ }
+ }()
+ }
+ wg.Wait()
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go
index b897a55212..808d39fd24 100644
--- a/src/cmd/compile/internal/types2/builtins.go
+++ b/src/cmd/compile/internal/types2/builtins.go
@@ -25,7 +25,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
if hasDots(call) && id != _Append {
check.errorf(dddErrPos(call),
InvalidDotDotDot,
- invalidOp+"invalid use of ... with built-in %s", quote(bin.name))
+ invalidOp+"invalid use of ... with built-in %s", bin.name)
check.use(argList...)
return
}
@@ -210,7 +210,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
if id == _Len {
code = InvalidLen
}
- check.errorf(x, code, invalidArg+"%s for %s", x, quote(bin.name))
+ check.errorf(x, code, invalidArg+"%s for built-in %s", x, bin.name)
}
return
}
@@ -533,7 +533,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
case _Max, _Min:
// max(x, ...)
// min(x, ...)
- check.verifyVersionf(call.Fun, go1_21, quote(bin.name))
+ check.verifyVersionf(call.Fun, go1_21, "built-in %s", bin.name)
op := token.LSS
if id == _Max {
@@ -576,7 +576,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
if x.mode != constant_ {
x.mode = value
// A value must not be untyped.
- check.assignment(x, &emptyInterface, "argument to "+quote(bin.name))
+ check.assignment(x, &emptyInterface, "argument to built-in "+bin.name)
if x.mode == invalid {
return
}
@@ -641,7 +641,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
if nargs > 0 {
params = make([]Type, nargs)
for i, a := range args {
- check.assignment(a, nil, "argument to "+quote(predeclaredFuncs[id].name))
+ check.assignment(a, nil, "argument to built-in"+predeclaredFuncs[id].name)
if a.mode == invalid {
return
}
@@ -960,7 +960,7 @@ func hasVarSize(t Type, seen map[*Named]bool) (varSized bool) {
// applyTypeFunc returns nil.
// If x is not a type parameter, the result is f(x).
func (check *Checker) applyTypeFunc(f func(Type) Type, x *operand, id builtinId) Type {
- if tp, _ := x.typ.(*TypeParam); tp != nil {
+ if tp, _ := Unalias(x.typ).(*TypeParam); tp != nil {
// Test if t satisfies the requirements for the argument
// type and collect possible result types at the same time.
var terms []*Term
@@ -992,7 +992,7 @@ func (check *Checker) applyTypeFunc(f func(Type) Type, x *operand, id builtinId)
default:
panic("unreachable")
}
- check.softErrorf(x, code, "%s not supported as argument to %s for go1.18 (see go.dev/issue/50937)", x, quote(predeclaredFuncs[id].name))
+ check.softErrorf(x, code, "%s not supported as argument to built-in %s for go1.18 (see go.dev/issue/50937)", x, predeclaredFuncs[id].name)
// Construct a suitable new type parameter for the result type.
// The type parameter is placed in the current package so export/import
@@ -1026,7 +1026,7 @@ func makeSig(res Type, args ...Type) *Signature {
// arrayPtrDeref returns A if typ is of the form *A and A is an array;
// otherwise it returns typ.
func arrayPtrDeref(typ Type) Type {
- if p, ok := typ.(*Pointer); ok {
+ if p, ok := Unalias(typ).(*Pointer); ok {
if a, _ := under(p.base).(*Array); a != nil {
return a
}
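Both hunks apply the same rule: unwrap aliases before asserting on a concrete type, because with aliases enabled an alias of a pointer (or of a type parameter) is an *Alias wrapper rather than the wrapped type itself. The pattern, shown with the exported go/types mirror of the same API:

    package main

    import (
        "fmt"
        "go/types"
    )

    // asPointer returns the pointer type that t denotes, or nil. Without
    // the Unalias call, an alias of a pointer type would fall through the
    // assertion and be misclassified.
    func asPointer(t types.Type) *types.Pointer {
        p, _ := types.Unalias(t).(*types.Pointer)
        return p
    }

    func main() {
        ptr := types.NewPointer(types.Typ[types.Int])
        alias := types.NewAlias(types.NewTypeName(0, nil, "P", nil), ptr) // type P = *int
        fmt.Println(asPointer(alias)) // *int, not nil
    }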
diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go
index ca9772ff41..7df4e8250e 100644
--- a/src/cmd/compile/internal/types2/call.go
+++ b/src/cmd/compile/internal/types2/call.go
@@ -719,7 +719,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName
goto Error
}
if !exp.Exported() {
- check.errorf(e.Sel, UnexportedName, "%s not exported by package %s", quote(sel), quote(pkg.name))
+ check.errorf(e.Sel, UnexportedName, "name %s not exported by package %s", sel, pkg.name)
// ok to continue
}
}
diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go
index ee7e2e8683..91ad474e9d 100644
--- a/src/cmd/compile/internal/types2/check.go
+++ b/src/cmd/compile/internal/types2/check.go
@@ -10,8 +10,8 @@ import (
"cmd/compile/internal/syntax"
"fmt"
"go/constant"
- "internal/godebug"
. "internal/types/errors"
+ "sync/atomic"
)
// nopos indicates an unknown position
@@ -20,11 +20,28 @@ var nopos syntax.Pos
// debugging/development support
const debug = false // leave on during development
-// gotypesalias controls the use of Alias types.
-// As of Apr 16 2024 they are used by default.
-// To disable their use, set GODEBUG to gotypesalias=0.
-// This GODEBUG flag will be removed in the near future (tentatively Go 1.24).
-var gotypesalias = godebug.New("gotypesalias")
+// _aliasAny changes the behavior of [Scope.Lookup] for "any" in the
+// [Universe] scope.
+//
+// This is necessary because while Alias creation is controlled by
+// [Config.EnableAlias], the representation of "any" is a global. In
+// [Scope.Lookup], we select this global representation based on the result of
+// [aliasAny], but as a result need to guard against this behavior changing
+// during the type checking pass. Therefore we implement the following rule:
+// any number of goroutines can type check concurrently with the same
+// EnableAlias value, but if any goroutine tries to type check concurrently
+// with a different EnableAlias value, we panic.
+//
+// To achieve this, _aliasAny is a state machine:
+//
+// 0: no type checking is occurring
+// negative: type checking is occurring without EnableAlias set
+// positive: type checking is occurring with EnableAlias set
+var _aliasAny int32
+
+func aliasAny() bool {
+ return atomic.LoadInt32(&_aliasAny) >= 0 // default true
+}
// exprInfo stores information about an untyped expression.
type exprInfo struct {
@@ -293,7 +310,7 @@ func (check *Checker) initFiles(files []*syntax.File) {
check.files = append(check.files, file)
default:
- check.errorf(file, MismatchedPkgName, "package %s; expected %s", quote(name), quote(pkg.name))
+ check.errorf(file, MismatchedPkgName, "package %s; expected package %s", name, pkg.name)
// ignore this file
}
}
@@ -356,7 +373,7 @@ func (check *Checker) initFiles(files []*syntax.File) {
check.errorf(file.PkgName, TooNew, "file requires newer Go version %v", fileVersion)
}
}
- versions[base(file.Pos())] = v // base(file.Pos()) may be nil for tests
+ versions[file.Pos().FileBase()] = v // file.Pos().FileBase() may be nil for tests
}
}
@@ -397,6 +414,20 @@ func (check *Checker) Files(files []*syntax.File) (err error) {
// syntax is properly type annotated even in a package containing
// errors.
func (check *Checker) checkFiles(files []*syntax.File) {
+ // Ensure that EnableAlias is consistent among concurrent type checking
+ // operations. See the documentation of [_aliasAny] for details.
+ if check.conf.EnableAlias {
+ if atomic.AddInt32(&_aliasAny, 1) <= 0 {
+ panic("EnableAlias set while !EnableAlias type checking is ongoing")
+ }
+ defer atomic.AddInt32(&_aliasAny, -1)
+ } else {
+ if atomic.AddInt32(&_aliasAny, -1) >= 0 {
+ panic("!EnableAlias set while EnableAlias type checking is ongoing")
+ }
+ defer atomic.AddInt32(&_aliasAny, 1)
+ }
+
print := func(msg string) {
if check.conf.Trace {
fmt.Println()
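The counter protocol is worth seeing in isolation: one signed counter admits any number of concurrent holders of a single mode while trapping any mixed-mode overlap. A minimal sketch of the same pattern (not the checker's code):

    package main

    import "sync/atomic"

    // mode is 0 when idle, positive while mode-A holders run, and negative
    // while mode-B holders run; mixing the two modes concurrently panics,
    // exactly as checkFiles does for EnableAlias vs. !EnableAlias.
    var mode int32

    func acquire(a bool) (release func()) {
        if a {
            if atomic.AddInt32(&mode, 1) <= 0 {
                panic("mode A requested while mode B is active")
            }
            return func() { atomic.AddInt32(&mode, -1) }
        }
        if atomic.AddInt32(&mode, -1) >= 0 {
            panic("mode B requested while mode A is active")
        }
        return func() { atomic.AddInt32(&mode, 1) }
    }

    func main() {
        release := acquire(true)
        defer release()
        // acquire(false) here would panic; another acquire(true) is fine.
    }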
diff --git a/src/cmd/compile/internal/types2/check_test.go b/src/cmd/compile/internal/types2/check_test.go
index 63f831aa92..8b7b5316f0 100644
--- a/src/cmd/compile/internal/types2/check_test.go
+++ b/src/cmd/compile/internal/types2/check_test.go
@@ -247,17 +247,17 @@ func testFilesImpl(t *testing.T, filenames []string, srcs [][]byte, colDelta uin
panic("unreachable")
}
}
- pattern, err := strconv.Unquote(strings.TrimSpace(pattern))
+ unquoted, err := strconv.Unquote(strings.TrimSpace(pattern))
if err != nil {
- t.Errorf("%s:%d:%d: %v", filename, line, want.Pos.Col(), err)
+ t.Errorf("%s:%d:%d: invalid ERROR pattern (cannot unquote %s)", filename, line, want.Pos.Col(), pattern)
continue
}
if substr {
- if !strings.Contains(gotMsg, pattern) {
+ if !strings.Contains(gotMsg, unquoted) {
continue
}
} else {
- rx, err := regexp.Compile(pattern)
+ rx, err := regexp.Compile(unquoted)
if err != nil {
t.Errorf("%s:%d:%d: %v", filename, line, want.Pos.Col(), err)
continue
diff --git a/src/cmd/compile/internal/types2/compiler_internal.go b/src/cmd/compile/internal/types2/compiler_internal.go
new file mode 100644
index 0000000000..790a6779e4
--- /dev/null
+++ b/src/cmd/compile/internal/types2/compiler_internal.go
@@ -0,0 +1,50 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+)
+
+// This file should not be copied to go/types. See go.dev/issue/67477
+
+// RenameResult takes a slice of (result) fields and an index. If the indexed
+// field does not have a name and the corresponding result in the signature
+// also does not have a name, then both are renamed to
+//
+//	fmt.Sprintf("#rv%d", i+1)
+//
+// the newly named object is inserted into the signature's scope,
+// and the object and new field name are returned.
+//
+// The intended use for RenameResult is to allow rangefunc to assign results within a closure.
+// This is a hack, as narrowly targeted as possible to discourage abuse.
+func (s *Signature) RenameResult(results []*syntax.Field, i int) (*Var, *syntax.Name) {
+ a := results[i]
+ obj := s.Results().At(i)
+
+ if !(obj.name == "" || obj.name == "_" && a.Name == nil || a.Name.Value == "_") {
+ panic("Cannot change an existing name")
+ }
+
+ pos := a.Pos()
+ typ := a.Type.GetTypeInfo().Type
+
+ name := fmt.Sprintf("#rv%d", i+1)
+ obj.name = name
+ s.scope.Insert(obj)
+ obj.setScopePos(pos)
+
+ tv := syntax.TypeAndValue{Type: typ}
+ tv.SetIsValue()
+
+ n := syntax.NewName(pos, obj.Name())
+ n.SetTypeInfo(tv)
+
+ a.Name = n
+
+ return obj, n
+}
diff --git a/src/cmd/compile/internal/types2/conversions.go b/src/cmd/compile/internal/types2/conversions.go
index b8d8f6e150..43208c3d9b 100644
--- a/src/cmd/compile/internal/types2/conversions.go
+++ b/src/cmd/compile/internal/types2/conversions.go
@@ -56,7 +56,7 @@ func (check *Checker) conversion(x *operand, T Type) {
// If T's type set is empty, or if it doesn't
// have specific types, constant x cannot be
// converted.
- ok = T.(*TypeParam).underIs(func(u Type) bool {
+ ok = Unalias(T).(*TypeParam).underIs(func(u Type) bool {
// u is nil if there are no specific type terms
if u == nil {
cause = check.sprintf("%s does not contain specific types", T)
@@ -139,13 +139,16 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool {
return true
}
- // "V and T have identical underlying types if tags are ignored
- // and V and T are not type parameters"
- V := x.typ
+ origT := T
+ V := Unalias(x.typ)
+ T = Unalias(T)
Vu := under(V)
Tu := under(T)
Vp, _ := V.(*TypeParam)
Tp, _ := T.(*TypeParam)
+
+ // "V and T have identical underlying types if tags are ignored
+ // and V and T are not type parameters"
if IdenticalIgnoreTags(Vu, Tu) && Vp == nil && Tp == nil {
return true
}
@@ -267,7 +270,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool {
}
x.typ = V.typ
if !x.convertibleTo(check, T, cause) {
- errorf("cannot convert %s (in %s) to type %s", V.typ, Vp, T)
+ errorf("cannot convert %s (in %s) to type %s", V.typ, Vp, origT)
return false
}
return true
diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go
index 246568e25e..6a266de7fd 100644
--- a/src/cmd/compile/internal/types2/decl.go
+++ b/src/cmd/compile/internal/types2/decl.go
@@ -8,6 +8,7 @@ import (
"cmd/compile/internal/syntax"
"fmt"
"go/constant"
+ "internal/buildcfg"
. "internal/types/errors"
)
@@ -522,6 +523,10 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeN
// handle type parameters even if not allowed (Alias type is supported)
if tparam0 != nil {
+ if !versionErr && !buildcfg.Experiment.AliasTypeParams {
+ check.error(tdecl, UnsupportedFeature, "generic type alias requires GOEXPERIMENT=aliastypeparams")
+ versionErr = true
+ }
check.openScope(tdecl, "type parameters")
defer check.closeScope()
check.collectTypeParams(&alias.tparams, tdecl.TParamList)
@@ -738,7 +743,7 @@ func (check *Checker) checkFieldUniqueness(base *Named) {
// For historical consistency, we report the primary error on the
// method, and the alt decl on the field.
err := check.newError(DuplicateFieldAndMethod)
- err.addf(alt, "field and method with the same name %s", quote(fld.name))
+ err.addf(alt, "field and method with the same name %s", fld.name)
err.addAltDecl(fld)
err.report()
}
diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go
index b2ff262762..92949a924d 100644
--- a/src/cmd/compile/internal/types2/expr.go
+++ b/src/cmd/compile/internal/types2/expr.go
@@ -131,6 +131,7 @@ var op2str2 = [...]string{
// If typ is a type parameter, underIs returns the result of typ.underIs(f).
// Otherwise, underIs returns the result of f(under(typ)).
func underIs(typ Type, f func(Type) bool) bool {
+ typ = Unalias(typ)
if tpar, _ := typ.(*TypeParam); tpar != nil {
return tpar.underIs(f)
}
@@ -1012,7 +1013,7 @@ func (check *Checker) nonGeneric(T *target, x *operand) {
}
var what string
switch t := x.typ.(type) {
- case *Named:
+ case *Alias, *Named:
if isGeneric(t) {
what = "type"
}
diff --git a/src/cmd/compile/internal/types2/format.go b/src/cmd/compile/internal/types2/format.go
index 1b9cf606b7..442d219d1a 100644
--- a/src/cmd/compile/internal/types2/format.go
+++ b/src/cmd/compile/internal/types2/format.go
@@ -14,39 +14,6 @@ import (
"strings"
)
-// quote encloses s in `' quotes, as in `foo', except for _,
-// which is left alone.
-//
-// Use to prevent confusion when user supplied names alter the
-// meaning of an error message.
-//
-// For instance, report
-//
-// duplicate method `wanted'
-//
-// rather than
-//
-// duplicate method wanted
-//
-// Exceptions:
-//
-// - don't quote _:
-// `_' is ugly and not necessary
-// - don't quote after a ":" as there's no need for it:
-// undefined name: foo
-// - don't quote if the name is used correctly in a statement:
-// goto L jumps over variable declaration
-//
-// quote encloses s in `' quotes, as in `foo',
-// except for _ which is left alone.
-func quote(s string) string {
- if s == "_" {
- // `_' is ugly and not necessary
- return s
- }
- return "`" + s + "'"
-}
-
func sprintf(qf Qualifier, tpSubscripts bool, format string, args ...any) string {
for i, arg := range args {
switch a := arg.(type) {
diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go
index 1cdc4e79a2..122ac9e04f 100644
--- a/src/cmd/compile/internal/types2/infer.go
+++ b/src/cmd/compile/internal/types2/infer.go
@@ -184,6 +184,10 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
// Thus, for untyped arguments we only need to look at parameter types
// that are single type parameters.
// Also, untyped nils don't have a default type and can be ignored.
+ // Finally, it's not possible to have an alias type denoting a type
+ // parameter declared by the current function and use it in the same
+ // function signature; hence we don't need to Unalias before the
+ // .(*TypeParam) type assertion above.
untyped = append(untyped, i)
}
}
@@ -306,7 +310,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
// maximum untyped type for each of those parameters, if possible.
var maxUntyped map[*TypeParam]Type // lazily allocated (we may not need it)
for _, index := range untyped {
- tpar := params.At(index).typ.(*TypeParam) // is type parameter by construction of untyped
+ tpar := params.At(index).typ.(*TypeParam) // is type parameter (no alias) by construction of untyped
if u.at(tpar) == nil {
arg := args[index] // arg corresponding to tpar
if maxUntyped == nil {
@@ -689,6 +693,7 @@ type cycleFinder struct {
}
func (w *cycleFinder) typ(typ Type) {
+ typ = Unalias(typ)
if w.seen[typ] {
// We have seen typ before. If it is one of the type parameters
// in w.tparams, iterative substitution will lead to infinite expansion.
@@ -710,8 +715,8 @@ func (w *cycleFinder) typ(typ Type) {
case *Basic:
// nothing to do
- case *Alias:
- w.typ(Unalias(t))
+ // *Alias:
+ // This case should not occur because of Unalias(typ) at the top.
case *Array:
w.typ(t.elem)
diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go
index a25cb141ec..72227ab122 100644
--- a/src/cmd/compile/internal/types2/instantiate.go
+++ b/src/cmd/compile/internal/types2/instantiate.go
@@ -14,12 +14,20 @@ import (
. "internal/types/errors"
)
+// A genericType implements access to its type parameters.
+type genericType interface {
+ Type
+ TypeParams() *TypeParamList
+}
+
// Instantiate instantiates the type orig with the given type arguments targs.
-// orig must be a *Named or a *Signature type. If there is no error, the
-// resulting Type is an instantiated type of the same kind (either a *Named or
-// a *Signature). Methods attached to a *Named type are also instantiated, and
-// associated with a new *Func that has the same position as the original
-// method, but nil function scope.
+// orig must be an *Alias, *Named, or *Signature type. If there is no error,
+// the resulting Type is an instantiated type of the same kind (*Alias, *Named
+// or *Signature, respectively).
+//
+// Methods attached to a *Named type are also instantiated, and associated with
+// a new *Func that has the same position as the original method, but nil function
+// scope.
//
// If ctxt is non-nil, it may be used to de-duplicate the instance against
// previous instances with the same identity. As a special case, generic
@@ -29,10 +37,10 @@ import (
// not guarantee that identical instances are deduplicated in all cases.
//
// If validate is set, Instantiate verifies that the number of type arguments
-// and parameters match, and that the type arguments satisfy their
-// corresponding type constraints. If verification fails, the resulting error
-// may wrap an *ArgumentError indicating which type argument did not satisfy
-// its corresponding type parameter constraint, and why.
+// and parameters match, and that the type arguments satisfy their respective
+// type constraints. If verification fails, the resulting error may wrap an
+// *ArgumentError indicating which type argument did not satisfy its type parameter
+// constraint, and why.
//
// If validate is not set, Instantiate does not verify the type argument count
// or whether the type arguments satisfy their constraints. Instantiate is
@@ -41,17 +49,15 @@ import (
// count is incorrect; for *Named types, a panic may occur later inside the
// *Named API.
func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, error) {
+ assert(len(targs) > 0)
if ctxt == nil {
ctxt = NewContext()
}
+ orig_ := orig.(genericType) // signature of Instantiate must not change for backward-compatibility
+
if validate {
- var tparams []*TypeParam
- switch t := orig.(type) {
- case *Named:
- tparams = t.TypeParams().list()
- case *Signature:
- tparams = t.TypeParams().list()
- }
+ tparams := orig_.TypeParams().list()
+ assert(len(tparams) > 0)
if len(targs) != len(tparams) {
return nil, fmt.Errorf("got %d type arguments but %s has %d type parameters", len(targs), orig, len(tparams))
}
@@ -60,7 +66,7 @@ func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, e
}
}
- inst := (*Checker)(nil).instance(nopos, orig, targs, nil, ctxt)
+ inst := (*Checker)(nil).instance(nopos, orig_, targs, nil, ctxt)
return inst, nil
}
@@ -75,7 +81,7 @@ func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, e
// must be non-nil.
//
// For Named types the resulting instance may be unexpanded.
-func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, expanding *Named, ctxt *Context) (res Type) {
+func (check *Checker) instance(pos syntax.Pos, orig genericType, targs []Type, expanding *Named, ctxt *Context) (res Type) {
// The order of the contexts below matters: we always prefer instances in the
// expanding instance context in order to preserve reference cycles.
//
@@ -97,8 +103,9 @@ func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, expandin
hashes[i] = ctxt.instanceHash(orig, targs)
}
- // If local is non-nil, updateContexts return the type recorded in
- // local.
+ // Record the result in all contexts.
+ // Prefer to re-use existing types from expanding context, if it exists, to reduce
+ // the memory pinned by the Named type.
updateContexts := func(res Type) Type {
for i := len(ctxts) - 1; i >= 0; i-- {
res = ctxts[i].update(hashes[i], orig, targs, res)
@@ -118,6 +125,21 @@ func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, expandin
case *Named:
res = check.newNamedInstance(pos, orig, targs, expanding) // substituted lazily
+ case *Alias:
+ // TODO(gri) is this correct?
+ assert(expanding == nil) // Alias instances cannot be reached from Named types
+
+ tparams := orig.TypeParams()
+ // TODO(gri) investigate if this is needed (type argument and parameter count seem to be correct here)
+ if !check.validateTArgLen(pos, orig.String(), tparams.Len(), len(targs)) {
+ return Typ[Invalid]
+ }
+ if tparams.Len() == 0 {
+ return orig // nothing to do (minor optimization)
+ }
+
+ return check.newAliasInstance(pos, orig, targs, ctxt)
+
case *Signature:
assert(expanding == nil) // function instances cannot be reached from Named types
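
Instantiate keeps its exported signature; only the set of accepted dynamic
types for orig grows. A sketch of the long-standing *Named path through the
exported go/types.Instantiate (the new *Alias path additionally requires a
generic type alias):

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"
        "go/types"
    )

    func main() {
        const src = `package p; type Pair[K comparable, V any] struct{ k K; v V }`
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            panic(err)
        }
        var conf types.Config
        pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
        if err != nil {
            panic(err)
        }
        pair := pkg.Scope().Lookup("Pair").Type()
        // validate=true checks the argument count and that string satisfies
        // comparable and int satisfies any.
        inst, err := types.Instantiate(nil, pair, []types.Type{types.Typ[types.String], types.Typ[types.Int]}, true)
        fmt.Println(inst, err) // p.Pair[string, int] <nil>
    }
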
diff --git a/src/cmd/compile/internal/types2/issues_test.go b/src/cmd/compile/internal/types2/issues_test.go
index b087550b80..b4da3c0b91 100644
--- a/src/cmd/compile/internal/types2/issues_test.go
+++ b/src/cmd/compile/internal/types2/issues_test.go
@@ -258,11 +258,11 @@ func TestIssue22525(t *testing.T) {
conf := Config{Error: func(err error) { got += err.Error() + "\n" }}
typecheck(src, &conf, nil) // do not crash
want := "\n" +
- "p:1:27: `a' declared and not used\n" +
- "p:1:30: `b' declared and not used\n" +
- "p:1:33: `c' declared and not used\n" +
- "p:1:36: `d' declared and not used\n" +
- "p:1:39: `e' declared and not used\n"
+ "p:1:27: declared and not used: a\n" +
+ "p:1:30: declared and not used: b\n" +
+ "p:1:33: declared and not used: c\n" +
+ "p:1:36: declared and not used: d\n" +
+ "p:1:39: declared and not used: e\n"
if got != want {
t.Errorf("got: %swant: %s", got, want)
}
@@ -600,7 +600,7 @@ var _ T = template /* ERRORx "cannot use.*text/template.* as T value" */.Templat
}
func TestIssue50646(t *testing.T) {
- anyType := Universe.Lookup("any").Type()
+ anyType := Universe.Lookup("any").Type().Underlying()
comparableType := Universe.Lookup("comparable").Type()
if !Comparable(anyType) {
diff --git a/src/cmd/compile/internal/types2/labels.go b/src/cmd/compile/internal/types2/labels.go
index 61b3ca7511..548df7925b 100644
--- a/src/cmd/compile/internal/types2/labels.go
+++ b/src/cmd/compile/internal/types2/labels.go
@@ -26,13 +26,11 @@ func (check *Checker) labels(body *syntax.BlockStmt) {
name := jmp.Label.Value
if alt := all.Lookup(name); alt != nil {
msg = "goto %s jumps into block"
- alt.(*Label).used = true // avoid another error
code = JumpIntoBlock
- // don't quote name here because "goto L" matches the code
+ alt.(*Label).used = true // avoid another error
} else {
msg = "label %s not declared"
code = UndeclaredLabel
- name = quote(name)
}
check.errorf(jmp.Label, code, msg, name)
}
@@ -41,7 +39,7 @@ func (check *Checker) labels(body *syntax.BlockStmt) {
for name, obj := range all.elems {
obj = resolve(name, obj)
if lbl := obj.(*Label); !lbl.used {
- check.softErrorf(lbl.pos, UnusedLabel, "label %s declared and not used", quote(lbl.name))
+ check.softErrorf(lbl.pos, UnusedLabel, "label %s declared and not used", lbl.name)
}
}
}
@@ -137,7 +135,7 @@ func (check *Checker) blockBranches(all *Scope, parent *block, lstmt *syntax.Lab
if alt := all.Insert(lbl); alt != nil {
err := check.newError(DuplicateLabel)
err.soft = true
- err.addf(lbl.pos, "label %s already declared", quote(name))
+ err.addf(lbl.pos, "label %s already declared", name)
err.addAltDecl(alt)
err.report()
// ok to continue
@@ -193,7 +191,7 @@ func (check *Checker) blockBranches(all *Scope, parent *block, lstmt *syntax.Lab
}
}
if !valid {
- check.errorf(s.Label, MisplacedLabel, "invalid break label %s", quote(name))
+ check.errorf(s.Label, MisplacedLabel, "invalid break label %s", name)
return
}
@@ -208,7 +206,7 @@ func (check *Checker) blockBranches(all *Scope, parent *block, lstmt *syntax.Lab
}
}
if !valid {
- check.errorf(s.Label, MisplacedLabel, "invalid continue label %s", quote(name))
+ check.errorf(s.Label, MisplacedLabel, "invalid continue label %s", name)
return
}
diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go
index aa7ab00c33..1859b27aa4 100644
--- a/src/cmd/compile/internal/types2/named.go
+++ b/src/cmd/compile/internal/types2/named.go
@@ -485,9 +485,17 @@ func (t *Named) methodIndex(name string, foldCase bool) int {
return -1
}
-// TODO(gri) Investigate if Unalias can be moved to where underlying is set.
-func (t *Named) Underlying() Type { return Unalias(t.resolve().underlying) }
-func (t *Named) String() string { return TypeString(t, nil) }
+// Underlying returns the [underlying type] of the named type t, resolving all
+// forwarding declarations. Underlying types are never Named, TypeParam, or
+// Alias types.
+//
+// [underlying type]: https://go.dev/ref/spec#Underlying_types.
+func (t *Named) Underlying() Type {
+ // TODO(gri) Investigate if Unalias can be moved to where underlying is set.
+ return Unalias(t.resolve().underlying)
+}
+
+func (t *Named) String() string { return TypeString(t, nil) }
// ----------------------------------------------------------------------------
// Implementation
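
The new doc comment is easy to check against the exported API; a minimal
sketch:

    package main

    import (
        "fmt"
        "go/token"
        "go/types"
    )

    func main() {
        pkg := types.NewPackage("p", "p")
        obj := types.NewTypeName(token.NoPos, pkg, "N", nil)
        n := types.NewNamed(obj, types.Typ[types.Int], nil) // type N int

        // Underlying resolves all the way down: the result is never a
        // *types.Named, *types.TypeParam, or *types.Alias.
        fmt.Printf("%T\n", n.Underlying()) // *types.Basic
    }
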
diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go
index 3026777cad..f9a25473a1 100644
--- a/src/cmd/compile/internal/types2/object.go
+++ b/src/cmd/compile/internal/types2/object.go
@@ -577,7 +577,7 @@ func writeObject(buf *bytes.Buffer, obj Object, qf Qualifier) {
// Special handling for any: because WriteType will format 'any' as 'any',
// resulting in the object string `type any = any` rather than `type any =
// interface{}`. To avoid this, swap in a different empty interface.
- if obj == universeAny {
+ if obj.Name() == "any" && obj.Parent() == Universe {
assert(Identical(typ, &emptyInterface))
typ = &emptyInterface
}
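
Since any now has two possible representations, pointer comparison against a
single universeAny object no longer works; identity is established by name and
parent scope instead. The same representation-independent test, sketched with
the exported API:

    package main

    import (
        "fmt"
        "go/types"
    )

    // isUniverseAny reports whether obj is the predeclared any, regardless of
    // whether it is represented as an *Alias (gotypesalias=1) or as the
    // legacy interface (gotypesalias=0).
    func isUniverseAny(obj types.Object) bool {
        return obj != nil && obj.Name() == "any" && obj.Parent() == types.Universe
    }

    func main() {
        fmt.Println(isUniverseAny(types.Universe.Lookup("any"))) // true
        fmt.Println(isUniverseAny(types.Universe.Lookup("int"))) // false
    }
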
diff --git a/src/cmd/compile/internal/types2/operand.go b/src/cmd/compile/internal/types2/operand.go
index 15ec86fb5e..a176b9faf3 100644
--- a/src/cmd/compile/internal/types2/operand.go
+++ b/src/cmd/compile/internal/types2/operand.go
@@ -186,7 +186,7 @@ func operandString(x *operand, qf Qualifier) string {
}
buf.WriteString(intro)
WriteType(&buf, x.typ, qf)
- if tpar, _ := x.typ.(*TypeParam); tpar != nil {
+ if tpar, _ := Unalias(x.typ).(*TypeParam); tpar != nil {
buf.WriteString(" constrained by ")
WriteType(&buf, tpar.bound, qf) // do not compute interface type sets here
// If we have the type set and it's empty, say so for better error messages.
@@ -260,7 +260,9 @@ func (x *operand) assignableTo(check *Checker, T Type, cause *string) (bool, Cod
return true, 0 // avoid spurious errors
}
- V := x.typ
+ origT := T
+ V := Unalias(x.typ)
+ T = Unalias(T)
// x's type is identical to T
if Identical(V, T) {
@@ -386,7 +388,7 @@ func (x *operand) assignableTo(check *Checker, T Type, cause *string) (bool, Cod
x.typ = V.typ
ok, code = x.assignableTo(check, T, cause)
if !ok {
- errorf("cannot assign %s (in %s) to %s", V.typ, Vp, T)
+ errorf("cannot assign %s (in %s) to %s", V.typ, Vp, origT)
return false
}
return true
diff --git a/src/cmd/compile/internal/types2/predicates.go b/src/cmd/compile/internal/types2/predicates.go
index 6d9e6ec760..6403be6bcb 100644
--- a/src/cmd/compile/internal/types2/predicates.go
+++ b/src/cmd/compile/internal/types2/predicates.go
@@ -137,6 +137,9 @@ func hasEmptyTypeset(t Type) bool {
// TODO(gri) should we include signatures or assert that they are not present?
func isGeneric(t Type) bool {
// A parameterized type is only generic if it doesn't have an instantiation already.
+ if alias, _ := t.(*Alias); alias != nil && alias.tparams != nil && alias.targs == nil {
+ return true
+ }
named := asNamed(t)
return named != nil && named.obj != nil && named.inst == nil && named.TypeParams().Len() > 0
}
@@ -518,7 +521,9 @@ func identicalInstance(xorig Type, xargs []Type, yorig Type, yargs []Type) bool
// it returns the incoming type for all other types. The default type
// for untyped nil is untyped nil.
func Default(t Type) Type {
- if t, ok := Unalias(t).(*Basic); ok {
+ // Alias and named types cannot denote untyped types
+ // so there's no need to call Unalias or under, below.
+ if t, _ := t.(*Basic); t != nil {
switch t.kind {
case UntypedBool:
return Typ[Bool]
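
Observable behavior of Default is unchanged; the rewrite only drops a
redundant Unalias, since neither an alias nor a named type can denote an
untyped type. Through the exported API:

    package main

    import (
        "fmt"
        "go/types"
    )

    func main() {
        fmt.Println(types.Default(types.Typ[types.UntypedInt]))   // int
        fmt.Println(types.Default(types.Typ[types.UntypedFloat])) // float64
        fmt.Println(types.Default(types.Typ[types.UntypedBool]))  // bool
        fmt.Println(types.Default(types.Typ[types.Int]))          // int (typed: returned as is)
    }
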
diff --git a/src/cmd/compile/internal/types2/scope.go b/src/cmd/compile/internal/types2/scope.go
index b75e5cbaf7..f5ad25e81e 100644
--- a/src/cmd/compile/internal/types2/scope.go
+++ b/src/cmd/compile/internal/types2/scope.go
@@ -68,7 +68,19 @@ func (s *Scope) Child(i int) *Scope { return s.children[i] }
// Lookup returns the object in scope s with the given name if such an
// object exists; otherwise the result is nil.
func (s *Scope) Lookup(name string) Object {
- return resolve(name, s.elems[name])
+ obj := resolve(name, s.elems[name])
+ // Hijack Lookup for "any": with gotypesalias=1, we want the Universe to
+ // return an Alias for "any", and with gotypesalias=0 we want to return
+ // the legacy representation of aliases.
+ //
+ // This is rather tricky, but works out after auditing the usage of
+ // s.elems. The only external API to access scope elements is Lookup.
+ //
+ // TODO: remove this once gotypesalias=0 is no longer supported.
+ if obj == universeAnyAlias && !aliasAny() {
+ return universeAnyNoAlias
+ }
+ return obj
}
// LookupParent follows the parent chain of scopes starting with s until
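
The effect of the hijack is observable through go/types, whose Universe scope
mirrors this one; which representation Lookup returns depends on the
gotypesalias GODEBUG setting:

    package main

    import (
        "fmt"
        "go/types"
    )

    func main() {
        obj := types.Universe.Lookup("any")
        // Prints *types.Alias under gotypesalias=1 (the default since Go
        // 1.23); run with GODEBUG=gotypesalias=0 to get the legacy
        // *types.Interface representation instead.
        fmt.Printf("%T\n", obj.Type())
    }
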
diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go
index bb4d32b016..7a5a2c155f 100644
--- a/src/cmd/compile/internal/types2/signature.go
+++ b/src/cmd/compile/internal/types2/signature.go
@@ -73,9 +73,6 @@ func (s *Signature) Recv() *Var { return s.recv }
// TypeParams returns the type parameters of signature s, or nil.
func (s *Signature) TypeParams() *TypeParamList { return s.tparams }
-// SetTypeParams sets the type parameters of signature s.
-func (s *Signature) SetTypeParams(tparams []*TypeParam) { s.tparams = bindTParams(tparams) }
-
// RecvTypeParams returns the receiver type parameters of signature s, or nil.
func (s *Signature) RecvTypeParams() *TypeParamList { return s.rparams }
diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go
index 405af78572..ed79b92c46 100644
--- a/src/cmd/compile/internal/types2/stdlib_test.go
+++ b/src/cmd/compile/internal/types2/stdlib_test.go
@@ -396,7 +396,8 @@ func typecheckFiles(path string, filenames []string, importer Importer) (*Packag
Error: func(err error) {
errs = append(errs, err)
},
- Importer: importer,
+ Importer: importer,
+ EnableAlias: true,
}
info := Info{Uses: make(map[*syntax.Name]Object)}
pkg, _ := conf.Check(path, files, &info)
diff --git a/src/cmd/compile/internal/types2/stmt.go b/src/cmd/compile/internal/types2/stmt.go
index 7fd7009e13..656f0e2eb2 100644
--- a/src/cmd/compile/internal/types2/stmt.go
+++ b/src/cmd/compile/internal/types2/stmt.go
@@ -64,7 +64,7 @@ func (check *Checker) usage(scope *Scope) {
return cmpPos(unused[i].pos, unused[j].pos) < 0
})
for _, v := range unused {
- check.softErrorf(v.pos, UnusedVar, "%s declared and not used", quote(v.name))
+ check.softErrorf(v.pos, UnusedVar, "declared and not used: %s", v.name)
}
for _, scope := range scope.children {
@@ -496,7 +496,7 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) {
for _, obj := range res.vars {
if alt := check.lookup(obj.name); alt != nil && alt != obj {
err := check.newError(OutOfScopeResult)
- err.addf(s, "result parameter %s not in scope at return", quote(obj.name))
+ err.addf(s, "result parameter %s not in scope at return", obj.name)
err.addf(alt, "inner declaration of %s", obj)
err.report()
// ok to continue
@@ -898,7 +898,7 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
lhs := [2]Expr{sKey, sValue} // sKey, sValue may be nil
rhs := [2]Type{key, val} // key, val may be nil
- constIntRange := x.mode == constant_ && isInteger(x.typ)
+ rangeOverInt := isInteger(x.typ)
if isDef {
// short variable declaration
@@ -923,19 +923,27 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
check.errorf(lhs, InvalidSyntaxTree, "cannot declare %s", lhs)
obj = NewVar(lhs.Pos(), check.pkg, "_", nil) // dummy variable
}
+ assert(obj.typ == nil)
- // initialize lhs variable
- if constIntRange {
- check.initVar(obj, &x, "range clause")
- } else if typ := rhs[i]; typ != nil {
- x.mode = value
- x.expr = lhs // we don't have a better rhs expression to use here
- x.typ = typ
- check.initVar(obj, &x, "assignment") // error is on variable, use "assignment" not "range clause"
- } else {
+ // initialize lhs iteration variable, if any
+ typ := rhs[i]
+ if typ == nil {
obj.typ = Typ[Invalid]
obj.used = true // don't complain about unused variable
+ continue
+ }
+
+ if rangeOverInt {
+ assert(i == 0) // at most one iteration variable (rhs[1] == nil for rangeOverInt)
+ check.initVar(obj, &x, "range clause")
+ } else {
+ var y operand
+ y.mode = value
+ y.expr = lhs // we don't have a better rhs expression to use here
+ y.typ = typ
+ check.initVar(obj, &y, "assignment") // error is on variable, use "assignment" not "range clause"
}
+ assert(obj.typ != nil)
}
// declare variables
@@ -954,21 +962,36 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
continue
}
- if constIntRange {
+ // assign to lhs iteration variable, if any
+ typ := rhs[i]
+ if typ == nil {
+ continue
+ }
+
+ if rangeOverInt {
+ assert(i == 0) // at most one iteration variable (rhs[1] == nil for rangeOverInt)
check.assignVar(lhs, nil, &x, "range clause")
- } else if typ := rhs[i]; typ != nil {
- x.mode = value
- x.expr = lhs // we don't have a better rhs expression to use here
- x.typ = typ
- check.assignVar(lhs, nil, &x, "assignment") // error is on variable, use "assignment" not "range clause"
+ // If the assignment succeeded, if x was untyped before, it now
+ // has a type inferred via the assignment. It must be an integer.
+ // (go.dev/issue/67027)
+ if x.mode != invalid && !isInteger(x.typ) {
+ check.softErrorf(lhs, InvalidRangeExpr, "cannot use iteration variable of type %s", x.typ)
+ }
+ } else {
+ var y operand
+ y.mode = value
+ y.expr = lhs // we don't have a better rhs expression to use here
+ y.typ = typ
+ check.assignVar(lhs, nil, &y, "assignment") // error is on variable, use "assignment" not "range clause"
}
}
- } else if constIntRange {
+ } else if rangeOverInt {
// If we don't have any iteration variables, we still need to
// check that a (possibly untyped) integer range expression x
// is valid.
// We do this by checking the assignment _ = x. This ensures
- // that an untyped x can be converted to a value of type int.
+ // that an untyped x can be converted to a value of its default
+ // type (rune or int).
check.assignment(&x, nil, "range clause")
}
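
The rename from constIntRange to rangeOverInt reflects that the range operand
may be a non-constant integer (range-over-int, Go 1.22). The new softErrorf
closes an inference loophole (issue 67027) where assigning the untyped operand
to a predeclared variable gave it a non-integer type; a hypothetical program
that is now rejected:

    package p

    func f() {
        var i float64
        for i = range 10 { // error: cannot use iteration variable of type float64
            _ = i
        }
    }
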
diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go
index 215d1f2d4f..650ae846a6 100644
--- a/src/cmd/compile/internal/types2/subst.go
+++ b/src/cmd/compile/internal/types2/subst.go
@@ -96,15 +96,26 @@ func (subst *subster) typ(typ Type) Type {
// nothing to do
case *Alias:
- rhs := subst.typ(t.fromRHS)
- if rhs != t.fromRHS {
- // This branch cannot be reached because the RHS of an alias
- // may only contain type parameters of an enclosing function.
- // Such function bodies are never "instantiated" and thus
- // substitution is not called on locally declared alias types.
- // TODO(gri) adjust once parameterized aliases are supported
- panic("unreachable for unparameterized aliases")
- // return subst.check.newAlias(t.obj, rhs)
+ // This code follows the code for *Named types closely.
+ // TODO(gri) try to factor better
+ orig := t.Origin()
+ n := orig.TypeParams().Len()
+ if n == 0 {
+ return t // type is not parameterized
+ }
+
+ // TODO(gri) do we need this for Alias types?
+ if t.TypeArgs().Len() != n {
+ return Typ[Invalid] // error reported elsewhere
+ }
+
+ // already instantiated
+ // For each (existing) type argument determine if it needs
+ // to be substituted; i.e., if it is or contains a type parameter
+ // that has a type argument for it.
+ targs, updated := subst.typeList(t.TypeArgs().list())
+ if updated {
+ return subst.check.newAliasInstance(subst.pos, t.orig, targs, subst.ctxt)
}
case *Array:
@@ -221,18 +232,6 @@ func (subst *subster) typ(typ Type) Type {
}
case *Named:
- // dump is for debugging
- dump := func(string, ...interface{}) {}
- if subst.check != nil && subst.check.conf.Trace {
- subst.check.indent++
- defer func() {
- subst.check.indent--
- }()
- dump = func(format string, args ...interface{}) {
- subst.check.trace(subst.pos, format, args...)
- }
- }
-
// subst is called during expansion, so in this function we need to be
// careful not to call any methods that would cause t to be expanded: doing
// so would result in deadlock.
@@ -241,44 +240,26 @@ func (subst *subster) typ(typ Type) Type {
orig := t.Origin()
n := orig.TypeParams().Len()
if n == 0 {
- dump(">>> %s is not parameterized", t)
return t // type is not parameterized
}
- var newTArgs []Type
if t.TypeArgs().Len() != n {
return Typ[Invalid] // error reported elsewhere
}
// already instantiated
- dump(">>> %s already instantiated", t)
- // For each (existing) type argument targ, determine if it needs
+ // For each (existing) type argument determine if it needs
// to be substituted; i.e., if it is or contains a type parameter
// that has a type argument for it.
- for i, targ := range t.TypeArgs().list() {
- dump(">>> %d targ = %s", i, targ)
- new_targ := subst.typ(targ)
- if new_targ != targ {
- dump(">>> substituted %d targ %s => %s", i, targ, new_targ)
- if newTArgs == nil {
- newTArgs = make([]Type, n)
- copy(newTArgs, t.TypeArgs().list())
- }
- newTArgs[i] = new_targ
- }
+ targs, updated := subst.typeList(t.TypeArgs().list())
+ if updated {
+ // Create a new instance and populate the context to avoid endless
+ // recursion. The position used here is irrelevant because validation only
+ // occurs on t (we don't call validType on named), but we use subst.pos to
+ // help with debugging.
+ return subst.check.instance(subst.pos, orig, targs, subst.expanding, subst.ctxt)
}
- if newTArgs == nil {
- dump(">>> nothing to substitute in %s", t)
- return t // nothing to substitute
- }
-
- // Create a new instance and populate the context to avoid endless
- // recursion. The position used here is irrelevant because validation only
- // occurs on t (we don't call validType on named), but we use subst.pos to
- // help with debugging.
- return subst.check.instance(subst.pos, orig, newTArgs, subst.expanding, subst.ctxt)
-
case *TypeParam:
return subst.smap.lookup(t)
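
The new *Alias arm is needed once aliases can be parameterized: substituting
the type arguments of an enclosing instantiation must now descend into alias
instances as well. A language-level sketch of code that exercises this path
(generic type aliases, fully supported as of Go 1.24, earlier only behind
GODEBUG=aliastypeparams=1):

    package p

    // A parameterized alias.
    type Set[K comparable] = map[K]bool

    // Instantiating Bag[string] forces subst to rewrite the alias instance
    // Set[E] to Set[string] inside the struct type.
    type Bag[E comparable] struct {
        elems Set[E]
    }

    var _ = Bag[string]{elems: Set[string]{"x": true}}
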
diff --git a/src/cmd/compile/internal/types2/typeparam.go b/src/cmd/compile/internal/types2/typeparam.go
index 5c6030b3fb..9ad064906f 100644
--- a/src/cmd/compile/internal/types2/typeparam.go
+++ b/src/cmd/compile/internal/types2/typeparam.go
@@ -86,6 +86,10 @@ func (t *TypeParam) SetConstraint(bound Type) {
t.iface()
}
+// Underlying returns the [underlying type] of the type parameter t, which is
+// the underlying type of its constraint. This type is always an interface.
+//
+// [underlying type]: https://go.dev/ref/spec#Underlying_types.
func (t *TypeParam) Underlying() Type {
return t.iface()
}
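
A sketch verifying the documented behavior with the exported constructors:

    package main

    import (
        "fmt"
        "go/token"
        "go/types"
    )

    func main() {
        tn := types.NewTypeName(token.NoPos, nil, "T", nil)
        tp := types.NewTypeParam(tn, types.NewInterfaceType(nil, nil)) // T interface{}

        // The underlying type of a type parameter is the underlying type of
        // its constraint, and is always an interface.
        fmt.Printf("%T\n", tp.Underlying()) // *types.Interface
    }
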
diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go
index 2ce586e7a7..0457502e39 100644
--- a/src/cmd/compile/internal/types2/typeset.go
+++ b/src/cmd/compile/internal/types2/typeset.go
@@ -226,8 +226,8 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
case explicit:
if check != nil {
err := check.newError(DuplicateDecl)
- err.addf(atPos(pos), "duplicate method %s", quote(m.name))
- err.addf(atPos(mpos[other.(*Func)]), "other declaration of %s", quote(m.name))
+ err.addf(atPos(pos), "duplicate method %s", m.name)
+ err.addf(atPos(mpos[other.(*Func)]), "other declaration of method %s", m.name)
err.report()
}
default:
@@ -240,8 +240,8 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
check.later(func() {
if pos.IsKnown() && !check.allowVersion(atPos(pos), go1_14) || !Identical(m.typ, other.Type()) {
err := check.newError(DuplicateDecl)
- err.addf(atPos(pos), "duplicate method %s", quote(m.name))
- err.addf(atPos(mpos[other.(*Func)]), "other declaration of %s", quote(m.name))
+ err.addf(atPos(pos), "duplicate method %s", m.name)
+ err.addf(atPos(mpos[other.(*Func)]), "other declaration of method %s", m.name)
err.report()
}
}).describef(atPos(pos), "duplicate method check for %s", m.name)
diff --git a/src/cmd/compile/internal/types2/typestring.go b/src/cmd/compile/internal/types2/typestring.go
index 723c074e60..7db86a70f1 100644
--- a/src/cmd/compile/internal/types2/typestring.go
+++ b/src/cmd/compile/internal/types2/typestring.go
@@ -211,10 +211,11 @@ func (w *typeWriter) typ(typ Type) {
case *Interface:
if w.ctxt == nil {
- if t == universeAny.Type() {
+ if t == universeAnyAlias.Type().Underlying() {
// When not hashing, we can try to improve type strings by writing "any"
- // for a type that is pointer-identical to universeAny. This logic should
- // be deprecated by more robust handling for aliases.
+ // for a type that is pointer-identical to universeAny.
+ // TODO(rfindley): this logic should not be necessary with
+ // gotypesalias=1. Remove once that is always the case.
w.string("any")
break
}
@@ -334,6 +335,10 @@ func (w *typeWriter) typ(typ Type) {
case *Alias:
w.typeName(t.obj)
+ if list := t.targs.list(); len(list) != 0 {
+ // instantiated type
+ w.typeList(list)
+ }
if w.ctxt != nil {
// TODO(gri) do we need to print the alias type name, too?
w.typ(Unalias(t.obj.typ))
diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go
index ec012c24eb..6c121ae054 100644
--- a/src/cmd/compile/internal/types2/typexpr.go
+++ b/src/cmd/compile/internal/types2/typexpr.go
@@ -41,11 +41,19 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *TypeName, wantType
check.errorf(e, UndeclaredName, "undefined: %s", e.Value)
}
return
- case universeAny, universeComparable:
+ case universeComparable:
if !check.verifyVersionf(e, go1_18, "predeclared %s", e.Value) {
return // avoid follow-on errors
}
}
+ // Because the representation of any depends on gotypesalias, we don't check
+ // pointer identity here.
+ if obj.Name() == "any" && obj.Parent() == Universe {
+ if !check.verifyVersionf(e, go1_18, "predeclared %s", e.Value) {
+ return // avoid follow-on errors
+ }
+ }
+
check.recordUse(e, obj)
// If we want a type but don't have one, stop right here and avoid potential problems
@@ -87,7 +95,7 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *TypeName, wantType
switch obj := obj.(type) {
case *PkgName:
- check.errorf(e, InvalidPkgUse, "use of package %s not in selector", quote(obj.name))
+ check.errorf(e, InvalidPkgUse, "use of package %s not in selector", obj.name)
return
case *Const:
@@ -109,7 +117,7 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *TypeName, wantType
case *TypeName:
if !check.conf.EnableAlias && check.isBrokenAlias(obj) {
- check.errorf(e, InvalidDeclCycle, "invalid use of type alias %s in recursive type (see go.dev/issue/50729)", quote(obj.name))
+ check.errorf(e, InvalidDeclCycle, "invalid use of type alias %s in recursive type (see go.dev/issue/50729)", obj.name)
return
}
x.mode = typexpr
@@ -445,6 +453,10 @@ func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *
}()
}
+ defer func() {
+ setDefType(def, res)
+ }()
+
var cause string
gtyp := check.genericType(x, &cause)
if cause != "" {
@@ -454,21 +466,23 @@ func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *
return gtyp // error already reported
}
- orig := asNamed(gtyp)
- if orig == nil {
- panic(fmt.Sprintf("%v: cannot instantiate %v", x.Pos(), gtyp))
- }
-
// evaluate arguments
targs := check.typeList(xlist)
if targs == nil {
- setDefType(def, Typ[Invalid]) // avoid errors later due to lazy instantiation
return Typ[Invalid]
}
+ if orig, _ := gtyp.(*Alias); orig != nil {
+ return check.instance(x.Pos(), orig, targs, nil, check.context())
+ }
+
+ orig := asNamed(gtyp)
+ if orig == nil {
+ panic(fmt.Sprintf("%v: cannot instantiate %v", x.Pos(), gtyp))
+ }
+
// create the instance
inst := asNamed(check.instance(x.Pos(), orig, targs, nil, check.context()))
- setDefType(def, inst)
// orig.tparams may not be set up, so we need to do expansion later.
check.later(func() {
diff --git a/src/cmd/compile/internal/types2/under.go b/src/cmd/compile/internal/types2/under.go
index 6b24399de4..2d90c35d3b 100644
--- a/src/cmd/compile/internal/types2/under.go
+++ b/src/cmd/compile/internal/types2/under.go
@@ -22,6 +22,7 @@ func under(t Type) Type {
// identical element types), the single underlying type is the restricted
// channel type if the restrictions are always the same, or nil otherwise.
func coreType(t Type) Type {
+ t = Unalias(t)
tpar, _ := t.(*TypeParam)
if tpar == nil {
return under(t)
@@ -51,6 +52,7 @@ func coreType(t Type) Type {
// and strings as identical. In this case, if successful and we saw
// a string, the result is of type (possibly untyped) string.
func coreString(t Type) Type {
+ t = Unalias(t)
tpar, _ := t.(*TypeParam)
if tpar == nil {
return under(t) // string or untyped string
diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go
index 6838f270c1..8c91294d2b 100644
--- a/src/cmd/compile/internal/types2/unify.go
+++ b/src/cmd/compile/internal/types2/unify.go
@@ -205,10 +205,10 @@ func (u *unifier) join(x, y *TypeParam) bool {
return true
}
-// asTypeParam returns x.(*TypeParam) if x is a type parameter recorded with u.
+// asBoundTypeParam returns x.(*TypeParam) if x is a type parameter recorded with u.
// Otherwise, the result is nil.
-func (u *unifier) asTypeParam(x Type) *TypeParam {
- if x, _ := x.(*TypeParam); x != nil {
+func (u *unifier) asBoundTypeParam(x Type) *TypeParam {
+ if x, _ := Unalias(x).(*TypeParam); x != nil {
if _, found := u.handles[x]; found {
return x
}
@@ -269,7 +269,7 @@ func (u *unifier) inferred(tparams []*TypeParam) []Type {
// asInterface returns the underlying type of x as an interface if
// it is a non-type parameter interface. Otherwise it returns nil.
func asInterface(x Type) (i *Interface) {
- if _, ok := x.(*TypeParam); !ok {
+ if _, ok := Unalias(x).(*TypeParam); !ok {
i, _ = under(x).(*Interface)
}
return i
@@ -291,11 +291,8 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
u.depth--
}()
- x = Unalias(x)
- y = Unalias(y)
-
// nothing to do if x == y
- if x == y {
+ if x == y || Unalias(x) == Unalias(y) {
return true
}
@@ -314,7 +311,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
// Ensure that if we have at least one
// - defined type, make sure one is in y
// - type parameter recorded with u, make sure one is in x
- if asNamed(x) != nil || u.asTypeParam(y) != nil {
+ if asNamed(x) != nil || u.asBoundTypeParam(y) != nil {
if traceInference {
u.tracef("%s ≡ %s\t// swap", y, x)
}
@@ -358,7 +355,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
// isTypeLit(x) is false and y was not changed above. In other
// words, if y was a defined type, it is still a defined type
// (relevant for the logic below).
- switch px, py := u.asTypeParam(x), u.asTypeParam(y); {
+ switch px, py := u.asBoundTypeParam(x), u.asBoundTypeParam(y); {
case px != nil && py != nil:
// both x and y are type parameters
if u.join(px, py) {
@@ -449,7 +446,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
}
// x != y if we get here
- assert(x != y)
+ assert(x != y && Unalias(x) != Unalias(y))
// If u.EnableInterfaceInference is set and we don't require exact unification,
// if both types are interfaces, one interface must have a subset of the
@@ -573,6 +570,10 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
emode |= exact
}
+ // Continue with unaliased types but don't lose original alias names, if any (go.dev/issue/67628).
+ xorig, x := x, Unalias(x)
+ yorig, y := y, Unalias(y)
+
switch x := x.(type) {
case *Basic:
// Basic types are singletons except for the rune and byte
@@ -751,7 +752,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
case *TypeParam:
// x must be an unbound type parameter (see comment above).
if debug {
- assert(u.asTypeParam(x) == nil)
+ assert(u.asBoundTypeParam(x) == nil)
}
// By definition, a valid type argument must be in the type set of
// the respective type constraint. Therefore, the type argument's
@@ -774,13 +775,13 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
// need to take care of that case separately.
if cx := coreType(x); cx != nil {
if traceInference {
- u.tracef("core %s ≡ %s", x, y)
+ u.tracef("core %s ≡ %s", xorig, yorig)
}
// If y is a defined type, it may not match against cx which
// is an underlying type (incl. int, string, etc.). Use assign
// mode here so that the unifier automatically takes under(y)
// if necessary.
- return u.nify(cx, y, assign, p)
+ return u.nify(cx, yorig, assign, p)
}
}
// x != y and there's nothing to do
@@ -789,7 +790,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
// avoid a crash in case of nil type
default:
- panic(sprintf(nil, true, "u.nify(%s, %s, %d)", x, y, mode))
+ panic(sprintf(nil, true, "u.nify(%s, %s, %d)", xorig, yorig, mode))
}
return false
diff --git a/src/cmd/compile/internal/types2/universe.go b/src/cmd/compile/internal/types2/universe.go
index 8e1e4a2bb7..9c76ac2373 100644
--- a/src/cmd/compile/internal/types2/universe.go
+++ b/src/cmd/compile/internal/types2/universe.go
@@ -23,7 +23,8 @@ var (
universeIota Object
universeByte Type // uint8 alias, but has name "byte"
universeRune Type // int32 alias, but has name "rune"
- universeAny Object
+ universeAnyNoAlias *TypeName
+ universeAnyAlias *TypeName
universeError Type
universeComparable Object
)
@@ -65,7 +66,7 @@ var Typ = [...]*Basic{
UntypedNil: {UntypedNil, IsUntyped, "untyped nil"},
}
-var aliases = [...]*Basic{
+var basicAliases = [...]*Basic{
{Byte, IsInteger | IsUnsigned, "byte"},
{Rune, IsInteger, "rune"},
}
@@ -74,15 +75,41 @@ func defPredeclaredTypes() {
for _, t := range Typ {
def(NewTypeName(nopos, nil, t.name, t))
}
- for _, t := range aliases {
+ for _, t := range basicAliases {
def(NewTypeName(nopos, nil, t.name, t))
}
// type any = interface{}
- // Note: don't use &emptyInterface for the type of any. Using a unique
- // pointer allows us to detect any and format it as "any" rather than
- // interface{}, which clarifies user-facing error messages significantly.
- def(NewTypeName(nopos, nil, "any", &Interface{complete: true, tset: &topTypeSet}))
+ //
+ // Implement two representations of any: one for the legacy gotypesalias=0,
+ // and one for gotypesalias=1. This is necessary for consistent
+ // representation of interface aliases during type checking, and is
+ // implemented via hijacking [Scope.Lookup] for the [Universe] scope.
+ //
+ // Both representations use the same distinguished pointer for their RHS
+ // interface type, allowing us to detect any (even with the legacy
+ // representation), and format it as "any" rather than interface{}, which
+ // clarifies user-facing error messages significantly.
+ //
+ // TODO(rfindley): once the gotypesalias GODEBUG variable is obsolete (and we
+ // consistently use the Alias node), we should be able to clarify user-facing
+ // error messages without using a distinguished pointer for the any
+ // interface.
+ {
+ universeAnyNoAlias = NewTypeName(nopos, nil, "any", &Interface{complete: true, tset: &topTypeSet})
+ universeAnyNoAlias.setColor(black)
+ // ensure that the any TypeName reports a consistent Parent, after
+ // hijacking Universe.Lookup with gotypesalias=0.
+ universeAnyNoAlias.setParent(Universe)
+
+ // It shouldn't matter which representation of any is actually inserted
+ // into the Universe, but we lean toward the future and insert the Alias
+ // representation.
+ universeAnyAlias = NewTypeName(nopos, nil, "any", nil)
+ universeAnyAlias.setColor(black)
+ _ = NewAlias(universeAnyAlias, universeAnyNoAlias.Type().Underlying()) // Link TypeName and Alias
+ def(universeAnyAlias)
+ }
// type error interface{ Error() string }
{
@@ -250,7 +277,6 @@ func init() {
universeIota = Universe.Lookup("iota")
universeByte = Universe.Lookup("byte").Type()
universeRune = Universe.Lookup("rune").Type()
- universeAny = Universe.Lookup("any")
universeError = Universe.Lookup("error").Type()
universeComparable = Universe.Lookup("comparable")
}
diff --git a/src/cmd/compile/internal/types2/util.go b/src/cmd/compile/internal/types2/util.go
index 0422c03346..db0a3e70ba 100644
--- a/src/cmd/compile/internal/types2/util.go
+++ b/src/cmd/compile/internal/types2/util.go
@@ -36,7 +36,7 @@ func dddErrPos(call *syntax.CallExpr) *syntax.CallExpr {
return call
}
-// argErrPos returns the node (poser) for reportign an invalid argument count.
+// argErrPos returns the node (poser) for reporting an invalid argument count.
func argErrPos(call *syntax.CallExpr) *syntax.CallExpr { return call }
// ExprString returns a string representation of x.
diff --git a/src/cmd/compile/internal/types2/version.go b/src/cmd/compile/internal/types2/version.go
index 241b10d3e6..39ecb9c3af 100644
--- a/src/cmd/compile/internal/types2/version.go
+++ b/src/cmd/compile/internal/types2/version.go
@@ -5,7 +5,6 @@
package types2
import (
- "cmd/compile/internal/syntax"
"fmt"
"go/version"
"internal/goversion"
@@ -56,7 +55,7 @@ var (
func (check *Checker) allowVersion(at poser, v goVersion) bool {
fileVersion := check.conf.GoVersion
if pos := at.Pos(); pos.IsKnown() {
- fileVersion = check.versions[base(pos)]
+ fileVersion = check.versions[pos.FileBase()]
}
// We need asGoVersion (which calls version.Lang) below
@@ -76,19 +75,3 @@ func (check *Checker) verifyVersionf(at poser, v goVersion, format string, args
}
return true
}
-
-// base finds the underlying PosBase of the source file containing pos,
-// skipping over intermediate PosBase layers created by //line directives.
-// The positions must be known.
-func base(pos syntax.Pos) *syntax.PosBase {
- assert(pos.IsKnown())
- b := pos.Base()
- for {
- bb := b.Pos().Base()
- if bb == nil || bb == b {
- break
- }
- b = bb
- }
- return b
-}
diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go
index fc3b858a80..63b6a1d2c1 100644
--- a/src/cmd/compile/internal/walk/assign.go
+++ b/src/cmd/compile/internal/walk/assign.go
@@ -623,21 +623,23 @@ func isAppendOfMake(n ir.Node) bool {
// panicmakeslicelen()
// }
// s := l1
-// n := len(s) + l2
-// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
-// // cap is a positive int and n can become negative when len(s) + l2
-// // overflows int. Interpreting n when negative as uint makes it larger
-// // than cap(s). growslice will check the int n arg and panic if n is
-// // negative. This prevents the overflow from being undetected.
-// if uint(n) <= uint(cap(s)) {
-// s = s[:n]
-// } else {
-// s = growslice(T, s.ptr, n, s.cap, l2, T)
+// if l2 != 0 {
+// n := len(s) + l2
+// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
+// // cap is a positive int and n can become negative when len(s) + l2
+// // overflows int. Interpreting n when negative as uint makes it larger
+// // than cap(s). growslice will check the int n arg and panic if n is
+// // negative. This prevents the overflow from being undetected.
+// if uint(n) <= uint(cap(s)) {
+// s = s[:n]
+// } else {
+// s = growslice(T, s.ptr, n, s.cap, l2, T)
+// }
+// // clear the new portion of the underlying array.
+// hp := &s[len(s)-l2]
+// hn := l2 * sizeof(T)
+// memclr(hp, hn)
// }
-// // clear the new portion of the underlying array.
-// hp := &s[len(s)-l2]
-// hn := l2 * sizeof(T)
-// memclr(hp, hn)
// }
// s
//
@@ -671,11 +673,18 @@ func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
s := typecheck.TempAt(base.Pos, ir.CurFunc, l1.Type())
nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, l1))
+ // if l2 != 0 {
+ // Avoid work if we're not appending anything. But more importantly,
+ // avoid allowing hp to be a past-the-end pointer when clearing. See issue 67255.
+ nifnz := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, l2, ir.NewInt(base.Pos, 0)), nil, nil)
+ nifnz.Likely = true
+ nodes = append(nodes, nifnz)
+
elemtype := s.Type().Elem()
// n := s.len + l2
nn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
- nodes = append(nodes, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2)))
+ nifnz.Body = append(nifnz.Body, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2)))
// if uint(n) <= uint(s.cap)
nuint := typecheck.Conv(nn, types.Types[types.TUINT])
@@ -697,7 +706,7 @@ func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
l2)),
}
- nodes = append(nodes, nif)
+ nifnz.Body = append(nifnz.Body, nif)
// hp := &s[s.len - l2]
// TODO: &s[s.len] - hn?
@@ -723,7 +732,7 @@ func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
// if growslice isn't called do we need to do the zeroing ourselves.
nif.Body = append(nif.Body, clr...)
} else {
- nodes = append(nodes, clr...)
+ nifnz.Body = append(nifnz.Body, clr...)
}
typecheck.Stmts(nodes)
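
At the source level, the rewrite applies to the append-of-make special case;
a sketch (names hypothetical):

    package p

    // grow compiles through extendSlice. With n == 0 the generated code now
    // skips the grow-and-clear block entirely (issue 67255): previously it
    // unconditionally formed hp := &s[len(s)-n], which for n == 0 is a
    // past-the-end pointer handed to memclr.
    func grow(s []byte, n int) []byte {
        return append(s, make([]byte, n)...)
    }
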