path: root/src/cmd/compile/internal
Diffstat (limited to 'src/cmd/compile/internal')
-rw-r--r--  src/cmd/compile/internal/amd64/ssa.go | 21
-rw-r--r--  src/cmd/compile/internal/arm/ssa.go | 10
-rw-r--r--  src/cmd/compile/internal/arm64/ssa.go | 21
-rw-r--r--  src/cmd/compile/internal/deadcode/deadcode.go | 1
-rw-r--r--  src/cmd/compile/internal/dwarfgen/dwarf.go | 3
-rw-r--r--  src/cmd/compile/internal/escape/stmt.go | 3
-rw-r--r--  src/cmd/compile/internal/importer/iimport.go | 26
-rw-r--r--  src/cmd/compile/internal/inline/inl.go | 3
-rw-r--r--  src/cmd/compile/internal/ir/expr.go | 9
-rw-r--r--  src/cmd/compile/internal/ir/fmt.go | 5
-rw-r--r--  src/cmd/compile/internal/ir/mini.go | 2
-rw-r--r--  src/cmd/compile/internal/ir/name.go | 1
-rw-r--r--  src/cmd/compile/internal/ir/node_gen.go | 6
-rw-r--r--  src/cmd/compile/internal/ir/stmt.go | 9
-rw-r--r--  src/cmd/compile/internal/ir/symtab.go | 55
-rw-r--r--  src/cmd/compile/internal/mips/ssa.go | 9
-rw-r--r--  src/cmd/compile/internal/mips64/ssa.go | 9
-rw-r--r--  src/cmd/compile/internal/noder/decl.go | 2
-rw-r--r--  src/cmd/compile/internal/noder/decoder.go | 3
-rw-r--r--  src/cmd/compile/internal/noder/encoder.go | 3
-rw-r--r--  src/cmd/compile/internal/noder/expr.go | 63
-rw-r--r--  src/cmd/compile/internal/noder/helpers.go | 71
-rw-r--r--  src/cmd/compile/internal/noder/import.go | 10
-rw-r--r--  src/cmd/compile/internal/noder/irgen.go | 64
-rw-r--r--  src/cmd/compile/internal/noder/noder.go | 8
-rw-r--r--  src/cmd/compile/internal/noder/reader.go | 61
-rw-r--r--  src/cmd/compile/internal/noder/reader2.go | 24
-rw-r--r--  src/cmd/compile/internal/noder/stencil.go | 556
-rw-r--r--  src/cmd/compile/internal/noder/stmt.go | 72
-rw-r--r--  src/cmd/compile/internal/noder/transform.go | 100
-rw-r--r--  src/cmd/compile/internal/noder/types.go | 14
-rw-r--r--  src/cmd/compile/internal/noder/unified.go | 7
-rw-r--r--  src/cmd/compile/internal/noder/unified_test.go | 8
-rw-r--r--  src/cmd/compile/internal/noder/writer.go | 76
-rw-r--r--  src/cmd/compile/internal/ppc64/ssa.go | 10
-rw-r--r--  src/cmd/compile/internal/reflectdata/reflect.go | 17
-rw-r--r--  src/cmd/compile/internal/riscv64/ssa.go | 14
-rw-r--r--  src/cmd/compile/internal/s390x/ssa.go | 10
-rw-r--r--  src/cmd/compile/internal/ssa/check.go | 3
-rw-r--r--  src/cmd/compile/internal/ssa/compile.go | 59
-rw-r--r--  src/cmd/compile/internal/ssa/debug_lines_test.go | 213
-rw-r--r--  src/cmd/compile/internal/ssa/expand_calls.go | 124
-rw-r--r--  src/cmd/compile/internal/ssa/func.go | 2
-rw-r--r--  src/cmd/compile/internal/ssa/gen/386.rules | 1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/386Ops.go | 1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64.rules | 1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64Ops.go | 1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM.rules | 154
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM64.rules | 120
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM64Ops.go | 9
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARMOps.go | 10
-rw-r--r--  src/cmd/compile/internal/ssa/gen/MIPS.rules | 1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/MIPS64.rules | 1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/MIPS64Ops.go | 1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/MIPSOps.go | 1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/PPC64.rules | 1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/PPC64Ops.go | 1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/RISCV64.rules | 5
-rw-r--r--  src/cmd/compile/internal/ssa/gen/RISCV64Ops.go | 3
-rw-r--r--  src/cmd/compile/internal/ssa/gen/S390X.rules | 1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/S390XOps.go | 1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/Wasm.rules | 1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/WasmOps.go | 1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/genericOps.go | 6
-rw-r--r--  src/cmd/compile/internal/ssa/html.go | 17
-rw-r--r--  src/cmd/compile/internal/ssa/location.go | 4
-rw-r--r--  src/cmd/compile/internal/ssa/opGen.go | 277
-rw-r--r--  src/cmd/compile/internal/ssa/print.go | 52
-rw-r--r--  src/cmd/compile/internal/ssa/rewrite.go | 32
-rw-r--r--  src/cmd/compile/internal/ssa/rewrite386.go | 3
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteAMD64.go | 3
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM.go | 504
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM64.go | 981
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteMIPS.go | 3
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteMIPS64.go | 3
-rw-r--r--  src/cmd/compile/internal/ssa/rewritePPC64.go | 3
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteRISCV64.go | 9
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteS390X.go | 3
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteWasm.go | 3
-rw-r--r--  src/cmd/compile/internal/ssa/testdata/inline-dump.go | 17
-rw-r--r--  src/cmd/compile/internal/ssa/testdata/sayhi.go | 12
-rw-r--r--  src/cmd/compile/internal/ssa/value.go | 10
-rw-r--r--  src/cmd/compile/internal/ssa/writebarrier.go | 2
-rw-r--r--  src/cmd/compile/internal/ssagen/abi.go | 20
-rw-r--r--  src/cmd/compile/internal/ssagen/ssa.go | 483
-rw-r--r--  src/cmd/compile/internal/syntax/parser.go | 36
-rw-r--r--  src/cmd/compile/internal/syntax/testdata/issue43527.go | 223
-rw-r--r--  src/cmd/compile/internal/syntax/testdata/tparams.go | 210
-rw-r--r--  src/cmd/compile/internal/typecheck/crawler.go | 45
-rw-r--r--  src/cmd/compile/internal/typecheck/iexport.go | 68
-rw-r--r--  src/cmd/compile/internal/typecheck/iimport.go | 67
-rw-r--r--  src/cmd/compile/internal/typecheck/stmt.go | 9
-rw-r--r--  src/cmd/compile/internal/typecheck/subr.go | 67
-rw-r--r--  src/cmd/compile/internal/typecheck/typecheck.go | 1
-rw-r--r--  src/cmd/compile/internal/types/identity.go | 31
-rw-r--r--  src/cmd/compile/internal/types/type.go | 6
-rw-r--r--  src/cmd/compile/internal/types2/api.go | 5
-rw-r--r--  src/cmd/compile/internal/types2/api_test.go | 45
-rw-r--r--  src/cmd/compile/internal/types2/assignments.go | 2
-rw-r--r--  src/cmd/compile/internal/types2/builtins.go | 2
-rw-r--r--  src/cmd/compile/internal/types2/call.go | 24
-rw-r--r--  src/cmd/compile/internal/types2/check.go | 7
-rw-r--r--  src/cmd/compile/internal/types2/decl.go | 10
-rw-r--r--  src/cmd/compile/internal/types2/environment.go | 81
-rw-r--r--  src/cmd/compile/internal/types2/errors.go | 2
-rw-r--r--  src/cmd/compile/internal/types2/errors_test.go | 1
-rw-r--r--  src/cmd/compile/internal/types2/index.go | 2
-rw-r--r--  src/cmd/compile/internal/types2/infer.go | 2
-rw-r--r--  src/cmd/compile/internal/types2/instantiate.go | 62
-rw-r--r--  src/cmd/compile/internal/types2/instantiate_test.go | 62
-rw-r--r--  src/cmd/compile/internal/types2/interface.go | 15
-rw-r--r--  src/cmd/compile/internal/types2/lookup.go | 24
-rw-r--r--  src/cmd/compile/internal/types2/named.go | 140
-rw-r--r--  src/cmd/compile/internal/types2/object.go | 20
-rw-r--r--  src/cmd/compile/internal/types2/predicates.go | 18
-rw-r--r--  src/cmd/compile/internal/types2/signature.go | 44
-rw-r--r--  src/cmd/compile/internal/types2/sizeof_test.go | 4
-rw-r--r--  src/cmd/compile/internal/types2/subst.go | 105
-rw-r--r--  src/cmd/compile/internal/types2/testdata/fixedbugs/issue43527.go | 216
-rw-r--r--  src/cmd/compile/internal/types2/testdata/fixedbugs/issue47887.go | 228
-rw-r--r--  src/cmd/compile/internal/types2/testdata/fixedbugs/issue47996.go | 22
-rw-r--r--  src/cmd/compile/internal/types2/testdata/fixedbugs/issue48234.go | 210
-rw-r--r--  src/cmd/compile/internal/types2/type.go | 2
-rw-r--r--  src/cmd/compile/internal/types2/typelists.go | 16
-rw-r--r--  src/cmd/compile/internal/types2/typeparam.go | 12
-rw-r--r--  src/cmd/compile/internal/types2/typestring.go | 115
-rw-r--r--  src/cmd/compile/internal/types2/typexpr.go | 10
-rw-r--r--  src/cmd/compile/internal/types2/unify.go | 3
-rw-r--r--  src/cmd/compile/internal/types2/universe.go | 4
-rw-r--r--  src/cmd/compile/internal/walk/convert.go | 38
-rw-r--r--  src/cmd/compile/internal/walk/expr.go | 13
-rw-r--r--  src/cmd/compile/internal/walk/order.go | 6
-rw-r--r--  src/cmd/compile/internal/walk/stmt.go | 8
-rw-r--r--  src/cmd/compile/internal/wasm/ssa.go | 13
-rw-r--r--  src/cmd/compile/internal/x86/ssa.go | 9
135 files changed, 3838 insertions, 2056 deletions
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index fc547ebba0..30131bd559 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -1008,7 +1008,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
}
r := v.Reg()
getgFromTLS(s, r)
- case ssa.OpAMD64CALLstatic:
+ case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLtail:
if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
@@ -1017,6 +1017,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
+ if v.Op == ssa.OpAMD64CALLtail {
+ s.TailCall(v)
+ break
+ }
s.Call(v)
if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
// zeroing X15 when entering ABIInternal from ABI0
@@ -1314,22 +1318,9 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
- case ssa.BlockExit:
+ case ssa.BlockExit, ssa.BlockRetJmp:
case ssa.BlockRet:
s.Prog(obj.ARET)
- case ssa.BlockRetJmp:
- if s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
- // zeroing X15 when entering ABIInternal from ABI0
- if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
- opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
- }
- // set G register from TLS
- getgFromTLS(s, x86.REG_R14)
- }
- p := s.Prog(obj.ARET)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = b.Aux.(*obj.LSym)
case ssa.BlockAMD64EQF:
s.CombJump(b, next, &eqfJumps)
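
This amd64 hunk shows the CL's central move: tail calls stop being a block kind (BlockRetJmp, lowered per architecture as a RET targeting an external symbol) and become a call op (OpAMD64CALLtail) handled by a shared ssagen helper. A minimal sketch of what that shared s.TailCall presumably emits, reconstructed from the per-arch code deleted above; the actual ssagen implementation is not part of this hunk:

    // Sketch (in package ssagen): emit a RET whose target is the callee
    // symbol, i.e. a jump that reuses the caller's frame. The callee comes
    // from the value's *ssa.AuxCall, as in the OpAMD64CALLtail case above.
    func (s *State) TailCall(v *ssa.Value) *obj.Prog {
        p := s.Prog(obj.ARET)
        p.To.Type = obj.TYPE_MEM
        p.To.Name = obj.NAME_EXTERN
        p.To.Sym = v.Aux.(*ssa.AuxCall).Fn
        return p
    }
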
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index 4b083cec46..8aac80a22e 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -696,6 +696,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter:
s.Call(v)
+ case ssa.OpARMCALLtail:
+ s.TailCall(v)
case ssa.OpARMCALLudiv:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
@@ -936,17 +938,11 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
- case ssa.BlockExit:
+ case ssa.BlockExit, ssa.BlockRetJmp:
case ssa.BlockRet:
s.Prog(obj.ARET)
- case ssa.BlockRetJmp:
- p := s.Prog(obj.ARET)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = b.Aux.(*obj.LSym)
-
case ssa.BlockARMEQ, ssa.BlockARMNE,
ssa.BlockARMLT, ssa.BlockARMGE,
ssa.BlockARMLE, ssa.BlockARMGT,
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index b985246117..4770a0c488 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -315,6 +315,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
case ssa.OpARM64MVNshiftRA, ssa.OpARM64NEGshiftRA:
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
+ case ssa.OpARM64MVNshiftRO:
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_ROR, v.AuxInt)
case ssa.OpARM64ADDshiftLL,
ssa.OpARM64SUBshiftLL,
ssa.OpARM64ANDshiftLL,
@@ -342,6 +344,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpARM64ORNshiftRA,
ssa.OpARM64BICshiftRA:
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
+ case ssa.OpARM64ANDshiftRO,
+ ssa.OpARM64ORshiftRO,
+ ssa.OpARM64XORshiftRO,
+ ssa.OpARM64EONshiftRO,
+ ssa.OpARM64ORNshiftRO,
+ ssa.OpARM64BICshiftRO:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_ROR, v.AuxInt)
case ssa.OpARM64MOVDconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
@@ -389,6 +398,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt)
case ssa.OpARM64CMPshiftRA, ssa.OpARM64CMNshiftRA, ssa.OpARM64TSTshiftRA:
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt)
+ case ssa.OpARM64TSTshiftRO:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_ROR, v.AuxInt)
case ssa.OpARM64MOVDaddr:
p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_ADDR
@@ -1046,6 +1057,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p4.To.SetTarget(p)
case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
s.Call(v)
+ case ssa.OpARM64CALLtail:
+ s.TailCall(v)
case ssa.OpARM64LoweredWB:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
@@ -1241,17 +1254,11 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
- case ssa.BlockExit:
+ case ssa.BlockExit, ssa.BlockRetJmp:
case ssa.BlockRet:
s.Prog(obj.ARET)
- case ssa.BlockRetJmp:
- p := s.Prog(obj.ARET)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = b.Aux.(*obj.LSym)
-
case ssa.BlockARM64EQ, ssa.BlockARM64NE,
ssa.BlockARM64LT, ssa.BlockARM64GE,
ssa.BlockARM64LE, ssa.BlockARM64GT,
diff --git a/src/cmd/compile/internal/deadcode/deadcode.go b/src/cmd/compile/internal/deadcode/deadcode.go
index 3658c89912..65a48b6803 100644
--- a/src/cmd/compile/internal/deadcode/deadcode.go
+++ b/src/cmd/compile/internal/deadcode/deadcode.go
@@ -117,6 +117,7 @@ func stmts(nn *ir.Nodes) {
}
if cut {
+ ir.VisitList((*nn)[i+1:len(*nn)], markHiddenClosureDead)
*nn = (*nn)[:i+1]
break
}
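
The new VisitList call marks hidden closures in the statements being cut as dead: their Func bodies were already registered with the compiler, so later phases must be told to skip them. markHiddenClosureDead itself is outside this hunk; a plausible shape, assuming the usual ir accessors (IsHiddenClosure, SetIsDeadcodeClosure):

    // Sketch only: flag any closure found in unreachable code.
    func markHiddenClosureDead(n ir.Node) {
        if n.Op() != ir.OCLOSURE {
            return
        }
        if clo := n.(*ir.ClosureExpr); clo.Func.IsHiddenClosure() {
            clo.Func.SetIsDeadcodeClosure(true)
        }
    }
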
diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go
index 30472a9ebd..3007262db9 100644
--- a/src/cmd/compile/internal/dwarfgen/dwarf.go
+++ b/src/cmd/compile/internal/dwarfgen/dwarf.go
@@ -217,6 +217,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
DeclCol: declpos.RelCol(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
+ DictIndex: n.DictIndex,
})
// Record the go type to ensure that it gets emitted by the linker.
fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
@@ -374,6 +375,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
DeclCol: declpos.RelCol(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
+ DictIndex: n.DictIndex,
}
}
@@ -478,6 +480,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var
DeclCol: declpos.RelCol(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
+ DictIndex: n.DictIndex,
}
list := debug.LocationLists[varID]
if len(list) != 0 {
diff --git a/src/cmd/compile/internal/escape/stmt.go b/src/cmd/compile/internal/escape/stmt.go
index c71848b8a1..0afb5d64ef 100644
--- a/src/cmd/compile/internal/escape/stmt.go
+++ b/src/cmd/compile/internal/escape/stmt.go
@@ -180,7 +180,8 @@ func (e *escape) stmt(n ir.Node) {
e.goDeferStmt(n)
case ir.OTAILCALL:
- // TODO(mdempsky): Treat like a normal call? esc.go used to just ignore it.
+ n := n.(*ir.TailCallStmt)
+ e.call(nil, n.Call)
}
}
diff --git a/src/cmd/compile/internal/importer/iimport.go b/src/cmd/compile/internal/importer/iimport.go
index 38cb8db235..a92720d52e 100644
--- a/src/cmd/compile/internal/importer/iimport.go
+++ b/src/cmd/compile/internal/importer/iimport.go
@@ -72,7 +72,7 @@ const (
structType
interfaceType
typeParamType
- instType
+ instanceType
unionType
)
@@ -314,21 +314,21 @@ func (r *importReader) obj(name string) {
tparams = r.tparamList()
}
sig := r.signature(nil)
- sig.SetTParams(tparams)
+ sig.SetTypeParams(tparams)
r.declare(types2.NewFunc(pos, r.currPkg, name, sig))
case 'T', 'U':
- var tparams []*types2.TypeParam
- if tag == 'U' {
- tparams = r.tparamList()
- }
-
// Types can be recursive. We need to set up a stub
// declaration before recursing.
obj := types2.NewTypeName(pos, r.currPkg, name, nil)
named := types2.NewNamed(obj, nil, nil)
- named.SetTParams(tparams)
+ // Declare obj before calling r.tparamList, so the new type name is recognized
+ // if used in the constraint of one of its own typeparams (see #48280).
r.declare(obj)
+ if tag == 'U' {
+ tparams := r.tparamList()
+ named.SetTypeParams(tparams)
+ }
underlying := r.p.typAt(r.uint64(), named).Underlying()
named.SetUnderlying(underlying)
@@ -343,13 +343,13 @@ func (r *importReader) obj(name string) {
// If the receiver has any targs, set those as the
// rparams of the method (since those are the
// typeparams being used in the method sig/body).
- targs := baseType(msig.Recv().Type()).TArgs()
+ targs := baseType(msig.Recv().Type()).TypeArgs()
if targs.Len() > 0 {
rparams := make([]*types2.TypeParam, targs.Len())
for i := range rparams {
rparams[i] = types2.AsTypeParam(targs.At(i))
}
- msig.SetRParams(rparams)
+ msig.SetRecvTypeParams(rparams)
}
named.AddMethod(types2.NewFunc(mpos, r.currPkg, mname, msig))
@@ -365,7 +365,7 @@ func (r *importReader) obj(name string) {
}
name0, sub := parseSubscript(name)
tn := types2.NewTypeName(pos, r.currPkg, name0, nil)
- t := (*types2.Checker)(nil).NewTypeParam(tn, nil)
+ t := types2.NewTypeParam(tn, nil)
if sub == 0 {
errorf("missing subscript")
}
@@ -646,7 +646,7 @@ func (r *importReader) doType(base *types2.Named) types2.Type {
r.p.doDecl(pkg, name)
return r.p.tparamIndex[id]
- case instType:
+ case instanceType:
if r.p.exportVersion < iexportVersionGenerics {
errorf("unexpected instantiation type")
}
@@ -661,7 +661,7 @@ func (r *importReader) doType(base *types2.Named) types2.Type {
baseType := r.typ()
// The imported instantiated type doesn't include any methods, so
// we must always use the methods of the base (orig) type.
- // TODO provide a non-nil *Checker
+ // TODO provide a non-nil *Environment
t, _ := types2.Instantiate(nil, baseType, targs, false)
return t
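
The reordering in obj (declare the stub type name before reading the type parameter list) matters for self-referential constraints: if a type's own type parameter bound mentions the type being declared, the importer must already know the name. A hypothetical illustration of the shape of code behind issue #48280 (the actual test case is not in this diff):

    // T's constraint refers to T itself, so importing it requires the stub
    // for T to be declared before its type parameters are read.
    type T[P interface{ M() T[P] }] int
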
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index 073373144d..04d751869b 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -544,6 +544,9 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
call := call.(*ir.CallExpr)
call.NoInline = true
}
+ case ir.OTAILCALL:
+ n := n.(*ir.TailCallStmt)
+ n.Call.NoInline = true // Do not inline a tail call for now. Maybe we could inline it just like RETURN fn(arg)?
// TODO do them here (or earlier),
// so escape analysis can avoid more heapmoves.
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index baf0117409..f526d987a7 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -570,11 +570,10 @@ func (*SelectorExpr) CanBeNtype() {}
// A SliceExpr is a slice expression X[Low:High] or X[Low:High:Max].
type SliceExpr struct {
miniExpr
- X Node
- Low Node
- High Node
- Max Node
- CheckPtrCall *CallExpr `mknode:"-"`
+ X Node
+ Low Node
+ High Node
+ Max Node
}
func NewSliceExpr(pos src.XPos, op Op, x, low, high, max Node) *SliceExpr {
diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go
index d19fe453ef..29505357cc 100644
--- a/src/cmd/compile/internal/ir/fmt.go
+++ b/src/cmd/compile/internal/ir/fmt.go
@@ -386,7 +386,7 @@ func stmtFmt(n Node, s fmt.State) {
case OTAILCALL:
n := n.(*TailCallStmt)
- fmt.Fprintf(s, "tailcall %v", n.Target)
+ fmt.Fprintf(s, "tailcall %v", n.Call)
case OINLMARK:
n := n.(*InlineMarkStmt)
@@ -559,7 +559,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
}
nprec := OpPrec[n.Op()]
- if n.Op() == OTYPE && n.Type().IsPtr() {
+ if n.Op() == OTYPE && n.Type() != nil && n.Type().IsPtr() {
nprec = OpPrec[ODEREF]
}
@@ -1147,6 +1147,7 @@ func dumpNodeHeader(w io.Writer, n Node) {
}
// TODO(mdempsky): Print line pragma details too.
file := filepath.Base(pos.Filename())
+ // Note: this output will be parsed by ssa/html.go:(*HTMLWriter).WriteAST. Keep in sync.
fmt.Fprintf(w, "%s:%d:%d", file, pos.Line(), pos.Col())
}
}
diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go
index a7ff4ac9c7..eeb74081fb 100644
--- a/src/cmd/compile/internal/ir/mini.go
+++ b/src/cmd/compile/internal/ir/mini.go
@@ -62,7 +62,7 @@ const (
func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) }
func (n *miniNode) SetTypecheck(x uint8) {
- if x > 3 {
+ if x > 2 {
panic(fmt.Sprintf("cannot SetTypecheck %d", x))
}
n.bits.set2(miniTypecheckShift, x)
diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go
index 9fb22378cd..dcfff7deba 100644
--- a/src/cmd/compile/internal/ir/name.go
+++ b/src/cmd/compile/internal/ir/name.go
@@ -40,6 +40,7 @@ type Name struct {
Class Class // uint8
pragma PragmaFlag // int16
flags bitset16
+ DictIndex uint16 // index of the dictionary entry describing the type of this variable declaration plus 1
sym *types.Sym
Func *Func // TODO(austin): nil for I.M, eqFor, hashfor, and hashmem
Offset_ int64
diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go
index aa41c03beb..44988880c8 100644
--- a/src/cmd/compile/internal/ir/node_gen.go
+++ b/src/cmd/compile/internal/ir/node_gen.go
@@ -1331,15 +1331,15 @@ func (n *TailCallStmt) doChildren(do func(Node) bool) bool {
if doNodes(n.init, do) {
return true
}
- if n.Target != nil && do(n.Target) {
+ if n.Call != nil && do(n.Call) {
return true
}
return false
}
func (n *TailCallStmt) editChildren(edit func(Node) Node) {
editNodes(n.init, edit)
- if n.Target != nil {
- n.Target = edit(n.Target).(*Name)
+ if n.Call != nil {
+ n.Call = edit(n.Call).(*CallExpr)
}
}
diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go
index 69a74b9fdd..3482d7972e 100644
--- a/src/cmd/compile/internal/ir/stmt.go
+++ b/src/cmd/compile/internal/ir/stmt.go
@@ -385,14 +385,11 @@ func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseClause) *SwitchStmt {
// code generation to jump directly to another function entirely.
type TailCallStmt struct {
miniStmt
- Target *Name
+ Call *CallExpr // the underlying call
}
-func NewTailCallStmt(pos src.XPos, target *Name) *TailCallStmt {
- if target.Op() != ONAME || target.Class != PFUNC {
- base.FatalfAt(pos, "tail call to non-func %v", target)
- }
- n := &TailCallStmt{Target: target}
+func NewTailCallStmt(pos src.XPos, call *CallExpr) *TailCallStmt {
+ n := &TailCallStmt{Call: call}
n.pos = pos
n.op = OTAILCALL
return n
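
With OTAILCALL now wrapping a full call expression, construction sites change from passing a bare function Name to passing the CallExpr, arguments included. A hedged before/after sketch (pos, fn, and args are placeholders for values from the caller):

    // Before this CL: n := ir.NewTailCallStmt(pos, fnName), where fnName
    // had to be an ONAME with Class PFUNC. After: the underlying call is
    // recorded, so callee and arguments travel together and passes like
    // escape analysis and inlining see a real call.
    call := ir.NewCallExpr(pos, ir.OCALL, fn, args)
    n := ir.NewTailCallStmt(pos, call) // n.Call is a *ir.CallExpr
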
diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go
index 1e8261810f..1435e4313e 100644
--- a/src/cmd/compile/internal/ir/symtab.go
+++ b/src/cmd/compile/internal/ir/symtab.go
@@ -11,33 +11,34 @@ import (
// Syms holds known symbols.
var Syms struct {
- AssertE2I *obj.LSym
- AssertE2I2 *obj.LSym
- AssertI2I *obj.LSym
- AssertI2I2 *obj.LSym
- Deferproc *obj.LSym
- DeferprocStack *obj.LSym
- Deferreturn *obj.LSym
- Duffcopy *obj.LSym
- Duffzero *obj.LSym
- GCWriteBarrier *obj.LSym
- Goschedguarded *obj.LSym
- Growslice *obj.LSym
- Msanread *obj.LSym
- Msanwrite *obj.LSym
- Msanmove *obj.LSym
- Newobject *obj.LSym
- Newproc *obj.LSym
- Panicdivide *obj.LSym
- Panicshift *obj.LSym
- PanicdottypeE *obj.LSym
- PanicdottypeI *obj.LSym
- Panicnildottype *obj.LSym
- Panicoverflow *obj.LSym
- Raceread *obj.LSym
- Racereadrange *obj.LSym
- Racewrite *obj.LSym
- Racewriterange *obj.LSym
+ AssertE2I *obj.LSym
+ AssertE2I2 *obj.LSym
+ AssertI2I *obj.LSym
+ AssertI2I2 *obj.LSym
+ CheckPtrAlignment *obj.LSym
+ Deferproc *obj.LSym
+ DeferprocStack *obj.LSym
+ Deferreturn *obj.LSym
+ Duffcopy *obj.LSym
+ Duffzero *obj.LSym
+ GCWriteBarrier *obj.LSym
+ Goschedguarded *obj.LSym
+ Growslice *obj.LSym
+ Msanread *obj.LSym
+ Msanwrite *obj.LSym
+ Msanmove *obj.LSym
+ Newobject *obj.LSym
+ Newproc *obj.LSym
+ Panicdivide *obj.LSym
+ Panicshift *obj.LSym
+ PanicdottypeE *obj.LSym
+ PanicdottypeI *obj.LSym
+ Panicnildottype *obj.LSym
+ Panicoverflow *obj.LSym
+ Raceread *obj.LSym
+ Racereadrange *obj.LSym
+ Racewrite *obj.LSym
+ Racewriterange *obj.LSym
// Wasm
SigPanic *obj.LSym
Staticuint64s *obj.LSym
diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go
index e0447f38cb..6326f966bf 100644
--- a/src/cmd/compile/internal/mips/ssa.go
+++ b/src/cmd/compile/internal/mips/ssa.go
@@ -475,6 +475,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p6.To.SetTarget(p2)
case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter:
s.Call(v)
+ case ssa.OpMIPSCALLtail:
+ s.TailCall(v)
case ssa.OpMIPSLoweredWB:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
@@ -841,14 +843,9 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
- case ssa.BlockExit:
+ case ssa.BlockExit, ssa.BlockRetJmp:
case ssa.BlockRet:
s.Prog(obj.ARET)
- case ssa.BlockRetJmp:
- p := s.Prog(obj.ARET)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = b.Aux.(*obj.LSym)
case ssa.BlockMIPSEQ, ssa.BlockMIPSNE,
ssa.BlockMIPSLTZ, ssa.BlockMIPSGEZ,
ssa.BlockMIPSLEZ, ssa.BlockMIPSGTZ,
diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go
index e821a00876..990b9788f7 100644
--- a/src/cmd/compile/internal/mips64/ssa.go
+++ b/src/cmd/compile/internal/mips64/ssa.go
@@ -491,6 +491,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p6.To.SetTarget(p2)
case ssa.OpMIPS64CALLstatic, ssa.OpMIPS64CALLclosure, ssa.OpMIPS64CALLinter:
s.Call(v)
+ case ssa.OpMIPS64CALLtail:
+ s.TailCall(v)
case ssa.OpMIPS64LoweredWB:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
@@ -808,14 +810,9 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
- case ssa.BlockExit:
+ case ssa.BlockExit, ssa.BlockRetJmp:
case ssa.BlockRet:
s.Prog(obj.ARET)
- case ssa.BlockRetJmp:
- p := s.Prog(obj.ARET)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = b.Aux.(*obj.LSym)
case ssa.BlockMIPS64EQ, ssa.BlockMIPS64NE,
ssa.BlockMIPS64LTZ, ssa.BlockMIPS64GEZ,
ssa.BlockMIPS64LEZ, ssa.BlockMIPS64GTZ,
diff --git a/src/cmd/compile/internal/noder/decl.go b/src/cmd/compile/internal/noder/decl.go
index de481fb5fc..c9ab31f203 100644
--- a/src/cmd/compile/internal/noder/decl.go
+++ b/src/cmd/compile/internal/noder/decl.go
@@ -190,7 +190,7 @@ func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
// object to new type pragmas.]
ntyp.SetUnderlying(g.typeExpr(decl.Type))
- tparams := otyp.(*types2.Named).TParams()
+ tparams := otyp.(*types2.Named).TypeParams()
if n := tparams.Len(); n > 0 {
rparams := make([]*types.Type, n)
for i := range rparams {
diff --git a/src/cmd/compile/internal/noder/decoder.go b/src/cmd/compile/internal/noder/decoder.go
index 3dc61c6a69..2c18727420 100644
--- a/src/cmd/compile/internal/noder/decoder.go
+++ b/src/cmd/compile/internal/noder/decoder.go
@@ -255,7 +255,8 @@ func (r *decoder) strings() []string {
return res
}
-func (r *decoder) rawValue() constant.Value {
+func (r *decoder) value() constant.Value {
+ r.sync(syncValue)
isComplex := r.bool()
val := r.scalar()
if isComplex {
diff --git a/src/cmd/compile/internal/noder/encoder.go b/src/cmd/compile/internal/noder/encoder.go
index d8ab0f6255..b07b3a4a48 100644
--- a/src/cmd/compile/internal/noder/encoder.go
+++ b/src/cmd/compile/internal/noder/encoder.go
@@ -237,7 +237,8 @@ func (w *encoder) strings(ss []string) {
}
}
-func (w *encoder) rawValue(val constant.Value) {
+func (w *encoder) value(val constant.Value) {
+ w.sync(syncValue)
if w.bool(val.Kind() == constant.Complex) {
w.scalar(constant.Real(val))
w.scalar(constant.Imag(val))
diff --git a/src/cmd/compile/internal/noder/expr.go b/src/cmd/compile/internal/noder/expr.go
index 7dbbc88f8f..9cd9545b75 100644
--- a/src/cmd/compile/internal/noder/expr.go
+++ b/src/cmd/compile/internal/noder/expr.go
@@ -250,44 +250,6 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto
// only be fully transformed once it has an instantiated type.
n := ir.NewSelectorExpr(pos, ir.OXDOT, x, typecheck.Lookup(expr.Sel.Value))
typed(g.typ(typ), n)
-
- // Fill in n.Selection for a generic method reference or a bound
- // interface method, even though we won't use it directly, since it
- // is useful for analysis. Specifically do not fill in for fields or
- // other interfaces methods (method call on an interface value), so
- // n.Selection being non-nil means a method reference for a generic
- // type or a method reference due to a bound.
- obj2 := g.info.Selections[expr].Obj()
- sig := types2.AsSignature(obj2.Type())
- if sig == nil || sig.Recv() == nil {
- return n
- }
- index := g.info.Selections[expr].Index()
- last := index[len(index)-1]
- // recvType is the receiver of the method being called. Because of the
- // way methods are imported, g.obj(obj2) doesn't work across
- // packages, so we have to lookup the method via the receiver type.
- recvType := deref2(sig.Recv().Type())
- if types2.AsInterface(recvType.Underlying()) != nil {
- fieldType := n.X.Type()
- for _, ix := range index[:len(index)-1] {
- fieldType = deref(fieldType).Field(ix).Type
- }
- if fieldType.Kind() == types.TTYPEPARAM {
- n.Selection = fieldType.Bound().AllMethods().Index(last)
- //fmt.Printf(">>>>> %v: Bound call %v\n", base.FmtPos(pos), n.Sel)
- } else {
- assert(fieldType.Kind() == types.TINTER)
- //fmt.Printf(">>>>> %v: Interface call %v\n", base.FmtPos(pos), n.Sel)
- }
- return n
- }
-
- recvObj := types2.AsNamed(recvType).Obj()
- recv := g.pkg(recvObj.Pkg()).Lookup(recvObj.Name()).Def
- n.Selection = recv.Type().Methods().Index(last)
- //fmt.Printf(">>>>> %v: Method call %v\n", base.FmtPos(pos), n.Sel)
-
return n
}
@@ -344,7 +306,7 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto
if wantPtr {
recvType2Base = types2.AsPointer(recvType2).Elem()
}
- if types2.AsNamed(recvType2Base).TParams().Len() > 0 {
+ if types2.AsNamed(recvType2Base).TypeParams().Len() > 0 {
// recvType2 is the original generic type that is
// instantiated for this method call.
// selinfo.Recv() is the instantiated type
@@ -360,12 +322,10 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto
n.(*ir.SelectorExpr).Selection.Nname = method
typed(method.Type(), n)
- // selinfo.Targs() are the types used to
- // instantiate the type of receiver
- targs2 := getTargs(selinfo)
- targs := make([]ir.Node, targs2.Len())
+ xt := deref(x.Type())
+ targs := make([]ir.Node, len(xt.RParams()))
for i := range targs {
- targs[i] = ir.TypeNode(g.typ(targs2.At(i)))
+ targs[i] = ir.TypeNode(xt.RParams()[i])
}
// Create function instantiation with the type
@@ -388,16 +348,6 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto
return n
}
-// getTargs gets the targs associated with the receiver of a selected method
-func getTargs(selinfo *types2.Selection) *types2.TypeList {
- r := deref2(selinfo.Recv())
- n := types2.AsNamed(r)
- if n == nil {
- base.Fatalf("Incorrect type for selinfo %v", selinfo)
- }
- return n.TArgs()
-}
-
func (g *irgen) exprList(expr syntax.Expr) []ir.Node {
return g.exprs(unpackListExpr(expr))
}
@@ -440,9 +390,10 @@ func (g *irgen) compLit(typ types2.Type, lit *syntax.CompositeLit) ir.Node {
} else {
key = g.expr(elem.Key)
}
- exprs[i] = ir.NewKeyExpr(g.pos(elem), key, g.expr(elem.Value))
+ value := wrapname(g.pos(elem.Value), g.expr(elem.Value))
+ exprs[i] = ir.NewKeyExpr(g.pos(elem), key, value)
default:
- exprs[i] = g.expr(elem)
+ exprs[i] = wrapname(g.pos(elem), g.expr(elem))
}
}
diff --git a/src/cmd/compile/internal/noder/helpers.go b/src/cmd/compile/internal/noder/helpers.go
index 9487e76336..636b5d64cd 100644
--- a/src/cmd/compile/internal/noder/helpers.go
+++ b/src/cmd/compile/internal/noder/helpers.go
@@ -95,16 +95,12 @@ func Binary(pos src.XPos, op ir.Op, typ *types.Type, x, y ir.Node) ir.Node {
return typed(x.Type(), ir.NewLogicalExpr(pos, op, x, y))
case ir.OADD:
n := ir.NewBinaryExpr(pos, op, x, y)
- if x.Type().HasTParam() || y.Type().HasTParam() {
- // Delay transformAdd() if either arg has a type param,
- // since it needs to know the exact types to decide whether
- // to transform OADD to OADDSTR.
- n.SetType(typ)
- n.SetTypecheck(3)
- return n
- }
typed(typ, n)
- return transformAdd(n)
+ r := ir.Node(n)
+ if !delayTransform() {
+ r = transformAdd(n)
+ }
+ return r
default:
return typed(x.Type(), ir.NewBinaryExpr(pos, op, x, y))
}
@@ -189,17 +185,6 @@ func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool)
// A function instantiation (even if fully concrete) shouldn't be
// transformed yet, because we need to add the dictionary during the
// transformation.
- //
- // However, if we have a function type (even though it is
- // parameterized), then we can add in any needed CONVIFACE nodes via
- // typecheckaste(). We need to call transformArgs() to deal first
- // with the f(g(()) case where g returns multiple return values. We
- // can't do anything if fun is a type param (which is probably
- // described by a structural constraint)
- if fun.Type().Kind() == types.TFUNC {
- transformArgs(n)
- typecheckaste(ir.OCALL, fun, n.IsDDD, fun.Type().Params(), n.Args, true)
- }
return typed(typ, n)
}
@@ -212,22 +197,10 @@ func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool)
func Compare(pos src.XPos, typ *types.Type, op ir.Op, x, y ir.Node) ir.Node {
n := ir.NewBinaryExpr(pos, op, x, y)
- if x.Type().HasTParam() || y.Type().HasTParam() {
- xIsInt := x.Type().IsInterface()
- yIsInt := y.Type().IsInterface()
- if !(xIsInt && !yIsInt || !xIsInt && yIsInt) {
- // If either arg is a type param, then we can still do the
- // transformCompare() if we know that one arg is an interface
- // and the other is not. Otherwise, we delay
- // transformCompare(), since it needs to know the exact types
- // to decide on any needed conversions.
- n.SetType(typ)
- n.SetTypecheck(3)
- return n
- }
- }
typed(typ, n)
- transformCompare(n)
+ if !delayTransform() {
+ transformCompare(n)
+ }
return n
}
@@ -299,15 +272,11 @@ func method(typ *types.Type, index int) *types.Field {
func Index(pos src.XPos, typ *types.Type, x, index ir.Node) ir.Node {
n := ir.NewIndexExpr(pos, x, index)
- if x.Type().HasTParam() {
- // transformIndex needs to know exact type
- n.SetType(typ)
- n.SetTypecheck(3)
- return n
- }
typed(typ, n)
- // transformIndex will modify n.Type() for OINDEXMAP.
- transformIndex(n)
+ if !delayTransform() {
+ // transformIndex will modify n.Type() for OINDEXMAP.
+ transformIndex(n)
+ }
return n
}
@@ -317,14 +286,10 @@ func Slice(pos src.XPos, typ *types.Type, x, low, high, max ir.Node) ir.Node {
op = ir.OSLICE3
}
n := ir.NewSliceExpr(pos, op, x, low, high, max)
- if x.Type().HasTParam() {
- // transformSlice needs to know if x.Type() is a string or an array or a slice.
- n.SetType(typ)
- n.SetTypecheck(3)
- return n
- }
typed(typ, n)
- transformSlice(n)
+ if !delayTransform() {
+ transformSlice(n)
+ }
return n
}
@@ -366,3 +331,9 @@ func IncDec(pos src.XPos, op ir.Op, x ir.Node) *ir.AssignOpStmt {
}
return ir.NewAssignOpStmt(pos, op, x, bl)
}
+
+// delayTransform returns true if we should delay all transforms, because we are
+// creating the nodes for a generic function/method.
+func delayTransform() bool {
+ return ir.CurFunc != nil && ir.CurFunc.Type().HasTParam()
+}
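
delayTransform centralizes the test that each helper above previously open-coded by checking HasTParam on operand types: while building the body of a generic function, operator transforms must wait, because the operator's meaning can depend on the instantiated type. A small example of why Binary cannot commit early, using an ordinary type-set constraint:

    // Inside this generic body, + is OADD for int but OADDSTR for string,
    // so transformAdd must run on the instantiation, not here.
    func Concat[T int | string](x, y T) T {
        return x + y
    }
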
diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go
index c26340c960..f13f8ca7f5 100644
--- a/src/cmd/compile/internal/noder/import.go
+++ b/src/cmd/compile/internal/noder/import.go
@@ -43,12 +43,12 @@ var haveLegacyImports = false
// for an imported package by overloading writeNewExportFunc, then
// that payload will be mapped into memory and passed to
// newReadImportFunc.
-var newReadImportFunc = func(data string, pkg1 *types.Pkg, check *types2.Checker, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
+var newReadImportFunc = func(data string, pkg1 *types.Pkg, env *types2.Environment, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
panic("unexpected new export data payload")
}
type gcimports struct {
- check *types2.Checker
+ env *types2.Environment
packages map[string]*types2.Package
}
@@ -61,7 +61,7 @@ func (m *gcimports) ImportFrom(path, srcDir string, mode types2.ImportMode) (*ty
panic("mode must be 0")
}
- _, pkg, err := readImportFile(path, typecheck.Target, m.check, m.packages)
+ _, pkg, err := readImportFile(path, typecheck.Target, m.env, m.packages)
return pkg, err
}
@@ -224,7 +224,7 @@ func parseImportPath(pathLit *syntax.BasicLit) (string, error) {
// readImportFile reads the import file for the given package path and
// returns its types.Pkg representation. If packages is non-nil, the
// types2.Package representation is also returned.
-func readImportFile(path string, target *ir.Package, check *types2.Checker, packages map[string]*types2.Package) (pkg1 *types.Pkg, pkg2 *types2.Package, err error) {
+func readImportFile(path string, target *ir.Package, env *types2.Environment, packages map[string]*types2.Package) (pkg1 *types.Pkg, pkg2 *types2.Package, err error) {
path, err = resolveImportPath(path)
if err != nil {
return
@@ -279,7 +279,7 @@ func readImportFile(path string, target *ir.Package, check *types2.Checker, pack
return
}
- pkg2, err = newReadImportFunc(data, pkg1, check, packages)
+ pkg2, err = newReadImportFunc(data, pkg1, env, packages)
} else {
// We only have old data. Oh well, fall back to the legacy importers.
haveLegacyImports = true
diff --git a/src/cmd/compile/internal/noder/irgen.go b/src/cmd/compile/internal/noder/irgen.go
index a67b3994da..4f1b4e6bfd 100644
--- a/src/cmd/compile/internal/noder/irgen.go
+++ b/src/cmd/compile/internal/noder/irgen.go
@@ -34,10 +34,13 @@ func checkFiles(noders []*noder) (posMap, *types2.Package, *types2.Info) {
}
// typechecking
+ env := types2.NewEnvironment()
importer := gcimports{
+ env: env,
packages: map[string]*types2.Package{"unsafe": types2.Unsafe},
}
conf := types2.Config{
+ Environment: env,
GoVersion: base.Flag.Lang,
IgnoreLabels: true, // parser already checked via syntax.CheckBranches mode
CompilerErrorMessages: true, // use error strings matching existing compiler errors
@@ -60,9 +63,7 @@ func checkFiles(noders []*noder) (posMap, *types2.Package, *types2.Info) {
// expand as needed
}
- pkg := types2.NewPackage(base.Ctxt.Pkgpath, "")
- importer.check = types2.NewChecker(&conf, pkg, info)
- err := importer.check.Files(files)
+ pkg, err := conf.Check(base.Ctxt.Pkgpath, files, info)
base.ExitIfErrors()
if err != nil {
@@ -96,39 +97,42 @@ func check2(noders []*noder) {
}
}
-// gfInfo is information gathered on a generic function.
-type gfInfo struct {
- tparams []*types.Type
+// dictInfo is the dictionary format for an instantiation of a generic function with
+// particular shapes. shapeParams, derivedTypes, subDictCalls, and itabConvs describe
+// the actual dictionary entries in order, and the remaining fields are other info
+// needed for dictionary processing during compilation.
+type dictInfo struct {
+ // Types substituted for the type parameters, which are shape types.
+ shapeParams []*types.Type
+ // All types derived from those typeparams used in the instantiation.
derivedTypes []*types.Type
- // Nodes in generic function that requires a subdictionary. Includes
+ // Nodes in the instantiation that require a subdictionary. Includes
// method and function calls (OCALL), function values (OFUNCINST), method
// values/expressions (OXDOT).
subDictCalls []ir.Node
- // Nodes in generic functions that are a conversion from a typeparam/derived
+ // Nodes in the instantiation that are a conversion from a typeparam/derived
// type to a specific interface.
itabConvs []ir.Node
+
+ // Mapping from each shape type that substitutes a type param, to its
+ // type bound (which is also substituted with shapes if it is parameterized)
+ shapeToBound map[*types.Type]*types.Type
+
// For type switches on nonempty interfaces, a map from OTYPE entries of
- // HasTParam type, to the interface type we're switching from.
- // TODO: what if the type we're switching from is a shape type?
+ // HasShape type, to the interface type we're switching from.
type2switchType map[ir.Node]*types.Type
+
+ startSubDict int // Start of dict entries for subdictionaries
+ startItabConv int // Start of dict entries for itab conversions
+ dictLen int // Total number of entries in dictionary
}
-// instInfo is information gathered on an gcshape (or fully concrete)
-// instantiation of a function.
+// instInfo is information gathered on a shape instantiation of a function.
type instInfo struct {
fun *ir.Func // The instantiated function (with body)
dictParam *ir.Name // The node inside fun that refers to the dictionary param
- gf *ir.Name // The associated generic function
- gfInfo *gfInfo
-
- startSubDict int // Start of dict entries for subdictionaries
- startItabConv int // Start of dict entries for itab conversions
- dictLen int // Total number of entries in dictionary
-
- // Map from nodes in instantiated fun (OCALL, OCALLMETHOD, OFUNCINST, and
- // OMETHEXPR) to the associated dictionary entry for a sub-dictionary
- dictEntryMap map[ir.Node]int
+ dictInfo *dictInfo
}
type irgen struct {
@@ -154,13 +158,8 @@ type irgen struct {
dnum int // for generating unique dictionary variables
- // Map from generic function to information about its type params, derived
- // types, and subdictionaries.
- gfInfoMap map[*types.Sym]*gfInfo
-
// Map from the name of a function that has been instantiated to information about
- // its instantiated function, associated generic function/method, and the
- // mapping from IR nodes to dictionary entries.
+ // its instantiated function (including dictionary format).
instInfoMap map[*types.Sym]*instInfo
// dictionary syms which we need to finish, by writing out any itabconv
@@ -178,10 +177,11 @@ func (g *irgen) later(fn func()) {
}
type delayInfo struct {
- gf *ir.Name
- targs []*types.Type
- sym *types.Sym
- off int
+ gf *ir.Name
+ targs []*types.Type
+ sym *types.Sym
+ off int
+ isMeth bool
}
type typeDelayInfo struct {
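
The dictInfo comment fixes the dictionary layout: entries appear in the order shapeParams, derivedTypes, subDictCalls, itabConvs, so the offset fields that getInstantiation used to compute inline follow directly. A sketch of that arithmetic (the helper name is mine; in the CL the assignments live in getInstInfo, outside this hunk):

    func (d *dictInfo) computeOffsets() {
        d.startSubDict = len(d.shapeParams) + len(d.derivedTypes)
        d.startItabConv = d.startSubDict + len(d.subDictCalls)
        d.dictLen = d.startItabConv + len(d.itabConvs)
    }
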
diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go
index 2f18a2f231..7c14fcf041 100644
--- a/src/cmd/compile/internal/noder/noder.go
+++ b/src/cmd/compile/internal/noder/noder.go
@@ -1537,7 +1537,7 @@ func (p *noder) mkname(name *syntax.Name) ir.Node {
return mkname(p.name(name))
}
-func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node {
+func wrapname(pos src.XPos, x ir.Node) ir.Node {
// These nodes do not carry line numbers.
// Introduce a wrapper node to give them the correct line.
switch x.Op() {
@@ -1547,13 +1547,17 @@ func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node {
}
fallthrough
case ir.ONAME, ir.ONONAME, ir.OPACK:
- p := ir.NewParenExpr(p.pos(n), x)
+ p := ir.NewParenExpr(pos, x)
p.SetImplicit(true)
return p
}
return x
}
+func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node {
+ return wrapname(p.pos(n), x)
+}
+
func (p *noder) setlineno(n syntax.Node) {
if n != nil {
base.Pos = p.pos(n)
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
index 204d25bce8..48f4368113 100644
--- a/src/cmd/compile/internal/noder/reader.go
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -10,6 +10,7 @@ import (
"bytes"
"fmt"
"go/constant"
+ "internal/buildcfg"
"strings"
"cmd/compile/internal/base"
@@ -78,8 +79,6 @@ type reader struct {
p *pkgReader
- ext *reader
-
dict *readerDict
// TODO(mdempsky): The state below is all specific to reading
@@ -194,15 +193,32 @@ func (pr *pkgReader) posBaseIdx(idx int) *src.PosBase {
r := pr.newReader(relocPosBase, idx, syncPosBase)
var b *src.PosBase
- filename := r.string()
+ absFilename := r.string()
+ filename := absFilename
+
+ // For build artifact stability, the export data format only
+ // contains the "absolute" filename as returned by objabi.AbsFile.
+ // However, some tests (e.g., test/run.go's asmcheck tests) expect
+ // to see the full, original filename printed out. Re-expanding
+ // "$GOROOT" to buildcfg.GOROOT is a close-enough approximation to
+ // satisfy this.
+ //
+ // TODO(mdempsky): De-duplicate this logic with similar logic in
+ // cmd/link/internal/ld's expandGoroot. However, this will probably
+ // require being more consistent about when we use native vs UNIX
+ // file paths.
+ const dollarGOROOT = "$GOROOT"
+ if strings.HasPrefix(filename, dollarGOROOT) {
+ filename = buildcfg.GOROOT + filename[len(dollarGOROOT):]
+ }
if r.bool() {
- b = src.NewFileBase(filename, filename)
+ b = src.NewFileBase(filename, absFilename)
} else {
pos := r.pos0()
line := r.uint()
col := r.uint()
- b = src.NewLinePragmaBase(pos, filename, filename, line, col)
+ b = src.NewLinePragmaBase(pos, filename, absFilename, line, col)
}
pr.posBases[idx] = b
@@ -568,10 +584,10 @@ func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node
dict := pr.objDictIdx(sym, idx, implicits, explicits)
r := pr.newReader(relocObj, idx, syncObject1)
- r.ext = pr.newReader(relocObjExt, idx, syncObject1)
+ rext := pr.newReader(relocObjExt, idx, syncObject1)
r.dict = dict
- r.ext.dict = dict
+ rext.dict = dict
sym = r.mangle(sym)
if !sym.IsBlank() && sym.Def != nil {
@@ -608,7 +624,8 @@ func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node
case objConst:
name := do(ir.OLITERAL, false)
- typ, val := r.value()
+ typ := r.typ()
+ val := FixValue(typ, r.value())
setType(name, typ)
setValue(name, val)
return name
@@ -623,7 +640,7 @@ func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node
name.Func = ir.NewFunc(r.pos())
name.Func.Nname = name
- r.ext.funcExt(name)
+ rext.funcExt(name)
return name
case objType:
@@ -632,7 +649,7 @@ func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node
setType(name, typ)
// Important: We need to do this before SetUnderlying.
- r.ext.typeExt(name)
+ rext.typeExt(name)
// We need to defer CheckSize until we've called SetUnderlying to
// handle recursive types.
@@ -642,7 +659,7 @@ func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node
methods := make([]*types.Field, r.len())
for i := range methods {
- methods[i] = r.method()
+ methods[i] = r.method(rext)
}
if len(methods) != 0 {
typ.Methods().Set(methods)
@@ -655,7 +672,7 @@ func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node
case objVar:
name := do(ir.ONAME, false)
setType(name, r.typ())
- r.ext.varExt(name)
+ rext.varExt(name)
return name
}
}
@@ -737,13 +754,7 @@ func (r *reader) typeParamNames() {
}
}
-func (r *reader) value() (*types.Type, constant.Value) {
- r.sync(syncValue)
- typ := r.typ()
- return typ, FixValue(typ, r.rawValue())
-}
-
-func (r *reader) method() *types.Field {
+func (r *reader) method(rext *reader) *types.Field {
r.sync(syncMethod)
pos := r.pos()
pkg, sym := r.selector()
@@ -759,7 +770,7 @@ func (r *reader) method() *types.Field {
name.Func = ir.NewFunc(r.pos())
name.Func.Nname = name
- r.ext.funcExt(name)
+ rext.funcExt(name)
meth := types.NewField(name.Func.Pos(), sym, typ)
meth.Nname = name
@@ -916,6 +927,11 @@ var bodyReader = map[*ir.Func]pkgReaderIndex{}
// constructed.
var todoBodies []*ir.Func
+// todoBodiesDone signals that we have constructed all functions in todoBodies.
+// This is necessary to prevent reader.addBody from adding things to todoBodies
+// when nested inlining happens.
+var todoBodiesDone = false
+
func (r *reader) addBody(fn *ir.Func) {
pri := pkgReaderIndex{r.p, r.reloc(relocBody), r.dict}
bodyReader[fn] = pri
@@ -926,7 +942,7 @@ func (r *reader) addBody(fn *ir.Func) {
return
}
- if r.curfn == nil {
+ if r.curfn == nil && !todoBodiesDone {
todoBodies = append(todoBodies, fn)
return
}
@@ -1538,7 +1554,8 @@ func (r *reader) expr() (res ir.Node) {
case exprConst:
pos := r.pos()
- typ, val := r.value()
+ typ := r.typ()
+ val := FixValue(typ, r.value())
op := r.op()
orig := r.string()
return typecheck.Expr(OrigConst(pos, typ, val, op, orig))
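
The $GOROOT handling added to posBaseIdx is self-contained enough to restate as a standalone sketch, using the same strings and internal/buildcfg imports this hunk adds:

    // Sketch of the re-expansion above: export data stores the stable
    // objabi.AbsFile form; expand "$GOROOT" back to the real root for
    // consumers that expect full paths (e.g. asmcheck tests).
    func expandGoroot(abs string) string {
        const dollarGOROOT = "$GOROOT"
        if strings.HasPrefix(abs, dollarGOROOT) {
            return buildcfg.GOROOT + abs[len(dollarGOROOT):]
        }
        return abs
    }
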
diff --git a/src/cmd/compile/internal/noder/reader2.go b/src/cmd/compile/internal/noder/reader2.go
index 296d84289c..dcd9a65f40 100644
--- a/src/cmd/compile/internal/noder/reader2.go
+++ b/src/cmd/compile/internal/noder/reader2.go
@@ -7,8 +7,6 @@
package noder
import (
- "go/constant"
-
"cmd/compile/internal/base"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types2"
@@ -18,7 +16,7 @@ import (
type pkgReader2 struct {
pkgDecoder
- check *types2.Checker
+ env *types2.Environment
imports map[string]*types2.Package
posBases []*syntax.PosBase
@@ -26,11 +24,11 @@ type pkgReader2 struct {
typs []types2.Type
}
-func readPackage2(check *types2.Checker, imports map[string]*types2.Package, input pkgDecoder) *types2.Package {
+func readPackage2(env *types2.Environment, imports map[string]*types2.Package, input pkgDecoder) *types2.Package {
pr := pkgReader2{
pkgDecoder: input,
- check: check,
+ env: env,
imports: imports,
posBases: make([]*syntax.PosBase, input.numElems(relocPosBase)),
@@ -233,7 +231,7 @@ func (r *reader2) doTyp() (res types2.Type) {
obj, targs := r.obj()
name := obj.(*types2.TypeName)
if len(targs) != 0 {
- t, _ := types2.Instantiate(types2.NewEnvironment(r.p.check), name.Type(), targs, false)
+ t, _ := types2.Instantiate(r.p.env, name.Type(), targs, false)
return t
}
return name.Type()
@@ -388,14 +386,15 @@ func (pr *pkgReader2) objIdx(idx int) (*types2.Package, string) {
case objConst:
pos := r.pos()
- typ, val := r.value()
+ typ := r.typ()
+ val := r.value()
return types2.NewConst(pos, objPkg, objName, typ, val)
case objFunc:
pos := r.pos()
tparams := r.typeParamNames()
sig := r.signature(nil)
- sig.SetTParams(tparams)
+ sig.SetTypeParams(tparams)
return types2.NewFunc(pos, objPkg, objName, sig)
case objType:
@@ -428,11 +427,6 @@ func (pr *pkgReader2) objIdx(idx int) (*types2.Package, string) {
return objPkg, objName
}
-func (r *reader2) value() (types2.Type, constant.Value) {
- r.sync(syncValue)
- return r.typ(), r.rawValue()
-}
-
func (pr *pkgReader2) objDictIdx(idx int) *reader2Dict {
r := pr.newReader(relocObjDict, idx, syncObject1)
@@ -481,7 +475,7 @@ func (r *reader2) typeParamNames() []*types2.TypeParam {
pkg, name := r.localIdent()
tname := types2.NewTypeName(pos, pkg, name, nil)
- r.dict.tparams[i] = r.p.check.NewTypeParam(tname, nil)
+ r.dict.tparams[i] = types2.NewTypeParam(tname, nil)
}
for i, bound := range r.dict.bounds {
@@ -498,7 +492,7 @@ func (r *reader2) method() *types2.Func {
rparams := r.typeParamNames()
sig := r.signature(r.param())
- sig.SetRParams(rparams)
+ sig.SetRecvTypeParams(rparams)
_ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
return types2.NewFunc(pos, pkg, name, sig)
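
The reader2 changes track this CL's types2 API renames: TParams/TArgs become TypeParams/TypeArgs, SetRParams becomes SetRecvTypeParams, Checker-bound helpers become package-level, and instantiation deduplication moves to an explicit Environment. A hedged usage sketch of the new surface as it appears in these hunks (pos, pkg, baseType, and targs stand in for values from the surrounding reader):

    func instantiateSketch(pos syntax.Pos, pkg *types2.Package, baseType types2.Type, targs []types2.Type) (types2.Type, error) {
        env := types2.NewEnvironment() // shared across importer and checker
        tn := types2.NewTypeName(pos, pkg, "P", nil)
        _ = types2.NewTypeParam(tn, nil) // was (*types2.Checker)(nil).NewTypeParam
        return types2.Instantiate(env, baseType, targs, false) // was Instantiate(nil, ...)
    }
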
diff --git a/src/cmd/compile/internal/noder/stencil.go b/src/cmd/compile/internal/noder/stencil.go
index 1c22fc2ac0..7fca674132 100644
--- a/src/cmd/compile/internal/noder/stencil.go
+++ b/src/cmd/compile/internal/noder/stencil.go
@@ -44,7 +44,6 @@ func infoPrint(format string, a ...interface{}) {
// process.
func (g *irgen) stencil() {
g.instInfoMap = make(map[*types.Sym]*instInfo)
- g.gfInfoMap = make(map[*types.Sym]*gfInfo)
// Instantiate the methods of instantiated generic types that we have seen so far.
g.instantiateMethods()
@@ -106,7 +105,7 @@ func (g *irgen) stencil() {
inst := call.X.(*ir.InstExpr)
nameNode, isMeth := g.getInstNameNode(inst)
targs := typecheck.TypesOf(inst.Targs)
- st := g.getInstantiation(nameNode, targs, isMeth)
+ st := g.getInstantiation(nameNode, targs, isMeth).fun
dictValue, usingSubdict := g.getDictOrSubdict(declInfo, n, nameNode, targs, isMeth)
if infoPrintMode {
dictkind := "Main dictionary"
@@ -165,7 +164,7 @@ func (g *irgen) stencil() {
// to OCALLFUNC and does typecheckaste/assignconvfn.
transformCall(call)
- st := g.getInstantiation(gf, targs, true)
+ st := g.getInstantiation(gf, targs, true).fun
dictValue, usingSubdict := g.getDictOrSubdict(declInfo, n, gf, targs, true)
// We have to be using a subdictionary, since this is
// a generic method call.
@@ -232,6 +231,31 @@ func (g *irgen) stencil() {
}
g.finalizeSyms()
+
+ // All the instantiations and dictionaries have been created. Now go through
+ // each instantiation and transform the various operations that need to make
+ // use of their dictionary.
+ l := len(g.instInfoMap)
+ for _, info := range g.instInfoMap {
+ g.dictPass(info)
+ if doubleCheck {
+ ir.Visit(info.fun, func(n ir.Node) {
+ if n.Op() != ir.OCONVIFACE {
+ return
+ }
+ c := n.(*ir.ConvExpr)
+ if c.X.Type().HasShape() && !c.X.Type().IsInterface() {
+ ir.Dump("BAD FUNCTION", info.fun)
+ ir.Dump("BAD CONVERSION", c)
+ base.Fatalf("converting shape type to interface")
+ }
+ })
+ }
+ if base.Flag.W > 1 {
+ ir.Dump(fmt.Sprintf("\ndictpass %v", info.fun), info.fun)
+ }
+ }
+ assert(l == len(g.instInfoMap))
}
// buildClosure makes a closure to implement x, a OFUNCINST or OMETHEXPR
@@ -274,7 +298,7 @@ func (g *irgen) buildClosure(outer *ir.Func, x ir.Node) ir.Node {
// For method values, the target expects a dictionary and the receiver
// as its first two arguments.
// dictValue is the value to use for the dictionary argument.
- target = g.getInstantiation(gf, targs, rcvrValue != nil)
+ target = g.getInstantiation(gf, targs, rcvrValue != nil).fun
dictValue, usingSubdict = g.getDictOrSubdict(outerInfo, x, gf, targs, rcvrValue != nil)
if infoPrintMode {
dictkind := "Main dictionary"
@@ -321,7 +345,7 @@ func (g *irgen) buildClosure(outer *ir.Func, x ir.Node) ir.Node {
// Remember if value method, so we can detect (*T).M case.
valueMethod = true
}
- target = g.getInstantiation(gf, targs, true)
+ target = g.getInstantiation(gf, targs, true).fun
dictValue, usingSubdict = g.getDictOrSubdict(outerInfo, x, gf, targs, true)
if infoPrintMode {
dictkind := "Main dictionary"
@@ -396,13 +420,19 @@ func (g *irgen) buildClosure(outer *ir.Func, x ir.Node) ir.Node {
if rcvrValue != nil {
rcvrVar = ir.NewNameAt(pos, typecheck.LookupNum(".rcvr", g.dnum))
g.dnum++
- rcvrVar.Class = ir.PAUTO
typed(rcvrValue.Type(), rcvrVar)
- rcvrVar.Curfn = outer
rcvrAssign = ir.NewAssignStmt(pos, rcvrVar, rcvrValue)
rcvrAssign.SetTypecheck(1)
rcvrVar.Defn = rcvrAssign
- outer.Dcl = append(outer.Dcl, rcvrVar)
+ if outer == nil {
+ rcvrVar.Class = ir.PEXTERN
+ g.target.Decls = append(g.target.Decls, rcvrAssign)
+ g.target.Externs = append(g.target.Externs, rcvrVar)
+ } else {
+ rcvrVar.Class = ir.PAUTO
+ rcvrVar.Curfn = outer
+ outer.Dcl = append(outer.Dcl, rcvrVar)
+ }
}
// Build body of closure. This involves just calling the wrapped function directly
@@ -538,13 +568,18 @@ func (g *irgen) getDictOrSubdict(declInfo *instInfo, n ir.Node, nameNode *ir.Nam
var dict ir.Node
usingSubdict := false
if declInfo != nil {
- // Get the dictionary arg via sub-dictionary reference
- entry, ok := declInfo.dictEntryMap[n]
+ entry := -1
+ for i, de := range declInfo.dictInfo.subDictCalls {
+ if n == de {
+ entry = declInfo.dictInfo.startSubDict + i
+ break
+ }
+ }
// If the entry is not found, it may be that this node did not have
// any type args that depend on type params, so we need a main
// dictionary, not a sub-dictionary.
- if ok {
- dict = getDictionaryEntry(n.Pos(), declInfo.dictParam, entry, declInfo.dictLen)
+ if entry >= 0 {
+ dict = getDictionaryEntry(n.Pos(), declInfo.dictParam, entry, declInfo.dictInfo.dictLen)
usingSubdict = true
}
}
@@ -571,7 +606,7 @@ func checkFetchBody(nameNode *ir.Name) {
// getInstantiation gets the instantiation and dictionary of the function or method nameNode
// with the type arguments shapes. If the instantiated function is not already
// cached, then it calls genericSubst to create the new instantiation.
-func (g *irgen) getInstantiation(nameNode *ir.Name, shapes []*types.Type, isMeth bool) *ir.Func {
+func (g *irgen) getInstantiation(nameNode *ir.Name, shapes []*types.Type, isMeth bool) *instInfo {
checkFetchBody(nameNode)
// Convert any non-shape type arguments to their shape, so we can reduce the
@@ -580,12 +615,12 @@ func (g *irgen) getInstantiation(nameNode *ir.Name, shapes []*types.Type, isMeth
// specified concrete type args.
var s1 []*types.Type
for i, t := range shapes {
- if !t.HasShape() {
+ if !t.IsShape() {
if s1 == nil {
s1 = make([]*types.Type, len(shapes))
copy(s1[0:i], shapes[0:i])
}
- s1[i] = typecheck.Shapify(t)
+ s1[i] = typecheck.Shapify(t, i)
} else if s1 != nil {
s1[i] = shapes[i]
}
@@ -599,28 +634,28 @@ func (g *irgen) getInstantiation(nameNode *ir.Name, shapes []*types.Type, isMeth
if info == nil {
// If instantiation doesn't exist yet, create it and add
// to the list of decls.
- gfInfo := g.getGfInfo(nameNode)
info = &instInfo{
- gf: nameNode,
- gfInfo: gfInfo,
- startSubDict: len(shapes) + len(gfInfo.derivedTypes),
- startItabConv: len(shapes) + len(gfInfo.derivedTypes) + len(gfInfo.subDictCalls),
- dictLen: len(shapes) + len(gfInfo.derivedTypes) + len(gfInfo.subDictCalls) + len(gfInfo.itabConvs),
- dictEntryMap: make(map[ir.Node]int),
- }
- // genericSubst fills in info.dictParam and info.dictEntryMap.
+ dictInfo: &dictInfo{},
+ }
+ info.dictInfo.shapeToBound = make(map[*types.Type]*types.Type)
+
+ // genericSubst fills in info.dictParam and info.dictInfo.shapeToBound.
st := g.genericSubst(sym, nameNode, shapes, isMeth, info)
info.fun = st
g.instInfoMap[sym] = info
+
+ // getInstInfo fills in info.dictInfo.
+ g.getInstInfo(st, shapes, info)
+ if base.Flag.W > 1 {
+ ir.Dump(fmt.Sprintf("\nstenciled %v", st), st)
+ }
+
// This ensures that the linker drops duplicates of this instantiation.
// All just works!
st.SetDupok(true)
g.target.Decls = append(g.target.Decls, st)
- if base.Flag.W > 1 {
- ir.Dump(fmt.Sprintf("\nstenciled %v", st), st)
- }
}
- return info.fun
+ return info
}
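The control flow of the rewritten getInstantiation is a memoization pattern: look up by symbol, and on a miss create the instInfo, stencil the body, and compute the dictionary format. A rough standalone sketch with simplified types (the real code keys on *types.Sym and calls genericSubst and getInstInfo):

type instInfo struct {
	fun      any       // the stenciled function
	dictInfo *dictInfo // dictionary format, filled in by getInstInfo
}

type dictInfo struct {
	shapeToBound map[any]any
}

var instInfoMap = map[string]*instInfo{}

// getInstantiation returns the cached instantiation for sym, creating,
// registering, and filling it in first if needed.
func getInstantiation(sym string, build func(*instInfo)) *instInfo {
	info := instInfoMap[sym]
	if info == nil {
		info = &instInfo{dictInfo: &dictInfo{shapeToBound: map[any]any{}}}
		instInfoMap[sym] = info
		build(info) // genericSubst + getInstInfo in the real code
	}
	return info
}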
// Struct containing info needed for doing the substitution as we create the
@@ -641,7 +676,7 @@ type subster struct {
// args shapes. For a method with a generic receiver, it returns an instantiated
// function type where the receiver becomes the first parameter. For either a generic
// method or function, a dictionary parameter is then added as the very first
-// parameter. genericSubst fills in info.dictParam and info.dictEntryMap.
+// parameter. genericSubst fills in info.dictParam and info.tparamToBound.
func (g *irgen) genericSubst(newsym *types.Sym, nameNode *ir.Name, shapes []*types.Type, isMethod bool, info *instInfo) *ir.Func {
var tparams []*types.Type
if isMethod {
@@ -738,23 +773,13 @@ func (g *irgen) genericSubst(newsym *types.Sym, nameNode *ir.Name, shapes []*typ
base.Fatalf("defnMap is not empty")
}
- ir.CurFunc = savef
-
- if doubleCheck {
- ir.Visit(newf, func(n ir.Node) {
- if n.Op() != ir.OCONVIFACE {
- return
- }
- c := n.(*ir.ConvExpr)
- if c.X.Type().HasShape() && !c.X.Type().IsInterface() {
- ir.Dump("BAD FUNCTION", newf)
- ir.Dump("BAD CONVERSION", c)
- base.Fatalf("converting shape type to interface")
- }
- })
+ for i, tp := range tparams {
+ info.dictInfo.shapeToBound[shapes[i]] = subst.ts.Typ(tp.Bound())
}
- return newf
+ ir.CurFunc = savef
+
+ return subst.newf
}
// localvar creates a new name node for the specified local variable and enters it
@@ -773,6 +798,7 @@ func (subst *subster) localvar(name *ir.Name) *ir.Name {
m.Func = name.Func
subst.ts.Vars[name] = m
m.SetTypecheck(1)
+ m.DictIndex = name.DictIndex
if name.Defn != nil {
if name.Defn.Op() == ir.ONAME {
// This is a closure variable, so its Defn is the outer
@@ -863,11 +889,11 @@ func getDictionaryEntry(pos src.XPos, dict *ir.Name, i int, size int) ir.Node {
// refers to a type param or a derived type that uses type params). It uses the
// specified dictionary dictParam, rather than the one in info.dictParam.
func getDictionaryType(info *instInfo, dictParam *ir.Name, pos src.XPos, i int) ir.Node {
- if i < 0 || i >= info.startSubDict {
+ if i < 0 || i >= info.dictInfo.startSubDict {
base.Fatalf(fmt.Sprintf("bad dict index %d", i))
}
- r := getDictionaryEntry(pos, info.dictParam, i, info.startSubDict)
+ r := getDictionaryEntry(pos, info.dictParam, i, info.dictInfo.startSubDict)
// change type of retrieved dictionary entry to *byte, which is the
// standard typing of a *runtime._type in the compiler
typed(types.Types[types.TUINT8].PtrTo(), r)
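Slots [0, startSubDict) of a dictionary hold the *runtime._type words for the shape params and derived types, which the compiler handles as *byte. A minimal model of the bounds check and slot access above:

// dictTypeSlot models getDictionaryType: only the leading type-describing
// slots of the dictionary may be read this way.
func dictTypeSlot(dict []*byte, i, startSubDict int) *byte {
	if i < 0 || i >= startSubDict {
		panic("bad dict index")
	}
	return dict[i]
}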
@@ -929,17 +955,6 @@ func (subst *subster) node(n ir.Node) ir.Node {
}
}
- for i, de := range subst.info.gfInfo.subDictCalls {
- if de == x {
- // Remember the dictionary entry associated with this
- // node in the instantiated function
- // TODO: make sure this remains correct with respect to the
- // transformations below.
- subst.info.dictEntryMap[m] = subst.info.startSubDict + i
- break
- }
- }
-
ir.EditChildren(m, edit)
m.SetTypecheck(1)
@@ -980,6 +995,9 @@ func (subst *subster) node(n ir.Node) ir.Node {
case ir.OSEND:
transformSend(m.(*ir.SendStmt))
+ case ir.OSELECT:
+ transformSelect(m.(*ir.SelectStmt))
+
}
}
@@ -1012,36 +1030,9 @@ func (subst *subster) node(n ir.Node) ir.Node {
// we find in the OCALL case below that the method value
// is actually called.
mse := m.(*ir.SelectorExpr)
- if src := mse.X.Type(); src.IsShape() {
- // The only dot on a shape type value are methods.
- if mse.X.Op() == ir.OTYPE {
- // Method expression T.M
- m = subst.g.buildClosure2(subst, m, x)
- // No need for transformDot - buildClosure2 has already
- // transformed to OCALLINTER/ODOTINTER.
- } else {
- // Implement x.M as a conversion-to-bound-interface
- // 1) convert x to the bound interface
- // 2) call M on that interface
- gsrc := x.(*ir.SelectorExpr).X.Type()
- bound := gsrc.Bound()
- dst := bound
- if dst.HasTParam() {
- dst = subst.ts.Typ(dst)
- }
- if src.IsInterface() {
- // If type arg is an interface (unusual case),
- // we do a type assert to the type bound.
- mse.X = assertToBound(subst.info, subst.info.dictParam, m.Pos(), mse.X, bound, dst)
- } else {
- mse.X = convertUsingDictionary(subst.info, subst.info.dictParam, m.Pos(), mse.X, x, dst, gsrc)
- }
- transformDot(mse, false)
- }
- } else {
+ if src := mse.X.Type(); !src.IsShape() {
transformDot(mse, false)
}
- m.SetTypecheck(1)
case ir.OCALL:
call := m.(*ir.CallExpr)
@@ -1096,7 +1087,7 @@ func (subst *subster) node(n ir.Node) ir.Node {
// or channel receive to compute function value.
transformCall(call)
- case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.ODYNAMICDOTTYPE:
+ case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
transformCall(call)
case ir.OFUNCINST:
@@ -1104,6 +1095,7 @@ func (subst *subster) node(n ir.Node) ir.Node {
// in stencil() once we have created & attached the
// instantiation to be called.
+ case ir.OXDOT, ir.ODOTTYPE, ir.ODOTTYPE2:
default:
base.FatalfAt(call.Pos(), fmt.Sprintf("Unexpected op with CALL during stenciling: %v", call.X.Op()))
}
@@ -1135,7 +1127,7 @@ func (subst *subster) node(n ir.Node) ir.Node {
// Copy that closure variable to a local one.
// Note: this allows the dictionary to be captured by child closures.
// See issue 47723.
- ldict := ir.NewNameAt(x.Pos(), subst.info.gf.Sym().Pkg.Lookup(".dict"))
+ ldict := ir.NewNameAt(x.Pos(), newfn.Sym().Pkg.Lookup(".dict"))
typed(types.Types[types.TUINTPTR], ldict)
ldict.Class = ir.PAUTO
ldict.Curfn = newfn
@@ -1147,16 +1139,11 @@ func (subst *subster) node(n ir.Node) ir.Node {
// Create inst info for the instantiated closure. The dict
// param is the closure variable for the dictionary of the
// outer function. Since the dictionary is shared, use the
- // same entries for startSubDict, dictLen, dictEntryMap.
+ // same dictInfo.
cinfo := &instInfo{
- fun: newfn,
- dictParam: ldict,
- gf: subst.info.gf,
- gfInfo: subst.info.gfInfo,
- startSubDict: subst.info.startSubDict,
- startItabConv: subst.info.startItabConv,
- dictLen: subst.info.dictLen,
- dictEntryMap: subst.info.dictEntryMap,
+ fun: newfn,
+ dictParam: ldict,
+ dictInfo: subst.info.dictInfo,
}
subst.g.instInfoMap[newfn.Nname.Sym()] = cinfo
@@ -1175,107 +1162,176 @@ func (subst *subster) node(n ir.Node) ir.Node {
m = ir.UseClosure(newfn.OClosure, subst.g.target)
m.(*ir.ClosureExpr).SetInit(subst.list(x.Init()))
+ }
+ return m
+ }
+
+ return edit(n)
+}
+
+// dictPass takes a function instantiation and does the transformations on the
+// operations that need to make use of the dictionary param.
+func (g *irgen) dictPass(info *instInfo) {
+ savef := ir.CurFunc
+ ir.CurFunc = info.fun
+
+ var edit func(ir.Node) ir.Node
+ edit = func(m ir.Node) ir.Node {
+ ir.EditChildren(m, edit)
+
+ switch m.Op() {
+ case ir.OCLOSURE:
+ newf := m.(*ir.ClosureExpr).Func
+ ir.CurFunc = newf
+ outerinfo := info
+ info = g.instInfoMap[newf.Nname.Sym()]
+
+ body := newf.Body
+ for i, n := range body {
+ body[i] = edit(n)
+ }
+
+ info = outerinfo
+ ir.CurFunc = info.fun
+
+ case ir.OXDOT:
+ mse := m.(*ir.SelectorExpr)
+ src := mse.X.Type()
+ assert(src.IsShape())
+
+ // The only dots on a shape type value are methods.
+ if mse.X.Op() == ir.OTYPE {
+ // Method expression T.M
+ m = g.buildClosure2(info, m)
+ // No need for transformDot - buildClosure2 has already
+ // transformed to OCALLINTER/ODOTINTER.
+ } else {
+ // Implement x.M as a conversion-to-bound-interface
+ // 1) convert x to the bound interface
+ // 2) call M on that interface
+ dst := info.dictInfo.shapeToBound[m.(*ir.SelectorExpr).X.Type()]
+ if src.IsInterface() {
+ // If type arg is an interface (unusual case),
+ // we do a type assert to the type bound.
+ mse.X = assertToBound(info, info.dictParam, m.Pos(), mse.X, dst)
+ } else {
+ mse.X = convertUsingDictionary(info, info.dictParam, m.Pos(), mse.X, m, dst)
+ }
+ transformDot(mse, false)
+ }
+ case ir.OCALL:
+ op := m.(*ir.CallExpr).X.Op()
+ if op != ir.OFUNCINST {
+ assert(op == ir.OMETHVALUE || op == ir.OCLOSURE || op == ir.ODYNAMICDOTTYPE || op == ir.ODYNAMICDOTTYPE2)
+ transformCall(m.(*ir.CallExpr))
+ }
+
case ir.OCONVIFACE:
- x := x.(*ir.ConvExpr)
+ if m.Type().IsEmptyInterface() && m.(*ir.ConvExpr).X.Type().IsEmptyInterface() {
+ // Was T->interface{}, after stenciling it is now interface{}->interface{}.
+ // No longer need the conversion. See issue 48276.
+ m.(*ir.ConvExpr).SetOp(ir.OCONVNOP)
+ break
+ }
+ mce := m.(*ir.ConvExpr)
// Note: mce's argument may still have a shape type, in which
// case the conversion must go through the dictionary.
- if x.X.Type().HasTParam() || (x.X.Type().IsInterface() && x.Type().HasTParam()) {
- m = convertUsingDictionary(subst.info, subst.info.dictParam, m.Pos(), m.(*ir.ConvExpr).X, x, m.Type(), x.X.Type())
+ if mce.X.Type().HasShape() || (mce.X.Type().IsInterface() && m.Type().HasShape()) {
+ m = convertUsingDictionary(info, info.dictParam, m.Pos(), m.(*ir.ConvExpr).X, m, m.Type())
}
case ir.ODOTTYPE, ir.ODOTTYPE2:
- if !x.Type().HasTParam() {
+ if !m.Type().HasShape() {
break
}
dt := m.(*ir.TypeAssertExpr)
var rt ir.Node
if dt.Type().IsInterface() || dt.X.Type().IsEmptyInterface() {
- ix := findDictType(subst.info, x.Type())
+ ix := findDictType(info, m.Type())
assert(ix >= 0)
- rt = getDictionaryType(subst.info, subst.info.dictParam, dt.Pos(), ix)
+ rt = getDictionaryType(info, info.dictParam, dt.Pos(), ix)
} else {
// nonempty interface to noninterface. Need an itab.
ix := -1
- for i, ic := range subst.info.gfInfo.itabConvs {
- if ic == x {
- ix = subst.info.startItabConv + i
+ for i, ic := range info.dictInfo.itabConvs {
+ if ic == m {
+ ix = info.dictInfo.startItabConv + i
break
}
}
assert(ix >= 0)
- rt = getDictionaryEntry(dt.Pos(), subst.info.dictParam, ix, subst.info.dictLen)
+ rt = getDictionaryEntry(dt.Pos(), info.dictParam, ix, info.dictInfo.dictLen)
}
op := ir.ODYNAMICDOTTYPE
- if x.Op() == ir.ODOTTYPE2 {
+ if m.Op() == ir.ODOTTYPE2 {
op = ir.ODYNAMICDOTTYPE2
}
m = ir.NewDynamicTypeAssertExpr(dt.Pos(), op, dt.X, rt)
m.SetType(dt.Type())
m.SetTypecheck(1)
case ir.OCASE:
- if _, ok := x.(*ir.CommClause); ok {
+ if _, ok := m.(*ir.CommClause); ok {
// This is not a type switch. TODO: Should we use an OSWITCH case here instead of OCASE?
break
}
- x := x.(*ir.CaseClause)
m := m.(*ir.CaseClause)
- for i, c := range x.List {
- if c.Op() == ir.OTYPE && c.Type().HasTParam() {
+ for i, c := range m.List {
+ if c.Op() == ir.OTYPE && c.Type().HasShape() {
// Use a *runtime._type for the dynamic type.
- ix := findDictType(subst.info, c.Type())
+ ix := findDictType(info, m.List[i].Type())
assert(ix >= 0)
- dt := ir.NewDynamicType(c.Pos(), getDictionaryEntry(c.Pos(), subst.info.dictParam, ix, subst.info.dictLen))
+ dt := ir.NewDynamicType(c.Pos(), getDictionaryEntry(c.Pos(), info.dictParam, ix, info.dictInfo.dictLen))
// For type switch from nonempty interfaces to non-interfaces, we need an itab as well.
if !m.List[i].Type().IsInterface() {
- if _, ok := subst.info.gfInfo.type2switchType[c]; ok {
+ if _, ok := info.dictInfo.type2switchType[m.List[i]]; ok {
// Type switch from nonempty interface. We need a *runtime.itab
// for the dynamic type.
ix := -1
- for i, ic := range subst.info.gfInfo.itabConvs {
- if ic == c {
- ix = subst.info.startItabConv + i
+ for i, ic := range info.dictInfo.itabConvs {
+ if ic == m.List[i] {
+ ix = info.dictInfo.startItabConv + i
break
}
}
assert(ix >= 0)
- dt.ITab = getDictionaryEntry(c.Pos(), subst.info.dictParam, ix, subst.info.dictLen)
+ dt.ITab = getDictionaryEntry(c.Pos(), info.dictParam, ix, info.dictInfo.dictLen)
}
}
typed(m.List[i].Type(), dt)
m.List[i] = dt
}
}
+
}
return m
}
-
- return edit(n)
+ edit(info.fun)
+ ir.CurFunc = savef
}
// findDictType looks for type t in the shape params or derived types of the
// dictionary info. The result indicates the dictionary entry with the
// correct concrete type for the associated instantiated function.
func findDictType(info *instInfo, t *types.Type) int {
- for i, dt := range info.gfInfo.tparams {
+ for i, dt := range info.dictInfo.shapeParams {
if dt == t {
return i
}
}
- for i, dt := range info.gfInfo.derivedTypes {
- if types.Identical(dt, t) {
- return i + len(info.gfInfo.tparams)
+ for i, dt := range info.dictInfo.derivedTypes {
+ if types.IdenticalStrict(dt, t) {
+ return i + len(info.dictInfo.shapeParams)
}
}
return -1
}
-// convertUsingDictionary converts value v from instantiated type src to an interface
-// type dst, by returning a new set of nodes that make use of a dictionary entry. src
-// is the generic (not shape) type, and gn is the original generic node of the
-// CONVIFACE node or XDOT node (for a bound method call) that is causing the
+// convertUsingDictionary converts instantiated value v (type v.Type()) to an interface
+// type dst, by returning a new set of nodes that make use of a dictionary entry. in is the
+// instantiated node of the CONVIFACE node or XDOT node (for a bound method call) that is causing the
// conversion.
-func convertUsingDictionary(info *instInfo, dictParam *ir.Name, pos src.XPos, v ir.Node, gn ir.Node, dst, src *types.Type) ir.Node {
- assert(src.HasTParam() || src.IsInterface() && gn.Type().HasTParam())
+func convertUsingDictionary(info *instInfo, dictParam *ir.Name, pos src.XPos, v ir.Node, in ir.Node, dst *types.Type) ir.Node {
+ assert(v.Type().HasShape() || v.Type().IsInterface() && in.Type().HasShape())
assert(dst.IsInterface())
if v.Type().IsInterface() {
@@ -1288,8 +1344,7 @@ func convertUsingDictionary(info *instInfo, dictParam *ir.Name, pos src.XPos, v
v.SetTypecheck(1)
return v
}
- gdst := gn.Type() // pre-stenciled destination type
- if !gdst.HasTParam() {
+ if !in.Type().HasShape() {
// Regular OCONVIFACE works if the destination isn't parameterized.
v = ir.NewConvExpr(pos, ir.OCONVIFACE, dst, v)
v.SetTypecheck(1)
@@ -1311,7 +1366,7 @@ func convertUsingDictionary(info *instInfo, dictParam *ir.Name, pos src.XPos, v
types.CalcSize(fn.Type())
call := ir.NewCallExpr(pos, ir.OCALLFUNC, fn, nil)
typed(types.Types[types.TUINT8].PtrTo(), call)
- ix := findDictType(info, gdst)
+ ix := findDictType(info, in.Type())
assert(ix >= 0)
inter := getDictionaryType(info, dictParam, pos, ix)
call.Args = []ir.Node{inter, itab}
@@ -1327,16 +1382,16 @@ func convertUsingDictionary(info *instInfo, dictParam *ir.Name, pos src.XPos, v
// will be more efficient than converting to an empty interface first
// and then type asserting to dst.
ix := -1
- for i, ic := range info.gfInfo.itabConvs {
- if ic == gn {
- ix = info.startItabConv + i
+ for i, ic := range info.dictInfo.itabConvs {
+ if ic == in {
+ ix = info.dictInfo.startItabConv + i
break
}
}
assert(ix >= 0)
- rt = getDictionaryEntry(pos, dictParam, ix, info.dictLen)
+ rt = getDictionaryEntry(pos, dictParam, ix, info.dictInfo.dictLen)
} else {
- ix := findDictType(info, src)
+ ix := findDictType(info, v.Type())
assert(ix >= 0)
// Load the actual runtime._type of the type parameter from the dictionary.
rt = getDictionaryType(info, dictParam, pos, ix)
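The two branches above pick between the two kinds of type-describing dictionary entries; at this point v is known not to be an interface value. A small decision sketch, with a boolean input standing in for the type query:

// entryKindForConv models the choice in convertUsingDictionary: a direct
// conversion to a nonempty interface can use a precomputed *runtime.itab;
// for an empty interface the source's *runtime._type alone suffices to
// build the interface word.
func entryKindForConv(dstIsEmpty bool) string {
	if !dstIsEmpty {
		return "itab"
	}
	return "type"
}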
@@ -1424,6 +1479,7 @@ func markTypeUsed(t *types.Type, lsym *obj.LSym) {
} else {
// TODO: This is somewhat overkill; we really only need it
// for types that are put into interfaces.
+ // Note: this relocation is also used in cmd/link/internal/ld/dwarf.go
reflectdata.MarkTypeUsedInInterface(t, lsym)
}
}
@@ -1452,8 +1508,6 @@ func (g *irgen) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool)
return sym
}
- info := g.getGfInfo(gf)
-
infoPrint("=== Creating dictionary %v\n", sym.Name)
off := 0
// Emit an entry for each targ (concrete type or gcshape).
@@ -1463,8 +1517,12 @@ func (g *irgen) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool)
off = objw.SymPtr(lsym, off, s, 0)
markTypeUsed(t, lsym)
}
+
+ instInfo := g.getInstantiation(gf, targs, isMeth)
+ info := instInfo.dictInfo
+
subst := typecheck.Tsubster{
- Tparams: info.tparams,
+ Tparams: info.shapeParams,
Targs: targs,
}
// Emit an entry for each derived type (after substituting targs)
@@ -1479,18 +1537,33 @@ func (g *irgen) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool)
for _, n := range info.subDictCalls {
var sym *types.Sym
switch n.Op() {
- case ir.OCALL:
+ case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH:
call := n.(*ir.CallExpr)
- if call.X.Op() == ir.OXDOT {
+ if call.X.Op() == ir.OXDOT || call.X.Op() == ir.ODOTMETH {
var nameNode *ir.Name
se := call.X.(*ir.SelectorExpr)
- if types.IsInterfaceMethod(se.Selection.Type) {
+ if se.X.Type().IsShape() {
// This is a method call enabled by a type bound.
+
+ // We need this extra check for type expressions, which
+ // don't add in the implicit XDOTs.
tmpse := ir.NewSelectorExpr(base.Pos, ir.OXDOT, se.X, se.Sel)
tmpse = typecheck.AddImplicitDots(tmpse)
tparam := tmpse.X.Type()
- assert(tparam.IsTypeParam())
- recvType := targs[tparam.Index()]
+ if !tparam.IsShape() {
+ // The method expression is not
+ // really on a typeparam.
+ break
+ }
+ ix := -1
+ for i, shape := range info.shapeParams {
+ if shape == tparam {
+ ix = i
+ break
+ }
+ }
+ assert(ix >= 0)
+ recvType := targs[ix]
if recvType.IsInterface() || len(recvType.RParams()) == 0 {
// No sub-dictionary entry is
// actually needed, since the
@@ -1509,8 +1582,10 @@ func (g *irgen) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool)
} else {
// This is the case of a normal
// method call on a generic type.
- nameNode = call.X.(*ir.SelectorExpr).Selection.Nname.(*ir.Name)
- subtargs := deref(call.X.(*ir.SelectorExpr).X.Type()).RParams()
+ recvType := deref(call.X.(*ir.SelectorExpr).X.Type())
+ genRecvType := recvType.OrigSym().Def.Type()
+ nameNode = typecheck.Lookdot1(call.X, se.Sel, genRecvType, genRecvType.Methods(), 1).Nname.(*ir.Name)
+ subtargs := recvType.RParams()
s2targs := make([]*types.Type, len(subtargs))
for i, t := range subtargs {
s2targs[i] = subst.Typ(t)
@@ -1543,14 +1618,16 @@ func (g *irgen) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool)
}
sym = g.getDictionarySym(nameNode, subtargs, false)
- case ir.OXDOT:
+ case ir.OXDOT, ir.OMETHEXPR, ir.OMETHVALUE:
selExpr := n.(*ir.SelectorExpr)
- subtargs := deref(selExpr.X.Type()).RParams()
+ recvType := deref(selExpr.Selection.Type.Recv().Type)
+ genRecvType := recvType.OrigSym().Def.Type()
+ subtargs := recvType.RParams()
s2targs := make([]*types.Type, len(subtargs))
for i, t := range subtargs {
s2targs[i] = subst.Typ(t)
}
- nameNode := selExpr.Selection.Nname.(*ir.Name)
+ nameNode := typecheck.Lookdot1(selExpr, selExpr.Sel, genRecvType, genRecvType.Methods(), 1).Nname.(*ir.Name)
sym = g.getDictionarySym(nameNode, s2targs, true)
default:
@@ -1567,11 +1644,13 @@ func (g *irgen) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool)
}
}
+ g.instantiateMethods()
delay := &delayInfo{
- gf: gf,
- targs: targs,
- sym: sym,
- off: off,
+ gf: gf,
+ targs: targs,
+ sym: sym,
+ off: off,
+ isMeth: isMeth,
}
g.dictSymsToFinalize = append(g.dictSymsToFinalize, delay)
return sym
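Dictionary symbols cannot be completed immediately: their itab slots may refer to fully-instantiated types whose methods have not been generated yet, so completion is queued on dictSymsToFinalize and done later in finalizeSyms. A minimal model of that defer-then-finalize pattern (illustrative types, not the compiler's):

type pendingDict struct {
	sym string // dictionary symbol still missing its itab entries
	off int    // next write offset within the symbol
}

var pending []pendingDict

func delayFinalization(sym string, off int) {
	pending = append(pending, pendingDict{sym, off})
}

// finalizeAll runs once every needed method instantiation exists, so the
// itabs referenced by the pending dictionaries can actually be built.
func finalizeAll(emitItabs func(pendingDict)) {
	for _, d := range pending {
		emitItabs(d)
	}
	pending = nil
}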
@@ -1587,10 +1666,11 @@ func (g *irgen) finalizeSyms() {
infoPrint("=== Finalizing dictionary %s\n", d.sym.Name)
lsym := d.sym.Linksym()
- info := g.getGfInfo(d.gf)
+ instInfo := g.getInstantiation(d.gf, d.targs, d.isMeth)
+ info := instInfo.dictInfo
subst := typecheck.Tsubster{
- Tparams: info.tparams,
+ Tparams: info.shapeParams,
Targs: d.targs,
}
@@ -1598,10 +1678,10 @@ func (g *irgen) finalizeSyms() {
for _, n := range info.itabConvs {
var srctype, dsttype *types.Type
switch n.Op() {
- case ir.OXDOT:
+ case ir.OXDOT, ir.OMETHVALUE:
se := n.(*ir.SelectorExpr)
srctype = subst.Typ(se.X.Type())
- dsttype = subst.Typ(se.X.Type().Bound())
+ dsttype = subst.Typ(info.shapeToBound[se.X.Type()])
found := false
for i, m := range dsttype.AllMethods().Slice() {
if se.Sel == m.Sym {
@@ -1632,6 +1712,9 @@ func (g *irgen) finalizeSyms() {
d.off = objw.Uintptr(lsym, d.off, 0)
infoPrint(" + Unused itab entry for %v\n", srctype)
} else {
+ // Make sure all new fully-instantiated types have
+ // their methods created before generating any itabs.
+ g.instantiateMethods()
itabLsym := reflectdata.ITabLsym(srctype, dsttype)
d.off = objw.SymPtr(lsym, d.off, itabLsym, 0)
infoPrint(" + Itab for (%v,%v)\n", srctype, dsttype)
@@ -1670,65 +1753,50 @@ func (g *irgen) getDictionaryValue(gf *ir.Name, targs []*types.Type, isMeth bool
return np
}
-// hasTParamNodes returns true if the type of any node in targs has a typeparam.
-func hasTParamNodes(targs []ir.Node) bool {
+// hasShapeNodes returns true if the type of any node in targs has a shape.
+func hasShapeNodes(targs []ir.Node) bool {
for _, n := range targs {
- if n.Type().HasTParam() {
+ if n.Type().HasShape() {
return true
}
}
return false
}
-// hasTParamNodes returns true if any type in targs has a typeparam.
-func hasTParamTypes(targs []*types.Type) bool {
+// hasShapeTypes returns true if any type in targs has a shape.
+func hasShapeTypes(targs []*types.Type) bool {
for _, t := range targs {
- if t.HasTParam() {
+ if t.HasShape() {
return true
}
}
return false
}
-// getGfInfo get information for a generic function - type params, derived generic
-// types, and subdictionaries.
-func (g *irgen) getGfInfo(gn *ir.Name) *gfInfo {
- infop := g.gfInfoMap[gn.Sym()]
- if infop != nil {
- return infop
- }
-
- checkFetchBody(gn)
- var info gfInfo
- gf := gn.Func
- recv := gf.Type().Recv()
- if recv != nil {
- info.tparams = deref(recv.Type).RParams()
- } else {
- tparams := gn.Type().TParams().FieldSlice()
- info.tparams = make([]*types.Type, len(tparams))
- for i, f := range tparams {
- info.tparams[i] = f.Type
- }
- }
+// getInstInfo gets the dictionary format for a function instantiation - type params, derived
+// types, and needed subdictionaries and itabs.
+func (g *irgen) getInstInfo(st *ir.Func, shapes []*types.Type, instInfo *instInfo) {
+ info := instInfo.dictInfo
+ info.shapeParams = shapes
- for _, t := range info.tparams {
- b := t.Bound()
- if b.HasTParam() {
+ for _, t := range info.shapeParams {
+ b := info.shapeToBound[t]
+ if b.HasShape() {
// If a type bound is parameterized (unusual case), then we
// may need its derived type to do a type assert when doing a
// bound call for a type arg that is an interface.
- addType(&info, nil, b)
+ addType(info, nil, b)
}
}
- for _, n := range gf.Dcl {
- addType(&info, n, n.Type())
+ for _, n := range st.Dcl {
+ addType(info, n, n.Type())
+ n.DictIndex = uint16(findDictType(instInfo, n.Type()) + 1)
}
if infoPrintMode {
- fmt.Printf(">>> GfInfo for %v\n", gn)
- for _, t := range info.tparams {
+ fmt.Printf(">>> InstInfo for %v\n", st)
+ for _, t := range info.shapeParams {
fmt.Printf(" Typeparam %v\n", t)
}
}
@@ -1736,14 +1804,14 @@ func (g *irgen) getGfInfo(gn *ir.Name) *gfInfo {
var visitFunc func(ir.Node)
visitFunc = func(n ir.Node) {
if n.Op() == ir.OFUNCINST && !n.(*ir.InstExpr).Implicit() {
- if hasTParamNodes(n.(*ir.InstExpr).Targs) {
+ if hasShapeNodes(n.(*ir.InstExpr).Targs) {
infoPrint(" Closure&subdictionary required at generic function value %v\n", n.(*ir.InstExpr).X)
info.subDictCalls = append(info.subDictCalls, n)
}
- } else if n.Op() == ir.OXDOT && !n.(*ir.SelectorExpr).Implicit() &&
- n.(*ir.SelectorExpr).Selection != nil &&
+ } else if (n.Op() == ir.OMETHEXPR || n.Op() == ir.OMETHVALUE) && !n.(*ir.SelectorExpr).Implicit() &&
+ !types.IsInterfaceMethod(n.(*ir.SelectorExpr).Selection.Type) &&
len(deref(n.(*ir.SelectorExpr).X.Type()).RParams()) > 0 {
- if hasTParamTypes(deref(n.(*ir.SelectorExpr).X.Type()).RParams()) {
+ if hasShapeTypes(deref(n.(*ir.SelectorExpr).X.Type()).RParams()) {
if n.(*ir.SelectorExpr).X.Op() == ir.OTYPE {
infoPrint(" Closure&subdictionary required at generic meth expr %v\n", n)
} else {
@@ -1754,34 +1822,33 @@ func (g *irgen) getGfInfo(gn *ir.Name) *gfInfo {
}
if n.Op() == ir.OCALL && n.(*ir.CallExpr).X.Op() == ir.OFUNCINST {
n.(*ir.CallExpr).X.(*ir.InstExpr).SetImplicit(true)
- if hasTParamNodes(n.(*ir.CallExpr).X.(*ir.InstExpr).Targs) {
+ if hasShapeNodes(n.(*ir.CallExpr).X.(*ir.InstExpr).Targs) {
infoPrint(" Subdictionary at generic function/method call: %v - %v\n", n.(*ir.CallExpr).X.(*ir.InstExpr).X, n)
info.subDictCalls = append(info.subDictCalls, n)
}
}
- if n.Op() == ir.OCALL && n.(*ir.CallExpr).X.Op() == ir.OXDOT &&
- n.(*ir.CallExpr).X.(*ir.SelectorExpr).Selection != nil &&
+ if n.Op() == ir.OCALLMETH && n.(*ir.CallExpr).X.Op() == ir.ODOTMETH &&
+ //n.(*ir.CallExpr).X.(*ir.SelectorExpr).Selection != nil &&
len(deref(n.(*ir.CallExpr).X.(*ir.SelectorExpr).X.Type()).RParams()) > 0 {
n.(*ir.CallExpr).X.(*ir.SelectorExpr).SetImplicit(true)
- if hasTParamTypes(deref(n.(*ir.CallExpr).X.(*ir.SelectorExpr).X.Type()).RParams()) {
+ if hasShapeTypes(deref(n.(*ir.CallExpr).X.(*ir.SelectorExpr).X.Type()).RParams()) {
infoPrint(" Subdictionary at generic method call: %v\n", n)
info.subDictCalls = append(info.subDictCalls, n)
}
}
if n.Op() == ir.OCALL && n.(*ir.CallExpr).X.Op() == ir.OXDOT &&
- n.(*ir.CallExpr).X.(*ir.SelectorExpr).Selection != nil &&
- deref(n.(*ir.CallExpr).X.(*ir.SelectorExpr).X.Type()).IsTypeParam() {
+ isShapeDeref(n.(*ir.CallExpr).X.(*ir.SelectorExpr).X.Type()) {
n.(*ir.CallExpr).X.(*ir.SelectorExpr).SetImplicit(true)
infoPrint(" Optional subdictionary at generic bound call: %v\n", n)
info.subDictCalls = append(info.subDictCalls, n)
}
if n.Op() == ir.OCONVIFACE && n.Type().IsInterface() &&
!n.Type().IsEmptyInterface() &&
- n.(*ir.ConvExpr).X.Type().HasTParam() {
+ n.(*ir.ConvExpr).X.Type().HasShape() {
infoPrint(" Itab for interface conv: %v\n", n)
info.itabConvs = append(info.itabConvs, n)
}
- if n.Op() == ir.OXDOT && n.(*ir.SelectorExpr).X.Type().IsTypeParam() {
+ if n.Op() == ir.OXDOT && n.(*ir.SelectorExpr).X.Type().IsShape() {
infoPrint(" Itab for bound call: %v\n", n)
info.itabConvs = append(info.itabConvs, n)
}
@@ -1793,14 +1860,18 @@ func (g *irgen) getGfInfo(gn *ir.Name) *gfInfo {
// Visit the closure body and add all relevant entries to the
// dictionary of the outer function (closure will just use
// the dictionary of the outer function).
- for _, n1 := range n.(*ir.ClosureExpr).Func.Body {
+ cfunc := n.(*ir.ClosureExpr).Func
+ for _, n1 := range cfunc.Body {
ir.Visit(n1, visitFunc)
}
+ for _, n := range cfunc.Dcl {
+ n.DictIndex = uint16(findDictType(instInfo, n.Type()) + 1)
+ }
}
if n.Op() == ir.OSWITCH && n.(*ir.SwitchStmt).Tag != nil && n.(*ir.SwitchStmt).Tag.Op() == ir.OTYPESW && !n.(*ir.SwitchStmt).Tag.(*ir.TypeSwitchGuard).X.Type().IsEmptyInterface() {
for _, cc := range n.(*ir.SwitchStmt).Cases {
for _, c := range cc.List {
- if c.Op() == ir.OTYPE && c.Type().HasTParam() {
+ if c.Op() == ir.OTYPE && c.Type().HasShape() {
// Type switch from a non-empty interface - might need an itab.
infoPrint(" Itab for type switch: %v\n", c)
info.itabConvs = append(info.itabConvs, c)
@@ -1812,41 +1883,48 @@ func (g *irgen) getGfInfo(gn *ir.Name) *gfInfo {
}
}
}
- addType(&info, n, n.Type())
+ addType(info, n, n.Type())
}
- for _, stmt := range gf.Body {
+ for _, stmt := range st.Body {
ir.Visit(stmt, visitFunc)
}
if infoPrintMode {
for _, t := range info.derivedTypes {
fmt.Printf(" Derived type %v\n", t)
}
- fmt.Printf(">>> Done Gfinfo\n")
+ fmt.Printf(">>> Done Instinfo\n")
}
- g.gfInfoMap[gn.Sym()] = &info
- return &info
+ info.startSubDict = len(info.shapeParams) + len(info.derivedTypes)
+ info.startItabConv = len(info.shapeParams) + len(info.derivedTypes) + len(info.subDictCalls)
+ info.dictLen = len(info.shapeParams) + len(info.derivedTypes) + len(info.subDictCalls) + len(info.itabConvs)
+}
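The three assignments above fix the dictionary layout as four contiguous sections of pointer-sized words: shape params, derived types, sub-dictionary pointers, then itabs. The same arithmetic as a self-contained sketch:

// dictLayout mirrors the index computation in getInstInfo.
type dictLayout struct {
	nShape, nDerived, nSubDict, nItab int
}

func (d dictLayout) startSubDict() int  { return d.nShape + d.nDerived }
func (d dictLayout) startItabConv() int { return d.startSubDict() + d.nSubDict }
func (d dictLayout) dictLen() int       { return d.startItabConv() + d.nItab }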
+
+// isShapeDeref returns true if t is either a shape or a pointer to a shape. (We
+// can't just use deref(t).IsShape(), since a shape type is a complex type and may
+// have a pointer as part of its shape.)
+func isShapeDeref(t *types.Type) bool {
+ return t.IsShape() || t.IsPtr() && t.Elem().IsShape()
}
// addType adds t to info.derivedTypes if it is a parameterized type (which is not
-// just a simple type param) that is different from any existing type on
+// just a simple shape) that is different from any existing type on
// info.derivedTypes.
-func addType(info *gfInfo, n ir.Node, t *types.Type) {
- if t == nil || !t.HasTParam() {
+func addType(info *dictInfo, n ir.Node, t *types.Type) {
+ if t == nil || !t.HasShape() {
return
}
- if t.IsTypeParam() && t.Underlying() == t {
+ if t.IsShape() {
return
}
if t.Kind() == types.TFUNC && n != nil &&
- (t.Recv() != nil ||
- n.Op() == ir.ONAME && n.Name().Class == ir.PFUNC) {
+ (t.Recv() != nil || n.Op() == ir.ONAME && n.Name().Class == ir.PFUNC) {
// Don't use the type of a named generic function or method,
// since that is parameterized by other typeparams.
// (They all come from arguments of a FUNCINST node.)
return
}
- if doubleCheck && !parameterizedBy(t, info.tparams) {
+ if doubleCheck && !parameterizedBy(t, info.shapeParams) {
base.Fatalf("adding type with invalid parameters %+v", t)
}
if t.Kind() == types.TSTRUCT && t.IsFuncArgStruct() {
@@ -1855,7 +1933,7 @@ func addType(info *gfInfo, n ir.Node, t *types.Type) {
}
// Ignore a derived type we've already added.
for _, et := range info.derivedTypes {
- if types.Identical(t, et) {
+ if types.IdenticalStrict(t, et) {
return
}
}
@@ -1881,8 +1959,7 @@ func parameterizedBy1(t *types.Type, params []*types.Type, visited map[*types.Ty
}
return true
}
- switch t.Kind() {
- case types.TTYPEPARAM:
+ if t.IsShape() {
// Check if t is one of the allowed parameters in scope.
for _, p := range params {
if p == t {
@@ -1892,6 +1969,8 @@ func parameterizedBy1(t *types.Type, params []*types.Type, visited map[*types.Ty
// Couldn't find t in the list of allowed parameters.
return false
+ }
+ switch t.Kind() {
case types.TARRAY, types.TPTR, types.TSLICE, types.TCHAN:
return parameterizedBy1(t.Elem(), params, visited)
@@ -1982,17 +2061,17 @@ func startClosure(pos src.XPos, outer *ir.Func, typ *types.Type) (*ir.Func, []*t
}
// assertToBound returns a new node that converts a node rcvr with interface type to
-// the 'dst' interface type. bound is the unsubstituted form of dst.
-func assertToBound(info *instInfo, dictVar *ir.Name, pos src.XPos, rcvr ir.Node, bound, dst *types.Type) ir.Node {
- if bound.HasTParam() {
- ix := findDictType(info, bound)
+// the 'dst' interface type.
+func assertToBound(info *instInfo, dictVar *ir.Name, pos src.XPos, rcvr ir.Node, dst *types.Type) ir.Node {
+ if dst.HasShape() {
+ ix := findDictType(info, dst)
assert(ix >= 0)
rt := getDictionaryType(info, dictVar, pos, ix)
rcvr = ir.NewDynamicTypeAssertExpr(pos, ir.ODYNAMICDOTTYPE, rcvr, rt)
typed(dst, rcvr)
} else {
rcvr = ir.NewTypeAssertExpr(pos, rcvr, nil)
- typed(bound, rcvr)
+ typed(dst, rcvr)
}
return rcvr
}
@@ -2004,9 +2083,8 @@ func assertToBound(info *instInfo, dictVar *ir.Name, pos src.XPos, rcvr ir.Node,
//
// The returned closure is fully substituted and has already had any needed
// transformations done.
-func (g *irgen) buildClosure2(subst *subster, m, x ir.Node) ir.Node {
- outer := subst.newf
- info := subst.info
+func (g *irgen) buildClosure2(info *instInfo, m ir.Node) ir.Node {
+ outer := info.fun
pos := m.Pos()
typ := m.Type() // type of the closure
@@ -2029,24 +2107,18 @@ func (g *irgen) buildClosure2(subst *subster, m, x ir.Node) ir.Node {
rcvr := args[0]
args = args[1:]
assert(m.(*ir.SelectorExpr).X.Type().IsShape())
- gsrc := x.(*ir.SelectorExpr).X.Type()
- bound := gsrc.Bound()
- dst := bound
- if dst.HasTParam() {
- dst = subst.ts.Typ(bound)
- }
+ dst := info.dictInfo.shapeToBound[m.(*ir.SelectorExpr).X.Type()]
if m.(*ir.SelectorExpr).X.Type().IsInterface() {
// If type arg is an interface (unusual case), we do a type assert to
// the type bound.
- rcvr = assertToBound(info, dictVar, pos, rcvr, bound, dst)
+ rcvr = assertToBound(info, dictVar, pos, rcvr, dst)
} else {
- rcvr = convertUsingDictionary(info, dictVar, pos, rcvr, x, dst, gsrc)
+ rcvr = convertUsingDictionary(info, dictVar, pos, rcvr, m, dst)
}
- dot := ir.NewSelectorExpr(pos, ir.ODOTINTER, rcvr, x.(*ir.SelectorExpr).Sel)
+ dot := ir.NewSelectorExpr(pos, ir.ODOTINTER, rcvr, m.(*ir.SelectorExpr).Sel)
dot.Selection = typecheck.Lookdot1(dot, dot.Sel, dot.X.Type(), dot.X.Type().AllMethods(), 1)
- // Do a type substitution on the generic bound, in case it is parameterized.
- typed(subst.ts.Typ(x.(*ir.SelectorExpr).Selection.Type), dot)
+ typed(dot.Selection.Type, dot)
innerCall = ir.NewCallExpr(pos, ir.OCALLINTER, dot, args)
t := m.Type()
if t.NumResults() == 0 {
diff --git a/src/cmd/compile/internal/noder/stmt.go b/src/cmd/compile/internal/noder/stmt.go
index 146761c23f..805a4710c4 100644
--- a/src/cmd/compile/internal/noder/stmt.go
+++ b/src/cmd/compile/internal/noder/stmt.go
@@ -37,16 +37,12 @@ func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
case *syntax.BlockStmt:
return ir.NewBlockStmt(g.pos(stmt), g.blockStmt(stmt))
case *syntax.ExprStmt:
- return g.expr(stmt.X)
+ return wrapname(g.pos(stmt.X), g.expr(stmt.X))
case *syntax.SendStmt:
n := ir.NewSendStmt(g.pos(stmt), g.expr(stmt.Chan), g.expr(stmt.Value))
- if n.Chan.Type().HasTParam() || n.Value.Type().HasTParam() {
- // Delay transforming the send if the channel or value
- // have a type param.
- n.SetTypecheck(3)
- return n
+ if !delayTransform() {
+ transformSend(n)
}
- transformSend(n)
n.SetTypecheck(1)
return n
case *syntax.DeclStmt:
@@ -66,11 +62,9 @@ func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
lhs := g.expr(stmt.Lhs)
n = ir.NewAssignOpStmt(g.pos(stmt), op, lhs, rhs)
}
- if n.X.Typecheck() == 3 {
- n.SetTypecheck(3)
- return n
+ if !delayTransform() {
+ transformAsOp(n)
}
- transformAsOp(n)
n.SetTypecheck(1)
return n
}
@@ -79,49 +73,24 @@ func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
rhs := g.exprList(stmt.Rhs)
names, lhs := g.assignList(stmt.Lhs, stmt.Op == syntax.Def)
- // We must delay transforming the assign statement if any of the
- // lhs or rhs nodes are also delayed, since transformAssign needs
- // to know the types of the left and right sides in various cases.
- delay := false
- for _, e := range lhs {
- if e.Typecheck() == 3 {
- delay = true
- break
- }
- }
- for _, e := range rhs {
- if e.Typecheck() == 3 {
- delay = true
- break
- }
- }
-
if len(lhs) == 1 && len(rhs) == 1 {
n := ir.NewAssignStmt(g.pos(stmt), lhs[0], rhs[0])
n.Def = initDefn(n, names)
- if delay {
- earlyTransformAssign(n, lhs, rhs)
+ if !delayTransform() {
+ lhs, rhs := []ir.Node{n.X}, []ir.Node{n.Y}
+ transformAssign(n, lhs, rhs)
n.X, n.Y = lhs[0], rhs[0]
- n.SetTypecheck(3)
- return n
}
-
- lhs, rhs := []ir.Node{n.X}, []ir.Node{n.Y}
- transformAssign(n, lhs, rhs)
- n.X, n.Y = lhs[0], rhs[0]
n.SetTypecheck(1)
return n
}
n := ir.NewAssignListStmt(g.pos(stmt), ir.OAS2, lhs, rhs)
n.Def = initDefn(n, names)
- if delay {
- earlyTransformAssign(n, lhs, rhs)
- n.SetTypecheck(3)
- return n
+ if !delayTransform() {
+ transformAssign(n, n.Lhs, n.Rhs)
}
- transformAssign(n, n.Lhs, n.Rhs)
n.SetTypecheck(1)
return n
@@ -131,21 +100,9 @@ func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
return ir.NewGoDeferStmt(g.pos(stmt), g.tokOp(int(stmt.Tok), callOps[:]), g.expr(stmt.Call))
case *syntax.ReturnStmt:
n := ir.NewReturnStmt(g.pos(stmt), g.exprList(stmt.Results))
- for _, e := range n.Results {
- if e.Type().HasTParam() {
- // Delay transforming the return statement if any of the
- // return values have a type param.
- if !ir.HasNamedResults(ir.CurFunc) {
- transformArgs(n)
- // But add CONVIFACE nodes where needed if
- // any of the return values have interface type.
- typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, true)
- }
- n.SetTypecheck(3)
- return n
- }
+ if !delayTransform() {
+ transformReturn(n)
}
- transformReturn(n)
n.SetTypecheck(1)
return n
case *syntax.IfStmt:
@@ -154,7 +111,10 @@ func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
return g.forStmt(stmt)
case *syntax.SelectStmt:
n := g.selectStmt(stmt)
- transformSelect(n.(*ir.SelectStmt))
+
+ if !delayTransform() {
+ transformSelect(n.(*ir.SelectStmt))
+ }
n.SetTypecheck(1)
return n
case *syntax.SwitchStmt:
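All of these statement cases now funnel through one predicate instead of per-node Typecheck(3) markers. The body below is an assumption, shown only to make the control flow concrete: presumably the gate reports whether we are still building IR for a generic (uninstantiated) function, in which case transformations run later during stenciling.

// delayTransform reports whether transformations should be deferred
// until instantiation. (Hypothetical body; the real helper lives
// elsewhere in the noder package.)
func delayTransform() bool {
	return ir.CurFunc != nil && ir.CurFunc.Type().HasTParam()
}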
diff --git a/src/cmd/compile/internal/noder/transform.go b/src/cmd/compile/internal/noder/transform.go
index b278f3db09..953036eb42 100644
--- a/src/cmd/compile/internal/noder/transform.go
+++ b/src/cmd/compile/internal/noder/transform.go
@@ -157,7 +157,7 @@ func transformCall(n *ir.CallExpr) {
n.SetOp(ir.OCALLFUNC)
}
- typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args, false)
+ typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args)
if l.Op() == ir.ODOTMETH && len(deref(n.X.Type().Recv().Type).RParams()) == 0 {
typecheck.FixMethodCall(n)
}
@@ -195,7 +195,7 @@ func transformCompare(n *ir.BinaryExpr) {
aop, _ := typecheck.Assignop(lt, rt)
if aop != ir.OXXX {
types.CalcSize(lt)
- if lt.HasTParam() || rt.IsInterface() == lt.IsInterface() || lt.Size() >= 1<<16 {
+ if lt.HasShape() || rt.IsInterface() == lt.IsInterface() || lt.Size() >= 1<<16 {
l = ir.NewConvExpr(base.Pos, aop, rt, l)
l.SetTypecheck(1)
}
@@ -365,59 +365,6 @@ assignOK:
}
}
-// Version of transformAssign that can run on generic code that adds CONVIFACE calls
-// as needed (and rewrites multi-value calls).
-func earlyTransformAssign(stmt ir.Node, lhs, rhs []ir.Node) {
- cr := len(rhs)
- if len(rhs) == 1 {
- if rtyp := rhs[0].Type(); rtyp != nil && rtyp.IsFuncArgStruct() {
- cr = rtyp.NumFields()
- }
- }
-
- // x,y,z = f()
- _, isCallExpr := rhs[0].(*ir.CallExpr)
- if isCallExpr && cr > len(rhs) {
- stmt := stmt.(*ir.AssignListStmt)
- stmt.SetOp(ir.OAS2FUNC)
- r := rhs[0].(*ir.CallExpr)
- rtyp := r.Type()
-
- mismatched := false
- failed := false
- for i := range lhs {
- result := rtyp.Field(i).Type
-
- if lhs[i].Type() == nil || result == nil {
- failed = true
- } else if lhs[i] != ir.BlankNode && !types.Identical(lhs[i].Type(), result) {
- mismatched = true
- }
- }
- if mismatched && !failed {
- typecheck.RewriteMultiValueCall(stmt, r)
- }
- return
- }
-
- // x, ok = y
- if len(lhs) != len(rhs) {
- assert(len(lhs) == 2 && len(rhs) == 1)
- // TODO(danscales): deal with case where x or ok is an interface
- // type. We want to add CONVIFACE now, but that is tricky, because
- // the rhs may be AS2MAPR, AS2RECV, etc. which has two result values,
- // and that is not rewritten until the order phase (o.stmt, as2ok).
- return
- }
-
- // Check for interface conversion on each assignment
- for i, r := range rhs {
- if lhs[i].Type() != nil && lhs[i].Type().IsInterface() {
- rhs[i] = assignconvfn(r, lhs[i].Type())
- }
- }
-}
-
// Corresponds to typecheck.typecheckargs. Really just deals with multi-value calls.
func transformArgs(n ir.InitNode) {
var list []ir.Node
@@ -457,11 +404,15 @@ func assignconvfn(n ir.Node, t *types.Type) ir.Node {
return n
}
- if types.Identical(n.Type(), t) {
+ if n.Op() == ir.OPAREN {
+ n = n.(*ir.ParenExpr).X
+ }
+
+ if types.IdenticalStrict(n.Type(), t) {
return n
}
- op, why := typecheck.Assignop(n.Type(), t)
+ op, why := Assignop(n.Type(), t)
if op == ir.OXXX {
base.Fatalf("found illegal assignment %+v -> %+v; %s", n.Type(), t, why)
}
@@ -472,11 +423,35 @@ func assignconvfn(n ir.Node, t *types.Type) ir.Node {
return r
}
+func Assignop(src, dst *types.Type) (ir.Op, string) {
+ if src == dst {
+ return ir.OCONVNOP, ""
+ }
+ if src == nil || dst == nil || src.Kind() == types.TFORW || dst.Kind() == types.TFORW || src.Underlying() == nil || dst.Underlying() == nil {
+ return ir.OXXX, ""
+ }
+
+ // 1. src type is identical to dst (taking shapes into account)
+ if types.Identical(src, dst) {
+ // We already know from assignconvfn above that IdenticalStrict(src,
+ // dst) is false, so the types are not exactly the same and one of
+ // src or dst is a shape. If dst is an interface (which means src is
+ // an interface too), we need a real OCONVIFACE op; otherwise we need an
+ // OCONVNOP. See issue #48453.
+ if dst.IsInterface() {
+ return ir.OCONVIFACE, ""
+ } else {
+ return ir.OCONVNOP, ""
+ }
+ }
+ return typecheck.Assignop1(src, dst)
+}
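The interesting branch is the Identical-but-not-strictly-identical case: the types differ only by shapes. A tiny sketch of how that resolves (input simplified to a boolean):

// convOpForShapes models the branch above: identical-modulo-shape
// interface types still need a real conversion, because the interface's
// type descriptor must be rebuilt (issue #48453); all other
// identical-modulo-shape types only need a no-op retyping.
func convOpForShapes(dstIsInterface bool) ir.Op {
	if dstIsInterface {
		return ir.OCONVIFACE
	}
	return ir.OCONVNOP
}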
+
// Corresponds to typecheck.typecheckaste.
-func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, convifaceOnly bool) {
+func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes) {
var t *types.Type
var i int
@@ -495,7 +470,7 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i
if isddd {
n = nl[i]
ir.SetPos(n)
- if n.Type() != nil && (!convifaceOnly || t.IsInterface()) {
+ if n.Type() != nil {
nl[i] = assignconvfn(n, t)
}
return
@@ -505,7 +480,7 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i
for ; i < len(nl); i++ {
n = nl[i]
ir.SetPos(n)
- if n.Type() != nil && (!convifaceOnly || t.IsInterface()) {
+ if n.Type() != nil {
nl[i] = assignconvfn(n, t.Elem())
}
}
@@ -514,7 +489,7 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i
n = nl[i]
ir.SetPos(n)
- if n.Type() != nil && (!convifaceOnly || t.IsInterface()) {
+ if n.Type() != nil {
nl[i] = assignconvfn(n, t)
}
i++
@@ -536,7 +511,7 @@ func transformReturn(rs *ir.ReturnStmt) {
return
}
- typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), nl, false)
+ typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), nl)
}
// transformSelect transforms a select node, creating an assignment list as needed
@@ -554,6 +529,7 @@ func transformSelect(sel *ir.SelectStmt) {
}
selrecv.Def = def
selrecv.SetTypecheck(1)
+ selrecv.SetInit(n.Init())
ncase.Comm = selrecv
}
switch n.Op() {
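The added SetInit call preserves init statements attached to a select receive: transformSelect rewrites the receive into an assignment node, and before the fix any init list on the original receive was lost. The kind of source pattern involved:

func recvCase(ch chan int) {
	select {
	// transformSelect rewrites this receive into an assignment form
	// (OSELRECV2); init statements on the receive must be carried
	// over to the new assignment node.
	case x := <-ch:
		_ = x
	}
}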
diff --git a/src/cmd/compile/internal/noder/types.go b/src/cmd/compile/internal/noder/types.go
index 5c9aafe490..03fb96c48b 100644
--- a/src/cmd/compile/internal/noder/types.go
+++ b/src/cmd/compile/internal/noder/types.go
@@ -91,7 +91,7 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
// since that is the only use of a generic type that doesn't
// involve instantiation. We just translate the named type in the
// normal way below using g.obj().
- if typ.TParams() != nil && typ.TArgs() != nil {
+ if typ.TypeParams() != nil && typ.TypeArgs() != nil {
// typ is an instantiation of a defined (named) generic type.
// This instantiation should also be a defined (named) type.
// types2 gives us the substituted type in t.Underlying()
@@ -101,7 +101,7 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
//
// When converted to types.Type, typ has a unique name,
// based on the names of the type arguments.
- instName := g.instTypeName2(typ.Obj().Name(), typ.TArgs())
+ instName := g.instTypeName2(typ.Obj().Name(), typ.TypeArgs())
s := g.pkg(typ.Obj().Pkg()).Lookup(instName)
if s.Def != nil {
// We have already encountered this instantiation.
@@ -135,7 +135,7 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
// non-generic types used to instantiate this type. We'll
// use these when instantiating the methods of the
// instantiated type.
- targs := typ.TArgs()
+ targs := typ.TypeArgs()
rparams := make([]*types.Type, targs.Len())
for i := range rparams {
rparams[i] = g.typ1(targs.At(i))
@@ -272,7 +272,7 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
// instantiated types, and for actually generating the methods for instantiated
// types.
func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
- targs2 := typ.TArgs()
+ targs2 := typ.TypeArgs()
targs := make([]*types.Type, targs2.Len())
for i := range targs {
targs[i] = g.typ1(targs2.At(i))
@@ -296,7 +296,7 @@ func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
// generic type, so we have to do a substitution to get
// the name/type of the method of the instantiated type,
// using m.Type().RecvTypeParams() and typ.TypeArgs()
- inst2 := g.instTypeName2("", typ.TArgs())
+ inst2 := g.instTypeName2("", typ.TypeArgs())
name := meth.Sym().Name
i1 := strings.Index(name, "[")
i2 := strings.Index(name[i1:], "]")
@@ -309,7 +309,7 @@ func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
meth2 = newsym.Def.(*ir.Name)
} else {
meth2 = ir.NewNameAt(meth.Pos(), newsym)
- rparams := types2.AsSignature(m.Type()).RParams()
+ rparams := types2.AsSignature(m.Type()).RecvTypeParams()
tparams := make([]*types.Type, rparams.Len())
for i := range tparams {
tparams[i] = g.typ1(rparams.At(i))
@@ -336,7 +336,7 @@ func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
}
func (g *irgen) signature(recv *types.Field, sig *types2.Signature) *types.Type {
- tparams2 := sig.TParams()
+ tparams2 := sig.TypeParams()
tparams := make([]*types.Field, tparams2.Len())
for i := range tparams {
tp := tparams2.At(i).Obj()
diff --git a/src/cmd/compile/internal/noder/unified.go b/src/cmd/compile/internal/noder/unified.go
index bf63608bf1..3d4650a01f 100644
--- a/src/cmd/compile/internal/noder/unified.go
+++ b/src/cmd/compile/internal/noder/unified.go
@@ -78,12 +78,12 @@ func unified(noders []*noder) {
base.Errorf("cannot use -G and -d=quirksmode together")
}
- newReadImportFunc = func(data string, pkg1 *types.Pkg, check *types2.Checker, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
+ newReadImportFunc = func(data string, pkg1 *types.Pkg, env *types2.Environment, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
pr := newPkgDecoder(pkg1.Path, data)
// Read package descriptors for both types2 and compiler backend.
readPackage(newPkgReader(pr), pkg1)
- pkg2 = readPackage2(check, packages, pr)
+ pkg2 = readPackage2(env, packages, pr)
return
}
@@ -106,7 +106,6 @@ func unified(noders []*noder) {
readPackage(localPkgReader, types.LocalPkg)
r := localPkgReader.newReader(relocMeta, privateRootIdx, syncPrivate)
- r.ext = r
r.pkgInit(types.LocalPkg, target)
// Type-check any top-level assignments. We ignore non-assignments
@@ -137,6 +136,7 @@ func unified(noders []*noder) {
}
}
todoBodies = nil
+ todoBodiesDone = true
// Check that nothing snuck past typechecking.
for _, n := range target.Decls {
@@ -190,7 +190,6 @@ func writePkgStub(noders []*noder) string {
{
w := privateRootWriter
- w.ext = w
w.pkgInit(noders)
w.flush()
}
diff --git a/src/cmd/compile/internal/noder/unified_test.go b/src/cmd/compile/internal/noder/unified_test.go
index 7f0bca2332..d7334df282 100644
--- a/src/cmd/compile/internal/noder/unified_test.go
+++ b/src/cmd/compile/internal/noder/unified_test.go
@@ -16,6 +16,7 @@ import (
)
var (
+ flagCmp = flag.Bool("cmp", false, "enable TestUnifiedCompare")
flagPkgs = flag.String("pkgs", "std", "list of packages to compare (ignored in -short mode)")
flagAll = flag.Bool("all", false, "enable testing of all GOOS/GOARCH targets")
flagParallel = flag.Bool("parallel", false, "test GOOS/GOARCH targets in parallel")
@@ -37,7 +38,12 @@ var (
// command's -run flag for subtest matching is recommended for less
// powerful machines.
func TestUnifiedCompare(t *testing.T) {
- t.Skip("TODO(#48265): this fails on testing/internal/testdeps, possibly due to type aliases. Fix before merging to master.")
+ // TODO(mdempsky): Either re-enable or delete. Disabled for now to
+ // avoid impeding others' forward progress.
+ if !*flagCmp {
+ t.Skip("skipping TestUnifiedCompare (use -cmp to enable)")
+ }
+
targets, err := exec.Command("go", "tool", "dist", "list").Output()
if err != nil {
t.Fatal(err)
diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go
index 1405c77161..6a66bea239 100644
--- a/src/cmd/compile/internal/noder/writer.go
+++ b/src/cmd/compile/internal/noder/writer.go
@@ -75,14 +75,6 @@ type writer struct {
encoder
- // For writing out object descriptions, ext points to the extension
- // writer for where we can write the compiler's private extension
- // details for the object.
- //
- // TODO(mdempsky): This is a little hacky, but works easiest with
- // the way things are currently.
- ext *writer
-
// TODO(mdempsky): We should be able to prune localsIdx whenever a
// scope closes, and then maybe we can just use the same map for
// storing the TypeParams too (as their TypeName instead).
@@ -299,16 +291,16 @@ func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo {
// Type aliases can refer to uninstantiated generic types, so we
// might see len(TypeParams) != 0 && len(TypeArgs) == 0 here.
// TODO(mdempsky): Revisit after #46477 is resolved.
- assert(typ.TParams().Len() == typ.TArgs().Len() || typ.TArgs().Len() == 0)
+ assert(typ.TypeParams().Len() == typ.TypeArgs().Len() || typ.TypeArgs().Len() == 0)
// TODO(mdempsky): Why do we need to loop here?
orig := typ
- for orig.TArgs() != nil {
+ for orig.TypeArgs() != nil {
orig = orig.Orig()
}
w.code(typeNamed)
- w.obj(orig.Obj(), typ.TArgs())
+ w.obj(orig.Obj(), typ.TypeArgs())
case *types2.TypeParam:
index := func() int {
@@ -345,7 +337,7 @@ func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo {
w.typ(typ.Elem())
case *types2.Signature:
- assert(typ.TParams() == nil)
+ assert(typ.TypeParams() == nil)
w.code(typeSignature)
w.signature(typ)
@@ -405,7 +397,7 @@ func (w *writer) interfaceType(typ *types2.Interface) {
for i := 0; i < typ.NumExplicitMethods(); i++ {
m := typ.ExplicitMethod(i)
sig := m.Type().(*types2.Signature)
- assert(sig.TParams() == nil)
+ assert(sig.TypeParams() == nil)
w.pos(m)
w.selector(m)
@@ -504,21 +496,21 @@ func (pw *pkgWriter) objIdx(obj types2.Object) int {
}
w := pw.newWriter(relocObj, syncObject1)
- w.ext = pw.newWriter(relocObjExt, syncObject1)
+ wext := pw.newWriter(relocObjExt, syncObject1)
wname := pw.newWriter(relocName, syncObject1)
wdict := pw.newWriter(relocObjDict, syncObject1)
pw.globalsIdx[obj] = w.idx // break cycles
- assert(w.ext.idx == w.idx)
+ assert(wext.idx == w.idx)
assert(wname.idx == w.idx)
assert(wdict.idx == w.idx)
w.dict = dict
- w.ext.dict = dict
+ wext.dict = dict
- code := w.doObj(obj)
+ code := w.doObj(wext, obj)
w.flush()
- w.ext.flush()
+ wext.flush()
wname.qualifiedIdent(obj)
wname.code(code)
@@ -530,7 +522,7 @@ func (pw *pkgWriter) objIdx(obj types2.Object) int {
return w.idx
}
-func (w *writer) doObj(obj types2.Object) codeObj {
+func (w *writer) doObj(wext *writer, obj types2.Object) codeObj {
if obj.Pkg() != w.p.curpkg {
return objStub
}
@@ -542,7 +534,8 @@ func (w *writer) doObj(obj types2.Object) codeObj {
case *types2.Const:
w.pos(obj)
- w.value(obj.Type(), obj.Val())
+ w.typ(obj.Type())
+ w.value(obj.Val())
return objConst
case *types2.Func:
@@ -551,10 +544,10 @@ func (w *writer) doObj(obj types2.Object) codeObj {
sig := obj.Type().(*types2.Signature)
w.pos(obj)
- w.typeParamNames(sig.TParams())
+ w.typeParamNames(sig.TypeParams())
w.signature(sig)
w.pos(decl)
- w.ext.funcExt(obj)
+ wext.funcExt(obj)
return objFunc
case *types2.TypeName:
@@ -568,16 +561,16 @@ func (w *writer) doObj(obj types2.Object) codeObj {
}
named := obj.Type().(*types2.Named)
- assert(named.TArgs() == nil)
+ assert(named.TypeArgs() == nil)
w.pos(obj)
- w.typeParamNames(named.TParams())
- w.ext.typeExt(obj)
+ w.typeParamNames(named.TypeParams())
+ wext.typeExt(obj)
w.typExpr(decl.Type)
w.len(named.NumMethods())
for i := 0; i < named.NumMethods(); i++ {
- w.method(named.Method(i))
+ w.method(wext, named.Method(i))
}
return objType
@@ -585,7 +578,7 @@ func (w *writer) doObj(obj types2.Object) codeObj {
case *types2.Var:
w.pos(obj)
w.typ(obj.Type())
- w.ext.varExt(obj)
+ wext.varExt(obj)
return objVar
}
}
@@ -598,12 +591,6 @@ func (w *writer) typExpr(expr syntax.Expr) {
w.typ(tv.Type)
}
-func (w *writer) value(typ types2.Type, val constant.Value) {
- w.sync(syncValue)
- w.typ(typ)
- w.rawValue(val)
-}
-
// objDict writes the dictionary needed for reading the given object.
func (w *writer) objDict(obj types2.Object, dict *writerDict) {
// TODO(mdempsky): Split objDict into multiple entries? reader.go
@@ -642,7 +629,7 @@ func (w *writer) objDict(obj types2.Object, dict *writerDict) {
assert(len(dict.funcs) == nfuncs)
}
-func (w *writer) typeParamNames(tparams *types2.TParamList) {
+func (w *writer) typeParamNames(tparams *types2.TypeParamList) {
w.sync(syncTypeParamNames)
ntparams := tparams.Len()
@@ -653,7 +640,7 @@ func (w *writer) typeParamNames(tparams *types2.TParamList) {
}
}
-func (w *writer) method(meth *types2.Func) {
+func (w *writer) method(wext *writer, meth *types2.Func) {
decl, ok := w.p.funDecls[meth]
assert(ok)
sig := meth.Type().(*types2.Signature)
@@ -661,12 +648,12 @@ func (w *writer) method(meth *types2.Func) {
w.sync(syncMethod)
w.pos(meth)
w.selector(meth)
- w.typeParamNames(sig.RParams())
+ w.typeParamNames(sig.RecvTypeParams())
w.param(sig.Recv())
w.signature(sig)
w.pos(decl) // XXX: Hack to work around linker limitations.
- w.ext.funcExt(meth)
+ wext.funcExt(meth)
}
// qualifiedIdent writes out the name of an object declared at package
@@ -1199,7 +1186,8 @@ func (w *writer) expr(expr syntax.Expr) {
w.code(exprConst)
w.pos(pos)
- w.value(tv.Type, tv.Value)
+ w.typ(tv.Type)
+ w.value(tv.Value)
// TODO(mdempsky): These details are only important for backend
// diagnostics. Explore writing them out separately.
@@ -1677,7 +1665,7 @@ func (w *writer) pkgDecl(decl syntax.Decl) {
obj := w.p.info.Defs[decl.Name].(*types2.Func)
sig := obj.Type().(*types2.Signature)
- if sig.RParams() != nil || sig.TParams() != nil {
+ if sig.RecvTypeParams() != nil || sig.TypeParams() != nil {
break // skip generic functions
}
@@ -1711,7 +1699,7 @@ func (w *writer) pkgDecl(decl syntax.Decl) {
// TODO(mdempsky): Revisit after #46477 is resolved.
if name.IsAlias() {
named, ok := name.Type().(*types2.Named)
- if ok && named.TParams().Len() != 0 && named.TArgs().Len() == 0 {
+ if ok && named.TypeParams().Len() != 0 && named.TypeArgs().Len() == 0 {
break
}
}
@@ -1858,17 +1846,17 @@ func fieldIndex(info *types2.Info, str *types2.Struct, key *syntax.Name) int {
}
// objTypeParams returns the type parameters on the given object.
-func objTypeParams(obj types2.Object) *types2.TParamList {
+func objTypeParams(obj types2.Object) *types2.TypeParamList {
switch obj := obj.(type) {
case *types2.Func:
sig := obj.Type().(*types2.Signature)
if sig.Recv() != nil {
- return sig.RParams()
+ return sig.RecvTypeParams()
}
- return sig.TParams()
+ return sig.TypeParams()
case *types2.TypeName:
if !obj.IsAlias() {
- return obj.Type().(*types2.Named).TParams()
+ return obj.Type().(*types2.Named).TypeParams()
}
}
return nil
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index 11226f65a0..e366e06949 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -1829,6 +1829,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
case ssa.OpPPC64CALLstatic:
s.Call(v)
+ case ssa.OpPPC64CALLtail:
+ s.TailCall(v)
+
case ssa.OpPPC64CALLclosure, ssa.OpPPC64CALLinter:
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_REG
@@ -1980,14 +1983,9 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
- case ssa.BlockExit:
+ case ssa.BlockExit, ssa.BlockRetJmp:
case ssa.BlockRet:
s.Prog(obj.ARET)
- case ssa.BlockRetJmp:
- p := s.Prog(obj.AJMP)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = b.Aux.(*obj.LSym)
case ssa.BlockPPC64EQ, ssa.BlockPPC64NE,
ssa.BlockPPC64LT, ssa.BlockPPC64GE,
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index 183ede789e..6dbe3cb455 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -7,7 +7,6 @@ package reflectdata
import (
"encoding/binary"
"fmt"
- "internal/buildcfg"
"os"
"sort"
"strings"
@@ -1869,15 +1868,11 @@ func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSy
// Generate the wrapper as a tail call where possible. The IR now connects
// the arguments with the OTAILCALL node, so this works with the register
// ABI as well.
- if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) && !buildcfg.Experiment.RegabiArgs && !generic {
- // generate tail call: adjust pointer receiver and jump to embedded method.
- left := dot.X // skip final .M
- if !left.Type().IsPtr() {
- left = typecheck.NodAddr(left)
- }
- as := ir.NewAssignStmt(base.Pos, nthis, typecheck.ConvNop(left, rcvr))
- fn.Body.Append(as)
- fn.Body.Append(ir.NewTailCallStmt(base.Pos, method.Nname.(*ir.Name)))
+ if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) && !generic {
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
+ call.Args = ir.ParamNames(tfn.Type())
+ call.IsDDD = tfn.Type().IsVariadic()
+ fn.Body.Append(ir.NewTailCallStmt(base.Pos, call))
} else {
fn.SetWrapper(true) // ignore frame for panic+recover matching
var call *ir.CallExpr
@@ -1921,7 +1916,7 @@ func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSy
// Target method uses shaped names.
targs2 := make([]*types.Type, len(targs))
for i, t := range targs {
- targs2[i] = typecheck.Shapify(t)
+ targs2[i] = typecheck.Shapify(t, i)
}
targs = targs2
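
Design note: ending the wrapper in an OTAILCALL that wraps a real OCALL (forwarding the
wrapper's own parameters via ir.ParamNames) lets the ordinary call lowering marshal the
arguments, register-ABI arguments included, instead of hand-adjusting the receiver and
jumping; that is why the !buildcfg.Experiment.RegabiArgs guard and the buildcfg import
can be dropped above.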
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
index 30b6d96a89..1359b6a0c3 100644
--- a/src/cmd/compile/internal/riscv64/ssa.go
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -272,7 +272,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpRISCV64FADDS, ssa.OpRISCV64FSUBS, ssa.OpRISCV64FMULS, ssa.OpRISCV64FDIVS,
ssa.OpRISCV64FEQS, ssa.OpRISCV64FNES, ssa.OpRISCV64FLTS, ssa.OpRISCV64FLES,
ssa.OpRISCV64FADDD, ssa.OpRISCV64FSUBD, ssa.OpRISCV64FMULD, ssa.OpRISCV64FDIVD,
- ssa.OpRISCV64FEQD, ssa.OpRISCV64FNED, ssa.OpRISCV64FLTD, ssa.OpRISCV64FLED:
+ ssa.OpRISCV64FEQD, ssa.OpRISCV64FNED, ssa.OpRISCV64FLTD, ssa.OpRISCV64FLED,
+ ssa.OpRISCV64FSGNJD:
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
@@ -329,7 +330,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.SetRestArgs([]obj.Addr{{Type: obj.TYPE_REG, Reg: r3}})
p.To.Type = obj.TYPE_REG
p.To.Reg = r
- case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD,
+ case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FABSD, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD,
ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVDX,
ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS,
ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, ssa.OpRISCV64FCVTSD,
@@ -412,6 +413,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpRISCV64CALLstatic, ssa.OpRISCV64CALLclosure, ssa.OpRISCV64CALLinter:
s.Call(v)
+ case ssa.OpRISCV64CALLtail:
+ s.TailCall(v)
case ssa.OpRISCV64LoweredWB:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
@@ -724,14 +727,9 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
- case ssa.BlockExit:
+ case ssa.BlockExit, ssa.BlockRetJmp:
case ssa.BlockRet:
s.Prog(obj.ARET)
- case ssa.BlockRetJmp:
- p := s.Prog(obj.ARET)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = b.Aux.(*obj.LSym)
case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BNEZ,
ssa.BlockRISCV64BLT, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BGEZ,
ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
index ddc05b36ad..deb6c79006 100644
--- a/src/cmd/compile/internal/s390x/ssa.go
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -556,6 +556,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpS390XCALLstatic, ssa.OpS390XCALLclosure, ssa.OpS390XCALLinter:
s.Call(v)
+ case ssa.OpS390XCALLtail:
+ s.TailCall(v)
case ssa.OpS390XLoweredWB:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
@@ -899,17 +901,11 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
s.Br(s390x.ABR, b.Succs[0].Block())
}
return
- case ssa.BlockExit:
+ case ssa.BlockExit, ssa.BlockRetJmp:
return
case ssa.BlockRet:
s.Prog(obj.ARET)
return
- case ssa.BlockRetJmp:
- p := s.Prog(s390x.ABR)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = b.Aux.(*obj.LSym)
- return
}
// Handle s390x-specific blocks. These blocks all have a
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
index 969fd96dbf..28edfd2237 100644
--- a/src/cmd/compile/internal/ssa/check.go
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -66,9 +66,6 @@ func checkFunc(f *Func) {
if !b.Controls[0].Type.IsMemory() {
f.Fatalf("retjmp block %s has non-memory control value %s", b, b.Controls[0].LongString())
}
- if b.Aux == nil {
- f.Fatalf("retjmp block %s has nil Aux field", b)
- }
case BlockPlain:
if len(b.Succs) != 1 {
f.Fatalf("plain block %s len(Succs)==%d, want 1", b, len(b.Succs))
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index cd8eba405d..f87ea5b893 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -10,9 +10,11 @@ import (
"fmt"
"hash/crc32"
"internal/buildcfg"
+ "io"
"log"
"math/rand"
"os"
+ "path/filepath"
"regexp"
"runtime"
"sort"
@@ -59,7 +61,7 @@ func Compile(f *Func) {
printFunc(f)
}
f.HTMLWriter.WritePhase("start", "start")
- if BuildDump != "" && BuildDump == f.Name {
+ if BuildDump[f.Name] {
f.dumpFile("build")
}
if checkEnabled {
@@ -163,25 +165,37 @@ func Compile(f *Func) {
phaseName = ""
}
-// dumpFile creates a file from the phase name and function name
-// Dumping is done to files to avoid buffering huge strings before
-// output.
-func (f *Func) dumpFile(phaseName string) {
+// DumpFileForPhase creates a file from the function name and phase name,
+// warning and returning nil if this is not possible.
+func (f *Func) DumpFileForPhase(phaseName string) io.WriteCloser {
f.dumpFileSeq++
fname := fmt.Sprintf("%s_%02d__%s.dump", f.Name, int(f.dumpFileSeq), phaseName)
fname = strings.Replace(fname, " ", "_", -1)
fname = strings.Replace(fname, "/", "_", -1)
fname = strings.Replace(fname, ":", "_", -1)
+ if ssaDir := os.Getenv("GOSSADIR"); ssaDir != "" {
+ fname = filepath.Join(ssaDir, fname)
+ }
+
fi, err := os.Create(fname)
if err != nil {
f.Warnl(src.NoXPos, "Unable to create after-phase dump file %s", fname)
- return
+ return nil
}
+ return fi
+}
- p := stringFuncPrinter{w: fi}
- fprintFunc(p, f)
- fi.Close()
+// dumpFile creates a file from the phase name and function name.
+// Dumping is done to files to avoid buffering huge strings before
+// output.
+func (f *Func) dumpFile(phaseName string) {
+ fi := f.DumpFileForPhase(phaseName)
+ if fi != nil {
+ p := stringFuncPrinter{w: fi}
+ fprintFunc(p, f)
+ fi.Close()
+ }
}
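
A hypothetical external caller (for example, the assembly-generation dump this change
enables) would follow the same pattern as dumpFile; a hedged sketch, where buf is an
assumed bytes.Buffer holding the rendered output:

    if GenssaDump[f.Name] {
    	if w := f.DumpFileForPhase("genssa"); w != nil {
    		io.Copy(w, &buf) // io is imported above; buf is an assumption
    		w.Close()
    	}
    }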
type pass struct {
@@ -224,7 +238,9 @@ var IntrinsicsDisable bool
var BuildDebug int
var BuildTest int
var BuildStats int
-var BuildDump string // name of function to dump after initial build of ssa
+var BuildDump = make(map[string]bool) // names of functions to dump after initial build of ssa
+
+var GenssaDump = make(map[string]bool) // names of functions to dump after ssa has been converted to asm
// PhaseOption sets the specified flag in the specified ssa phase,
// returning empty string if this was successful or a string explaining
@@ -248,7 +264,7 @@ func PhaseOption(phase, flag string, val int, valString string) string {
switch phase {
case "", "help":
lastcr := 0
- phasenames := " check, all, build, intrinsics"
+ phasenames := " check, all, build, intrinsics, genssa"
for _, p := range passes {
pn := strings.Replace(p.name, " ", "_", -1)
if len(pn)+len(phasenames)-lastcr > 70 {
@@ -278,6 +294,7 @@ where:
Phase "all" supports flags "time", "mem", and "dump".
Phase "intrinsics" supports flags "on", "off", and "debug".
+Phase "genssa" (assembly generation) supports the flag "dump".
If the "dump" flag is specified, the output is written on a file named
<phase>__<function_name>_<seq>.dump; otherwise it is directed to stdout.
@@ -339,10 +356,11 @@ commas. For example:
case "dump":
alldump = val != 0
if alldump {
- BuildDump = valString
+ BuildDump[valString] = true
+ GenssaDump[valString] = true
}
default:
- return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/all/{time,mem,dump=function_name})", flag, phase)
}
}
@@ -355,7 +373,7 @@ commas. For example:
case "debug":
IntrinsicsDebug = val
default:
- return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/intrinsics/{on,off,debug})", flag, phase)
}
return ""
}
@@ -368,9 +386,18 @@ commas. For example:
case "stats":
BuildStats = val
case "dump":
- BuildDump = valString
+ BuildDump[valString] = true
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/build/{debug,test,stats,dump=function_name})", flag, phase)
+ }
+ return ""
+ }
+ if phase == "genssa" {
+ switch flag {
+ case "dump":
+ GenssaDump[valString] = true
default:
- return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/genssa/dump=function_name)", flag, phase)
}
return ""
}
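
For example, compiling with -gcflags='-d=ssa/genssa/dump=sayhi' while GOSSADIR=/tmp is
set leaves the generated-assembly dump in /tmp/sayhi_01__genssa.dump; the new test below
drives exactly this combination.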
diff --git a/src/cmd/compile/internal/ssa/debug_lines_test.go b/src/cmd/compile/internal/ssa/debug_lines_test.go
new file mode 100644
index 0000000000..c5a0fe449c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/debug_lines_test.go
@@ -0,0 +1,213 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "bufio"
+ "bytes"
+ "flag"
+ "runtime"
+ "sort"
+
+ "fmt"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strconv"
+ "testing"
+)
+
+// asmLine matches lines in genssa output that are marked "isstmt"; the
+// parenthesized, plus-prefixed line number is captured as a submatch.
+var asmLine = regexp.MustCompile(`^\s[vb][0-9]+\s+[0-9]+\s\(\+([0-9]+)\)`)
+
+// this matches e.g. ` v123456789 000007 (+9876654310) MOVUPS X15, ""..autotmp_2-32(SP)`
+
+// inlineLine matches lines in genssa output that describe an inlined file (on a
+// Unix filesystem); note it expects an unadventurous choice of basename.
+var inlineLine = regexp.MustCompile(`^#\s/.*/[-a-zA-Z0-9_]+\.go:([0-9]+)`)
+
+// this matches e.g. # /pa/inline-dumpxxxx.go:6
+
+var testGoArchFlag = flag.String("arch", "", "run test for specified architecture")
+
+func testGoArch() string {
+ if *testGoArchFlag == "" {
+ return runtime.GOARCH
+ }
+ return *testGoArchFlag
+}
+
+func TestDebugLines(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("Windows lacks $HOME which complicates workaround for 'missing $GOPATH'") // $HOME needed to work around #43938
+ }
+ // This test is potentially fragile; the goal is that debugging should step properly through "sayhi".
+ // If the blocks are reordered in a way that changes the statement order but execution still flows
+ // correctly, then rearrange the expected numbers. Register-ABI and non-register-ABI builds also have
+ // different sequences, at least for now.
+
+ switch testGoArch() {
+ case "arm64", "amd64": // register ABI
+ testDebugLines(t, "sayhi.go", "sayhi", []int{8, 9, 10, 11})
+
+ case "arm", "386": // probably not register ABI for a while
+ testDebugLines(t, "sayhi.go", "sayhi", []int{9, 10, 11})
+
+ default: // expect ppc64le and riscv will pick up register ABI soonish, not sure about others
+ t.Skip("skipped for many architectures, also changes w/ register ABI")
+ }
+}
+
+func TestInlineLines(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("Windows lacks $HOME which complicates workaround for 'missing $GOPATH'") // $HOME needed to work around #43938
+ }
+ if runtime.GOARCH != "amd64" && *testGoArchFlag == "" {
+ // As of September 2021 this works for everything except mips64, but it is still potentially fragile.
+ t.Skip("only runs for amd64 unless -arch explicitly supplied")
+ }
+
+ want := [][]int{{3}, {4, 10}, {4, 10, 16}, {4, 10}, {4, 11, 16}, {4, 11}, {4}, {5, 10}, {5, 10, 16}, {5, 10}, {5, 11, 16}, {5, 11}, {5}}
+ testInlineStack(t, "inline-dump.go", "f", want)
+}
+
+func compileAndDump(t *testing.T, file, function, moreGCFlags string) []byte {
+ testenv.MustHaveGoBuild(t)
+
+ tmpdir, err := ioutil.TempDir("", "debug_lines_test")
+ if err != nil {
+ panic(fmt.Sprintf("Problem creating TempDir, error %v", err))
+ }
+ if testing.Verbose() {
+ fmt.Printf("Preserving temporary directory %s\n", tmpdir)
+ } else {
+ defer os.RemoveAll(tmpdir)
+ }
+
+ source, err := filepath.Abs(filepath.Join("testdata", file))
+ if err != nil {
+ panic(fmt.Sprintf("Could not get abspath of testdata directory and file, %v", err))
+ }
+
+ cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "foo.o", "-gcflags=-d=ssa/genssa/dump="+function+" "+moreGCFlags, source)
+ cmd.Dir = tmpdir
+ cmd.Env = replaceEnv(cmd.Env, "GOSSADIR", tmpdir)
+ cmd.Env = replaceEnv(cmd.Env, "HOME", os.Getenv("HOME")) // workaround for #43938
+ testGoos := "linux" // default to linux
+ if testGoArch() == "wasm" {
+ testGoos = "js"
+ }
+ cmd.Env = replaceEnv(cmd.Env, "GOOS", testGoos)
+ cmd.Env = replaceEnv(cmd.Env, "GOARCH", testGoArch())
+
+ if testing.Verbose() {
+ fmt.Printf("About to run %s\n", asCommandLine("", cmd))
+ }
+
+ var stdout, stderr bytes.Buffer
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("error running cmd %s: %v\nstdout:\n%sstderr:\n%s\n", asCommandLine("", cmd), err, stdout.String(), stderr.String())
+ }
+
+ if s := stderr.String(); s != "" {
+ t.Fatalf("Wanted empty stderr, instead got:\n%s\n", s)
+ }
+
+ dumpFile := filepath.Join(tmpdir, function+"_01__genssa.dump")
+ dumpBytes, err := os.ReadFile(dumpFile)
+ if err != nil {
+ t.Fatalf("Could not read dump file %s, err=%v", dumpFile, err)
+ }
+ return dumpBytes
+}
+
+func sortInlineStacks(x [][]int) {
+ sort.Slice(x, func(i, j int) bool {
+ if len(x[i]) != len(x[j]) {
+ return len(x[i]) < len(x[j])
+ }
+ for k := range x[i] {
+ if x[i][k] != x[j][k] {
+ return x[i][k] < x[j][k]
+ }
+ }
+ return false
+ })
+}
+
+// testInlineStack ensures that inlining is described properly in the comments in the dump file.
+func testInlineStack(t *testing.T, file, function string, wantStacks [][]int) {
+ // This is an inlining-reporting test, not an optimization test; -N makes it less fragile.
+ dumpBytes := compileAndDump(t, file, function, "-N")
+ dump := bufio.NewScanner(bytes.NewReader(dumpBytes))
+ dumpLineNum := 0
+ var gotStmts []int
+ var gotStacks [][]int
+ for dump.Scan() {
+ line := dump.Text()
+ dumpLineNum++
+ matches := inlineLine.FindStringSubmatch(line)
+ if len(matches) == 2 {
+ stmt, err := strconv.ParseInt(matches[1], 10, 32)
+ if err != nil {
+ t.Fatalf("Expected to parse a line number but saw %s instead on dump line #%d, error %v", matches[1], dumpLineNum, err)
+ }
+ if testing.Verbose() {
+ fmt.Printf("Saw stmt# %d for submatch '%s' on dump line #%d = '%s'\n", stmt, matches[1], dumpLineNum, line)
+ }
+ gotStmts = append(gotStmts, int(stmt))
+ } else if len(gotStmts) > 0 {
+ gotStacks = append(gotStacks, gotStmts)
+ gotStmts = nil
+ }
+ }
+ if len(gotStmts) > 0 {
+ gotStacks = append(gotStacks, gotStmts)
+ gotStmts = nil
+ }
+ sortInlineStacks(gotStacks)
+ sortInlineStacks(wantStacks)
+ if !reflect.DeepEqual(wantStacks, gotStacks) {
+ t.Errorf("wanted inlines %+v but got %+v", wantStacks, gotStacks)
+ }
+}
+
+// testDebugLines compiles testdata/<file> with flags -N -l and -d=ssa/genssa/dump=<function>,
+// then verifies that the statement-marked lines in that file are the same as those in wantStmts.
+// These files must all be short because this is super-fragile.
+// "go build" is run in a temporary directory that is normally deleted, unless -test.v is set.
+func testDebugLines(t *testing.T, file, function string, wantStmts []int) {
+ dumpBytes := compileAndDump(t, file, function, "-N -l")
+ dump := bufio.NewScanner(bytes.NewReader(dumpBytes))
+ var gotStmts []int
+ dumpLineNum := 0
+ for dump.Scan() {
+ line := dump.Text()
+ dumpLineNum++
+ matches := asmLine.FindStringSubmatch(line)
+ if len(matches) == 2 {
+ stmt, err := strconv.ParseInt(matches[1], 10, 32)
+ if err != nil {
+ t.Fatalf("Expected to parse a line number but saw %s instead on dump line #%d, error %v", matches[1], dumpLineNum, err)
+ }
+ if testing.Verbose() {
+ fmt.Printf("Saw stmt# %d for submatch '%s' on dump line #%d = '%s'\n", stmt, matches[1], dumpLineNum, line)
+ }
+ gotStmts = append(gotStmts, int(stmt))
+ }
+ }
+ if !reflect.DeepEqual(wantStmts, gotStmts) {
+ t.Errorf("wanted stmts %v but got %v", wantStmts, gotStmts)
+ }
+}
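
To run these tests for another architecture, pass the flag through to the test binary,
e.g. go test -run TestDebugLines -args -arch=arm64 from this package; without -arch the
tests follow runtime.GOARCH.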
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
index b37d3b8c9c..a0f0e653aa 100644
--- a/src/cmd/compile/internal/ssa/expand_calls.go
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -176,7 +176,7 @@ func (c *registerCursor) hasRegs() bool {
type expandState struct {
f *Func
abi1 *abi.ABIConfig
- debug bool
+ debug int // odd values log lost statement markers, so likely settings are 1 (stmts), 2 (expansion), and 3 (both)
canSSAType func(*types.Type) bool
regSize int64
sp *Value
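
The level is selected with the usual per-phase flag, e.g. -d=ssa/expand_calls/debug=1 to
log only lost statement markers, =2 to trace the expansion itself, or =3 for both.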
@@ -302,7 +302,7 @@ func (x *expandState) Printf(format string, a ...interface{}) (n int, err error)
//
// TODO when registers really arrive, must also decompose anything split across two registers or registers and memory.
func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64, regOffset Abi1RO) []*LocalSlot {
- if x.debug {
+ if x.debug > 1 {
x.indent(3)
defer x.indent(-3)
x.Printf("rewriteSelect(%s; %s; memOff=%d; regOff=%d)\n", leaf.LongString(), selector.LongString(), offset, regOffset)
@@ -325,7 +325,7 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64,
} else {
x.f.Fatalf("Unexpected %s type, selector=%s, leaf=%s\n", selector.Op.String(), selector.LongString(), leaf.LongString())
}
- if x.debug {
+ if x.debug > 1 {
x.Printf("---%s, break\n", selector.Op.String())
}
case OpArg:
@@ -335,7 +335,7 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64,
} else {
x.f.Fatalf("Unexpected OpArg type, selector=%s, leaf=%s\n", selector.LongString(), leaf.LongString())
}
- if x.debug {
+ if x.debug > 1 {
x.Printf("---OpArg, break\n")
}
break
@@ -381,7 +381,7 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64,
// This case removes that StructSelect.
if leafType != selector.Type {
if x.f.Config.SoftFloat && selector.Type.IsFloat() {
- if x.debug {
+ if x.debug > 1 {
x.Printf("---OpLoad, break\n")
}
break // softfloat pass will take care of that
@@ -468,7 +468,7 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64,
} else {
w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call)
leaf.copyOf(w)
- if x.debug {
+ if x.debug > 1 {
x.Printf("---new %s\n", w.LongString())
}
}
@@ -687,7 +687,7 @@ func (x *expandState) decomposeArg(pos src.XPos, b *Block, source, mem *Value, t
panic(fmt.Errorf("offset %d of requested register %d should be zero, source=%s", offs[loadRegOffset], loadRegOffset, source.LongString()))
}
- if x.debug {
+ if x.debug > 1 {
x.Printf("decompose arg %s has %d locs\n", source.LongString(), len(locs))
}
@@ -836,7 +836,7 @@ func (x *expandState) decomposeLoad(pos src.XPos, b *Block, source, mem *Value,
// pos and b locate the store instruction, source is the "base" of the value input,
// mem is the input mem, t is the type in question, and offArg and offStore are the offsets from the respective bases.
func storeOneArg(x *expandState, pos src.XPos, b *Block, locs []*LocalSlot, suffix string, source, mem *Value, t *types.Type, argOffset, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
- if x.debug {
+ if x.debug > 1 {
x.indent(3)
defer x.indent(-3)
x.Printf("storeOneArg(%s; %s; %s; aO=%d; sO=%d; lrO=%d; %s)\n", source.LongString(), mem.String(), t.String(), argOffset, storeOffset, loadRegOffset, storeRc.String())
@@ -877,7 +877,7 @@ func storeTwoLoad(x *expandState, pos src.XPos, b *Block, source, mem *Value, t1
// stores of non-aggregate types. It recursively walks up a chain of selectors until it reaches a Load or an Arg.
// If it does not reach a Load or an Arg, nothing happens; this allows a little freedom in phase ordering.
func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
- if x.debug {
+ if x.debug > 1 {
x.indent(3)
defer x.indent(-3)
x.Printf("storeArgOrLoad(%s; %s; %s; %d; %s)\n", source.LongString(), mem.String(), t.String(), storeOffset, storeRc.String())
@@ -1060,7 +1060,7 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
dst := x.offsetFrom(b, storeRc.storeDest, storeOffset, types.NewPtr(t))
s = b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, source, mem)
}
- if x.debug {
+ if x.debug > 1 {
x.Printf("-->storeArg returns %s, storeRc=%s\n", s.LongString(), storeRc.String())
}
return s
@@ -1071,18 +1071,23 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
// to account for any parameter stores required.
// Any of the old Args that have their use count fall to zero are marked OpInvalid.
func (x *expandState) rewriteArgs(v *Value, firstArg int) {
- if x.debug {
+ if x.debug > 1 {
x.indent(3)
defer x.indent(-3)
x.Printf("rewriteArgs(%s; %d)\n", v.LongString(), firstArg)
}
// Thread the stores on the memory arg
aux := v.Aux.(*AuxCall)
- pos := v.Pos.WithNotStmt()
m0 := v.MemoryArg()
mem := m0
newArgs := []*Value{}
oldArgs := []*Value{}
+ sp := x.sp
+ if v.Op == OpTailLECall {
+ // For a tail call we unwind the frame before the call, so use the
+ // caller's SP.
+ sp = x.f.Entry.NewValue0(src.NoXPos, OpGetCallerSP, x.typs.Uintptr)
+ }
for i, a := range v.Args[firstArg : len(v.Args)-1] { // skip leading non-parameter SSA Args and trailing mem SSA Arg.
oldArgs = append(oldArgs, a)
auxI := int64(i)
@@ -1093,9 +1098,20 @@ func (x *expandState) rewriteArgs(v *Value, firstArg int) {
if a.MemoryArg() != m0 {
x.f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString())
}
+ if v.Op == OpTailLECall {
+ // It's common for a tail call to pass the same arguments it received (e.g. in a
+ // method wrapper), in which case this would be a self copy; detect that and optimize it out.
+ a0 := a.Args[0]
+ if a0.Op == OpLocalAddr {
+ n := a0.Aux.(*ir.Name)
+ if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.FixedFrameSize() == aOffset {
+ continue
+ }
+ }
+ }
// "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
// TODO(register args) this will be more complicated with registers in the picture.
- mem = x.rewriteDereference(v.Block, x.sp, a, mem, aOffset, aux.SizeOfArg(auxI), aType, pos)
+ mem = x.rewriteDereference(v.Block, sp, a, mem, aOffset, aux.SizeOfArg(auxI), aType, a.Pos)
} else {
var rc registerCursor
var result *[]*Value
@@ -1105,11 +1121,19 @@ func (x *expandState) rewriteArgs(v *Value, firstArg int) {
} else {
aOffset = aux.OffsetOfArg(auxI)
}
- if x.debug {
+ if v.Op == OpTailLECall && a.Op == OpArg && a.AuxInt == 0 {
+ // It's common for a tail call to pass the same arguments it received (e.g. in a
+ // method wrapper), in which case this would be a self copy; detect that and optimize it out.
+ n := a.Aux.(*ir.Name)
+ if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.FixedFrameSize() == aOffset {
+ continue
+ }
+ }
+ if x.debug > 1 {
x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset)
}
- rc.init(aRegs, aux.abiInfo, result, x.sp)
- mem = x.storeArgOrLoad(pos, v.Block, a, mem, aType, aOffset, 0, rc)
+ rc.init(aRegs, aux.abiInfo, result, sp)
+ mem = x.storeArgOrLoad(a.Pos, v.Block, a, mem, aType, aOffset, 0, rc)
}
}
var preArgStore [2]*Value
@@ -1120,16 +1144,31 @@ func (x *expandState) rewriteArgs(v *Value, firstArg int) {
v.AddArg(mem)
for _, a := range oldArgs {
if a.Uses == 0 {
- if x.debug {
- x.Printf("...marking %v unused\n", a.LongString())
- }
- a.invalidateRecursively()
+ x.invalidateRecursively(a)
}
}
return
}
+func (x *expandState) invalidateRecursively(a *Value) {
+ var s string
+ if x.debug > 0 {
+ plus := " "
+ if a.Pos.IsStmt() == src.PosIsStmt {
+ plus = " +"
+ }
+ s = a.String() + plus + a.Pos.LineNumber() + " " + a.LongString()
+ if x.debug > 1 {
+ x.Printf("...marking %v unused\n", s)
+ }
+ }
+ lost := a.invalidateRecursively()
+ if x.debug&1 != 0 && lost { // For odd values of x.debug, do this.
+ x.Printf("Lost statement marker in %s on former %s\n", base.Ctxt.Pkgpath+"."+x.f.Name, s)
+ }
+}
+
// expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form
// that is more oriented to a platform's ABI. The SelectN operations that extract results are rewritten into
// more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are
@@ -1148,7 +1187,7 @@ func expandCalls(f *Func) {
x := &expandState{
f: f,
abi1: f.ABI1,
- debug: f.pass.debug > 0,
+ debug: f.pass.debug,
canSSAType: f.fe.CanSSA,
regSize: f.Config.RegSize,
sp: sp,
@@ -1170,7 +1209,7 @@ func expandCalls(f *Func) {
x.loRo, x.hiRo = 0, 1
}
- if x.debug {
+ if x.debug > 1 {
x.Printf("\nexpandsCalls(%s)\n", f.Name)
}
@@ -1193,7 +1232,7 @@ func expandCalls(f *Func) {
for _, v := range b.Values {
firstArg := 0
switch v.Op {
- case OpStaticLECall:
+ case OpStaticLECall, OpTailLECall:
case OpInterLECall:
firstArg = 1
case OpClosureLECall:
@@ -1210,9 +1249,8 @@ func expandCalls(f *Func) {
m0 := v.MemoryArg()
mem := m0
aux := f.OwnAux
- pos := v.Pos.WithNotStmt()
allResults := []*Value{}
- if x.debug {
+ if x.debug > 1 {
x.Printf("multiValueExit rewriting %s\n", v.LongString())
}
var oldArgs []*Value
@@ -1233,7 +1271,7 @@ func expandCalls(f *Func) {
}
continue
}
- mem = x.rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, pos)
+ mem = x.rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, a.Pos)
} else {
if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr {
addr := a.Args[0] // This is a self-move. // TODO(register args) do what here for registers?
@@ -1257,13 +1295,13 @@ func expandCalls(f *Func) {
b.SetControl(v)
for _, a := range oldArgs {
if a.Uses == 0 {
- if x.debug {
+ if x.debug > 1 {
x.Printf("...marking %v unused\n", a.LongString())
}
- a.invalidateRecursively()
+ x.invalidateRecursively(a)
}
}
- if x.debug {
+ if x.debug > 1 {
x.Printf("...multiValueExit new result %s\n", v.LongString())
}
x.indent(-3)
@@ -1317,7 +1355,7 @@ func expandCalls(f *Func) {
switch w.Op {
case OpStructSelect, OpArraySelect, OpSelectN, OpArg:
val2Preds[w] += 1
- if x.debug {
+ if x.debug > 1 {
x.Printf("v2p[%s] = %d\n", w.LongString(), val2Preds[w])
}
}
@@ -1326,7 +1364,7 @@ func expandCalls(f *Func) {
case OpSelectN:
if _, ok := val2Preds[v]; !ok {
val2Preds[v] = 0
- if x.debug {
+ if x.debug > 1 {
x.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
}
}
@@ -1337,7 +1375,7 @@ func expandCalls(f *Func) {
}
if _, ok := val2Preds[v]; !ok {
val2Preds[v] = 0
- if x.debug {
+ if x.debug > 1 {
x.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
}
}
@@ -1451,7 +1489,7 @@ func expandCalls(f *Func) {
if dupe == nil {
x.commonSelectors[sk] = v
} else if x.sdom.IsAncestorEq(dupe.Block, v.Block) {
- if x.debug {
+ if x.debug > 1 {
x.Printf("Duplicate, make %s copy of %s\n", v, dupe)
}
v.copyOf(dupe)
@@ -1467,12 +1505,12 @@ func expandCalls(f *Func) {
// Rewrite selectors.
for i, v := range allOrdered {
- if x.debug {
+ if x.debug > 1 {
b := v.Block
x.Printf("allOrdered[%d] = b%d, %s, uses=%d\n", i, b.ID, v.LongString(), v.Uses)
}
if v.Uses == 0 {
- v.invalidateRecursively()
+ x.invalidateRecursively(v)
continue
}
if v.Op == OpCopy {
@@ -1512,6 +1550,10 @@ func expandCalls(f *Func) {
v.Op = OpStaticCall
rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
v.Type = types.NewResults(append(rts, types.TypeMem))
+ case OpTailLECall:
+ v.Op = OpTailCall
+ rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
+ v.Type = types.NewResults(append(rts, types.TypeMem))
case OpClosureLECall:
v.Op = OpClosureCall
rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
@@ -1583,7 +1625,7 @@ func expandCalls(f *Func) {
v.SetArg(i, aa)
for a.Uses == 0 {
b := a.Args[0]
- a.invalidateRecursively()
+ x.invalidateRecursively(a)
a = b
}
}
@@ -1619,7 +1661,7 @@ func expandCalls(f *Func) {
// rewriteArgToMemOrRegs converts OpArg v in-place into the register version of v,
// if that is appropriate.
func (x *expandState) rewriteArgToMemOrRegs(v *Value) *Value {
- if x.debug {
+ if x.debug > 1 {
x.indent(3)
defer x.indent(-3)
x.Printf("rewriteArgToMemOrRegs(%s)\n", v.LongString())
@@ -1650,7 +1692,7 @@ func (x *expandState) rewriteArgToMemOrRegs(v *Value) *Value {
default:
panic(badVal("Saw unexpanded OpArg", v))
}
- if x.debug {
+ if x.debug > 1 {
x.Printf("-->%s\n", v.LongString())
}
return v
@@ -1660,7 +1702,7 @@ func (x *expandState) rewriteArgToMemOrRegs(v *Value) *Value {
// or rewrites it into a copy of the appropriate OpArgXXX. The actual OpArgXXX is determined by combining baseArg (an OpArg)
// with offset, regOffset, and t to determine which portion of it to reference (either all or a part, in memory or in registers).
func (x *expandState) newArgToMemOrRegs(baseArg, toReplace *Value, offset int64, regOffset Abi1RO, t *types.Type, pos src.XPos) *Value {
- if x.debug {
+ if x.debug > 1 {
x.indent(3)
defer x.indent(-3)
x.Printf("newArgToMemOrRegs(base=%s; toReplace=%s; t=%s; memOff=%d; regOff=%d)\n", baseArg.String(), toReplace.LongString(), t.String(), offset, regOffset)
@@ -1696,7 +1738,7 @@ func (x *expandState) newArgToMemOrRegs(baseArg, toReplace *Value, offset int64,
if toReplace != nil {
toReplace.copyOf(w)
}
- if x.debug {
+ if x.debug > 1 {
x.Printf("-->%s\n", w.LongString())
}
return w
@@ -1727,7 +1769,7 @@ func (x *expandState) newArgToMemOrRegs(baseArg, toReplace *Value, offset int64,
if toReplace != nil {
toReplace.copyOf(w)
}
- if x.debug {
+ if x.debug > 1 {
x.Printf("-->%s\n", w.LongString())
}
return w
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index fac876c23e..7728a395e0 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -43,7 +43,7 @@ type Func struct {
logfiles map[string]writeSyncer
HTMLWriter *HTMLWriter // html writer, for debugging
DebugTest bool // default true unless $GOSSAHASH != ""; as a debugging aid, make new code conditional on this and use GOSSAHASH to binary search for failing cases
- PrintOrHtmlSSA bool // true if GOSSAFUNC matches, true even if fe.Log() (spew phase results to stdout) is false.
+ PrintOrHtmlSSA bool // true if GOSSAFUNC matches, even if fe.Log() (spew phase results to stdout) is false; debug.go's logf method has an odd dependence on this.
ruleMatches map[string]int // number of times countRule was called during compilation for any given string
ABI0 *abi.ABIConfig // A copy, for no-sync access
ABI1 *abi.ABIConfig // A copy, for no-sync access
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
index 199b73c42f..7bdebedafe 100644
--- a/src/cmd/compile/internal/ssa/gen/386.rules
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -317,6 +317,7 @@
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
// Miscellaneous
(IsNonNil p) => (SETNE (TESTL p p))
diff --git a/src/cmd/compile/internal/ssa/gen/386Ops.go b/src/cmd/compile/internal/ssa/gen/386Ops.go
index 91f33c8374..3512d60865 100644
--- a/src/cmd/compile/internal/ssa/gen/386Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/386Ops.go
@@ -455,6 +455,7 @@ func init() {
},
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 5b127c98e7..bfed3bc7fd 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -408,6 +408,7 @@
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
// Lowering conditional moves
// If the condition is a SETxx, we can just run a CMOV from the comparison that was
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index 52ea7ac5e0..51cbf5f78a 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -765,6 +765,7 @@ func init() {
// With a register ABI, the actual register info for these instructions (i.e., what is used in regalloc) is augmented with per-call-site bindings of additional arguments to specific in and out registers.
{name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index bcacbafe3a..2bc58a3c47 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -351,6 +351,7 @@
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
// checks
(NilCheck ...) => (LoweredNilCheck ...)
@@ -497,9 +498,9 @@
(XOR x (MOVWconst [c])) => (XORconst [c] x)
(BIC x (MOVWconst [c])) => (BICconst [c] x)
-(SLL x (MOVWconst [c])) => (SLLconst x [c&31]) // Note: I don't think we ever generate bad constant shifts (i.e. c>=32)
-(SRL x (MOVWconst [c])) => (SRLconst x [c&31])
-(SRA x (MOVWconst [c])) => (SRAconst x [c&31])
+(SLL x (MOVWconst [c])) && 0 <= c && c < 32 => (SLLconst x [c])
+(SRL x (MOVWconst [c])) && 0 <= c && c < 32 => (SRLconst x [c])
+(SRA x (MOVWconst [c])) && 0 <= c && c < 32 => (SRAconst x [c])
(CMP x (MOVWconst [c])) => (CMPconst [c] x)
(CMP (MOVWconst [c]) x) => (InvertFlags (CMPconst [c] x))
@@ -507,6 +508,8 @@
(TST x (MOVWconst [c])) => (TSTconst [c] x)
(TEQ x (MOVWconst [c])) => (TEQconst [c] x)
+(SRR x (MOVWconst [c])) => (SRRconst x [c&31])
+
// Canonicalize the order of arguments to comparisons - helps with CSE.
(CMP x y) && canonLessThan(x,y) => (InvertFlags (CMP y x))
@@ -1072,60 +1075,60 @@
(CMNshiftRL x (MOVWconst [c]) [d]) => (CMNconst x [int32(uint32(c)>>uint64(d))])
(CMNshiftRA x (MOVWconst [c]) [d]) => (CMNconst x [c>>uint64(d)])
-(ADDshiftLLreg x y (MOVWconst [c])) => (ADDshiftLL x y [c])
-(ADDshiftRLreg x y (MOVWconst [c])) => (ADDshiftRL x y [c])
-(ADDshiftRAreg x y (MOVWconst [c])) => (ADDshiftRA x y [c])
-(ADCshiftLLreg x y (MOVWconst [c]) flags) => (ADCshiftLL x y [c] flags)
-(ADCshiftRLreg x y (MOVWconst [c]) flags) => (ADCshiftRL x y [c] flags)
-(ADCshiftRAreg x y (MOVWconst [c]) flags) => (ADCshiftRA x y [c] flags)
-(ADDSshiftLLreg x y (MOVWconst [c])) => (ADDSshiftLL x y [c])
-(ADDSshiftRLreg x y (MOVWconst [c])) => (ADDSshiftRL x y [c])
-(ADDSshiftRAreg x y (MOVWconst [c])) => (ADDSshiftRA x y [c])
-(SUBshiftLLreg x y (MOVWconst [c])) => (SUBshiftLL x y [c])
-(SUBshiftRLreg x y (MOVWconst [c])) => (SUBshiftRL x y [c])
-(SUBshiftRAreg x y (MOVWconst [c])) => (SUBshiftRA x y [c])
-(SBCshiftLLreg x y (MOVWconst [c]) flags) => (SBCshiftLL x y [c] flags)
-(SBCshiftRLreg x y (MOVWconst [c]) flags) => (SBCshiftRL x y [c] flags)
-(SBCshiftRAreg x y (MOVWconst [c]) flags) => (SBCshiftRA x y [c] flags)
-(SUBSshiftLLreg x y (MOVWconst [c])) => (SUBSshiftLL x y [c])
-(SUBSshiftRLreg x y (MOVWconst [c])) => (SUBSshiftRL x y [c])
-(SUBSshiftRAreg x y (MOVWconst [c])) => (SUBSshiftRA x y [c])
-(RSBshiftLLreg x y (MOVWconst [c])) => (RSBshiftLL x y [c])
-(RSBshiftRLreg x y (MOVWconst [c])) => (RSBshiftRL x y [c])
-(RSBshiftRAreg x y (MOVWconst [c])) => (RSBshiftRA x y [c])
-(RSCshiftLLreg x y (MOVWconst [c]) flags) => (RSCshiftLL x y [c] flags)
-(RSCshiftRLreg x y (MOVWconst [c]) flags) => (RSCshiftRL x y [c] flags)
-(RSCshiftRAreg x y (MOVWconst [c]) flags) => (RSCshiftRA x y [c] flags)
-(RSBSshiftLLreg x y (MOVWconst [c])) => (RSBSshiftLL x y [c])
-(RSBSshiftRLreg x y (MOVWconst [c])) => (RSBSshiftRL x y [c])
-(RSBSshiftRAreg x y (MOVWconst [c])) => (RSBSshiftRA x y [c])
-(ANDshiftLLreg x y (MOVWconst [c])) => (ANDshiftLL x y [c])
-(ANDshiftRLreg x y (MOVWconst [c])) => (ANDshiftRL x y [c])
-(ANDshiftRAreg x y (MOVWconst [c])) => (ANDshiftRA x y [c])
-(ORshiftLLreg x y (MOVWconst [c])) => (ORshiftLL x y [c])
-(ORshiftRLreg x y (MOVWconst [c])) => (ORshiftRL x y [c])
-(ORshiftRAreg x y (MOVWconst [c])) => (ORshiftRA x y [c])
-(XORshiftLLreg x y (MOVWconst [c])) => (XORshiftLL x y [c])
-(XORshiftRLreg x y (MOVWconst [c])) => (XORshiftRL x y [c])
-(XORshiftRAreg x y (MOVWconst [c])) => (XORshiftRA x y [c])
-(BICshiftLLreg x y (MOVWconst [c])) => (BICshiftLL x y [c])
-(BICshiftRLreg x y (MOVWconst [c])) => (BICshiftRL x y [c])
-(BICshiftRAreg x y (MOVWconst [c])) => (BICshiftRA x y [c])
-(MVNshiftLLreg x (MOVWconst [c])) => (MVNshiftLL x [c])
-(MVNshiftRLreg x (MOVWconst [c])) => (MVNshiftRL x [c])
-(MVNshiftRAreg x (MOVWconst [c])) => (MVNshiftRA x [c])
-(CMPshiftLLreg x y (MOVWconst [c])) => (CMPshiftLL x y [c])
-(CMPshiftRLreg x y (MOVWconst [c])) => (CMPshiftRL x y [c])
-(CMPshiftRAreg x y (MOVWconst [c])) => (CMPshiftRA x y [c])
-(TSTshiftLLreg x y (MOVWconst [c])) => (TSTshiftLL x y [c])
-(TSTshiftRLreg x y (MOVWconst [c])) => (TSTshiftRL x y [c])
-(TSTshiftRAreg x y (MOVWconst [c])) => (TSTshiftRA x y [c])
-(TEQshiftLLreg x y (MOVWconst [c])) => (TEQshiftLL x y [c])
-(TEQshiftRLreg x y (MOVWconst [c])) => (TEQshiftRL x y [c])
-(TEQshiftRAreg x y (MOVWconst [c])) => (TEQshiftRA x y [c])
-(CMNshiftLLreg x y (MOVWconst [c])) => (CMNshiftLL x y [c])
-(CMNshiftRLreg x y (MOVWconst [c])) => (CMNshiftRL x y [c])
-(CMNshiftRAreg x y (MOVWconst [c])) => (CMNshiftRA x y [c])
+(ADDshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftLL x y [c])
+(ADDshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftRL x y [c])
+(ADDshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftRA x y [c])
+(ADCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftLL x y [c] flags)
+(ADCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftRL x y [c] flags)
+(ADCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftRA x y [c] flags)
+(ADDSshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftLL x y [c])
+(ADDSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftRL x y [c])
+(ADDSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftRA x y [c])
+(SUBshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftLL x y [c])
+(SUBshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftRL x y [c])
+(SUBshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftRA x y [c])
+(SBCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftLL x y [c] flags)
+(SBCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftRL x y [c] flags)
+(SBCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftRA x y [c] flags)
+(SUBSshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftLL x y [c])
+(SUBSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftRL x y [c])
+(SUBSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftRA x y [c])
+(RSBshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftLL x y [c])
+(RSBshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftRL x y [c])
+(RSBshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftRA x y [c])
+(RSCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftLL x y [c] flags)
+(RSCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftRL x y [c] flags)
+(RSCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftRA x y [c] flags)
+(RSBSshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftLL x y [c])
+(RSBSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftRL x y [c])
+(RSBSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftRA x y [c])
+(ANDshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftLL x y [c])
+(ANDshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftRL x y [c])
+(ANDshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftRA x y [c])
+(ORshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftLL x y [c])
+(ORshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftRL x y [c])
+(ORshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftRA x y [c])
+(XORshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftLL x y [c])
+(XORshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftRL x y [c])
+(XORshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftRA x y [c])
+(BICshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftLL x y [c])
+(BICshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftRL x y [c])
+(BICshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftRA x y [c])
+(MVNshiftLLreg x (MOVWconst [c])) && 0 <= c && c < 32 => (MVNshiftLL x [c])
+(MVNshiftRLreg x (MOVWconst [c])) && 0 <= c && c < 32 => (MVNshiftRL x [c])
+(MVNshiftRAreg x (MOVWconst [c])) && 0 <= c && c < 32 => (MVNshiftRA x [c])
+(CMPshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftLL x y [c])
+(CMPshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftRL x y [c])
+(CMPshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftRA x y [c])
+(TSTshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftLL x y [c])
+(TSTshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftRL x y [c])
+(TSTshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftRA x y [c])
+(TEQshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftLL x y [c])
+(TEQshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftRL x y [c])
+(TEQshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftRA x y [c])
+(CMNshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftLL x y [c])
+(CMNshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftRL x y [c])
+(CMNshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftRA x y [c])
// Generate rotates
(ADDshiftLL [c] (SRLconst x [32-c]) x) => (SRRconst [32-c] x)
@@ -1135,7 +1138,6 @@
( ORshiftRL [c] (SLLconst x [32-c]) x) => (SRRconst [ c] x)
(XORshiftRL [c] (SLLconst x [32-c]) x) => (SRRconst [ c] x)
-(RotateLeft32 x (MOVWconst [c])) => (SRRconst [-c&31] x)
(RotateLeft16 <t> x (MOVWconst [c])) => (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
(RotateLeft8 <t> x (MOVWconst [c])) => (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
(RotateLeft32 x y) => (SRR x (RSBconst [0] <y.Type> y))
@@ -1237,24 +1239,24 @@
(AND x (MVN y)) => (BIC x y)
// simplification with *shift ops
-(SUBshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVWconst [0])
-(SUBshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVWconst [0])
-(SUBshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVWconst [0])
-(RSBshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVWconst [0])
-(RSBshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVWconst [0])
-(RSBshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVWconst [0])
-(ANDshiftLL x y:(SLLconst x [c]) [d]) && c==d => y
-(ANDshiftRL x y:(SRLconst x [c]) [d]) && c==d => y
-(ANDshiftRA x y:(SRAconst x [c]) [d]) && c==d => y
-(ORshiftLL x y:(SLLconst x [c]) [d]) && c==d => y
-(ORshiftRL x y:(SRLconst x [c]) [d]) && c==d => y
-(ORshiftRA x y:(SRAconst x [c]) [d]) && c==d => y
-(XORshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVWconst [0])
-(XORshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVWconst [0])
-(XORshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVWconst [0])
-(BICshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVWconst [0])
-(BICshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVWconst [0])
-(BICshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVWconst [0])
+(SUBshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(SUBshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(SUBshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(RSBshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(RSBshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(RSBshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(ANDshiftLL y:(SLLconst x [c]) x [c]) => y
+(ANDshiftRL y:(SRLconst x [c]) x [c]) => y
+(ANDshiftRA y:(SRAconst x [c]) x [c]) => y
+(ORshiftLL y:(SLLconst x [c]) x [c]) => y
+(ORshiftRL y:(SRLconst x [c]) x [c]) => y
+(ORshiftRA y:(SRAconst x [c]) x [c]) => y
+(XORshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(XORshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(XORshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(BICshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(BICshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(BICshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
(AND x (MVNshiftLL y [c])) => (BICshiftLL x y [c])
(AND x (MVNshiftRL y [c])) => (BICshiftRL x y [c])
(AND x (MVNshiftRA y [c])) => (BICshiftRA x y [c])
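
The point of the new 0 <= c && c < 32 guards, in Go terms (a hedged illustration, not
code from this change): Go gives over-wide constant shifts a defined result, while ARM's
5-bit shift-immediate field cannot encode them, so masking the constant (the old c&31)
would compute the wrong value:

    func demo(x uint32) (goResult, wrongFold uint32) {
    	// Go defines x << 40 as 0 for a 32-bit x, but the old rule
    	// (SLLconst x [40&31]) would compute x << 8 instead.
    	return x << 40, x << (40 & 31)
    }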
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index ca9d4a4f01..4b66883f26 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -503,6 +503,7 @@
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
// checks
(NilCheck ...) => (LoweredNilCheck ...)
@@ -1174,6 +1175,9 @@
(CMPW x (MOVDconst [c])) => (CMPWconst [int32(c)] x)
(CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x))
+(ROR x (MOVDconst [c])) => (RORconst x [c&63])
+(RORW x (MOVDconst [c])) => (RORWconst x [c&31])
+
// Canonicalize the order of arguments to comparisons - helps with CSE.
((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x))
@@ -1359,6 +1363,7 @@
(XOR x (MVN y)) => (EON x y)
(OR x (MVN y)) => (ORN x y)
(MVN (XOR x y)) => (EON x y)
+(NEG (NEG x)) => x
(CSEL [cc] (MOVDconst [-1]) (MOVDconst [0]) flag) => (CSETM [cc] flag)
(CSEL [cc] (MOVDconst [0]) (MOVDconst [-1]) flag) => (CSETM [arm64Negate(cc)] flag)
@@ -1596,6 +1601,7 @@
(MVN x:(SLLconst [c] y)) && clobberIfDead(x) => (MVNshiftLL [c] y)
(MVN x:(SRLconst [c] y)) && clobberIfDead(x) => (MVNshiftRL [c] y)
(MVN x:(SRAconst [c] y)) && clobberIfDead(x) => (MVNshiftRA [c] y)
+(MVN x:(RORconst [c] y)) && clobberIfDead(x) => (MVNshiftRO [c] y)
(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ADDshiftLL x0 y [c])
(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ADDshiftRL x0 y [c])
(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ADDshiftRA x0 y [c])
@@ -1605,21 +1611,27 @@
(AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ANDshiftLL x0 y [c])
(AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ANDshiftRL x0 y [c])
(AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ANDshiftRA x0 y [c])
+(AND x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ANDshiftRO x0 y [c])
(OR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORshiftLL x0 y [c]) // useful for combined load
(OR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORshiftRL x0 y [c])
(OR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORshiftRA x0 y [c])
+(OR x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORshiftRO x0 y [c])
(XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (XORshiftLL x0 y [c])
(XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (XORshiftRL x0 y [c])
(XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (XORshiftRA x0 y [c])
+(XOR x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (XORshiftRO x0 y [c])
(BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (BICshiftLL x0 y [c])
(BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (BICshiftRL x0 y [c])
(BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (BICshiftRA x0 y [c])
+(BIC x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (BICshiftRO x0 y [c])
(ORN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORNshiftLL x0 y [c])
(ORN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORNshiftRL x0 y [c])
(ORN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORNshiftRA x0 y [c])
+(ORN x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORNshiftRO x0 y [c])
(EON x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (EONshiftLL x0 y [c])
(EON x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (EONshiftRL x0 y [c])
(EON x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (EONshiftRA x0 y [c])
+(EON x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (EONshiftRO x0 y [c])
(CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMPshiftLL x0 y [c])
(CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftLL x1 y [c]))
(CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMPshiftRL x0 y [c])
@@ -1632,6 +1644,7 @@
(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (TSTshiftLL x0 y [c])
(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (TSTshiftRL x0 y [c])
(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (TSTshiftRA x0 y [c])
+(TST x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (TSTshiftRO x0 y [c])
// prefer *const ops to *shift ops
(ADDshiftLL (MOVDconst [c]) x [d]) => (ADDconst [c] (SLLconst <x.Type> x [d]))
@@ -1640,12 +1653,15 @@
(ANDshiftLL (MOVDconst [c]) x [d]) => (ANDconst [c] (SLLconst <x.Type> x [d]))
(ANDshiftRL (MOVDconst [c]) x [d]) => (ANDconst [c] (SRLconst <x.Type> x [d]))
(ANDshiftRA (MOVDconst [c]) x [d]) => (ANDconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftRO (MOVDconst [c]) x [d]) => (ANDconst [c] (RORconst <x.Type> x [d]))
(ORshiftLL (MOVDconst [c]) x [d]) => (ORconst [c] (SLLconst <x.Type> x [d]))
(ORshiftRL (MOVDconst [c]) x [d]) => (ORconst [c] (SRLconst <x.Type> x [d]))
(ORshiftRA (MOVDconst [c]) x [d]) => (ORconst [c] (SRAconst <x.Type> x [d]))
+(ORshiftRO (MOVDconst [c]) x [d]) => (ORconst [c] (RORconst <x.Type> x [d]))
(XORshiftLL (MOVDconst [c]) x [d]) => (XORconst [c] (SLLconst <x.Type> x [d]))
(XORshiftRL (MOVDconst [c]) x [d]) => (XORconst [c] (SRLconst <x.Type> x [d]))
(XORshiftRA (MOVDconst [c]) x [d]) => (XORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftRO (MOVDconst [c]) x [d]) => (XORconst [c] (RORconst <x.Type> x [d]))
(CMPshiftLL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
(CMPshiftRL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
(CMPshiftRA (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
@@ -1655,11 +1671,13 @@
(TSTshiftLL (MOVDconst [c]) x [d]) => (TSTconst [c] (SLLconst <x.Type> x [d]))
(TSTshiftRL (MOVDconst [c]) x [d]) => (TSTconst [c] (SRLconst <x.Type> x [d]))
(TSTshiftRA (MOVDconst [c]) x [d]) => (TSTconst [c] (SRAconst <x.Type> x [d]))
+(TSTshiftRO (MOVDconst [c]) x [d]) => (TSTconst [c] (RORconst <x.Type> x [d]))
// constant folding in *shift ops
(MVNshiftLL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)<<uint64(d))])
(MVNshiftRL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)>>uint64(d))])
(MVNshiftRA (MOVDconst [c]) [d]) => (MOVDconst [^(c>>uint64(d))])
+(MVNshiftRO (MOVDconst [c]) [d]) => (MOVDconst [^rotateRight64(c, d)])
(NEGshiftLL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)<<uint64(d))])
(NEGshiftRL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)>>uint64(d))])
(NEGshiftRA (MOVDconst [c]) [d]) => (MOVDconst [-(c>>uint64(d))])
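
The RO foldings go through a rotateRight64 helper in the rewrite support code; it is not
part of this diff, but its obvious shape (an assumption, built on math/bits) is:

    func rotateRight64(v, rotate int64) int64 {
    	return int64(bits.RotateLeft64(uint64(v), int(-rotate)))
    }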
@@ -1672,21 +1690,27 @@
(ANDshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)<<uint64(d))])
(ANDshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)>>uint64(d))])
(ANDshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [c>>uint64(d)])
+(ANDshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [rotateRight64(c, d)])
(ORshiftLL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)<<uint64(d))])
(ORshiftRL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)>>uint64(d))])
(ORshiftRA x (MOVDconst [c]) [d]) => (ORconst x [c>>uint64(d)])
+(ORshiftRO x (MOVDconst [c]) [d]) => (ORconst x [rotateRight64(c, d)])
(XORshiftLL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)<<uint64(d))])
(XORshiftRL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)>>uint64(d))])
(XORshiftRA x (MOVDconst [c]) [d]) => (XORconst x [c>>uint64(d)])
+(XORshiftRO x (MOVDconst [c]) [d]) => (XORconst x [rotateRight64(c, d)])
(BICshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)<<uint64(d))])
(BICshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)>>uint64(d))])
(BICshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [^(c>>uint64(d))])
+(BICshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [^rotateRight64(c, d)])
(ORNshiftLL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)<<uint64(d))])
(ORNshiftRL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)>>uint64(d))])
(ORNshiftRA x (MOVDconst [c]) [d]) => (ORconst x [^(c>>uint64(d))])
+(ORNshiftRO x (MOVDconst [c]) [d]) => (ORconst x [^rotateRight64(c, d)])
(EONshiftLL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)<<uint64(d))])
(EONshiftRL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)>>uint64(d))])
(EONshiftRA x (MOVDconst [c]) [d]) => (XORconst x [^(c>>uint64(d))])
+(EONshiftRO x (MOVDconst [c]) [d]) => (XORconst x [^rotateRight64(c, d)])
(CMPshiftLL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)<<uint64(d))])
(CMPshiftRL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)>>uint64(d))])
(CMPshiftRA x (MOVDconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
@@ -1696,29 +1720,36 @@
(TSTshiftLL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)<<uint64(d))])
(TSTshiftRL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)>>uint64(d))])
(TSTshiftRA x (MOVDconst [c]) [d]) => (TSTconst x [c>>uint64(d)])
+(TSTshiftRO x (MOVDconst [c]) [d]) => (TSTconst x [rotateRight64(c, d)])
// simplification with *shift ops
-(SUBshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [0])
-(SUBshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [0])
-(SUBshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [0])
-(ANDshiftLL x y:(SLLconst x [c]) [d]) && c==d => y
-(ANDshiftRL x y:(SRLconst x [c]) [d]) && c==d => y
-(ANDshiftRA x y:(SRAconst x [c]) [d]) && c==d => y
-(ORshiftLL x y:(SLLconst x [c]) [d]) && c==d => y
-(ORshiftRL x y:(SRLconst x [c]) [d]) && c==d => y
-(ORshiftRA x y:(SRAconst x [c]) [d]) && c==d => y
-(XORshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [0])
-(XORshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [0])
-(XORshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [0])
-(BICshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [0])
-(BICshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [0])
-(BICshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [0])
-(EONshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [-1])
-(EONshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [-1])
-(EONshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [-1])
-(ORNshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [-1])
-(ORNshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [-1])
-(ORNshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [-1])
+(SUBshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(SUBshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(SUBshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(ANDshiftLL y:(SLLconst x [c]) x [c]) => y
+(ANDshiftRL y:(SRLconst x [c]) x [c]) => y
+(ANDshiftRA y:(SRAconst x [c]) x [c]) => y
+(ANDshiftRO y:(RORconst x [c]) x [c]) => y
+(ORshiftLL y:(SLLconst x [c]) x [c]) => y
+(ORshiftRL y:(SRLconst x [c]) x [c]) => y
+(ORshiftRA y:(SRAconst x [c]) x [c]) => y
+(ORshiftRO y:(RORconst x [c]) x [c]) => y
+(XORshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(XORshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(XORshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(XORshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0])
+(EONshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
+(EONshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
+(EONshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
+(EONshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])
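
The operand order in the rewritten rules above is what makes them sound: SUBshiftLL computes arg0 - (arg1 << auxInt), so the pre-shifted value must appear as arg0 for the subtraction to cancel. A minimal standalone Go check of the identity (illustrative only):

    package main

    import "fmt"

    func main() {
        x, c := int64(0x1234), uint(3)
        // New pattern (SUBshiftLL (SLLconst x [c]) x [c]): (x<<c) - (x<<c) == 0.
        fmt.Println((x<<c)-(x<<c) == 0) // true
        // The old pattern matched x - ((x<<c)<<c) instead, which is not zero in general.
        fmt.Println(x-((x<<c)<<c) == 0) // false
    }
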
// Generate rotates with const shift
(ADDshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
@@ -1824,6 +1855,7 @@
// sbfiz
// (x << lc) >> rc
(SRAconst [rc] (SLLconst [lc] x)) && lc > rc => (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+// int64(x << lc)
(MOVWreg (SLLconst [lc] x)) && lc < 32 => (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
(MOVHreg (SLLconst [lc] x)) && lc < 16 => (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
(MOVBreg (SLLconst [lc] x)) && lc < 8 => (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
@@ -1835,6 +1867,7 @@
// sbfx
// (x << lc) >> rc
(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc => (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+// int64(x) >> rc
(SRAconst [rc] (MOVWreg x)) && rc < 32 => (SBFX [armBFAuxInt(rc, 32-rc)] x)
(SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
(SRAconst [rc] (MOVBreg x)) && rc < 8 => (SBFX [armBFAuxInt(rc, 8-rc)] x)
@@ -1851,42 +1884,43 @@
=> (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
// ubfiz
+// (x << lc) >> rc
+(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+// uint64(x) << lc
+(SLLconst [lc] (MOVWUreg x)) => (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
+(SLLconst [lc] (MOVHUreg x)) => (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
+(SLLconst [lc] (MOVBUreg x)) => (UBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
+// uint64(x << lc)
+(MOVWUreg (SLLconst [lc] x)) && lc < 32 => (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
+(MOVHUreg (SLLconst [lc] x)) && lc < 16 => (UBFIZ [armBFAuxInt(lc, 16-lc)] x)
+(MOVBUreg (SLLconst [lc] x)) && lc < 8 => (UBFIZ [armBFAuxInt(lc, 8-lc)] x)
+
+// merge ANDconst into ubfiz
// (x & ac) << sc
(SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0)
=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
-(SLLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, 0) => (UBFIZ [armBFAuxInt(sc, 32)] x)
-(SLLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, 0) => (UBFIZ [armBFAuxInt(sc, 16)] x)
-(SLLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, 0) => (UBFIZ [armBFAuxInt(sc, 8)] x)
// (x << sc) & ac
(ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
-(MOVWUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, sc)
- => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
-(MOVHUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, sc)
- => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
-(MOVBUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, sc)
- => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
-// (x << lc) >> rc
-(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
// ubfx
+// (x << lc) >> rc
+(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+// uint64(x) >> rc
+(SRLconst [rc] (MOVWUreg x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32-rc)] x)
+(SRLconst [rc] (MOVHUreg x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16-rc)] x)
+(SRLconst [rc] (MOVBUreg x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8-rc)] x)
+// uint64(x >> rc)
+(MOVWUreg (SRLconst [rc] x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32)] x)
+(MOVHUreg (SRLconst [rc] x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16)] x)
+(MOVBUreg (SRLconst [rc] x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8)] x)
+// merge ANDconst into ubfx
// (x >> sc) & ac
(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0)
=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
-(MOVWUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, 0) => (UBFX [armBFAuxInt(sc, 32)] x)
-(MOVHUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, 0) => (UBFX [armBFAuxInt(sc, 16)] x)
-(MOVBUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, 0) => (UBFX [armBFAuxInt(sc, 8)] x)
// (x & ac) >> sc
(SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc)
=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
-(SRLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, sc)
- => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
-(SRLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, sc)
- => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
-(SRLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, sc)
- => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
-// (x << lc) >> rc
-(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
// merge ubfx and zero-extension into ubfx
(MOVWUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 32 => (UBFX [bfc] x)
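
The new *shiftRO rules fold a constant operand straight through the rotate via rotateRight64 (the helper added in rewrite.go further down). A standalone sketch of the identity they rely on, assuming only math/bits:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // Mirrors the rewrite.go helper: a right rotate is a left rotate
    // by the negated amount.
    func rotateRight64(v, rotate int64) int64 {
        return int64(bits.RotateLeft64(uint64(v), int(-rotate)))
    }

    func main() {
        // e.g. (ANDshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [rotateRight64(c, d)])
        // is sound because (c ROR d) == rotateRight64(c, d):
        c, d := int64(0x00ff00ff00ff00ff), int64(8)
        ror := int64(uint64(c)>>uint64(d) | uint64(c)<<(64-uint64(d)))
        fmt.Println(ror == rotateRight64(c, d)) // true
    }
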
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
index acfb2880c2..e3ebb6e1af 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
@@ -302,6 +302,7 @@ func init() {
{name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0<<auxInt), auxInt should be in the range 0 to 63.
{name: "MVNshiftRL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
{name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "MVNshiftRO", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
{name: "NEGshiftLL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0<<auxInt), auxInt should be in the range 0 to 63.
{name: "NEGshiftRL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
{name: "NEGshiftRA", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), signed shift, auxInt should be in the range 0 to 63.
@@ -314,21 +315,27 @@ func init() {
{name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1<<auxInt), auxInt should be in the range 0 to 63.
{name: "ANDshiftRL", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
{name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "ANDshiftRO", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
{name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1<<auxInt, auxInt should be in the range 0 to 63.
{name: "ORshiftRL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1>>auxInt, unsigned shift, auxInt should be in the range 0 to 63.
{name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "ORshiftRO", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1 ROR auxInt, signed shift, auxInt should be in the range 0 to 63.
{name: "XORshiftLL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1<<auxInt, auxInt should be in the range 0 to 63.
{name: "XORshiftRL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1>>auxInt, unsigned shift, auxInt should be in the range 0 to 63.
{name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "XORshiftRO", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1 ROR auxInt, signed shift, auxInt should be in the range 0 to 63.
{name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1<<auxInt), auxInt should be in the range 0 to 63.
{name: "BICshiftRL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
{name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "BICshiftRO", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
{name: "EONshiftLL", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1<<auxInt), auxInt should be in the range 0 to 63.
{name: "EONshiftRL", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
{name: "EONshiftRA", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "EONshiftRO", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
{name: "ORNshiftLL", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1<<auxInt), auxInt should be in the range 0 to 63.
{name: "ORNshiftRL", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
{name: "ORNshiftRA", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "ORNshiftRO", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
{name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1<<auxInt, auxInt should be in the range 0 to 63.
{name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift, auxInt should be in the range 0 to 63.
{name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63.
@@ -338,6 +345,7 @@ func init() {
{name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1<<auxInt) compare to 0, auxInt should be in the range 0 to 63.
{name: "TSTshiftRL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, unsigned shift, auxInt should be in the range 0 to 63.
{name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, signed shift, auxInt should be in the range 0 to 63.
+ {name: "TSTshiftRO", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1 ROR auxInt) compare to 0, signed shift, auxInt should be in the range 0 to 63.
// bitfield ops
// for all bitfield ops lsb is auxInt>>8, width is auxInt&0xff
@@ -484,6 +492,7 @@ func init() {
// function calls
{name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem
diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go
index d1f86039a3..75ba769724 100644
--- a/src/cmd/compile/internal/ssa/gen/ARMOps.go
+++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go
@@ -228,14 +228,15 @@ func init() {
// shifts
{name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << arg1, shift amount is mod 256
- {name: "SLLconst", argLength: 1, reg: gp11, asm: "SLL", aux: "Int32"}, // arg0 << auxInt
+ {name: "SLLconst", argLength: 1, reg: gp11, asm: "SLL", aux: "Int32"}, // arg0 << auxInt, 0 <= auxInt < 32
{name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> arg1, unsigned, shift amount is mod 256
- {name: "SRLconst", argLength: 1, reg: gp11, asm: "SRL", aux: "Int32"}, // arg0 >> auxInt, unsigned
+ {name: "SRLconst", argLength: 1, reg: gp11, asm: "SRL", aux: "Int32"}, // arg0 >> auxInt, unsigned, 0 <= auxInt < 32
{name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> arg1, signed, shift amount is mod 256
- {name: "SRAconst", argLength: 1, reg: gp11, asm: "SRA", aux: "Int32"}, // arg0 >> auxInt, signed
+ {name: "SRAconst", argLength: 1, reg: gp11, asm: "SRA", aux: "Int32"}, // arg0 >> auxInt, signed, 0 <= auxInt < 32
{name: "SRR", argLength: 2, reg: gp21}, // arg0 right rotate by arg1 bits
- {name: "SRRconst", argLength: 1, reg: gp11, aux: "Int32"}, // arg0 right rotate by auxInt bits
+ {name: "SRRconst", argLength: 1, reg: gp11, aux: "Int32"}, // arg0 right rotate by auxInt bits, 0 <= auxInt < 32
+ // auxInt for all of these satisfies 0 <= auxInt < 32
{name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1<<auxInt
{name: "ADDshiftRL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, unsigned shift
{name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift
@@ -431,6 +432,7 @@ func init() {
// function calls
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R7"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules
index 4ac9668ea9..639dda4b07 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules
@@ -334,6 +334,7 @@
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
// atomic intrinsics
(AtomicLoad(8|32) ...) => (LoweredAtomicLoad(8|32) ...)
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
index fd04a6c3a8..292ff2fc79 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS64.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
@@ -379,6 +379,7 @@
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
// atomic intrinsics
(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...)
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go b/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go
index a18cd4289d..54c0741efd 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go
@@ -276,6 +276,7 @@ func init() {
// function calls
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
diff --git a/src/cmd/compile/internal/ssa/gen/MIPSOps.go b/src/cmd/compile/internal/ssa/gen/MIPSOps.go
index 8177c7e2d1..5f73e9f2dc 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPSOps.go
+++ b/src/cmd/compile/internal/ssa/gen/MIPSOps.go
@@ -258,6 +258,7 @@ func init() {
// function calls
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index ce4b324b5e..411bb8d29d 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -670,6 +670,7 @@
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
// Miscellaneous
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
index d7d8a33a0a..a14d9cd490 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
@@ -429,6 +429,7 @@ func init() {
{name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{callptr, ctxt, 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{callptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
index b711550186..4290d1b85c 100644
--- a/src/cmd/compile/internal/ssa/gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
@@ -96,6 +96,10 @@
(Sqrt ...) => (FSQRTD ...)
(Sqrt32 ...) => (FSQRTS ...)
+(Copysign ...) => (FSGNJD ...)
+
+(Abs ...) => (FABSD ...)
+
(FMA ...) => (FMADDD ...)
// Sign and zero extension.
@@ -542,6 +546,7 @@
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
// Atomic Intrinsics
(AtomicLoad8 ...) => (LoweredAtomicLoad8 ...)
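
With the Copysign and Abs lowerings above, these math intrinsics can each compile down to a single riscv64 instruction; FSGNJD takes its sign from the second operand, per the op comment added below. A small usage example:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        fmt.Println(math.Abs(-1.5))         // 1.5  (FABSD)
        fmt.Println(math.Copysign(1.5, -2)) // -1.5 (FSGNJD: magnitude of 1.5, sign of -2)
    }
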
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
index de189e4c60..8a3fdf75f7 100644
--- a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
@@ -241,6 +241,7 @@ func init() {
// Calls
{name: "CALLstatic", argLength: 1, reg: call, aux: "CallOff", call: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: call, aux: "CallOff", call: true}, // tail call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: callClosure, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: callInter, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
@@ -432,6 +433,8 @@ func init() {
{name: "FNMSUBD", argLength: 3, reg: fp31, asm: "FNMSUBD", commutative: true, typ: "Float64"}, // -(arg0 * arg1) - arg2
{name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD", typ: "Float64"}, // sqrt(arg0)
{name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD", typ: "Float64"}, // -arg0
+ {name: "FABSD", argLength: 1, reg: fp11, asm: "FABSD", typ: "Float64"}, // abs(arg0)
+ {name: "FSGNJD", argLength: 2, reg: fp21, asm: "FSGNJD", typ: "Float64"}, // copy sign of arg1 to arg0
{name: "FMVDX", argLength: 1, reg: gpfp, asm: "FMVDX", typ: "Float64"}, // reinterpret arg0 as float
{name: "FCVTDW", argLength: 1, reg: gpfp, asm: "FCVTDW", typ: "Float64"}, // float64(low 32 bits of arg0)
{name: "FCVTDL", argLength: 1, reg: gpfp, asm: "FCVTDL", typ: "Float64"}, // float64(arg0)
diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules
index 88762f7045..b3928c6a1e 100644
--- a/src/cmd/compile/internal/ssa/gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/gen/S390X.rules
@@ -434,6 +434,7 @@
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
// Miscellaneous
(IsNonNil p) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go
index 00fce8e0e5..9b6ac2bfb6 100644
--- a/src/cmd/compile/internal/ssa/gen/S390XOps.go
+++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go
@@ -480,6 +480,7 @@ func init() {
{name: "CLEAR", argLength: 2, reg: regInfo{inputs: []regMask{ptr, 0}}, asm: "CLEAR", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Write"},
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{ptrsp, buildReg("R12"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{ptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
diff --git a/src/cmd/compile/internal/ssa/gen/Wasm.rules b/src/cmd/compile/internal/ssa/gen/Wasm.rules
index 7ad3d1c72e..9e683b116c 100644
--- a/src/cmd/compile/internal/ssa/gen/Wasm.rules
+++ b/src/cmd/compile/internal/ssa/gen/Wasm.rules
@@ -307,6 +307,7 @@
(StaticCall ...) => (LoweredStaticCall ...)
(ClosureCall ...) => (LoweredClosureCall ...)
(InterCall ...) => (LoweredInterCall ...)
+(TailCall ...) => (LoweredTailCall ...)
// Miscellaneous
(Convert ...) => (LoweredConvert ...)
diff --git a/src/cmd/compile/internal/ssa/gen/WasmOps.go b/src/cmd/compile/internal/ssa/gen/WasmOps.go
index 7f7ae5e837..0d7327109a 100644
--- a/src/cmd/compile/internal/ssa/gen/WasmOps.go
+++ b/src/cmd/compile/internal/ssa/gen/WasmOps.go
@@ -124,6 +124,7 @@ func init() {
var WasmOps = []opData{
{name: "LoweredStaticCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "LoweredTailCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "LoweredClosureCall", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp, 0}, clobbers: callerSave}, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "LoweredInterCall", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index c183aedf2d..984552900f 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -106,7 +106,7 @@ var genericOps = []opData{
// For shifts, AxB means the shifted value has A bits and the shift amount has B bits.
// Shift amounts are considered unsigned.
- // If arg1 is known to be less than the number of bits in arg0,
+ // If arg1 is known to be nonnegative and less than the number of bits in arg0,
// then auxInt may be set to 1.
// This enables better code generation on some platforms.
{name: "Lsh8x8", argLength: 2, aux: "Bool"}, // arg0 << arg1
@@ -417,10 +417,12 @@ var genericOps = []opData{
{name: "ClosureCall", argLength: -1, aux: "CallOff", call: true}, // arg0=code pointer, arg1=context ptr, arg2..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory.
{name: "StaticCall", argLength: -1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory.
{name: "InterCall", argLength: -1, aux: "CallOff", call: true}, // interface call. arg0=code pointer, arg1..argN-1 are register inputs, argN=memory, auxint=arg size. Returns Result of register results, plus memory.
+ {name: "TailCall", argLength: -1, aux: "CallOff", call: true}, // tail call function aux.(*obj.LSym), arg0..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory.
{name: "ClosureLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded closure call. arg0=code pointer, arg1=context ptr, arg2..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
{name: "StaticLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
{name: "InterLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded interface call. arg0=code pointer, arg1..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+ {name: "TailLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static tail call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
// Conversions: signed extensions, zero (unsigned) extensions, truncations
{name: "SignExt8to16", argLength: 1, typ: "Int16"},
@@ -638,7 +640,7 @@ var genericBlocks = []blockData{
{name: "If", controls: 1}, // if Controls[0] goto Succs[0] else goto Succs[1]
{name: "Defer", controls: 1}, // Succs[0]=defer queued, Succs[1]=defer recovered. Controls[0] is call op (of memory type)
{name: "Ret", controls: 1}, // no successors, Controls[0] value is memory result
- {name: "RetJmp", controls: 1}, // no successors, Controls[0] value is memory result, jumps to b.Aux.(*gc.Sym)
+ {name: "RetJmp", controls: 1}, // no successors, Controls[0] value is a tail call
{name: "Exit", controls: 1}, // no successors, Controls[0] value generates a panic
// transient block state used for dead code removal
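
A Go-level example of the bounded-shift case described in the comment above: masking the amount makes it provably nonnegative and smaller than the operand width, which lets the compiler mark the generic shift op as bounded (auxInt set to 1) and drop the out-of-range handling on targets that need it.

    package main

    import "fmt"

    func shl(x uint64, s uint) uint64 {
        return x << (s & 63) // amount provably in [0, 63]
    }

    func main() {
        fmt.Println(shl(1, 70)) // 70&63 = 6, so 1<<6 = 64
    }
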
diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go
index 4d191199fb..d9a78b3962 100644
--- a/src/cmd/compile/internal/ssa/html.go
+++ b/src/cmd/compile/internal/ssa/html.go
@@ -903,15 +903,12 @@ func (w *HTMLWriter) WriteAST(phase string, buf *bytes.Buffer) {
if strings.HasPrefix(l, "buildssa") {
escaped = fmt.Sprintf("<b>%v</b>", l)
} else {
- // Parse the line number from the format l(123).
- idx := strings.Index(l, " l(")
- if idx != -1 {
- subl := l[idx+3:]
- idxEnd := strings.Index(subl, ")")
- if idxEnd != -1 {
- if _, err := strconv.Atoi(subl[:idxEnd]); err == nil {
- lineNo = subl[:idxEnd]
- }
+ // Parse the line number from the format file:line:col.
+ // See the implementation in ir/fmt.go:dumpNodeHeader.
+ sl := strings.Split(l, ":")
+ if len(sl) >= 3 {
+ if _, err := strconv.Atoi(sl[len(sl)-2]); err == nil {
+ lineNo = sl[len(sl)-2]
}
}
escaped = html.EscapeString(l)
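
The parser above now takes the line number from the file:line:col header format (see ir/fmt.go:dumpNodeHeader); indexing from the right keeps colons inside file paths from confusing it. A self-contained sketch of the same logic:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func lineOf(l string) (string, bool) {
        sl := strings.Split(l, ":")
        if len(sl) < 3 {
            return "", false
        }
        if _, err := strconv.Atoi(sl[len(sl)-2]); err != nil {
            return "", false
        }
        return sl[len(sl)-2], true
    }

    func main() {
        fmt.Println(lineOf("main.go:42:7"))        // 42 true
        fmt.Println(lineOf("C:/src/main.go:42:7")) // 42 true: path colon is harmless
    }
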
@@ -1221,7 +1218,7 @@ func (p htmlFuncPrinter) startBlock(b *Block, reachable bool) {
}
}
-func (p htmlFuncPrinter) endBlock(b *Block) {
+func (p htmlFuncPrinter) endBlock(b *Block, reachable bool) {
if len(b.Values) > 0 { // end list of values
io.WriteString(p.w, "</ul>")
io.WriteString(p.w, "</li>")
diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go
index 252c47cdeb..b575febd72 100644
--- a/src/cmd/compile/internal/ssa/location.go
+++ b/src/cmd/compile/internal/ssa/location.go
@@ -91,8 +91,8 @@ func (t LocPair) String() string {
type LocResults []Location
func (t LocResults) String() string {
- s := "<"
- a := ""
+ s := ""
+ a := "<"
for _, r := range t {
a += s
s = ","
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 573559db70..eb7e4b91bb 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -515,6 +515,7 @@ const (
Op386DUFFZERO
Op386REPSTOSL
Op386CALLstatic
+ Op386CALLtail
Op386CALLclosure
Op386CALLinter
Op386DUFFCOPY
@@ -993,6 +994,7 @@ const (
OpAMD64DUFFZERO
OpAMD64REPSTOSQ
OpAMD64CALLstatic
+ OpAMD64CALLtail
OpAMD64CALLclosure
OpAMD64CALLinter
OpAMD64DUFFCOPY
@@ -1269,6 +1271,7 @@ const (
OpARMCMOVWLSconst
OpARMSRAcond
OpARMCALLstatic
+ OpARMCALLtail
OpARMCALLclosure
OpARMCALLinter
OpARMLoweredNilCheck
@@ -1409,6 +1412,7 @@ const (
OpARM64MVNshiftLL
OpARM64MVNshiftRL
OpARM64MVNshiftRA
+ OpARM64MVNshiftRO
OpARM64NEGshiftLL
OpARM64NEGshiftRL
OpARM64NEGshiftRA
@@ -1421,21 +1425,27 @@ const (
OpARM64ANDshiftLL
OpARM64ANDshiftRL
OpARM64ANDshiftRA
+ OpARM64ANDshiftRO
OpARM64ORshiftLL
OpARM64ORshiftRL
OpARM64ORshiftRA
+ OpARM64ORshiftRO
OpARM64XORshiftLL
OpARM64XORshiftRL
OpARM64XORshiftRA
+ OpARM64XORshiftRO
OpARM64BICshiftLL
OpARM64BICshiftRL
OpARM64BICshiftRA
+ OpARM64BICshiftRO
OpARM64EONshiftLL
OpARM64EONshiftRL
OpARM64EONshiftRA
+ OpARM64EONshiftRO
OpARM64ORNshiftLL
OpARM64ORNshiftRL
OpARM64ORNshiftRA
+ OpARM64ORNshiftRO
OpARM64CMPshiftLL
OpARM64CMPshiftRL
OpARM64CMPshiftRA
@@ -1445,6 +1455,7 @@ const (
OpARM64TSTshiftLL
OpARM64TSTshiftRL
OpARM64TSTshiftRA
+ OpARM64TSTshiftRO
OpARM64BFI
OpARM64BFXIL
OpARM64SBFIZ
@@ -1552,6 +1563,7 @@ const (
OpARM64CSNEG
OpARM64CSETM
OpARM64CALLstatic
+ OpARM64CALLtail
OpARM64CALLclosure
OpARM64CALLinter
OpARM64LoweredNilCheck
@@ -1697,6 +1709,7 @@ const (
OpMIPSMOVFD
OpMIPSMOVDF
OpMIPSCALLstatic
+ OpMIPSCALLtail
OpMIPSCALLclosure
OpMIPSCALLinter
OpMIPSLoweredAtomicLoad8
@@ -1813,6 +1826,7 @@ const (
OpMIPS64MOVFD
OpMIPS64MOVDF
OpMIPS64CALLstatic
+ OpMIPS64CALLtail
OpMIPS64CALLclosure
OpMIPS64CALLinter
OpMIPS64DUFFZERO
@@ -2025,6 +2039,7 @@ const (
OpPPC64LoweredRound32F
OpPPC64LoweredRound64F
OpPPC64CALLstatic
+ OpPPC64CALLtail
OpPPC64CALLclosure
OpPPC64CALLinter
OpPPC64LoweredZero
@@ -2128,6 +2143,7 @@ const (
OpRISCV64SLTIU
OpRISCV64MOVconvert
OpRISCV64CALLstatic
+ OpRISCV64CALLtail
OpRISCV64CALLclosure
OpRISCV64CALLinter
OpRISCV64DUFFZERO
@@ -2183,6 +2199,8 @@ const (
OpRISCV64FNMSUBD
OpRISCV64FSQRTD
OpRISCV64FNEGD
+ OpRISCV64FABSD
+ OpRISCV64FSGNJD
OpRISCV64FMVDX
OpRISCV64FCVTDW
OpRISCV64FCVTDL
@@ -2384,6 +2402,7 @@ const (
OpS390XMOVDstoreconst
OpS390XCLEAR
OpS390XCALLstatic
+ OpS390XCALLtail
OpS390XCALLclosure
OpS390XCALLinter
OpS390XInvertFlags
@@ -2437,6 +2456,7 @@ const (
OpS390XLoweredZero
OpWasmLoweredStaticCall
+ OpWasmLoweredTailCall
OpWasmLoweredClosureCall
OpWasmLoweredInterCall
OpWasmLoweredAddr
@@ -2783,9 +2803,11 @@ const (
OpClosureCall
OpStaticCall
OpInterCall
+ OpTailCall
OpClosureLECall
OpStaticLECall
OpInterLECall
+ OpTailLECall
OpSignExt8to16
OpSignExt8to32
OpSignExt8to64
@@ -5905,6 +5927,16 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
name: "CALLclosure",
auxType: auxCallOff,
argLen: 3,
@@ -13102,6 +13134,16 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ {
name: "CALLclosure",
auxType: auxCallOff,
argLen: -1,
@@ -16938,6 +16980,16 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
name: "CALLclosure",
auxType: auxCallOff,
argLen: 3,
@@ -18764,6 +18816,20 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "MVNshiftRO",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "NEGshiftLL",
auxType: auxInt64,
argLen: 1,
@@ -18941,6 +19007,21 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "ANDshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "ORshiftLL",
auxType: auxInt64,
argLen: 2,
@@ -18986,6 +19067,21 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "ORshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "XORshiftLL",
auxType: auxInt64,
argLen: 2,
@@ -19031,6 +19127,21 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "XORshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "BICshiftLL",
auxType: auxInt64,
argLen: 2,
@@ -19076,6 +19187,21 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "BICshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "EONshiftLL",
auxType: auxInt64,
argLen: 2,
@@ -19121,6 +19247,21 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "EONshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "ORNshiftLL",
auxType: auxInt64,
argLen: 2,
@@ -19166,6 +19307,21 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "ORNshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "CMPshiftLL",
auxType: auxInt64,
argLen: 2,
@@ -19274,6 +19430,18 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "TSTshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
name: "BFI",
auxType: auxARM64BitField,
argLen: 2,
@@ -20705,6 +20873,16 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
name: "CALLclosure",
auxType: auxCallOff,
argLen: -1,
@@ -22638,6 +22816,16 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ },
+ },
+ {
name: "CALLclosure",
auxType: auxCallOff,
argLen: 3,
@@ -24197,6 +24385,16 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ },
+ },
+ {
name: "CALLclosure",
auxType: auxCallOff,
argLen: 3,
@@ -27025,6 +27223,16 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
name: "CALLclosure",
auxType: auxCallOff,
argLen: 3,
@@ -28431,6 +28639,15 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ call: true,
+ reg: regInfo{
+ clobbers: 9223372035781033972, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
name: "CALLclosure",
auxType: auxCallOff,
argLen: 3,
@@ -29188,6 +29405,33 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "FABSD",
+ argLen: 1,
+ asm: riscv.AFABSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSGNJD",
+ argLen: 2,
+ asm: riscv.AFSGNJD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
name: "FMVDX",
argLen: 1,
asm: riscv.AFMVDX,
@@ -32159,6 +32403,16 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
name: "CALLclosure",
auxType: auxCallOff,
argLen: 3,
@@ -32829,6 +33083,15 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "LoweredTailCall",
+ auxType: auxCallOff,
+ argLen: 1,
+ call: true,
+ reg: regInfo{
+ clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g
+ },
+ },
+ {
name: "LoweredClosureCall",
auxType: auxCallOff,
argLen: 3,
@@ -35605,6 +35868,13 @@ var opcodeTable = [...]opInfo{
generic: true,
},
{
+ name: "TailCall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
name: "ClosureLECall",
auxType: auxCallOff,
argLen: -1,
@@ -35626,6 +35896,13 @@ var opcodeTable = [...]opInfo{
generic: true,
},
{
+ name: "TailLECall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
name: "SignExt8to16",
argLen: 1,
generic: true,
diff --git a/src/cmd/compile/internal/ssa/print.go b/src/cmd/compile/internal/ssa/print.go
index d917183c70..96cd2c7c90 100644
--- a/src/cmd/compile/internal/ssa/print.go
+++ b/src/cmd/compile/internal/ssa/print.go
@@ -6,6 +6,7 @@ package ssa
import (
"bytes"
+ "cmd/internal/src"
"crypto/sha256"
"fmt"
"io"
@@ -17,22 +18,30 @@ func printFunc(f *Func) {
func hashFunc(f *Func) []byte {
h := sha256.New()
- p := stringFuncPrinter{w: h}
+ p := stringFuncPrinter{w: h, printDead: true}
fprintFunc(p, f)
return h.Sum(nil)
}
func (f *Func) String() string {
var buf bytes.Buffer
- p := stringFuncPrinter{w: &buf}
+ p := stringFuncPrinter{w: &buf, printDead: true}
fprintFunc(p, f)
return buf.String()
}
+// rewriteHash returns a hash of f suitable for detecting rewrite cycles.
+func (f *Func) rewriteHash() string {
+ h := sha256.New()
+ p := stringFuncPrinter{w: h, printDead: false}
+ fprintFunc(p, f)
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
type funcPrinter interface {
header(f *Func)
startBlock(b *Block, reachable bool)
- endBlock(b *Block)
+ endBlock(b *Block, reachable bool)
value(v *Value, live bool)
startDepCycle()
endDepCycle()
@@ -40,7 +49,8 @@ type funcPrinter interface {
}
type stringFuncPrinter struct {
- w io.Writer
+ w io.Writer
+ printDead bool
}
func (p stringFuncPrinter) header(f *Func) {
@@ -50,6 +60,9 @@ func (p stringFuncPrinter) header(f *Func) {
}
func (p stringFuncPrinter) startBlock(b *Block, reachable bool) {
+ if !p.printDead && !reachable {
+ return
+ }
fmt.Fprintf(p.w, " b%d:", b.ID)
if len(b.Preds) > 0 {
io.WriteString(p.w, " <-")
@@ -64,14 +77,33 @@ func (p stringFuncPrinter) startBlock(b *Block, reachable bool) {
io.WriteString(p.w, "\n")
}
-func (p stringFuncPrinter) endBlock(b *Block) {
+func (p stringFuncPrinter) endBlock(b *Block, reachable bool) {
+ if !p.printDead && !reachable {
+ return
+ }
fmt.Fprintln(p.w, " "+b.LongString())
}
+func StmtString(p src.XPos) string {
+ linenumber := "(?) "
+ if p.IsKnown() {
+ pfx := ""
+ if p.IsStmt() == src.PosIsStmt {
+ pfx = "+"
+ }
+ if p.IsStmt() == src.PosNotStmt {
+ pfx = "-"
+ }
+ linenumber = fmt.Sprintf("(%s%d) ", pfx, p.Line())
+ }
+ return linenumber
+}
+
func (p stringFuncPrinter) value(v *Value, live bool) {
- fmt.Fprint(p.w, " ")
- //fmt.Fprint(p.w, v.Block.Func.fe.Pos(v.Pos))
- //fmt.Fprint(p.w, ": ")
+ if !p.printDead && !live {
+ return
+ }
+ fmt.Fprintf(p.w, " %s", StmtString(v.Pos))
fmt.Fprint(p.w, v.LongString())
if !live {
fmt.Fprint(p.w, " DEAD")
@@ -103,7 +135,7 @@ func fprintFunc(p funcPrinter, f *Func) {
p.value(v, live[v.ID])
printed[v.ID] = true
}
- p.endBlock(b)
+ p.endBlock(b, reachable[b.ID])
continue
}
@@ -151,7 +183,7 @@ func fprintFunc(p funcPrinter, f *Func) {
}
}
- p.endBlock(b)
+ p.endBlock(b, reachable[b.ID])
}
for _, name := range f.Names {
p.named(*name, f.NamedValues[*name])
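
For readers of the resulting dumps, the prefix StmtString attaches to each value is, per the function above:

    (+12)  line 12, marked as a statement boundary (src.PosIsStmt)
    (-12)  line 12, explicitly not a statement (src.PosNotStmt)
    (12)   line 12, no statement mark
    (?)    position unknown
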
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index 5d468768b6..79f9efaebf 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -36,6 +36,8 @@ func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter, deadcode deadValu
if debug > 1 {
fmt.Printf("%s: rewriting for %s\n", f.pass.name, f.Name)
}
+ var iters int
+ var states map[string]bool
for {
change := false
for _, b := range f.Blocks {
@@ -146,6 +148,30 @@ func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter, deadcode deadValu
if !change {
break
}
+ iters++
+ if iters > 1000 || debug >= 2 {
+ // We've done a suspiciously large number of rewrites (or we're in debug mode).
+ // As of Sep 2021, 90% of rewrites complete in 4 iterations or fewer
+ // and the maximum value encountered during make.bash is 12.
+ // Start checking for cycles. (This is too expensive to do routinely.)
+ if states == nil {
+ states = make(map[string]bool)
+ }
+ h := f.rewriteHash()
+ if _, ok := states[h]; ok {
+ // We've found a cycle.
+ // To diagnose it, set debug to 2 and start again,
+ // so that we'll print all rules applied until we complete another cycle.
+ // If debug is already >= 2, we've already done that, so it's time to crash.
+ if debug < 2 {
+ debug = 2
+ states = make(map[string]bool)
+ } else {
+ f.Fatalf("rewrite cycle detected")
+ }
+ }
+ states[h] = true
+ }
}
// remove clobbered values
for _, b := range f.Blocks {
@@ -1541,12 +1567,16 @@ func rotateLeft32(v, rotate int64) int64 {
return int64(bits.RotateLeft32(uint32(v), int(rotate)))
}
+func rotateRight64(v, rotate int64) int64 {
+ return int64(bits.RotateLeft64(uint64(v), int(-rotate)))
+}
+
// encodes the lsb and width for arm(64) bitfield ops into the expected auxInt format.
func armBFAuxInt(lsb, width int64) arm64BitField {
if lsb < 0 || lsb > 63 {
panic("ARM(64) bit field lsb constant out of range")
}
- if width < 1 || width > 64 {
+ if width < 1 || lsb+width > 64 {
panic("ARM(64) bit field width constant out of range")
}
return arm64BitField(width | lsb<<8)
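
The cycle detector added above only kicks in after an unusually long run of rewrites, since hashing the whole function every iteration is too expensive to do routinely. A minimal standalone sketch of the same scheme, with the SSA function stood in by an opaque state string and step as a hypothetical rewrite pass:

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    func run(state string, step func(string) (string, bool)) error {
        iters := 0
        var seen map[[32]byte]bool
        for {
            next, changed := step(state)
            if !changed {
                return nil // fixed point reached
            }
            state = next
            iters++
            if iters > 1000 { // suspiciously many rewrites: start recording states
                if seen == nil {
                    seen = make(map[[32]byte]bool)
                }
                h := sha256.Sum256([]byte(state))
                if seen[h] {
                    return fmt.Errorf("rewrite cycle detected")
                }
                seen[h] = true
            }
        }
    }

    func main() {
        // Two rules that keep undoing each other form a cycle.
        step := func(s string) (string, bool) {
            if s == "a" {
                return "b", true
            }
            return "a", true
        }
        fmt.Println(run("a", step)) // rewrite cycle detected
    }
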
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index 1ec2d26f75..34f37867cf 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -652,6 +652,9 @@ func rewriteValue386(v *Value) bool {
case OpSubPtr:
v.Op = Op386SUBL
return true
+ case OpTailCall:
+ v.Op = Op386CALLtail
+ return true
case OpTrunc16to8:
v.Op = OpCopy
return true
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index aa9293e347..e20161c920 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -1103,6 +1103,9 @@ func rewriteValueAMD64(v *Value) bool {
case OpSubPtr:
v.Op = OpAMD64SUBQ
return true
+ case OpTailCall:
+ v.Op = OpAMD64CALLtail
+ return true
case OpTrunc:
return rewriteValueAMD64_OpTrunc(v)
case OpTrunc16to8:
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index febb5566e3..496f9b4ae2 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -338,6 +338,8 @@ func rewriteValueARM(v *Value) bool {
return rewriteValueARM_OpARMSRL(v)
case OpARMSRLconst:
return rewriteValueARM_OpARMSRLconst(v)
+ case OpARMSRR:
+ return rewriteValueARM_OpARMSRR(v)
case OpARMSUB:
return rewriteValueARM_OpARMSUB(v)
case OpARMSUBD:
@@ -855,6 +857,9 @@ func rewriteValueARM(v *Value) bool {
case OpSubPtr:
v.Op = OpARMSUB
return true
+ case OpTailCall:
+ v.Op = OpARMCALLtail
+ return true
case OpTrunc16to8:
v.Op = OpCopy
return true
@@ -1119,6 +1124,7 @@ func rewriteValueARM_OpARMADCshiftLLreg(v *Value) bool {
return true
}
// match: (ADCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
// result: (ADCshiftLL x y [c] flags)
for {
x := v_0
@@ -1128,6 +1134,9 @@ func rewriteValueARM_OpARMADCshiftLLreg(v *Value) bool {
}
c := auxIntToInt32(v_2.AuxInt)
flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMADCshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg3(x, y, flags)
@@ -1199,6 +1208,7 @@ func rewriteValueARM_OpARMADCshiftRAreg(v *Value) bool {
return true
}
// match: (ADCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
// result: (ADCshiftRA x y [c] flags)
for {
x := v_0
@@ -1208,6 +1218,9 @@ func rewriteValueARM_OpARMADCshiftRAreg(v *Value) bool {
}
c := auxIntToInt32(v_2.AuxInt)
flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMADCshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg3(x, y, flags)
@@ -1279,6 +1292,7 @@ func rewriteValueARM_OpARMADCshiftRLreg(v *Value) bool {
return true
}
// match: (ADCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
// result: (ADCshiftRL x y [c] flags)
for {
x := v_0
@@ -1288,6 +1302,9 @@ func rewriteValueARM_OpARMADCshiftRLreg(v *Value) bool {
}
c := auxIntToInt32(v_2.AuxInt)
flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMADCshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg3(x, y, flags)
@@ -1740,6 +1757,7 @@ func rewriteValueARM_OpARMADDSshiftLLreg(v *Value) bool {
return true
}
// match: (ADDSshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (ADDSshiftLL x y [c])
for {
x := v_0
@@ -1748,6 +1766,9 @@ func rewriteValueARM_OpARMADDSshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMADDSshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -1814,6 +1835,7 @@ func rewriteValueARM_OpARMADDSshiftRAreg(v *Value) bool {
return true
}
// match: (ADDSshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (ADDSshiftRA x y [c])
for {
x := v_0
@@ -1822,6 +1844,9 @@ func rewriteValueARM_OpARMADDSshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMADDSshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -1888,6 +1913,7 @@ func rewriteValueARM_OpARMADDSshiftRLreg(v *Value) bool {
return true
}
// match: (ADDSshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (ADDSshiftRL x y [c])
for {
x := v_0
@@ -1896,6 +1922,9 @@ func rewriteValueARM_OpARMADDSshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMADDSshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -2124,6 +2153,7 @@ func rewriteValueARM_OpARMADDshiftLLreg(v *Value) bool {
return true
}
// match: (ADDshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (ADDshiftLL x y [c])
for {
x := v_0
@@ -2132,6 +2162,9 @@ func rewriteValueARM_OpARMADDshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMADDshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -2198,6 +2231,7 @@ func rewriteValueARM_OpARMADDshiftRAreg(v *Value) bool {
return true
}
// match: (ADDshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (ADDshiftRA x y [c])
for {
x := v_0
@@ -2206,6 +2240,9 @@ func rewriteValueARM_OpARMADDshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMADDshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -2288,6 +2325,7 @@ func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool {
return true
}
// match: (ADDshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (ADDshiftRL x y [c])
for {
x := v_0
@@ -2296,6 +2334,9 @@ func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMADDshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -2614,18 +2655,16 @@ func rewriteValueARM_OpARMANDshiftLL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ANDshiftLL x y:(SLLconst x [c]) [d])
- // cond: c==d
+ // match: (ANDshiftLL y:(SLLconst x [c]) x [c])
// result: y
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- y := v_1
- if y.Op != OpARMSLLconst {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSLLconst || auxIntToInt32(y.AuxInt) != c {
break
}
- c := auxIntToInt32(y.AuxInt)
- if x != y.Args[0] || !(c == d) {
+ x := y.Args[0]
+ if x != v_1 {
break
}
v.copyOf(y)
@@ -2655,6 +2694,7 @@ func rewriteValueARM_OpARMANDshiftLLreg(v *Value) bool {
return true
}
// match: (ANDshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (ANDshiftLL x y [c])
for {
x := v_0
@@ -2663,6 +2703,9 @@ func rewriteValueARM_OpARMANDshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMANDshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -2705,18 +2748,16 @@ func rewriteValueARM_OpARMANDshiftRA(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ANDshiftRA x y:(SRAconst x [c]) [d])
- // cond: c==d
+ // match: (ANDshiftRA y:(SRAconst x [c]) x [c])
// result: y
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- y := v_1
- if y.Op != OpARMSRAconst {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRAconst || auxIntToInt32(y.AuxInt) != c {
break
}
- c := auxIntToInt32(y.AuxInt)
- if x != y.Args[0] || !(c == d) {
+ x := y.Args[0]
+ if x != v_1 {
break
}
v.copyOf(y)
@@ -2746,6 +2787,7 @@ func rewriteValueARM_OpARMANDshiftRAreg(v *Value) bool {
return true
}
// match: (ANDshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (ANDshiftRA x y [c])
for {
x := v_0
@@ -2754,6 +2796,9 @@ func rewriteValueARM_OpARMANDshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMANDshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -2796,18 +2841,16 @@ func rewriteValueARM_OpARMANDshiftRL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ANDshiftRL x y:(SRLconst x [c]) [d])
- // cond: c==d
+ // match: (ANDshiftRL y:(SRLconst x [c]) x [c])
// result: y
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- y := v_1
- if y.Op != OpARMSRLconst {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRLconst || auxIntToInt32(y.AuxInt) != c {
break
}
- c := auxIntToInt32(y.AuxInt)
- if x != y.Args[0] || !(c == d) {
+ x := y.Args[0]
+ if x != v_1 {
break
}
v.copyOf(y)
@@ -2837,6 +2880,7 @@ func rewriteValueARM_OpARMANDshiftRLreg(v *Value) bool {
return true
}
// match: (ANDshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (ANDshiftRL x y [c])
for {
x := v_0
@@ -2845,6 +2889,9 @@ func rewriteValueARM_OpARMANDshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMANDshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -3091,17 +3138,15 @@ func rewriteValueARM_OpARMBICshiftLL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (BICshiftLL x (SLLconst x [c]) [d])
- // cond: c==d
+ // match: (BICshiftLL (SLLconst x [c]) x [c])
// result: (MOVWconst [0])
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- if v_1.Op != OpARMSLLconst {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
break
}
- c := auxIntToInt32(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARMMOVWconst)
@@ -3115,6 +3160,7 @@ func rewriteValueARM_OpARMBICshiftLLreg(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (BICshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (BICshiftLL x y [c])
for {
x := v_0
@@ -3123,6 +3169,9 @@ func rewriteValueARM_OpARMBICshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMBICshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -3147,17 +3196,15 @@ func rewriteValueARM_OpARMBICshiftRA(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (BICshiftRA x (SRAconst x [c]) [d])
- // cond: c==d
+ // match: (BICshiftRA (SRAconst x [c]) x [c])
// result: (MOVWconst [0])
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- if v_1.Op != OpARMSRAconst {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
break
}
- c := auxIntToInt32(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARMMOVWconst)
@@ -3171,6 +3218,7 @@ func rewriteValueARM_OpARMBICshiftRAreg(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (BICshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (BICshiftRA x y [c])
for {
x := v_0
@@ -3179,6 +3227,9 @@ func rewriteValueARM_OpARMBICshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMBICshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -3203,17 +3254,15 @@ func rewriteValueARM_OpARMBICshiftRL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (BICshiftRL x (SRLconst x [c]) [d])
- // cond: c==d
+ // match: (BICshiftRL (SRLconst x [c]) x [c])
// result: (MOVWconst [0])
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- if v_1.Op != OpARMSRLconst {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
break
}
- c := auxIntToInt32(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARMMOVWconst)
@@ -3227,6 +3276,7 @@ func rewriteValueARM_OpARMBICshiftRLreg(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (BICshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (BICshiftRL x y [c])
for {
x := v_0
@@ -3235,6 +3285,9 @@ func rewriteValueARM_OpARMBICshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMBICshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -3437,6 +3490,7 @@ func rewriteValueARM_OpARMCMNshiftLLreg(v *Value) bool {
return true
}
// match: (CMNshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (CMNshiftLL x y [c])
for {
x := v_0
@@ -3445,6 +3499,9 @@ func rewriteValueARM_OpARMCMNshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMCMNshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -3511,6 +3568,7 @@ func rewriteValueARM_OpARMCMNshiftRAreg(v *Value) bool {
return true
}
// match: (CMNshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (CMNshiftRA x y [c])
for {
x := v_0
@@ -3519,6 +3577,9 @@ func rewriteValueARM_OpARMCMNshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMCMNshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -3585,6 +3646,7 @@ func rewriteValueARM_OpARMCMNshiftRLreg(v *Value) bool {
return true
}
// match: (CMNshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (CMNshiftRL x y [c])
for {
x := v_0
@@ -3593,6 +3655,9 @@ func rewriteValueARM_OpARMCMNshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMCMNshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -4090,6 +4155,7 @@ func rewriteValueARM_OpARMCMPshiftLLreg(v *Value) bool {
return true
}
// match: (CMPshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (CMPshiftLL x y [c])
for {
x := v_0
@@ -4098,6 +4164,9 @@ func rewriteValueARM_OpARMCMPshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMCMPshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -4168,6 +4237,7 @@ func rewriteValueARM_OpARMCMPshiftRAreg(v *Value) bool {
return true
}
// match: (CMPshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (CMPshiftRA x y [c])
for {
x := v_0
@@ -4176,6 +4246,9 @@ func rewriteValueARM_OpARMCMPshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMCMPshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -4246,6 +4319,7 @@ func rewriteValueARM_OpARMCMPshiftRLreg(v *Value) bool {
return true
}
// match: (CMPshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (CMPshiftRL x y [c])
for {
x := v_0
@@ -4254,6 +4328,9 @@ func rewriteValueARM_OpARMCMPshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMCMPshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -8101,6 +8178,7 @@ func rewriteValueARM_OpARMMVNshiftLLreg(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MVNshiftLLreg x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (MVNshiftLL x [c])
for {
x := v_0
@@ -8108,6 +8186,9 @@ func rewriteValueARM_OpARMMVNshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMMVNshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
@@ -8135,6 +8216,7 @@ func rewriteValueARM_OpARMMVNshiftRAreg(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MVNshiftRAreg x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (MVNshiftRA x [c])
for {
x := v_0
@@ -8142,6 +8224,9 @@ func rewriteValueARM_OpARMMVNshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMMVNshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
@@ -8169,6 +8254,7 @@ func rewriteValueARM_OpARMMVNshiftRLreg(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MVNshiftRLreg x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (MVNshiftRL x [c])
for {
x := v_0
@@ -8176,6 +8262,9 @@ func rewriteValueARM_OpARMMVNshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMMVNshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
@@ -8556,18 +8645,16 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ORshiftLL x y:(SLLconst x [c]) [d])
- // cond: c==d
+ // match: (ORshiftLL y:(SLLconst x [c]) x [c])
// result: y
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- y := v_1
- if y.Op != OpARMSLLconst {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSLLconst || auxIntToInt32(y.AuxInt) != c {
break
}
- c := auxIntToInt32(y.AuxInt)
- if x != y.Args[0] || !(c == d) {
+ x := y.Args[0]
+ if x != v_1 {
break
}
v.copyOf(y)
@@ -8597,6 +8684,7 @@ func rewriteValueARM_OpARMORshiftLLreg(v *Value) bool {
return true
}
// match: (ORshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (ORshiftLL x y [c])
for {
x := v_0
@@ -8605,6 +8693,9 @@ func rewriteValueARM_OpARMORshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMORshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -8647,18 +8738,16 @@ func rewriteValueARM_OpARMORshiftRA(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ORshiftRA x y:(SRAconst x [c]) [d])
- // cond: c==d
+ // match: (ORshiftRA y:(SRAconst x [c]) x [c])
// result: y
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- y := v_1
- if y.Op != OpARMSRAconst {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRAconst || auxIntToInt32(y.AuxInt) != c {
break
}
- c := auxIntToInt32(y.AuxInt)
- if x != y.Args[0] || !(c == d) {
+ x := y.Args[0]
+ if x != v_1 {
break
}
v.copyOf(y)
@@ -8688,6 +8777,7 @@ func rewriteValueARM_OpARMORshiftRAreg(v *Value) bool {
return true
}
// match: (ORshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (ORshiftRA x y [c])
for {
x := v_0
@@ -8696,6 +8786,9 @@ func rewriteValueARM_OpARMORshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMORshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -8754,18 +8847,16 @@ func rewriteValueARM_OpARMORshiftRL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ORshiftRL x y:(SRLconst x [c]) [d])
- // cond: c==d
+ // match: (ORshiftRL y:(SRLconst x [c]) x [c])
// result: y
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- y := v_1
- if y.Op != OpARMSRLconst {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRLconst || auxIntToInt32(y.AuxInt) != c {
break
}
- c := auxIntToInt32(y.AuxInt)
- if x != y.Args[0] || !(c == d) {
+ x := y.Args[0]
+ if x != v_1 {
break
}
v.copyOf(y)
@@ -8795,6 +8886,7 @@ func rewriteValueARM_OpARMORshiftRLreg(v *Value) bool {
return true
}
// match: (ORshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (ORshiftRL x y [c])
for {
x := v_0
@@ -8803,6 +8895,9 @@ func rewriteValueARM_OpARMORshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMORshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -9090,6 +9185,7 @@ func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value) bool {
return true
}
// match: (RSBSshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (RSBSshiftLL x y [c])
for {
x := v_0
@@ -9098,6 +9194,9 @@ func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMRSBSshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -9164,6 +9263,7 @@ func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value) bool {
return true
}
// match: (RSBSshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (RSBSshiftRA x y [c])
for {
x := v_0
@@ -9172,6 +9272,9 @@ func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMRSBSshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -9238,6 +9341,7 @@ func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value) bool {
return true
}
// match: (RSBSshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (RSBSshiftRL x y [c])
for {
x := v_0
@@ -9246,6 +9350,9 @@ func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMRSBSshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -9346,17 +9453,15 @@ func rewriteValueARM_OpARMRSBshiftLL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (RSBshiftLL x (SLLconst x [c]) [d])
- // cond: c==d
+ // match: (RSBshiftLL (SLLconst x [c]) x [c])
// result: (MOVWconst [0])
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- if v_1.Op != OpARMSLLconst {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
break
}
- c := auxIntToInt32(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARMMOVWconst)
@@ -9387,6 +9492,7 @@ func rewriteValueARM_OpARMRSBshiftLLreg(v *Value) bool {
return true
}
// match: (RSBshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (RSBshiftLL x y [c])
for {
x := v_0
@@ -9395,6 +9501,9 @@ func rewriteValueARM_OpARMRSBshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMRSBshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -9437,17 +9546,15 @@ func rewriteValueARM_OpARMRSBshiftRA(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (RSBshiftRA x (SRAconst x [c]) [d])
- // cond: c==d
+ // match: (RSBshiftRA (SRAconst x [c]) x [c])
// result: (MOVWconst [0])
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- if v_1.Op != OpARMSRAconst {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
break
}
- c := auxIntToInt32(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARMMOVWconst)
@@ -9478,6 +9585,7 @@ func rewriteValueARM_OpARMRSBshiftRAreg(v *Value) bool {
return true
}
// match: (RSBshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (RSBshiftRA x y [c])
for {
x := v_0
@@ -9486,6 +9594,9 @@ func rewriteValueARM_OpARMRSBshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMRSBshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -9528,17 +9639,15 @@ func rewriteValueARM_OpARMRSBshiftRL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (RSBshiftRL x (SRLconst x [c]) [d])
- // cond: c==d
+ // match: (RSBshiftRL (SRLconst x [c]) x [c])
// result: (MOVWconst [0])
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- if v_1.Op != OpARMSRLconst {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
break
}
- c := auxIntToInt32(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARMMOVWconst)
@@ -9569,6 +9678,7 @@ func rewriteValueARM_OpARMRSBshiftRLreg(v *Value) bool {
return true
}
// match: (RSBshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (RSBshiftRL x y [c])
for {
x := v_0
@@ -9577,6 +9687,9 @@ func rewriteValueARM_OpARMRSBshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMRSBshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -9683,6 +9796,7 @@ func rewriteValueARM_OpARMRSCshiftLLreg(v *Value) bool {
return true
}
// match: (RSCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
// result: (RSCshiftLL x y [c] flags)
for {
x := v_0
@@ -9692,6 +9806,9 @@ func rewriteValueARM_OpARMRSCshiftLLreg(v *Value) bool {
}
c := auxIntToInt32(v_2.AuxInt)
flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMRSCshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg3(x, y, flags)
@@ -9763,6 +9880,7 @@ func rewriteValueARM_OpARMRSCshiftRAreg(v *Value) bool {
return true
}
// match: (RSCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
// result: (RSCshiftRA x y [c] flags)
for {
x := v_0
@@ -9772,6 +9890,9 @@ func rewriteValueARM_OpARMRSCshiftRAreg(v *Value) bool {
}
c := auxIntToInt32(v_2.AuxInt)
flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMRSCshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg3(x, y, flags)
@@ -9843,6 +9964,7 @@ func rewriteValueARM_OpARMRSCshiftRLreg(v *Value) bool {
return true
}
// match: (RSCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
// result: (RSCshiftRL x y [c] flags)
for {
x := v_0
@@ -9852,6 +9974,9 @@ func rewriteValueARM_OpARMRSCshiftRLreg(v *Value) bool {
}
c := auxIntToInt32(v_2.AuxInt)
flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMRSCshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg3(x, y, flags)
@@ -10166,6 +10291,7 @@ func rewriteValueARM_OpARMSBCshiftLLreg(v *Value) bool {
return true
}
// match: (SBCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
// result: (SBCshiftLL x y [c] flags)
for {
x := v_0
@@ -10175,6 +10301,9 @@ func rewriteValueARM_OpARMSBCshiftLLreg(v *Value) bool {
}
c := auxIntToInt32(v_2.AuxInt)
flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMSBCshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg3(x, y, flags)
@@ -10246,6 +10375,7 @@ func rewriteValueARM_OpARMSBCshiftRAreg(v *Value) bool {
return true
}
// match: (SBCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
// result: (SBCshiftRA x y [c] flags)
for {
x := v_0
@@ -10255,6 +10385,9 @@ func rewriteValueARM_OpARMSBCshiftRAreg(v *Value) bool {
}
c := auxIntToInt32(v_2.AuxInt)
flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMSBCshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg3(x, y, flags)
@@ -10326,6 +10459,7 @@ func rewriteValueARM_OpARMSBCshiftRLreg(v *Value) bool {
return true
}
// match: (SBCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
// result: (SBCshiftRL x y [c] flags)
for {
x := v_0
@@ -10335,6 +10469,9 @@ func rewriteValueARM_OpARMSBCshiftRLreg(v *Value) bool {
}
c := auxIntToInt32(v_2.AuxInt)
flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMSBCshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg3(x, y, flags)
@@ -10346,15 +10483,19 @@ func rewriteValueARM_OpARMSLL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SLL x (MOVWconst [c]))
- // result: (SLLconst x [c&31])
+ // cond: 0 <= c && c < 32
+ // result: (SLLconst x [c])
for {
x := v_0
if v_1.Op != OpARMMOVWconst {
break
}
c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMSLLconst)
- v.AuxInt = int32ToAuxInt(c & 31)
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
@@ -10380,15 +10521,19 @@ func rewriteValueARM_OpARMSRA(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SRA x (MOVWconst [c]))
- // result: (SRAconst x [c&31])
+ // cond: 0 <= c && c < 32
+ // result: (SRAconst x [c])
for {
x := v_0
if v_1.Op != OpARMMOVWconst {
break
}
c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMSRAconst)
- v.AuxInt = int32ToAuxInt(c & 31)
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
@@ -10472,15 +10617,19 @@ func rewriteValueARM_OpARMSRL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SRL x (MOVWconst [c]))
- // result: (SRLconst x [c&31])
+ // cond: 0 <= c && c < 32
+ // result: (SRLconst x [c])
for {
x := v_0
if v_1.Op != OpARMMOVWconst {
break
}
c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMSRLconst)
- v.AuxInt = int32ToAuxInt(c & 31)
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
@@ -10520,6 +10669,24 @@ func rewriteValueARM_OpARMSRLconst(v *Value) bool {
}
return false
}
+func rewriteValueARM_OpARMSRR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRR x (MOVWconst [c]))
+ // result: (SRRconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueARM_OpARMSUB(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
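
The new SRR (rotate right) rule above is the one ARM rule that keeps the unguarded c&31 mask: rotation, unlike shifting, is periodic in the amount, so reducing the constant modulo 32 is always sound and no range check is needed. A quick check of that claim using the standard library:

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        x := uint32(0x80000001)
        // Rotating right by 33 is the same as rotating right by 1.
        fmt.Println(bits.RotateLeft32(x, -33) == bits.RotateLeft32(x, -1)) // true
    }
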
@@ -11058,6 +11225,7 @@ func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value) bool {
return true
}
// match: (SUBSshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (SUBSshiftLL x y [c])
for {
x := v_0
@@ -11066,6 +11234,9 @@ func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMSUBSshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -11132,6 +11303,7 @@ func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value) bool {
return true
}
// match: (SUBSshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (SUBSshiftRA x y [c])
for {
x := v_0
@@ -11140,6 +11312,9 @@ func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMSUBSshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -11206,6 +11381,7 @@ func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value) bool {
return true
}
// match: (SUBSshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (SUBSshiftRL x y [c])
for {
x := v_0
@@ -11214,6 +11390,9 @@ func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMSUBSshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -11368,17 +11547,15 @@ func rewriteValueARM_OpARMSUBshiftLL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (SUBshiftLL x (SLLconst x [c]) [d])
- // cond: c==d
+ // match: (SUBshiftLL (SLLconst x [c]) x [c])
// result: (MOVWconst [0])
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- if v_1.Op != OpARMSLLconst {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
break
}
- c := auxIntToInt32(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARMMOVWconst)
@@ -11409,6 +11586,7 @@ func rewriteValueARM_OpARMSUBshiftLLreg(v *Value) bool {
return true
}
// match: (SUBshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (SUBshiftLL x y [c])
for {
x := v_0
@@ -11417,6 +11595,9 @@ func rewriteValueARM_OpARMSUBshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMSUBshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -11459,17 +11640,15 @@ func rewriteValueARM_OpARMSUBshiftRA(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (SUBshiftRA x (SRAconst x [c]) [d])
- // cond: c==d
+ // match: (SUBshiftRA (SRAconst x [c]) x [c])
// result: (MOVWconst [0])
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- if v_1.Op != OpARMSRAconst {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
break
}
- c := auxIntToInt32(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARMMOVWconst)
@@ -11500,6 +11679,7 @@ func rewriteValueARM_OpARMSUBshiftRAreg(v *Value) bool {
return true
}
// match: (SUBshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (SUBshiftRA x y [c])
for {
x := v_0
@@ -11508,6 +11688,9 @@ func rewriteValueARM_OpARMSUBshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMSUBshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -11550,17 +11733,15 @@ func rewriteValueARM_OpARMSUBshiftRL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (SUBshiftRL x (SRLconst x [c]) [d])
- // cond: c==d
+ // match: (SUBshiftRL (SRLconst x [c]) x [c])
// result: (MOVWconst [0])
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- if v_1.Op != OpARMSRLconst {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
break
}
- c := auxIntToInt32(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARMMOVWconst)
@@ -11591,6 +11772,7 @@ func rewriteValueARM_OpARMSUBshiftRLreg(v *Value) bool {
return true
}
// match: (SUBshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (SUBshiftRL x y [c])
for {
x := v_0
@@ -11599,6 +11781,9 @@ func rewriteValueARM_OpARMSUBshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMSUBshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -11801,6 +11986,7 @@ func rewriteValueARM_OpARMTEQshiftLLreg(v *Value) bool {
return true
}
// match: (TEQshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (TEQshiftLL x y [c])
for {
x := v_0
@@ -11809,6 +11995,9 @@ func rewriteValueARM_OpARMTEQshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMTEQshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -11875,6 +12064,7 @@ func rewriteValueARM_OpARMTEQshiftRAreg(v *Value) bool {
return true
}
// match: (TEQshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (TEQshiftRA x y [c])
for {
x := v_0
@@ -11883,6 +12073,9 @@ func rewriteValueARM_OpARMTEQshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMTEQshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -11949,6 +12142,7 @@ func rewriteValueARM_OpARMTEQshiftRLreg(v *Value) bool {
return true
}
// match: (TEQshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (TEQshiftRL x y [c])
for {
x := v_0
@@ -11957,6 +12151,9 @@ func rewriteValueARM_OpARMTEQshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMTEQshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -12159,6 +12356,7 @@ func rewriteValueARM_OpARMTSTshiftLLreg(v *Value) bool {
return true
}
// match: (TSTshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (TSTshiftLL x y [c])
for {
x := v_0
@@ -12167,6 +12365,9 @@ func rewriteValueARM_OpARMTSTshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMTSTshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -12233,6 +12434,7 @@ func rewriteValueARM_OpARMTSTshiftRAreg(v *Value) bool {
return true
}
// match: (TSTshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (TSTshiftRA x y [c])
for {
x := v_0
@@ -12241,6 +12443,9 @@ func rewriteValueARM_OpARMTSTshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMTSTshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -12307,6 +12512,7 @@ func rewriteValueARM_OpARMTSTshiftRLreg(v *Value) bool {
return true
}
// match: (TSTshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (TSTshiftRL x y [c])
for {
x := v_0
@@ -12315,6 +12521,9 @@ func rewriteValueARM_OpARMTSTshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMTSTshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -12595,17 +12804,15 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (XORshiftLL x (SLLconst x [c]) [d])
- // cond: c==d
+ // match: (XORshiftLL (SLLconst x [c]) x [c])
// result: (MOVWconst [0])
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- if v_1.Op != OpARMSLLconst {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
break
}
- c := auxIntToInt32(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARMMOVWconst)
@@ -12636,6 +12843,7 @@ func rewriteValueARM_OpARMXORshiftLLreg(v *Value) bool {
return true
}
// match: (XORshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (XORshiftLL x y [c])
for {
x := v_0
@@ -12644,6 +12852,9 @@ func rewriteValueARM_OpARMXORshiftLLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMXORshiftLL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -12686,17 +12897,15 @@ func rewriteValueARM_OpARMXORshiftRA(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (XORshiftRA x (SRAconst x [c]) [d])
- // cond: c==d
+ // match: (XORshiftRA (SRAconst x [c]) x [c])
// result: (MOVWconst [0])
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- if v_1.Op != OpARMSRAconst {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
break
}
- c := auxIntToInt32(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARMMOVWconst)
@@ -12727,6 +12936,7 @@ func rewriteValueARM_OpARMXORshiftRAreg(v *Value) bool {
return true
}
// match: (XORshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (XORshiftRA x y [c])
for {
x := v_0
@@ -12735,6 +12945,9 @@ func rewriteValueARM_OpARMXORshiftRAreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMXORshiftRA)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -12793,17 +13006,15 @@ func rewriteValueARM_OpARMXORshiftRL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (XORshiftRL x (SRLconst x [c]) [d])
- // cond: c==d
+ // match: (XORshiftRL (SRLconst x [c]) x [c])
// result: (MOVWconst [0])
for {
- d := auxIntToInt32(v.AuxInt)
- x := v_0
- if v_1.Op != OpARMSRLconst {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
break
}
- c := auxIntToInt32(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARMMOVWconst)
@@ -12834,6 +13045,7 @@ func rewriteValueARM_OpARMXORshiftRLreg(v *Value) bool {
return true
}
// match: (XORshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
// result: (XORshiftRL x y [c])
for {
x := v_0
@@ -12842,6 +13054,9 @@ func rewriteValueARM_OpARMXORshiftRLreg(v *Value) bool {
break
}
c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
v.reset(OpARMXORshiftRL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
@@ -14901,19 +15116,6 @@ func rewriteValueARM_OpRotateLeft32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (RotateLeft32 x (MOVWconst [c]))
- // result: (SRRconst [-c&31] x)
- for {
- x := v_0
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- v.reset(OpARMSRRconst)
- v.AuxInt = int32ToAuxInt(-c & 31)
- v.AddArg(x)
- return true
- }
// match: (RotateLeft32 x y)
// result: (SRR x (RSBconst [0] <y.Type> y))
for {
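
The RotateLeft32-by-constant rule deleted above is not a lost optimization: the generic rule that remains lowers RotateLeft32 to SRR x (RSBconst [0] y), constant folding turns RSBconst [0] (MOVWconst [c]) into MOVWconst [-c] (assuming the usual ARM constant-folding rules, which are not shown in this diff), and the new SRR rule then produces SRRconst [-c&31] x, exactly what the deleted rule emitted directly. The amount arithmetic checks out because rotating left by c equals rotating right by (-c)&31:

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        x, c := uint32(0x12345678), 7
        rotrAmount := (-c) & 31 // 25
        fmt.Println(bits.RotateLeft32(x, c) == bits.RotateLeft32(x, -rotrAmount)) // true
    }
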
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index c62ff73c59..614e71f852 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -29,6 +29,8 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64ANDshiftRA(v)
case OpARM64ANDshiftRL:
return rewriteValueARM64_OpARM64ANDshiftRL(v)
+ case OpARM64ANDshiftRO:
+ return rewriteValueARM64_OpARM64ANDshiftRO(v)
case OpARM64BIC:
return rewriteValueARM64_OpARM64BIC(v)
case OpARM64BICshiftLL:
@@ -37,6 +39,8 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64BICshiftRA(v)
case OpARM64BICshiftRL:
return rewriteValueARM64_OpARM64BICshiftRL(v)
+ case OpARM64BICshiftRO:
+ return rewriteValueARM64_OpARM64BICshiftRO(v)
case OpARM64CMN:
return rewriteValueARM64_OpARM64CMN(v)
case OpARM64CMNW:
@@ -89,6 +93,8 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64EONshiftRA(v)
case OpARM64EONshiftRL:
return rewriteValueARM64_OpARM64EONshiftRL(v)
+ case OpARM64EONshiftRO:
+ return rewriteValueARM64_OpARM64EONshiftRO(v)
case OpARM64Equal:
return rewriteValueARM64_OpARM64Equal(v)
case OpARM64FADDD:
@@ -295,6 +301,8 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64MVNshiftRA(v)
case OpARM64MVNshiftRL:
return rewriteValueARM64_OpARM64MVNshiftRL(v)
+ case OpARM64MVNshiftRO:
+ return rewriteValueARM64_OpARM64MVNshiftRO(v)
case OpARM64NEG:
return rewriteValueARM64_OpARM64NEG(v)
case OpARM64NEGshiftLL:
@@ -315,6 +323,8 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64ORNshiftRA(v)
case OpARM64ORNshiftRL:
return rewriteValueARM64_OpARM64ORNshiftRL(v)
+ case OpARM64ORNshiftRO:
+ return rewriteValueARM64_OpARM64ORNshiftRO(v)
case OpARM64ORconst:
return rewriteValueARM64_OpARM64ORconst(v)
case OpARM64ORshiftLL:
@@ -323,6 +333,12 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64ORshiftRA(v)
case OpARM64ORshiftRL:
return rewriteValueARM64_OpARM64ORshiftRL(v)
+ case OpARM64ORshiftRO:
+ return rewriteValueARM64_OpARM64ORshiftRO(v)
+ case OpARM64ROR:
+ return rewriteValueARM64_OpARM64ROR(v)
+ case OpARM64RORW:
+ return rewriteValueARM64_OpARM64RORW(v)
case OpARM64RORWconst:
return rewriteValueARM64_OpARM64RORWconst(v)
case OpARM64RORconst:
@@ -367,6 +383,8 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64TSTshiftRA(v)
case OpARM64TSTshiftRL:
return rewriteValueARM64_OpARM64TSTshiftRL(v)
+ case OpARM64TSTshiftRO:
+ return rewriteValueARM64_OpARM64TSTshiftRO(v)
case OpARM64UBFIZ:
return rewriteValueARM64_OpARM64UBFIZ(v)
case OpARM64UBFX:
@@ -389,6 +407,8 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64XORshiftRA(v)
case OpARM64XORshiftRL:
return rewriteValueARM64_OpARM64XORshiftRL(v)
+ case OpARM64XORshiftRO:
+ return rewriteValueARM64_OpARM64XORshiftRO(v)
case OpAbs:
v.Op = OpARM64FABSD
return true
@@ -1042,6 +1062,9 @@ func rewriteValueARM64(v *Value) bool {
case OpSubPtr:
v.Op = OpARM64SUB
return true
+ case OpTailCall:
+ v.Op = OpARM64CALLtail
+ return true
case OpTrunc:
v.Op = OpARM64FRINTZD
return true
@@ -2107,6 +2130,28 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool {
}
break
}
+ // match: (AND x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ANDshiftRO x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ANDshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
return false
}
func rewriteValueARM64_OpARM64ANDconst(v *Value) bool {
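
Two idioms in the generated matcher above are worth noting. AND is commutative, so the `for _i0 := 0; _i0 <= 1; ...` loop tries both operand orders, swapping v_0 and v_1 on the second pass; and clobberIfDead(x1) only permits the fold when the RORconst value has no other uses, so merging it into the AND does not duplicate work. A stripped-down sketch of the swap loop, with illustrative names only:

    package main

    import "fmt"

    func matchEitherOrder(a, b, want string) bool {
        v0, v1 := a, b
        for i := 0; i <= 1; i, v0, v1 = i+1, v1, v0 {
            if v1 != want {
                continue // try the swapped order on the next pass
            }
            _ = v0 // the non-matching operand, kept as-is
            return true
        }
        return false
    }

    func main() {
        fmt.Println(matchEitherOrder("RORconst", "ADD", "RORconst")) // true, via the swap
    }
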
@@ -2269,18 +2314,16 @@ func rewriteValueARM64_OpARM64ANDshiftLL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ANDshiftLL x y:(SLLconst x [c]) [d])
- // cond: c==d
+ // match: (ANDshiftLL y:(SLLconst x [c]) x [c])
// result: y
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- if y.Op != OpARM64SLLconst {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SLLconst || auxIntToInt64(y.AuxInt) != c {
break
}
- c := auxIntToInt64(y.AuxInt)
- if x != y.Args[0] || !(c == d) {
+ x := y.Args[0]
+ if x != v_1 {
break
}
v.copyOf(y)
@@ -2323,18 +2366,16 @@ func rewriteValueARM64_OpARM64ANDshiftRA(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ANDshiftRA x y:(SRAconst x [c]) [d])
- // cond: c==d
+ // match: (ANDshiftRA y:(SRAconst x [c]) x [c])
// result: y
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- if y.Op != OpARM64SRAconst {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRAconst || auxIntToInt64(y.AuxInt) != c {
break
}
- c := auxIntToInt64(y.AuxInt)
- if x != y.Args[0] || !(c == d) {
+ x := y.Args[0]
+ if x != v_1 {
break
}
v.copyOf(y)
@@ -2377,18 +2418,68 @@ func rewriteValueARM64_OpARM64ANDshiftRL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ANDshiftRL x y:(SRLconst x [c]) [d])
- // cond: c==d
+ // match: (ANDshiftRL y:(SRLconst x [c]) x [c])
// result: y
for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRLconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRO (MOVDconst [c]) x [d])
+ // result: (ANDconst [c] (RORconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRO x (MOVDconst [c]) [d])
+ // result: (ANDconst x [rotateRight64(c, d)])
+ for {
d := auxIntToInt64(v.AuxInt)
x := v_0
- y := v_1
- if y.Op != OpARM64SRLconst {
+ if v_1.Op != OpARM64MOVDconst {
break
}
- c := auxIntToInt64(y.AuxInt)
- if x != y.Args[0] || !(c == d) {
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRO y:(RORconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64RORconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
break
}
v.copyOf(y)
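
The new shiftRO family mirrors the existing shiftLL/shiftRA/shiftRL helpers for ARM64's rotated-operand instruction forms. When the rotated operand is itself a constant, the rules fold completely through rotateRight64, a compiler helper assumed here to behave like a plain 64-bit rotate right; this is a sketch of that assumption, not the compiler's actual source:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // rotateRight64 as the rules use it, assuming math/bits semantics.
    func rotateRight64(v, rotate int64) int64 {
        return int64(bits.RotateLeft64(uint64(v), int(-rotate)))
    }

    func main() {
        // (ANDshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [rotateRight64(c, d)])
        c, d := int64(0xff), int64(8)
        fmt.Printf("%#x\n", uint64(rotateRight64(c, d))) // 0xff00000000000000
    }
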
@@ -2480,6 +2571,25 @@ func rewriteValueARM64_OpARM64BIC(v *Value) bool {
v.AddArg2(x0, y)
return true
}
+ // match: (BIC x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (BICshiftRO x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64BICshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64BICshiftLL(v *Value) bool {
@@ -2499,17 +2609,15 @@ func rewriteValueARM64_OpARM64BICshiftLL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (BICshiftLL x (SLLconst x [c]) [d])
- // cond: c==d
+ // match: (BICshiftLL (SLLconst x [c]) x [c])
// result: (MOVDconst [0])
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- if v_1.Op != OpARM64SLLconst {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
- c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -2535,17 +2643,15 @@ func rewriteValueARM64_OpARM64BICshiftRA(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (BICshiftRA x (SRAconst x [c]) [d])
- // cond: c==d
+ // match: (BICshiftRA (SRAconst x [c]) x [c])
// result: (MOVDconst [0])
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- if v_1.Op != OpARM64SRAconst {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
- c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -2571,17 +2677,49 @@ func rewriteValueARM64_OpARM64BICshiftRL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (BICshiftRL x (SRLconst x [c]) [d])
- // cond: c==d
+ // match: (BICshiftRL (SRLconst x [c]) x [c])
// result: (MOVDconst [0])
for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRO x (MOVDconst [c]) [d])
+ // result: (ANDconst x [^rotateRight64(c, d)])
+ for {
d := auxIntToInt64(v.AuxInt)
x := v_0
- if v_1.Op != OpARM64SRLconst {
+ if v_1.Op != OpARM64MOVDconst {
break
}
c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRO (RORconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -3930,6 +4068,25 @@ func rewriteValueARM64_OpARM64EON(v *Value) bool {
v.AddArg2(x0, y)
return true
}
+ // match: (EON x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (EONshiftRO x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64EONshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64EONshiftLL(v *Value) bool {
@@ -3949,17 +4106,15 @@ func rewriteValueARM64_OpARM64EONshiftLL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (EONshiftLL x (SLLconst x [c]) [d])
- // cond: c==d
+ // match: (EONshiftLL (SLLconst x [c]) x [c])
// result: (MOVDconst [-1])
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- if v_1.Op != OpARM64SLLconst {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
- c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -3985,17 +4140,15 @@ func rewriteValueARM64_OpARM64EONshiftRA(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (EONshiftRA x (SRAconst x [c]) [d])
- // cond: c==d
+ // match: (EONshiftRA (SRAconst x [c]) x [c])
// result: (MOVDconst [-1])
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- if v_1.Op != OpARM64SRAconst {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
- c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -4021,17 +4174,49 @@ func rewriteValueARM64_OpARM64EONshiftRL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (EONshiftRL x (SRLconst x [c]) [d])
- // cond: c==d
+ // match: (EONshiftRL (SRLconst x [c]) x [c])
// result: (MOVDconst [-1])
for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EONshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EONshiftRO x (MOVDconst [c]) [d])
+ // result: (XORconst x [^rotateRight64(c, d)])
+ for {
d := auxIntToInt64(v.AuxInt)
x := v_0
- if v_1.Op != OpARM64SRLconst {
+ if v_1.Op != OpARM64MOVDconst {
break
}
c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (EONshiftRO (RORconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -7157,37 +7342,37 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(0)
return true
}
- // match: (MOVBUreg (SLLconst [sc] x))
- // cond: isARM64BFMask(sc, 1<<8-1, sc)
- // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+ // match: (MOVBUreg (SLLconst [lc] x))
+ // cond: lc < 8
+ // result: (UBFIZ [armBFAuxInt(lc, 8-lc)] x)
for {
if v_0.Op != OpARM64SLLconst {
break
}
- sc := auxIntToInt64(v_0.AuxInt)
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<8-1, sc)) {
+ if !(lc < 8) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc))
v.AddArg(x)
return true
}
- // match: (MOVBUreg (SRLconst [sc] x))
- // cond: isARM64BFMask(sc, 1<<8-1, 0)
- // result: (UBFX [armBFAuxInt(sc, 8)] x)
+ // match: (MOVBUreg (SRLconst [rc] x))
+ // cond: rc < 8
+ // result: (UBFX [armBFAuxInt(rc, 8)] x)
for {
if v_0.Op != OpARM64SRLconst {
break
}
- sc := auxIntToInt64(v_0.AuxInt)
+ rc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<8-1, 0)) {
+ if !(rc < 8) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 8))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8))
v.AddArg(x)
return true
}
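
The rewritten MOVBUreg rules replace an opaque isARM64BFMask condition with direct arithmetic: zero-extending the low byte of x<<lc keeps bits lc..7, i.e. the low 8-lc bits of x placed at bit lc, which is precisely UBFIZ with lsb=lc and width=8-lc. The MOVHUreg and MOVWUreg hunks below are the same identity with widths 16 and 32. A check of the identity, where ubfiz is a hypothetical model of the instruction:

    package main

    import "fmt"

    // ubfiz models UBFIZ: insert the low `width` bits of x at bit `lsb`.
    func ubfiz(x uint64, lsb, width uint) uint64 {
        return (x & (1<<width - 1)) << lsb
    }

    func main() {
        x, lc := uint64(0xabcdef), uint(3)
        fmt.Println(uint64(uint8(x<<lc)) == ubfiz(x, lc, 8-lc)) // true
    }
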
@@ -10703,37 +10888,37 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(0)
return true
}
- // match: (MOVHUreg (SLLconst [sc] x))
- // cond: isARM64BFMask(sc, 1<<16-1, sc)
- // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+ // match: (MOVHUreg (SLLconst [lc] x))
+ // cond: lc < 16
+ // result: (UBFIZ [armBFAuxInt(lc, 16-lc)] x)
for {
if v_0.Op != OpARM64SLLconst {
break
}
- sc := auxIntToInt64(v_0.AuxInt)
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<16-1, sc)) {
+ if !(lc < 16) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc))
v.AddArg(x)
return true
}
- // match: (MOVHUreg (SRLconst [sc] x))
- // cond: isARM64BFMask(sc, 1<<16-1, 0)
- // result: (UBFX [armBFAuxInt(sc, 16)] x)
+ // match: (MOVHUreg (SRLconst [rc] x))
+ // cond: rc < 16
+ // result: (UBFX [armBFAuxInt(rc, 16)] x)
for {
if v_0.Op != OpARM64SRLconst {
break
}
- sc := auxIntToInt64(v_0.AuxInt)
+ rc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<16-1, 0)) {
+ if !(rc < 16) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 16))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16))
v.AddArg(x)
return true
}
@@ -12849,37 +13034,37 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(0)
return true
}
- // match: (MOVWUreg (SLLconst [sc] x))
- // cond: isARM64BFMask(sc, 1<<32-1, sc)
- // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+ // match: (MOVWUreg (SLLconst [lc] x))
+ // cond: lc < 32
+ // result: (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
for {
if v_0.Op != OpARM64SLLconst {
break
}
- sc := auxIntToInt64(v_0.AuxInt)
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<32-1, sc)) {
+ if !(lc < 32) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc))
v.AddArg(x)
return true
}
- // match: (MOVWUreg (SRLconst [sc] x))
- // cond: isARM64BFMask(sc, 1<<32-1, 0)
- // result: (UBFX [armBFAuxInt(sc, 32)] x)
+ // match: (MOVWUreg (SRLconst [rc] x))
+ // cond: rc < 32
+ // result: (UBFX [armBFAuxInt(rc, 32)] x)
for {
if v_0.Op != OpARM64SRLconst {
break
}
- sc := auxIntToInt64(v_0.AuxInt)
+ rc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<32-1, 0)) {
+ if !(rc < 32) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 32))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32))
v.AddArg(x)
return true
}
@@ -15608,6 +15793,24 @@ func rewriteValueARM64_OpARM64MVN(v *Value) bool {
v.AddArg(y)
return true
}
+ // match: (MVN x:(RORconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftRO [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64RORconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64MVNshiftLL(v *Value) bool {
@@ -15658,6 +15861,22 @@ func rewriteValueARM64_OpARM64MVNshiftRL(v *Value) bool {
}
return false
}
+func rewriteValueARM64_OpARM64MVNshiftRO(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRO (MOVDconst [c]) [d])
+ // result: (MOVDconst [^rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^rotateRight64(c, d))
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpARM64NEG(v *Value) bool {
v_0 := v.Args[0]
// match: (NEG (MUL x y))
@@ -15684,6 +15903,16 @@ func rewriteValueARM64_OpARM64NEG(v *Value) bool {
v.AddArg2(x, y)
return true
}
+ // match: (NEG (NEG x))
+ // result: x
+ for {
+ if v_0.Op != OpARM64NEG {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
// match: (NEG (MOVDconst [c]))
// result: (MOVDconst [-c])
for {
@@ -15937,6 +16166,28 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
}
break
}
+ // match: (OR x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORshiftRO x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ORshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
// match: (OR (SLL x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> [cc] (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
// cond: cc == OpARM64LessThanU
// result: (ROR x (NEG <t> y))
@@ -17906,6 +18157,25 @@ func rewriteValueARM64_OpARM64ORN(v *Value) bool {
v.AddArg2(x0, y)
return true
}
+ // match: (ORN x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORNshiftRO x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64ORNshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64ORNshiftLL(v *Value) bool {
@@ -17925,17 +18195,15 @@ func rewriteValueARM64_OpARM64ORNshiftLL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ORNshiftLL x (SLLconst x [c]) [d])
- // cond: c==d
+ // match: (ORNshiftLL (SLLconst x [c]) x [c])
// result: (MOVDconst [-1])
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- if v_1.Op != OpARM64SLLconst {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
- c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -17961,17 +18229,15 @@ func rewriteValueARM64_OpARM64ORNshiftRA(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ORNshiftRA x (SRAconst x [c]) [d])
- // cond: c==d
+ // match: (ORNshiftRA (SRAconst x [c]) x [c])
// result: (MOVDconst [-1])
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- if v_1.Op != OpARM64SRAconst {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
- c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -17997,17 +18263,49 @@ func rewriteValueARM64_OpARM64ORNshiftRL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ORNshiftRL x (SRLconst x [c]) [d])
- // cond: c==d
+ // match: (ORNshiftRL (SRLconst x [c]) x [c])
// result: (MOVDconst [-1])
for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORNshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORNshiftRO x (MOVDconst [c]) [d])
+ // result: (ORconst x [^rotateRight64(c, d)])
+ for {
d := auxIntToInt64(v.AuxInt)
x := v_0
- if v_1.Op != OpARM64SRLconst {
+ if v_1.Op != OpARM64MOVDconst {
break
}
c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORNshiftRO (RORconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -18120,18 +18418,16 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ORshiftLL x y:(SLLconst x [c]) [d])
- // cond: c==d
+ // match: (ORshiftLL y:(SLLconst x [c]) x [c])
// result: y
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- if y.Op != OpARM64SLLconst {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SLLconst || auxIntToInt64(y.AuxInt) != c {
break
}
- c := auxIntToInt64(y.AuxInt)
- if x != y.Args[0] || !(c == d) {
+ x := y.Args[0]
+ if x != v_1 {
break
}
v.copyOf(y)
@@ -19800,18 +20096,16 @@ func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ORshiftRA x y:(SRAconst x [c]) [d])
- // cond: c==d
+ // match: (ORshiftRA y:(SRAconst x [c]) x [c])
// result: y
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- if y.Op != OpARM64SRAconst {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRAconst || auxIntToInt64(y.AuxInt) != c {
break
}
- c := auxIntToInt64(y.AuxInt)
- if x != y.Args[0] || !(c == d) {
+ x := y.Args[0]
+ if x != v_1 {
break
}
v.copyOf(y)
@@ -19854,18 +20148,16 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ORshiftRL x y:(SRLconst x [c]) [d])
- // cond: c==d
+ // match: (ORshiftRL y:(SRLconst x [c]) x [c])
// result: y
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- if y.Op != OpARM64SRLconst {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRLconst || auxIntToInt64(y.AuxInt) != c {
break
}
- c := auxIntToInt64(y.AuxInt)
- if x != y.Args[0] || !(c == d) {
+ x := y.Args[0]
+ if x != v_1 {
break
}
v.copyOf(y)
@@ -19953,6 +20245,94 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
}
return false
}
+func rewriteValueARM64_OpARM64ORshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRO (MOVDconst [c]) x [d])
+ // result: (ORconst [c] (RORconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRO x (MOVDconst [c]) [d])
+ // result: (ORconst x [rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRO y:(RORconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64RORconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ROR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROR x (MOVDconst [c]))
+ // result: (RORconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64RORW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORW x (MOVDconst [c]))
+ // result: (RORWconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpARM64RORWconst(v *Value) bool {
v_0 := v.Args[0]
// match: (RORWconst [c] (RORWconst [d] x))
@@ -20130,72 +20510,60 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (SLLconst [sc] (ANDconst [ac] x))
- // cond: isARM64BFMask(sc, ac, 0)
- // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+ // match: (SLLconst [lc] (MOVWUreg x))
+ // result: (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
for {
- sc := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpARM64ANDconst {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg {
break
}
- ac := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, ac, 0)) {
- break
- }
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(32, 64-lc)))
v.AddArg(x)
return true
}
- // match: (SLLconst [sc] (MOVWUreg x))
- // cond: isARM64BFMask(sc, 1<<32-1, 0)
- // result: (UBFIZ [armBFAuxInt(sc, 32)] x)
+ // match: (SLLconst [lc] (MOVHUreg x))
+ // result: (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
for {
- sc := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpARM64MOVWUreg {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg {
break
}
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<32-1, 0)) {
- break
- }
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 32))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(16, 64-lc)))
v.AddArg(x)
return true
}
- // match: (SLLconst [sc] (MOVHUreg x))
- // cond: isARM64BFMask(sc, 1<<16-1, 0)
- // result: (UBFIZ [armBFAuxInt(sc, 16)] x)
+ // match: (SLLconst [lc] (MOVBUreg x))
+ // result: (UBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
for {
- sc := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpARM64MOVHUreg {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg {
break
}
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<16-1, 0)) {
- break
- }
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 16))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(8, 64-lc)))
v.AddArg(x)
return true
}
- // match: (SLLconst [sc] (MOVBUreg x))
- // cond: isARM64BFMask(sc, 1<<8-1, 0)
- // result: (UBFIZ [armBFAuxInt(sc, 8)] x)
+ // match: (SLLconst [sc] (ANDconst [ac] x))
+ // cond: isARM64BFMask(sc, ac, 0)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
for {
sc := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpARM64MOVBUreg {
+ if v_0.Op != OpARM64ANDconst {
break
}
+ ac := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<8-1, 0)) {
+ if !(isARM64BFMask(sc, ac, 0)) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 8))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
v.AddArg(x)
return true
}
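
A worked instance of the reordered UBFIZ rules (sketch): shifting a zero-extended w-bit value left by lc leaves min(w, 64-lc) significant bits, which is exactly the width the new aux int records.

	package p

	// uint64(x) zero-extends 32 bits; after << 40 only 64-40 = 24 of them
	// survive, so this lowers to UBFIZ [lsb=40, width=min(32, 24)=24].
	func ubfiz(x uint32) uint64 { return uint64(x) << 40 }
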
@@ -20488,90 +20856,90 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (SRLconst [sc] (ANDconst [ac] x))
- // cond: isARM64BFMask(sc, ac, sc)
- // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+ // match: (SRLconst [rc] (SLLconst [lc] x))
+ // cond: lc < rc
+ // result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
for {
- sc := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpARM64ANDconst {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
break
}
- ac := auxIntToInt64(v_0.AuxInt)
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, ac, sc)) {
+ if !(lc < rc) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
v.AddArg(x)
return true
}
- // match: (SRLconst [sc] (MOVWUreg x))
- // cond: isARM64BFMask(sc, 1<<32-1, sc)
- // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+ // match: (SRLconst [rc] (MOVWUreg x))
+ // cond: rc < 32
+ // result: (UBFX [armBFAuxInt(rc, 32-rc)] x)
for {
- sc := auxIntToInt64(v.AuxInt)
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVWUreg {
break
}
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<32-1, sc)) {
+ if !(rc < 32) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc))
v.AddArg(x)
return true
}
- // match: (SRLconst [sc] (MOVHUreg x))
- // cond: isARM64BFMask(sc, 1<<16-1, sc)
- // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+ // match: (SRLconst [rc] (MOVHUreg x))
+ // cond: rc < 16
+ // result: (UBFX [armBFAuxInt(rc, 16-rc)] x)
for {
- sc := auxIntToInt64(v.AuxInt)
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVHUreg {
break
}
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<16-1, sc)) {
+ if !(rc < 16) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc))
v.AddArg(x)
return true
}
- // match: (SRLconst [sc] (MOVBUreg x))
- // cond: isARM64BFMask(sc, 1<<8-1, sc)
- // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+ // match: (SRLconst [rc] (MOVBUreg x))
+ // cond: rc < 8
+ // result: (UBFX [armBFAuxInt(rc, 8-rc)] x)
for {
- sc := auxIntToInt64(v.AuxInt)
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVBUreg {
break
}
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<8-1, sc)) {
+ if !(rc < 8) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc))
v.AddArg(x)
return true
}
- // match: (SRLconst [rc] (SLLconst [lc] x))
- // cond: lc < rc
- // result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+ // match: (SRLconst [sc] (ANDconst [ac] x))
+ // cond: isARM64BFMask(sc, ac, sc)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
for {
- rc := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpARM64SLLconst {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
break
}
- lc := auxIntToInt64(v_0.AuxInt)
+ ac := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(lc < rc) {
+ if !(isARM64BFMask(sc, ac, sc)) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
v.AddArg(x)
return true
}
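
The UBFX counterpart, now guarded by the simpler rc < w condition (sketch):

	package p

	// uint64(x) >> 8 keeps bits 8..31 of the zero-extended value,
	// so this lowers to UBFX [lsb=8, width=32-8=24].
	func ubfx(x uint32) uint64 { return uint64(x) >> 8 }
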
@@ -20981,17 +21349,15 @@ func rewriteValueARM64_OpARM64SUBshiftLL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (SUBshiftLL x (SLLconst x [c]) [d])
- // cond: c==d
+ // match: (SUBshiftLL (SLLconst x [c]) x [c])
// result: (MOVDconst [0])
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- if v_1.Op != OpARM64SLLconst {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
- c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -21017,17 +21383,15 @@ func rewriteValueARM64_OpARM64SUBshiftRA(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (SUBshiftRA x (SRAconst x [c]) [d])
- // cond: c==d
+ // match: (SUBshiftRA (SRAconst x [c]) x [c])
// result: (MOVDconst [0])
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- if v_1.Op != OpARM64SRAconst {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
- c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -21053,17 +21417,15 @@ func rewriteValueARM64_OpARM64SUBshiftRL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (SUBshiftRL x (SRLconst x [c]) [d])
- // cond: c==d
+ // match: (SUBshiftRL (SRLconst x [c]) x [c])
// result: (MOVDconst [0])
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- if v_1.Op != OpARM64SRLconst {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
- c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -21157,6 +21519,28 @@ func rewriteValueARM64_OpARM64TST(v *Value) bool {
}
break
}
+ // match: (TST x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRO x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64TSTshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
return false
}
func rewriteValueARM64_OpARM64TSTW(v *Value) bool {
@@ -21323,6 +21707,43 @@ func rewriteValueARM64_OpARM64TSTshiftRL(v *Value) bool {
}
return false
}
+func rewriteValueARM64_OpARM64TSTshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRO (MOVDconst [c]) x [d])
+ // result: (TSTconst [c] (RORconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRO x (MOVDconst [c]) [d])
+ // result: (TSTconst x [rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
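
TST is an alias of ANDS, whose second register operand accepts a rotation, so a masked test against a rotated value can fuse into one instruction; a plausible source pattern (identifiers illustrative):

	package p

	import "math/bits"

	// x AND (y rotated right by 3), used only for its comparison with
	// zero, can be emitted as TST x, y, ROR #3 via the new TSTshiftRO.
	func tst(x, y uint64) bool { return x&bits.RotateLeft64(y, -3) != 0 }
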
func rewriteValueARM64_OpARM64UBFIZ(v *Value) bool {
v_0 := v.Args[0]
// match: (UBFIZ [bfc] (SLLconst [sc] x))
@@ -21782,6 +22203,28 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
}
break
}
+ // match: (XOR x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (XORshiftRO x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64XORshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
// match: (XOR (SLL x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> [cc] (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
// cond: cc == OpARM64LessThanU
// result: (ROR x (NEG <t> y))
@@ -22152,17 +22595,15 @@ func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (XORshiftLL x (SLLconst x [c]) [d])
- // cond: c==d
+ // match: (XORshiftLL (SLLconst x [c]) x [c])
// result: (MOVDconst [0])
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- if v_1.Op != OpARM64SLLconst {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
- c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -22364,17 +22805,15 @@ func rewriteValueARM64_OpARM64XORshiftRA(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (XORshiftRA x (SRAconst x [c]) [d])
- // cond: c==d
+ // match: (XORshiftRA (SRAconst x [c]) x [c])
// result: (MOVDconst [0])
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- if v_1.Op != OpARM64SRAconst {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
- c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -22418,17 +22857,15 @@ func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (XORshiftRL x (SRLconst x [c]) [d])
- // cond: c==d
+ // match: (XORshiftRL (SRLconst x [c]) x [c])
// result: (MOVDconst [0])
for {
- d := auxIntToInt64(v.AuxInt)
- x := v_0
- if v_1.Op != OpARM64SRLconst {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
- c := auxIntToInt64(v_1.AuxInt)
- if x != v_1.Args[0] || !(c == d) {
+ x := v_0.Args[0]
+ if x != v_1 {
break
}
v.reset(OpARM64MOVDconst)
@@ -22471,6 +22908,58 @@ func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool {
}
return false
}
+func rewriteValueARM64_OpARM64XORshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRO (MOVDconst [c]) x [d])
+ // result: (XORconst [c] (RORconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRO x (MOVDconst [c]) [d])
+ // result: (XORconst x [rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRO (RORconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpAddr(v *Value) bool {
v_0 := v.Args[0]
// match: (Addr {sym} base)
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go
index 429369d631..811ea9d9d3 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go
@@ -544,6 +544,9 @@ func rewriteValueMIPS(v *Value) bool {
case OpSubPtr:
v.Op = OpMIPSSUB
return true
+ case OpTailCall:
+ v.Op = OpMIPSCALLtail
+ return true
case OpTrunc16to8:
v.Op = OpCopy
return true
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
index 772d7b66ef..1fbd556b5c 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
@@ -625,6 +625,9 @@ func rewriteValueMIPS64(v *Value) bool {
case OpSubPtr:
v.Op = OpMIPS64SUBV
return true
+ case OpTailCall:
+ v.Op = OpMIPS64CALLtail
+ return true
case OpTrunc16to8:
v.Op = OpCopy
return true
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 96dee0bd21..b35331a624 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -772,6 +772,9 @@ func rewriteValuePPC64(v *Value) bool {
case OpSubPtr:
v.Op = OpPPC64SUB
return true
+ case OpTailCall:
+ v.Op = OpPPC64CALLtail
+ return true
case OpTrunc:
v.Op = OpPPC64FTRUNC
return true
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
index 743ff50b0c..f856a26d49 100644
--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -8,6 +8,9 @@ import "cmd/compile/internal/types"
func rewriteValueRISCV64(v *Value) bool {
switch v.Op {
+ case OpAbs:
+ v.Op = OpRISCV64FABSD
+ return true
case OpAdd16:
v.Op = OpRISCV64ADD
return true
@@ -134,6 +137,9 @@ func rewriteValueRISCV64(v *Value) bool {
case OpConvert:
v.Op = OpRISCV64MOVconvert
return true
+ case OpCopysign:
+ v.Op = OpRISCV64FSGNJD
+ return true
case OpCvt32Fto32:
v.Op = OpRISCV64FCVTWS
return true
@@ -633,6 +639,9 @@ func rewriteValueRISCV64(v *Value) bool {
case OpSubPtr:
v.Op = OpRISCV64SUB
return true
+ case OpTailCall:
+ v.Op = OpRISCV64CALLtail
+ return true
case OpTrunc16to8:
v.Op = OpCopy
return true
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
index 8b41d62c31..0d63586149 100644
--- a/src/cmd/compile/internal/ssa/rewriteS390X.go
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -819,6 +819,9 @@ func rewriteValueS390X(v *Value) bool {
case OpSubPtr:
v.Op = OpS390XSUB
return true
+ case OpTailCall:
+ v.Op = OpS390XCALLtail
+ return true
case OpTrunc:
return rewriteValueS390X_OpTrunc(v)
case OpTrunc16to8:
diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go
index 5dab09f85b..defd40ddd1 100644
--- a/src/cmd/compile/internal/ssa/rewriteWasm.go
+++ b/src/cmd/compile/internal/ssa/rewriteWasm.go
@@ -556,6 +556,9 @@ func rewriteValueWasm(v *Value) bool {
case OpSubPtr:
v.Op = OpWasmI64Sub
return true
+ case OpTailCall:
+ v.Op = OpWasmLoweredTailCall
+ return true
case OpTrunc:
v.Op = OpWasmF64Trunc
return true
diff --git a/src/cmd/compile/internal/ssa/testdata/inline-dump.go b/src/cmd/compile/internal/ssa/testdata/inline-dump.go
new file mode 100644
index 0000000000..97893b6f21
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/inline-dump.go
@@ -0,0 +1,17 @@
+package foo
+
+func f(m, n int) int {
+ a := g(n)
+ b := g(m)
+ return a + b
+}
+
+func g(x int) int {
+ y := h(x + 1)
+ z := h(x - 1)
+ return y + z
+}
+
+func h(x int) int {
+ return x * x
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/sayhi.go b/src/cmd/compile/internal/ssa/testdata/sayhi.go
new file mode 100644
index 0000000000..680e1eb3a1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/sayhi.go
@@ -0,0 +1,12 @@
+package foo
+
+import (
+ "fmt"
+ "sync"
+)
+
+func sayhi(n int, wg *sync.WaitGroup) {
+ fmt.Println("hi", n)
+ fmt.Println("hi", n)
+ wg.Done()
+}
diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go
index 630e4814b9..630143cc50 100644
--- a/src/cmd/compile/internal/ssa/value.go
+++ b/src/cmd/compile/internal/ssa/value.go
@@ -351,11 +351,13 @@ func (v *Value) reset(op Op) {
// invalidateRecursively marks a value as invalid (unused)
// and after decrementing reference counts on its Args,
// also recursively invalidates any of those whose use
-// count goes to zero.
+// count goes to zero. It returns whether any of the
+// invalidated values was marked with IsStmt.
//
// BEWARE of doing this *before* you've applied intended
// updates to SSA.
-func (v *Value) invalidateRecursively() {
+func (v *Value) invalidateRecursively() bool {
+ lostStmt := v.Pos.IsStmt() == src.PosIsStmt
if v.InCache {
v.Block.Func.unCache(v)
}
@@ -364,7 +366,8 @@ func (v *Value) invalidateRecursively() {
for _, a := range v.Args {
a.Uses--
if a.Uses == 0 {
- a.invalidateRecursively()
+ lost := a.invalidateRecursively()
+ lostStmt = lost || lostStmt
}
}
@@ -375,6 +378,7 @@ func (v *Value) invalidateRecursively() {
v.AuxInt = 0
v.Aux = nil
+ return lostStmt
}
// copyOf is called from rewrite rules.
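
A hypothetical caller of the new result (a sketch only; the real call sites live in the SSA passes that delete values): when an invalidated value carried a statement mark, the boundary must be reattached so debuggers keep a stopping point on that line.

	// sketch, assuming v is a newly dead *Value in block b
	func removeDead(b *Block, v *Value) {
		if v.invalidateRecursively() {
			// A src.PosIsStmt position was deleted along with v; put the
			// statement mark somewhere that still exists.
			b.Pos = b.Pos.WithIsStmt()
		}
	}
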
diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go
index d7510965f6..52f060b601 100644
--- a/src/cmd/compile/internal/ssa/writebarrier.go
+++ b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -544,7 +544,7 @@ func IsStackAddr(v *Value) bool {
v = v.Args[0]
}
switch v.Op {
- case OpSP, OpLocalAddr, OpSelectNAddr:
+ case OpSP, OpLocalAddr, OpSelectNAddr, OpGetCallerSP:
return true
}
return false
diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go
index 6d8c53e722..c54a734c75 100644
--- a/src/cmd/compile/internal/ssagen/abi.go
+++ b/src/cmd/compile/internal/ssagen/abi.go
@@ -382,18 +382,16 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
}
var tail ir.Node
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil)
+ call.Args = ir.ParamNames(tfn.Type())
+ call.IsDDD = tfn.Type().IsVariadic()
+ tail = call
if tailcall {
- tail = ir.NewTailCallStmt(base.Pos, f.Nname)
- } else {
- call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil)
- call.Args = ir.ParamNames(tfn.Type())
- call.IsDDD = tfn.Type().IsVariadic()
- tail = call
- if tfn.Type().NumResults() > 0 {
- n := ir.NewReturnStmt(base.Pos, nil)
- n.Results = []ir.Node{call}
- tail = n
- }
+ tail = ir.NewTailCallStmt(base.Pos, call)
+ } else if tfn.Type().NumResults() > 0 {
+ n := ir.NewReturnStmt(base.Pos, nil)
+ n.Results = []ir.Node{call}
+ tail = n
}
fn.Body.Append(tail)
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 1d5a872b1b..82d232f940 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -96,6 +96,7 @@ func InitConfig() {
ir.Syms.AssertE2I2 = typecheck.LookupRuntimeFunc("assertE2I2")
ir.Syms.AssertI2I = typecheck.LookupRuntimeFunc("assertI2I")
ir.Syms.AssertI2I2 = typecheck.LookupRuntimeFunc("assertI2I2")
+ ir.Syms.CheckPtrAlignment = typecheck.LookupRuntimeFunc("checkptrAlignment")
ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc")
ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack")
ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn")
@@ -366,6 +367,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
if fn.Pragma&ir.CgoUnsafeArgs != 0 {
s.cgoUnsafeArgs = true
}
+ s.checkPtrEnabled = ir.ShouldCheckPtr(fn, 1)
fe := ssafn{
curfn: fn,
@@ -709,6 +711,31 @@ func (s *state) newObject(typ *types.Type) *ssa.Value {
return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, s.reflectType(typ))[0]
}
+func (s *state) checkPtrAlignment(n *ir.ConvExpr, v *ssa.Value, count *ssa.Value) {
+ if !n.Type().IsPtr() {
+ s.Fatalf("expected pointer type: %v", n.Type())
+ }
+ elem := n.Type().Elem()
+ if count != nil {
+ if !elem.IsArray() {
+ s.Fatalf("expected array type: %v", elem)
+ }
+ elem = elem.Elem()
+ }
+ size := elem.Size()
+	// Casting from a larger type to a smaller one is OK, so for the smallest (1-byte-aligned) element types there is nothing to check.
+ if elem.Alignment() == 1 && (size == 0 || size == 1 || count == nil) {
+ return
+ }
+ if count == nil {
+ count = s.constInt(types.Types[types.TUINTPTR], 1)
+ }
+ if count.Type.Size() != s.config.PtrSize {
+		s.Fatalf("expected count to fit in a uintptr-sized integer, have: %d, want: %d", count.Type.Size(), s.config.PtrSize)
+ }
+ s.rtcall(ir.Syms.CheckPtrAlignment, true, nil, v, s.reflectType(elem), count)
+}
+
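
What the inserted call catches, as a runnable sketch (checkptr is enabled with -gcflags=all=-d=checkptr and, on most platforms, implicitly by -race):

	package main

	import "unsafe"

	func main() {
		buf := make([]byte, 16)
		// &buf[1] is not 8-byte aligned, so this conversion makes the
		// instrumented runtime.checkptrAlignment call throw.
		p := (*int64)(unsafe.Pointer(&buf[1]))
		_ = *p
	}
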
// reflectType returns an SSA value representing a pointer to typ's
// reflection type descriptor.
func (s *state) reflectType(typ *types.Type) *ssa.Value {
@@ -861,10 +888,11 @@ type state struct {
// Used to deduplicate panic calls.
panics map[funcLine]*ssa.Block
- cgoUnsafeArgs bool
- hasdefer bool // whether the function contains a defer statement
- softFloat bool
- hasOpenDefers bool // whether we are doing open-coded defers
+ cgoUnsafeArgs bool
+ hasdefer bool // whether the function contains a defer statement
+ softFloat bool
+ hasOpenDefers bool // whether we are doing open-coded defers
+ checkPtrEnabled bool // whether to insert checkptr instrumentation
// If doing open-coded defers, list of info about the defer calls in
// scanning order. Hence, at exit we should run these defers in reverse
@@ -1668,9 +1696,11 @@ func (s *state) stmt(n ir.Node) {
case ir.OTAILCALL:
n := n.(*ir.TailCallStmt)
- b := s.exit()
- b.Kind = ssa.BlockRetJmp // override BlockRet
- b.Aux = callTargetLSym(n.Target)
+ s.callResult(n.Call, callTail)
+ call := s.mem()
+ b := s.endBlock()
+		b.Kind = ssa.BlockRetJmp // BlockExit would also work; BlockRetJmp is used for clarity.
+ b.SetControl(call)
case ir.OCONTINUE, ir.OBREAK:
n := n.(*ir.BranchStmt)
@@ -2323,8 +2353,181 @@ func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
return x
}
+func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
+ // Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
+ return s.newValue1(ssa.OpCopy, tt, v)
+ }
+ if ft.IsInteger() && tt.IsInteger() {
+ var op ssa.Op
+ if tt.Size() == ft.Size() {
+ op = ssa.OpCopy
+ } else if tt.Size() < ft.Size() {
+ // truncation
+ switch 10*ft.Size() + tt.Size() {
+ case 21:
+ op = ssa.OpTrunc16to8
+ case 41:
+ op = ssa.OpTrunc32to8
+ case 42:
+ op = ssa.OpTrunc32to16
+ case 81:
+ op = ssa.OpTrunc64to8
+ case 82:
+ op = ssa.OpTrunc64to16
+ case 84:
+ op = ssa.OpTrunc64to32
+ default:
+ s.Fatalf("weird integer truncation %v -> %v", ft, tt)
+ }
+ } else if ft.IsSigned() {
+ // sign extension
+ switch 10*ft.Size() + tt.Size() {
+ case 12:
+ op = ssa.OpSignExt8to16
+ case 14:
+ op = ssa.OpSignExt8to32
+ case 18:
+ op = ssa.OpSignExt8to64
+ case 24:
+ op = ssa.OpSignExt16to32
+ case 28:
+ op = ssa.OpSignExt16to64
+ case 48:
+ op = ssa.OpSignExt32to64
+ default:
+ s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
+ }
+ } else {
+ // zero extension
+ switch 10*ft.Size() + tt.Size() {
+ case 12:
+ op = ssa.OpZeroExt8to16
+ case 14:
+ op = ssa.OpZeroExt8to32
+ case 18:
+ op = ssa.OpZeroExt8to64
+ case 24:
+ op = ssa.OpZeroExt16to32
+ case 28:
+ op = ssa.OpZeroExt16to64
+ case 48:
+ op = ssa.OpZeroExt32to64
+ default:
+				s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
+ }
+ }
+ return s.newValue1(op, tt, v)
+ }
+
+ if ft.IsFloat() || tt.IsFloat() {
+ conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
+ if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
+ if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
+ conv = conv1
+ }
+ }
+ if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
+ if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
+ conv = conv1
+ }
+ }
+
+ if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
+ if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
+ // tt is float32 or float64, and ft is also unsigned
+ if tt.Size() == 4 {
+ return s.uint32Tofloat32(n, v, ft, tt)
+ }
+ if tt.Size() == 8 {
+ return s.uint32Tofloat64(n, v, ft, tt)
+ }
+ } else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
+ // ft is float32 or float64, and tt is unsigned integer
+ if ft.Size() == 4 {
+ return s.float32ToUint32(n, v, ft, tt)
+ }
+ if ft.Size() == 8 {
+ return s.float64ToUint32(n, v, ft, tt)
+ }
+ }
+ }
+
+ if !ok {
+ s.Fatalf("weird float conversion %v -> %v", ft, tt)
+ }
+ op1, op2, it := conv.op1, conv.op2, conv.intermediateType
+
+ if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
+ // normal case, not tripping over unsigned 64
+ if op1 == ssa.OpCopy {
+ if op2 == ssa.OpCopy {
+ return v
+ }
+ return s.newValueOrSfCall1(op2, tt, v)
+ }
+ if op2 == ssa.OpCopy {
+ return s.newValueOrSfCall1(op1, tt, v)
+ }
+ return s.newValueOrSfCall1(op2, tt, s.newValueOrSfCall1(op1, types.Types[it], v))
+ }
+ // Tricky 64-bit unsigned cases.
+ if ft.IsInteger() {
+ // tt is float32 or float64, and ft is also unsigned
+ if tt.Size() == 4 {
+ return s.uint64Tofloat32(n, v, ft, tt)
+ }
+ if tt.Size() == 8 {
+ return s.uint64Tofloat64(n, v, ft, tt)
+ }
+ s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
+ }
+ // ft is float32 or float64, and tt is unsigned integer
+ if ft.Size() == 4 {
+ return s.float32ToUint64(n, v, ft, tt)
+ }
+ if ft.Size() == 8 {
+ return s.float64ToUint64(n, v, ft, tt)
+ }
+ s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
+ return nil
+ }
+
+ if ft.IsComplex() && tt.IsComplex() {
+ var op ssa.Op
+ if ft.Size() == tt.Size() {
+ switch ft.Size() {
+ case 8:
+ op = ssa.OpRound32F
+ case 16:
+ op = ssa.OpRound64F
+ default:
+ s.Fatalf("weird complex conversion %v -> %v", ft, tt)
+ }
+ } else if ft.Size() == 8 && tt.Size() == 16 {
+ op = ssa.OpCvt32Fto64F
+ } else if ft.Size() == 16 && tt.Size() == 8 {
+ op = ssa.OpCvt64Fto32F
+ } else {
+ s.Fatalf("weird complex conversion %v -> %v", ft, tt)
+ }
+ ftp := types.FloatForComplex(ft)
+ ttp := types.FloatForComplex(tt)
+ return s.newValue2(ssa.OpComplexMake, tt,
+ s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, v)),
+ s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, v)))
+ }
+
+ s.Fatalf("unhandled OCONV %s -> %s", ft.Kind(), tt.Kind())
+ return nil
+}
+
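
The integer branch keys its switch on 10*fromSize + toSize; the cases correspond to ordinary Go conversions (sketch):

	package p

	func trunc(x int64) int8  { return int8(x) }   // key 81: Trunc64to8
	func sext(x int8) int64   { return int64(x) }  // key 18: SignExt8to64
	func zext(x uint8) uint64 { return uint64(x) } // key 18: ZeroExt8to64
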
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n ir.Node) *ssa.Value {
+ return s.exprCheckPtr(n, true)
+}
+
+func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
if ir.HasUniquePos(n) {
// ONAMEs and named OLITERALs have the line number
// of the decl, not the use. See issue 14742.
@@ -2472,6 +2675,9 @@ func (s *state) expr(n ir.Node) *ssa.Value {
// unsafe.Pointer <--> *T
if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
+ if s.checkPtrEnabled && checkPtrOK && to.IsPtr() && from.IsUnsafePtr() {
+ s.checkPtrAlignment(n, v, nil)
+ }
return v
}
@@ -2510,174 +2716,7 @@ func (s *state) expr(n ir.Node) *ssa.Value {
case ir.OCONV:
n := n.(*ir.ConvExpr)
x := s.expr(n.X)
- ft := n.X.Type() // from type
- tt := n.Type() // to type
- if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
- // Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
- return s.newValue1(ssa.OpCopy, n.Type(), x)
- }
- if ft.IsInteger() && tt.IsInteger() {
- var op ssa.Op
- if tt.Size() == ft.Size() {
- op = ssa.OpCopy
- } else if tt.Size() < ft.Size() {
- // truncation
- switch 10*ft.Size() + tt.Size() {
- case 21:
- op = ssa.OpTrunc16to8
- case 41:
- op = ssa.OpTrunc32to8
- case 42:
- op = ssa.OpTrunc32to16
- case 81:
- op = ssa.OpTrunc64to8
- case 82:
- op = ssa.OpTrunc64to16
- case 84:
- op = ssa.OpTrunc64to32
- default:
- s.Fatalf("weird integer truncation %v -> %v", ft, tt)
- }
- } else if ft.IsSigned() {
- // sign extension
- switch 10*ft.Size() + tt.Size() {
- case 12:
- op = ssa.OpSignExt8to16
- case 14:
- op = ssa.OpSignExt8to32
- case 18:
- op = ssa.OpSignExt8to64
- case 24:
- op = ssa.OpSignExt16to32
- case 28:
- op = ssa.OpSignExt16to64
- case 48:
- op = ssa.OpSignExt32to64
- default:
- s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
- }
- } else {
- // zero extension
- switch 10*ft.Size() + tt.Size() {
- case 12:
- op = ssa.OpZeroExt8to16
- case 14:
- op = ssa.OpZeroExt8to32
- case 18:
- op = ssa.OpZeroExt8to64
- case 24:
- op = ssa.OpZeroExt16to32
- case 28:
- op = ssa.OpZeroExt16to64
- case 48:
- op = ssa.OpZeroExt32to64
- default:
- s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
- }
- }
- return s.newValue1(op, n.Type(), x)
- }
-
- if ft.IsFloat() || tt.IsFloat() {
- conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
- if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
- if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
- conv = conv1
- }
- }
- if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
- if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
- conv = conv1
- }
- }
-
- if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
- if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
- // tt is float32 or float64, and ft is also unsigned
- if tt.Size() == 4 {
- return s.uint32Tofloat32(n, x, ft, tt)
- }
- if tt.Size() == 8 {
- return s.uint32Tofloat64(n, x, ft, tt)
- }
- } else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
- // ft is float32 or float64, and tt is unsigned integer
- if ft.Size() == 4 {
- return s.float32ToUint32(n, x, ft, tt)
- }
- if ft.Size() == 8 {
- return s.float64ToUint32(n, x, ft, tt)
- }
- }
- }
-
- if !ok {
- s.Fatalf("weird float conversion %v -> %v", ft, tt)
- }
- op1, op2, it := conv.op1, conv.op2, conv.intermediateType
-
- if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
- // normal case, not tripping over unsigned 64
- if op1 == ssa.OpCopy {
- if op2 == ssa.OpCopy {
- return x
- }
- return s.newValueOrSfCall1(op2, n.Type(), x)
- }
- if op2 == ssa.OpCopy {
- return s.newValueOrSfCall1(op1, n.Type(), x)
- }
- return s.newValueOrSfCall1(op2, n.Type(), s.newValueOrSfCall1(op1, types.Types[it], x))
- }
- // Tricky 64-bit unsigned cases.
- if ft.IsInteger() {
- // tt is float32 or float64, and ft is also unsigned
- if tt.Size() == 4 {
- return s.uint64Tofloat32(n, x, ft, tt)
- }
- if tt.Size() == 8 {
- return s.uint64Tofloat64(n, x, ft, tt)
- }
- s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
- }
- // ft is float32 or float64, and tt is unsigned integer
- if ft.Size() == 4 {
- return s.float32ToUint64(n, x, ft, tt)
- }
- if ft.Size() == 8 {
- return s.float64ToUint64(n, x, ft, tt)
- }
- s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
- return nil
- }
-
- if ft.IsComplex() && tt.IsComplex() {
- var op ssa.Op
- if ft.Size() == tt.Size() {
- switch ft.Size() {
- case 8:
- op = ssa.OpRound32F
- case 16:
- op = ssa.OpRound64F
- default:
- s.Fatalf("weird complex conversion %v -> %v", ft, tt)
- }
- } else if ft.Size() == 8 && tt.Size() == 16 {
- op = ssa.OpCvt32Fto64F
- } else if ft.Size() == 16 && tt.Size() == 8 {
- op = ssa.OpCvt64Fto32F
- } else {
- s.Fatalf("weird complex conversion %v -> %v", ft, tt)
- }
- ftp := types.FloatForComplex(ft)
- ttp := types.FloatForComplex(tt)
- return s.newValue2(ssa.OpComplexMake, tt,
- s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
- s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
- }
-
- s.Fatalf("unhandled OCONV %s -> %s", n.X.Type().Kind(), n.Type().Kind())
- return nil
+ return s.conv(n, x, n.X.Type(), n.Type())
case ir.ODOTTYPE:
n := n.(*ir.TypeAssertExpr)
@@ -3079,7 +3118,8 @@ func (s *state) expr(n ir.Node) *ssa.Value {
case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
n := n.(*ir.SliceExpr)
- v := s.expr(n.X)
+ check := s.checkPtrEnabled && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr()
+ v := s.exprCheckPtr(n.X, !check)
var i, j, k *ssa.Value
if n.Low != nil {
i = s.expr(n.Low)
@@ -3091,8 +3131,9 @@ func (s *state) expr(n ir.Node) *ssa.Value {
k = s.expr(n.Max)
}
p, l, c := s.slice(v, i, j, k, n.Bounded())
- if n.CheckPtrCall != nil {
- s.stmt(n.CheckPtrCall)
+ if check {
+			// Emit checkptr instrumentation after the bounds check to prevent a false positive; see issue #46938.
+ s.checkPtrAlignment(n.X.(*ir.ConvExpr), v, s.conv(n.Max, k, k.Type, types.Types[types.TUINTPTR]))
}
return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
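
The pattern this special case instruments (sketch; see issue #46938): converting an unsafe.Pointer to a large array pointer and immediately slicing it. The alignment check now takes the slice capacity as the element count and runs after the bounds check, so an in-range slice of a nominally huge array no longer trips a false positive.

	package p

	import "unsafe"

	// checkptr now validates n elements, not the full 1<<30 array bound.
	func bytes(p unsafe.Pointer, n int) []byte {
		return (*[1 << 30]byte)(p)[:n:n]
	}
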
@@ -3606,6 +3647,7 @@ const (
callDefer
callDeferStack
callGo
+ callTail
)
type sfRtCallDef struct {
@@ -4173,12 +4215,12 @@ func InitTables() {
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
},
- sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm)
+ sys.ARM64, sys.ARM, sys.PPC64, sys.RISCV64, sys.Wasm)
addF("math", "Copysign",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
},
- sys.PPC64, sys.Wasm)
+ sys.PPC64, sys.RISCV64, sys.Wasm)
addF("math", "FMA",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
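
With OpRISCV64FABSD and OpRISCV64FSGNJD wired up earlier in this change, these calls become single instructions on riscv64 (sketch):

	package p

	import "math"

	func abs(x float64) float64     { return math.Abs(x) }         // FABSD
	func sign(x, y float64) float64 { return math.Copysign(x, y) } // FSGNJD
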
@@ -4872,13 +4914,13 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
}
}
- if k != callNormal && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
+ if k != callNormal && k != callTail && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
s.Fatalf("go/defer call with arguments: %v", n)
}
switch n.Op() {
case ir.OCALLFUNC:
- if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
+ if (k == callNormal || k == callTail) && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
fn := fn.(*ir.Name)
callee = fn
if buildcfg.Experiment.RegabiArgs {
@@ -4932,7 +4974,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
stksize := params.ArgWidth() // includes receiver, args, and results
res := n.X.Type().Results()
- if k == callNormal {
+ if k == callNormal || k == callTail {
for _, p := range params.OutParams() {
ACResults = append(ACResults, p.Type)
}
@@ -4979,7 +5021,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// These are written in SP-offset order.
argStart := base.Ctxt.FixedFrameSize()
// Defer/go args.
- if k != callNormal {
+ if k != callNormal && k != callTail {
// Write closure (arg to newproc/deferproc).
ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) // not argExtra
callArgs = append(callArgs, closure)
@@ -5029,6 +5071,10 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
case callee != nil:
aux := ssa.StaticAuxCall(callTargetLSym(callee), params)
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ if k == callTail {
+ call.Op = ssa.OpTailLECall
+ stksize = 0 // Tail call does not use stack. We reuse caller's frame.
+ }
default:
s.Fatalf("bad call type %v %v", n.Op(), n)
}
@@ -5265,12 +5311,6 @@ func (s *state) canSSAName(name *ir.Name) bool {
return false
}
}
- if name.Class == ir.PPARAM && name.Sym() != nil && name.Sym().Name == ".this" {
- // wrappers generated by genwrapper need to update
- // the .this pointer in place.
- // TODO: treat as a PPARAMOUT?
- return false
- }
return true
// TODO: try to make more variables SSAable?
}
@@ -6663,7 +6703,8 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
var progToValue map[*obj.Prog]*ssa.Value
var progToBlock map[*obj.Prog]*ssa.Block
var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
- if f.PrintOrHtmlSSA {
+ gatherPrintInfo := f.PrintOrHtmlSSA || ssa.GenssaDump[f.Name]
+ if gatherPrintInfo {
progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
f.Logf("genssa %s\n", f.Name)
@@ -6774,7 +6815,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
valueToProgAfter[v.ID] = s.pp.Next
}
- if f.PrintOrHtmlSSA {
+ if gatherPrintInfo {
for ; x != s.pp.Next; x = x.Link {
progToValue[x] = v
}
@@ -6804,7 +6845,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
x := s.pp.Next
s.SetPos(b.Pos)
Arch.SSAGenBlock(&s, b, next)
- if f.PrintOrHtmlSSA {
+ if gatherPrintInfo {
for ; x != s.pp.Next; x = x.Link {
progToBlock[x] = b
}
@@ -6983,6 +7024,54 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
buf.WriteString("</code>")
f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
}
+ if ssa.GenssaDump[f.Name] {
+ fi := f.DumpFileForPhase("genssa")
+ if fi != nil {
+
+			// inliningDiffers reports whether two inline stacks differ: any
+			// filename differs, or any line number other than the innermost (index 0) differs.
+ inliningDiffers := func(a, b []src.Pos) bool {
+ if len(a) != len(b) {
+ return true
+ }
+ for i := range a {
+ if a[i].Filename() != b[i].Filename() {
+ return true
+ }
+ if i > 0 && a[i].Line() != b[i].Line() {
+ return true
+ }
+ }
+ return false
+ }
+
+ var allPosOld []src.Pos
+ var allPos []src.Pos
+
+ for p := pp.Text; p != nil; p = p.Link {
+ if p.Pos.IsKnown() {
+ allPos = p.AllPos(allPos)
+ if inliningDiffers(allPos, allPosOld) {
+ for i := len(allPos) - 1; i >= 0; i-- {
+ pos := allPos[i]
+ fmt.Fprintf(fi, "# %s:%d\n", pos.Filename(), pos.Line())
+ }
+ allPos, allPosOld = allPosOld, allPos // swap, not copy, so that they do not share slice storage.
+ }
+ }
+
+ var s string
+ if v, ok := progToValue[p]; ok {
+ s = v.String()
+ } else if b, ok := progToBlock[p]; ok {
+ s = b.String()
+ } else {
+ s = " " // most value and branch strings are 2-3 characters long
+ }
+ fmt.Fprintf(fi, " %-6s\t%.5d %s\t%s\n", s, p.Pc, ssa.StmtString(p.Pos), p.InstructionString())
+ }
+ fi.Close()
+ }
+ }
defframe(&s, e, f)
@@ -7360,6 +7449,14 @@ func (s *State) Call(v *ssa.Value) *obj.Prog {
return p
}
+// TailCall returns a new tail-call instruction for the SSA value v.
+// It is like Call, but the emitted instruction is a RET whose target is
+// the callee, so the callee returns directly to this function's caller.
+func (s *State) TailCall(v *ssa.Value) *obj.Prog {
+ p := s.Call(v)
+ p.As = obj.ARET
+ return p
+}
+
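
Each architecture's code generator can then share this helper when it sees its lowered tail-call op; presumably the per-arch SSAGenValue case looks something like this (op name illustrative):

	case ssa.OpAMD64CALLtail:
		s.TailCall(v)
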
// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
// It must be called immediately before emitting the actual CALL instruction,
// since it emits PCDATA for the stack map at the call (calls are safe points).
diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go
index e89796cb31..82cb06b180 100644
--- a/src/cmd/compile/internal/syntax/parser.go
+++ b/src/cmd/compile/internal/syntax/parser.go
@@ -276,7 +276,9 @@ func (p *parser) syntaxErrorAt(pos Pos, msg string) {
}
// tokstring returns the English word for selected punctuation tokens
-// for more readable error messages.
+// for more readable error messages. Use tokstring (not tok.String())
+// for user-facing (error) messages; use tok.String() for debugging
+// output.
func tokstring(tok token) string {
switch tok {
case _Comma:
@@ -1839,7 +1841,7 @@ func (p *parser) embeddedTerm() Expr {
}
// ParameterDecl = [ IdentifierList ] [ "..." ] Type .
-func (p *parser) paramDeclOrNil(name *Name) *Field {
+func (p *parser) paramDeclOrNil(name *Name, follow token) *Field {
if trace {
defer p.trace("paramDecl")()
}
@@ -1893,8 +1895,8 @@ func (p *parser) paramDeclOrNil(name *Name) *Field {
return f
}
- p.syntaxError("expecting )")
- p.advance(_Comma, _Rparen)
+ p.syntaxError("expecting " + tokstring(follow))
+ p.advance(_Comma, follow)
return nil
}
@@ -1908,9 +1910,10 @@ func (p *parser) paramList(name *Name, close token, requireNames bool) (list []*
defer p.trace("paramList")()
}
- var named int // number of parameters that have an explicit name and type/bound
- p.list(_Comma, close, func() bool {
- par := p.paramDeclOrNil(name)
+ var named int // number of parameters that have an explicit name and type
+ var typed int // number of parameters that have an explicit type
+ end := p.list(_Comma, close, func() bool {
+ par := p.paramDeclOrNil(name, close)
name = nil // 1st name was consumed if present
if par != nil {
if debug && par.Name == nil && par.Type == nil {
@@ -1919,6 +1922,9 @@ func (p *parser) paramList(name *Name, close token, requireNames bool) (list []*
if par.Name != nil && par.Type != nil {
named++
}
+ if par.Type != nil {
+ typed++
+ }
list = append(list, par)
}
return false
@@ -1939,10 +1945,11 @@ func (p *parser) paramList(name *Name, close token, requireNames bool) (list []*
}
} else if named != len(list) {
// some named => all must have names and types
- var pos Pos // left-most error position (or unknown)
- var typ Expr
+ var pos Pos // left-most error position (or unknown)
+ var typ Expr // current type (from right to left)
for i := len(list) - 1; i >= 0; i-- {
- if par := list[i]; par.Type != nil {
+ par := list[i]
+ if par.Type != nil {
typ = par.Type
if par.Name == nil {
pos = typ.Pos()
@@ -1961,7 +1968,12 @@ func (p *parser) paramList(name *Name, close token, requireNames bool) (list []*
if pos.IsKnown() {
var msg string
if requireNames {
- msg = "type parameters must be named"
+ if named == typed {
+ pos = end // position error at closing ]
+ msg = "missing type constraint"
+ } else {
+ msg = "type parameters must be named"
+ }
} else {
msg = "mixed named and unnamed parameters"
}
@@ -2201,7 +2213,7 @@ func (p *parser) header(keyword token) (init SimpleStmt, cond Expr, post SimpleS
if p.tok != _Semi {
// accept potential varDecl but complain
if p.got(_Var) {
- p.syntaxError(fmt.Sprintf("var declaration not allowed in %s initializer", keyword.String()))
+ p.syntaxError(fmt.Sprintf("var declaration not allowed in %s initializer", tokstring(keyword)))
}
init = p.simpleStmt(nil, keyword)
// If we have a range clause, we are done (can only happen for keyword == _For).
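
The resulting diagnostics, in the style of the syntax testdata below (illustrative):

	type S[A, B /* ERROR missing type constraint */ ] int
	type T[A, /* ERROR type parameters must be named */ interface{}] int
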
diff --git a/src/cmd/compile/internal/syntax/testdata/issue43527.go2 b/src/cmd/compile/internal/syntax/testdata/issue43527.go2
new file mode 100644
index 0000000000..dd2c9b1272
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue43527.go2
@@ -0,0 +1,23 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ // 0 and 1-element []-lists are syntactically valid
+ _[A, B /* ERROR missing type constraint */ ] int
+ _[A, /* ERROR type parameters must be named */ interface{}] int
+ _[A, B, C /* ERROR missing type constraint */ ] int
+ _[A B, C /* ERROR missing type constraint */ ] int
+ _[A B, /* ERROR type parameters must be named */ interface{}] int
+ _[A B, /* ERROR type parameters must be named */ interface{}, C D] int
+ _[A B, /* ERROR type parameters must be named */ interface{}, C, D] int
+ _[A B, /* ERROR type parameters must be named */ interface{}, C, interface{}] int
+ _[A B, C interface{}, D, /* ERROR type parameters must be named */ interface{}] int
+)
+
+// function type parameters use the same parsing routine - just have a couple of tests
+
+func _[A, B /* ERROR missing type constraint */ ]() {}
+func _[A, /* ERROR type parameters must be named */ interface{}]() {}
diff --git a/src/cmd/compile/internal/syntax/testdata/tparams.go2 b/src/cmd/compile/internal/syntax/testdata/tparams.go2
index 42031c3277..80e155bfe0 100644
--- a/src/cmd/compile/internal/syntax/testdata/tparams.go2
+++ b/src/cmd/compile/internal/syntax/testdata/tparams.go2
@@ -4,8 +4,8 @@
package p
-type t[ /* ERROR type parameters must be named */ a, b] struct{}
-type t[a t, b t, /* ERROR type parameters must be named */ c] struct{}
+type t[a, b /* ERROR missing type constraint */ ] struct{}
+type t[a t, b t, c /* ERROR missing type constraint */ ] struct{}
type t struct {
t [n]byte
t[a]
@@ -18,5 +18,7 @@ type t interface {
}
func f[ /* ERROR empty type parameter list */ ]()
-func f[ /* ERROR type parameters must be named */ a, b]()
-func f[a t, b t, /* ERROR type parameters must be named */ c]()
+func f[a, b /* ERROR missing type constraint */ ]()
+func f[a t, b t, c /* ERROR missing type constraint */ ]()
+
+func f[a b, /* ERROR expecting ] */ 0] ()
diff --git a/src/cmd/compile/internal/typecheck/crawler.go b/src/cmd/compile/internal/typecheck/crawler.go
index 9e523c3d14..3f212aa805 100644
--- a/src/cmd/compile/internal/typecheck/crawler.go
+++ b/src/cmd/compile/internal/typecheck/crawler.go
@@ -44,12 +44,13 @@ func (p *crawler) markObject(n *ir.Name) {
p.markType(n.Type())
}
-// markType recursively visits types reachable from t to identify
-// functions whose inline bodies may be needed.
+// markType recursively visits types reachable from t to identify functions whose
+// inline bodies may be needed. For instantiated generic types, it visits the base
+// generic type, which has the relevant methods.
func (p *crawler) markType(t *types.Type) {
- if t.IsInstantiatedGeneric() {
- // Re-instantiated types don't add anything new, so don't follow them.
- return
+ if t.OrigSym() != nil {
+ // Convert to the base generic type.
+ t = t.OrigSym().Def.Type()
}
if p.marked[t] {
return
@@ -92,6 +93,9 @@ func (p *crawler) markType(t *types.Type) {
p.markType(t.Elem())
case types.TSTRUCT:
+ if t.IsFuncArgStruct() {
+ break
+ }
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
p.markType(f.Type)
@@ -129,9 +133,9 @@ func (p *crawler) markEmbed(t *types.Type) {
t = t.Elem()
}
- if t.IsInstantiatedGeneric() {
- // Re-instantiated types don't add anything new, so don't follow them.
- return
+ if t.OrigSym() != nil {
+ // Convert to the base generic type.
+ t = t.OrigSym().Def.Type()
}
if p.embedded[t] {
@@ -185,6 +189,15 @@ func (p *crawler) markInlBody(n *ir.Name) {
var doFlood func(n ir.Node)
doFlood = func(n ir.Node) {
+ t := n.Type()
+ if t != nil && (t.HasTParam() || t.IsFullyInstantiated()) {
+ // Ensure that we call markType() on any base generic type
+ // that is written to the export file (even if not explicitly
+ // marked for export), so we will call markInlBody on its
+ // methods, and the methods will be available for
+ // instantiation if needed.
+ p.markType(t)
+ }
switch n.Op() {
case ir.OMETHEXPR, ir.ODOTMETH:
p.markInlBody(ir.MethodExprName(n))
@@ -198,9 +211,6 @@ func (p *crawler) markInlBody(n *ir.Name) {
case ir.PEXTERN:
Export(n)
}
- p.checkGenericType(n.Type())
- case ir.OTYPE:
- p.checkGenericType(n.Type())
case ir.OMETHVALUE:
// Okay, because we don't yet inline indirect
// calls to method values.
@@ -216,16 +226,3 @@ func (p *crawler) markInlBody(n *ir.Name) {
// because after inlining they might be callable.
ir.VisitList(fn.Inl.Body, doFlood)
}
-
-// checkGenerictype ensures that we call markType() on any base generic type that
-// is written to the export file (even if not explicitly marked
-// for export), so its methods will be available for inlining if needed.
-func (p *crawler) checkGenericType(t *types.Type) {
- if t != nil && t.HasTParam() {
- if t.OrigSym() != nil {
- // Convert to the base generic type.
- t = t.OrigSym().Def.Type()
- }
- p.markType(t)
- }
-}
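
The shape of code this handles (a hedged sketch): reaching an instantiated type must mark the base generic type, whose method bodies an importing package may need for its own instantiations.

	package p

	type List[T any] struct {
		next *List[T]
		val  T
	}

	func (l *List[T]) Push(v T) { l.next = &List[T]{val: v, next: l.next} }

	// Marking List[int] marks the base List[T], so Push's inline body
	// is exported and available for later instantiation.
	var Exported List[int]
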
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
index f001017a86..489306e1e6 100644
--- a/src/cmd/compile/internal/typecheck/iexport.go
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -63,8 +63,9 @@
// }
//
// type Func struct {
-// Tag byte // 'F'
+// Tag byte // 'F' or 'G'
// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'G'
// Signature Signature
// }
//
@@ -75,8 +76,9 @@
// }
//
// type Type struct {
-// Tag byte // 'T'
+// Tag byte // 'T' or 'U'
// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'U'
// Underlying typeOff
//
// Methods []struct{ // omitted if Underlying is an interface type
@@ -93,6 +95,12 @@
// Type typeOff
// }
//
+// // "Automatic" declaration of each typeparam
+// type TypeParam struct {
+// Tag byte // 'P'
+// Pos Pos
+// Bound typeOff
+// }
//
// typeOff means a uvarint that either indicates a predeclared type,
// or an offset into the Data section. If the uvarint is less than
@@ -104,7 +112,7 @@
// (*exportWriter).value for details.
//
//
-// There are nine kinds of type descriptors, distinguished by an itag:
+// There are twelve kinds of type descriptors, distinguished by an itag:
//
// type DefinedType struct {
// Tag itag // definedType
@@ -172,8 +180,30 @@
// }
// }
//
+// // Reference to a type param declaration
+// type TypeParamType struct {
+// Tag itag // typeParamType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// // Instantiation of a generic type (like List[T2] or List[int])
+// type InstanceType struct {
+// Tag itag // instanceType
+// Pos pos
+// TypeArgs []typeOff
+// BaseType typeOff
+// }
+//
+// type UnionType struct {
+//	Tag   itag // unionType
+// Terms []struct {
+// tilde bool
+// Type typeOff
+// }
+// }
+//
//
-// TODO(danscales): fill in doc for 'type TypeParamType' and 'type InstType'
//
// type Signature struct {
// Params []Param
@@ -255,7 +285,7 @@ const (
structType
interfaceType
typeParamType
- instType
+ instanceType // Instantiation of a generic type
unionType
)
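
A constraint that exercises the new UnionType descriptor (sketch): its terms encode as {tilde: true, int} and {tilde: false, float64}.

	package p

	type Number interface {
		~int | float64
	}
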
@@ -893,7 +923,7 @@ func (w *exportWriter) doTyp(t *types.Type) {
if strings.Index(s.Name, "[") < 0 {
base.Fatalf("incorrect name for instantiated type")
}
- w.startType(instType)
+ w.startType(instanceType)
w.pos(t.Pos())
// Export the type arguments for the instantiated type. The
// instantiated type could be in a method header (e.g. "func (v
@@ -1456,10 +1486,23 @@ func (w *exportWriter) node(n ir.Node) {
}
}
-// Caution: stmt will emit more than one node for statement nodes n that have a non-empty
-// n.Ninit and where n cannot have a natural init section (such as in "if", "for", etc.).
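+// isNonEmptyAssign reports whether n is a non-empty assignment: an OAS
+// with a right-hand side, or any of the OAS2 forms.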
+func isNonEmptyAssign(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OAS:
+ if n.(*ir.AssignStmt).Y != nil {
+ return true
+ }
+ case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ return true
+ }
+ return false
+}
+
+// Caution: stmt will emit more than one node for a statement node n that has a
+// non-empty init list, unless n is a non-empty assignment, a range statement, or a
+// node with a natural init section (such as "if", "for", etc.).
func (w *exportWriter) stmt(n ir.Node) {
- if len(n.Init()) > 0 && !ir.StmtWithInit(n.Op()) {
+ if len(n.Init()) > 0 && !ir.StmtWithInit(n.Op()) && !isNonEmptyAssign(n) && n.Op() != ir.ORANGE {
// can't use stmtList here since we don't want the final OEND
for _, n := range n.Init() {
w.stmt(n)
@@ -1495,8 +1538,10 @@ func (w *exportWriter) stmt(n ir.Node) {
if n.Y != nil {
w.op(ir.OAS)
w.pos(n.Pos())
+ w.stmtList(n.Init())
w.expr(n.X)
w.expr(n.Y)
+ w.bool(n.Def)
}
case ir.OASOP:
@@ -1517,8 +1562,10 @@ func (w *exportWriter) stmt(n ir.Node) {
w.op(ir.OAS2)
}
w.pos(n.Pos())
+ w.stmtList(n.Init())
w.exprList(n.Lhs)
w.exprList(n.Rhs)
+ w.bool(n.Def)
case ir.ORETURN:
n := n.(*ir.ReturnStmt)
@@ -1556,6 +1603,7 @@ func (w *exportWriter) stmt(n ir.Node) {
n := n.(*ir.RangeStmt)
w.op(ir.ORANGE)
w.pos(n.Pos())
+ w.stmtList(n.Init())
w.exprsOrNil(n.Key, n.Value)
w.expr(n.X)
w.stmtList(n.Body)
@@ -2065,8 +2113,10 @@ func (w *exportWriter) expr(n ir.Node) {
n := n.(*ir.AssignListStmt)
w.op(ir.OSELRECV2)
w.pos(n.Pos())
+ w.stmtList(n.Init())
w.exprList(n.Lhs)
w.exprList(n.Rhs)
+ w.bool(n.Def)
default:
base.Fatalf("cannot export %v (%d) node\n"+
diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go
index 8bc098c2bd..ec4057a8d0 100644
--- a/src/cmd/compile/internal/typecheck/iimport.go
+++ b/src/cmd/compile/internal/typecheck/iimport.go
@@ -316,16 +316,12 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
return n
case 'T', 'U':
- var rparams []*types.Type
- if tag == 'U' {
- rparams = r.typeList()
- }
-
// Types can be recursive. We need to setup a stub
// declaration before recursing.
n := importtype(pos, sym)
t := n.Type()
if tag == 'U' {
+ rparams := r.typeList()
t.SetRParams(rparams)
}
@@ -825,7 +821,7 @@ func (r *importReader) typ1() *types.Type {
}
return n.Type()
- case instType:
+ case instanceType:
if r.p.exportVersion < iexportVersionGenerics {
base.Fatalf("unexpected instantiation type")
}
@@ -1170,10 +1166,26 @@ func (r *importReader) stmtList() []ir.Node {
if n.Op() == ir.OBLOCK {
n := n.(*ir.BlockStmt)
list = append(list, n.List...)
- } else {
- list = append(list, n)
+ continue
}
-
+ if len(list) > 0 {
+ // check for an optional label that can only immediately
+ // precede a for/range/select/switch statement.
+ if last := list[len(list)-1]; last.Op() == ir.OLABEL {
+ label := last.(*ir.LabelStmt).Label
+ switch n.Op() {
+ case ir.OFOR:
+ n.(*ir.ForStmt).Label = label
+ case ir.ORANGE:
+ n.(*ir.RangeStmt).Label = label
+ case ir.OSELECT:
+ n.(*ir.SelectStmt).Label = label
+ case ir.OSWITCH:
+ n.(*ir.SwitchStmt).Label = label
+ }
+ }
+ }
+ list = append(list, n)
}
return list
}
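
Why the label must be reattached (sketch): a labeled break or continue in an imported inline body resolves through the Label field of the loop or switch statement itself.

	package p

	func sum(m map[string]int) int {
		s := 0
	outer: // must become the RangeStmt's Label after import
		for _, v := range m {
			if v < 0 {
				break outer
			}
			s += v
		}
		return s
	}
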
@@ -1503,7 +1515,7 @@ func (r *importReader) node() ir.Node {
if go117ExportTypes {
n.SetOp(op)
}
- *n.PtrInit() = init
+ n.SetInit(init)
n.IsDDD = r.bool()
if go117ExportTypes {
n.SetType(r.exoticType())
@@ -1607,7 +1619,12 @@ func (r *importReader) node() ir.Node {
// unreachable - never exported
case ir.OAS:
- return ir.NewAssignStmt(r.pos(), r.expr(), r.expr())
+ pos := r.pos()
+ init := r.stmtList()
+ n := ir.NewAssignStmt(pos, r.expr(), r.expr())
+ n.SetInit(init)
+ n.Def = r.bool()
+ return n
case ir.OASOP:
n := ir.NewAssignOpStmt(r.pos(), r.op(), r.expr(), nil)
@@ -1624,7 +1641,12 @@ func (r *importReader) node() ir.Node {
// unreachable - mapped to case OAS2 by exporter
goto error
}
- return ir.NewAssignListStmt(r.pos(), op, r.exprList(), r.exprList())
+ pos := r.pos()
+ init := r.stmtList()
+ n := ir.NewAssignListStmt(pos, op, r.exprList(), r.exprList())
+ n.SetInit(init)
+ n.Def = r.bool()
+ return n
case ir.ORETURN:
return ir.NewReturnStmt(r.pos(), r.exprList())
@@ -1638,26 +1660,28 @@ func (r *importReader) node() ir.Node {
case ir.OIF:
pos, init := r.pos(), r.stmtList()
n := ir.NewIfStmt(pos, r.expr(), r.stmtList(), r.stmtList())
- *n.PtrInit() = init
+ n.SetInit(init)
return n
case ir.OFOR:
pos, init := r.pos(), r.stmtList()
cond, post := r.exprsOrNil()
n := ir.NewForStmt(pos, nil, cond, post, r.stmtList())
- *n.PtrInit() = init
+ n.SetInit(init)
return n
case ir.ORANGE:
- pos := r.pos()
+ pos, init := r.pos(), r.stmtList()
k, v := r.exprsOrNil()
- return ir.NewRangeStmt(pos, k, v, r.expr(), r.stmtList())
+ n := ir.NewRangeStmt(pos, k, v, r.expr(), r.stmtList())
+ n.SetInit(init)
+ return n
case ir.OSELECT:
pos := r.pos()
init := r.stmtList()
n := ir.NewSelectStmt(pos, r.commList())
- *n.PtrInit() = init
+ n.SetInit(init)
return n
case ir.OSWITCH:
@@ -1665,7 +1689,7 @@ func (r *importReader) node() ir.Node {
init := r.stmtList()
x, _ := r.exprsOrNil()
n := ir.NewSwitchStmt(pos, x, r.caseList(x))
- *n.PtrInit() = init
+ n.SetInit(init)
return n
// case OCASE:
@@ -1709,7 +1733,12 @@ func (r *importReader) node() ir.Node {
return n
case ir.OSELRECV2:
- return ir.NewAssignListStmt(r.pos(), ir.OSELRECV2, r.exprList(), r.exprList())
+ pos := r.pos()
+ init := r.stmtList()
+ n := ir.NewAssignListStmt(pos, ir.OSELRECV2, r.exprList(), r.exprList())
+ n.SetInit(init)
+ n.Def = r.bool()
+ return n
default:
base.Fatalf("cannot import %v (%d) node\n"+
diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go
index c322d490e5..9a02c1752c 100644
--- a/src/cmd/compile/internal/typecheck/stmt.go
+++ b/src/cmd/compile/internal/typecheck/stmt.go
@@ -395,10 +395,11 @@ func tcSelect(sel *ir.SelectStmt) {
n := Stmt(ncase.Comm)
ncase.Comm = n
oselrecv2 := func(dst, recv ir.Node, def bool) {
- n := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, []ir.Node{dst, ir.BlankNode}, []ir.Node{recv})
- n.Def = def
- n.SetTypecheck(1)
- ncase.Comm = n
+ selrecv := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, []ir.Node{dst, ir.BlankNode}, []ir.Node{recv})
+ selrecv.Def = def
+ selrecv.SetTypecheck(1)
+ selrecv.SetInit(n.Init())
+ ncase.Comm = selrecv
}
switch n.Op() {
default:
diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go
index 34f20879f1..d4af4e172e 100644
--- a/src/cmd/compile/internal/typecheck/subr.go
+++ b/src/cmd/compile/internal/typecheck/subr.go
@@ -352,7 +352,10 @@ func Assignop(src, dst *types.Type) (ir.Op, string) {
if types.Identical(src, dst) {
return ir.OCONVNOP, ""
}
+ return Assignop1(src, dst)
+}
+func Assignop1(src, dst *types.Type) (ir.Op, string) {
// 2. src and dst have identical underlying types and
// a. either src or dst is not a named type, or
// b. both are empty interface types, or
@@ -1019,10 +1022,11 @@ type Tsubster struct {
SubstForwFunc func(*types.Type) *types.Type
}
-// Typ computes the type obtained by substituting any type parameter in t with the
-// corresponding type argument in subst. If t contains no type parameters, the
-// result is t; otherwise the result is a new type. It deals with recursive types
-// by using TFORW types and finding partially or fully created types via sym.Def.
+// Typ computes the type obtained by substituting any type parameter or shape in t
+// that appears in subst.Tparams with the corresponding type argument in subst.Targs.
+// If t contains no type parameters, the result is t; otherwise the result is a new
+// type. It deals with recursive types by using TFORW types and finding partially or
+// fully created types via sym.Def.
func (ts *Tsubster) Typ(t *types.Type) *types.Type {
// Defer the CheckSize calls until we have fully-defined
// (possibly-recursive) top-level type.
@@ -1033,14 +1037,14 @@ func (ts *Tsubster) Typ(t *types.Type) *types.Type {
}
func (ts *Tsubster) typ1(t *types.Type) *types.Type {
- if !t.HasTParam() && t.Kind() != types.TFUNC {
+ if !t.HasTParam() && !t.HasShape() && t.Kind() != types.TFUNC {
// Note: function types need to be copied regardless, as the
// types of closures may contain declarations that need
// to be copied. See #45738.
return t
}
- if t.IsTypeParam() {
+ if t.IsTypeParam() || t.IsShape() {
for i, tp := range ts.Tparams {
if tp == t {
return ts.Targs[i]
@@ -1072,14 +1076,14 @@ func (ts *Tsubster) typ1(t *types.Type) *types.Type {
var targsChanged bool
var forw *types.Type
- if t.Sym() != nil && t.HasTParam() {
+ if t.Sym() != nil && (t.HasTParam() || t.HasShape()) {
// Need to test for t.HasTParam() again because of special TFUNC case above.
// Translate the type params for this type according to
// the tparam/targs mapping from subst.
neededTargs = make([]*types.Type, len(t.RParams()))
for i, rparam := range t.RParams() {
neededTargs[i] = ts.typ1(rparam)
- if !types.Identical(neededTargs[i], rparam) {
+ if !types.IdenticalStrict(neededTargs[i], rparam) {
targsChanged = true
}
}
@@ -1286,7 +1290,7 @@ func (ts *Tsubster) typ1(t *types.Type) *types.Type {
// fields, set force to true.
func (ts *Tsubster) tstruct(t *types.Type, force bool) *types.Type {
if t.NumFields() == 0 {
- if t.HasTParam() {
+ if t.HasTParam() || t.HasShape() {
// For an empty struct, we need to return a new type,
// since it may now be fully instantiated (HasTParam
// becomes false).
@@ -1312,6 +1316,7 @@ func (ts *Tsubster) tstruct(t *types.Type, force bool) *types.Type {
// the type param, not the instantiated type).
newfields[i] = types.NewField(f.Pos, f.Sym, t2)
newfields[i].Embedded = f.Embedded
+ newfields[i].Note = f.Note
if f.IsDDD() {
newfields[i].SetIsDDD(true)
}
@@ -1387,19 +1392,20 @@ func genericTypeName(sym *types.Sym) string {
return sym.Name[0:strings.Index(sym.Name, "[")]
}
-// Shapify takes a concrete type and returns a GCshape type that can
+// Shapify takes a concrete type and a type param index, and returns a GCshape type that can
// be used in place of the input type and still generate identical code.
// No methods are added - all methods calls directly on a shape should
// be done by converting to an interface using the dictionary.
//
-// TODO: this could take the generic function and base its decisions
-// on how that generic function uses this type argument. For instance,
-// if it doesn't use it as a function argument/return value, then
-// we don't need to distinguish int64 and float64 (because they only
-// differ in how they get passed as arguments). For now, we only
-// unify two different types if they are identical in every possible way.
-func Shapify(t *types.Type) *types.Type {
- assert(!t.HasShape())
+// For now, we only consider two types to have the same shape if they have exactly
+// the same underlying type or they are both pointer types.
+//
+// Shape types are also distinguished by the index of the type in a type param/arg
+// list. We need to do this so we can distinguish and substitute properly for two
+// type params in the same function that have the same shape for a particular
+// instantiation.
+func Shapify(t *types.Type, index int) *types.Type {
+ assert(!t.IsShape())
// Map all types with the same underlying type to the same shape.
u := t.Underlying()
@@ -1409,22 +1415,35 @@ func Shapify(t *types.Type) *types.Type {
u = types.Types[types.TUINT8].PtrTo()
}
- if s := shaped[u]; s != nil {
+ if shapeMap == nil {
+ shapeMap = map[int]map[*types.Type]*types.Type{}
+ }
+ submap := shapeMap[index]
+ if submap == nil {
+ submap = map[*types.Type]*types.Type{}
+ shapeMap[index] = submap
+ }
+ if s := submap[u]; s != nil {
return s
}
- sym := shapePkg.Lookup(u.LinkString())
+ nm := fmt.Sprintf("%s_%d", u.LinkString(), index)
+ sym := types.ShapePkg.Lookup(nm)
+ if sym.Def != nil {
+ // Use any existing type with the same name
+ submap[u] = sym.Def.Type()
+ return submap[u]
+ }
name := ir.NewDeclNameAt(u.Pos(), ir.OTYPE, sym)
s := types.NewNamed(name)
+ sym.Def = name
s.SetUnderlying(u)
s.SetIsShape(true)
s.SetHasShape(true)
name.SetType(s)
name.SetTypecheck(1)
- shaped[u] = s
+ submap[u] = s
return s
}
-var shaped = map[*types.Type]*types.Type{}
-
-var shapePkg = types.NewPkg(".shape", ".shape")
+var shapeMap map[int]map[*types.Type]*types.Type
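For illustration (standalone stand-ins, not compiler code): the patch replaces the flat shaped map with a two-level cache keyed first by type-param index and then by underlying type, so the same underlying type produces distinct shapes at different indices while repeated lookups stay cached. A sketch of that keying scheme:

    package main

    import "fmt"

    // typeKey stands in for *types.Type (the underlying type); the cache
    // layout mirrors the patch: index -> underlying -> shape.
    type typeKey string

    var shapeMap map[int]map[typeKey]string

    func shapify(u typeKey, index int) string {
        if shapeMap == nil {
            shapeMap = map[int]map[typeKey]string{}
        }
        submap := shapeMap[index]
        if submap == nil {
            submap = map[typeKey]string{}
            shapeMap[index] = submap
        }
        if s, ok := submap[u]; ok {
            return s
        }
        s := fmt.Sprintf(".shape.%s_%d", u, index)
        submap[u] = s
        return s
    }

    func main() {
        fmt.Println(shapify("int64", 0)) // .shape.int64_0
        fmt.Println(shapify("int64", 1)) // .shape.int64_1: same underlying type,
                                         // different index -> distinct shape
        fmt.Println(shapify("int64", 0)) // cached: identical to the first call
    }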
diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go
index 404af5b1b2..42970f6a5e 100644
--- a/src/cmd/compile/internal/typecheck/typecheck.go
+++ b/src/cmd/compile/internal/typecheck/typecheck.go
@@ -879,6 +879,7 @@ func typecheck1(n ir.Node, top int) ir.Node {
case ir.OTAILCALL:
n := n.(*ir.TailCallStmt)
+ n.Call = typecheck(n.Call, ctxStmt|ctxExpr).(*ir.CallExpr)
return n
case ir.OCHECKNIL:
diff --git a/src/cmd/compile/internal/types/identity.go b/src/cmd/compile/internal/types/identity.go
index 2e9e2f4fd8..dce7d29143 100644
--- a/src/cmd/compile/internal/types/identity.go
+++ b/src/cmd/compile/internal/types/identity.go
@@ -4,19 +4,30 @@
package types
+const (
+ identIgnoreTags = 1 << iota
+ identStrict
+)
+
// Identical reports whether t1 and t2 are identical types, following the spec rules.
// Receiver parameter types are ignored. Named (defined) types are only equal if they
// are pointer-equal - i.e. there must be a unique types.Type for each specific named
// type. Also, a type containing a shape type is considered identical to another type
// (shape or not) if their underlying types are the same, or they are both pointers.
func Identical(t1, t2 *Type) bool {
- return identical(t1, t2, true, nil)
+ return identical(t1, t2, 0, nil)
}
// IdenticalIgnoreTags is like Identical, but it ignores struct tags
// for struct identity.
func IdenticalIgnoreTags(t1, t2 *Type) bool {
- return identical(t1, t2, false, nil)
+ return identical(t1, t2, identIgnoreTags, nil)
+}
+
+// IdenticalStrict is like Identical, but matches types exactly, without the
+// exception for shapes.
+func IdenticalStrict(t1, t2 *Type) bool {
+ return identical(t1, t2, identStrict, nil)
}
type typePair struct {
@@ -24,7 +35,7 @@ type typePair struct {
t2 *Type
}
-func identical(t1, t2 *Type, cmpTags bool, assumedEqual map[typePair]struct{}) bool {
+func identical(t1, t2 *Type, flags int, assumedEqual map[typePair]struct{}) bool {
if t1 == t2 {
return true
}
@@ -32,7 +43,7 @@ func identical(t1, t2 *Type, cmpTags bool, assumedEqual map[typePair]struct{}) b
return false
}
if t1.sym != nil || t2.sym != nil {
- if t1.HasShape() || t2.HasShape() {
+ if flags&identStrict == 0 && (t1.HasShape() || t2.HasShape()) {
switch t1.kind {
case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64, TBOOL, TSTRING, TPTR, TUNSAFEPTR:
return true
@@ -78,7 +89,7 @@ cont:
}
for i, f1 := range t1.AllMethods().Slice() {
f2 := t2.AllMethods().Index(i)
- if f1.Sym != f2.Sym || !identical(f1.Type, f2.Type, cmpTags, assumedEqual) {
+ if f1.Sym != f2.Sym || !identical(f1.Type, f2.Type, flags, assumedEqual) {
return false
}
}
@@ -90,10 +101,10 @@ cont:
}
for i, f1 := range t1.FieldSlice() {
f2 := t2.Field(i)
- if f1.Sym != f2.Sym || f1.Embedded != f2.Embedded || !identical(f1.Type, f2.Type, cmpTags, assumedEqual) {
+ if f1.Sym != f2.Sym || f1.Embedded != f2.Embedded || !identical(f1.Type, f2.Type, flags, assumedEqual) {
return false
}
- if cmpTags && f1.Note != f2.Note {
+ if (flags&identIgnoreTags) == 0 && f1.Note != f2.Note {
return false
}
}
@@ -111,7 +122,7 @@ cont:
}
for i, f1 := range fs1 {
f2 := fs2[i]
- if f1.IsDDD() != f2.IsDDD() || !identical(f1.Type, f2.Type, cmpTags, assumedEqual) {
+ if f1.IsDDD() != f2.IsDDD() || !identical(f1.Type, f2.Type, flags, assumedEqual) {
return false
}
}
@@ -129,10 +140,10 @@ cont:
}
case TMAP:
- if !identical(t1.Key(), t2.Key(), cmpTags, assumedEqual) {
+ if !identical(t1.Key(), t2.Key(), flags, assumedEqual) {
return false
}
}
- return identical(t1.Elem(), t2.Elem(), cmpTags, assumedEqual)
+ return identical(t1.Elem(), t2.Elem(), flags, assumedEqual)
}
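For illustration (hypothetical stand-ins): the refactor above folds the old cmpTags boolean and the new strict mode into one flags word that threads unchanged through the recursion. A standalone sketch of the tag-comparison bit:

    package main

    import "fmt"

    const (
        identIgnoreTags = 1 << iota
        identStrict
    )

    // field is a stand-in for a struct field: a type name plus a tag (Note).
    type field struct {
        typ  string
        note string
    }

    // identicalFields mirrors the flag plumbing above: tags are compared
    // unless identIgnoreTags is set; identStrict is reserved for disabling
    // the shape-type exception and is not needed here.
    func identicalFields(a, b []field, flags int) bool {
        if len(a) != len(b) {
            return false
        }
        for i := range a {
            if a[i].typ != b[i].typ {
                return false
            }
            if flags&identIgnoreTags == 0 && a[i].note != b[i].note {
                return false
            }
        }
        return true
    }

    func main() {
        x := []field{{"int", `json:"a"`}}
        y := []field{{"int", `json:"b"`}}
        fmt.Println(identicalFields(x, y, 0))               // false: tags differ
        fmt.Println(identicalFields(x, y, identIgnoreTags)) // true: tags ignored
    }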
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index eb70f7b9b4..392c54ba79 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -1706,6 +1706,10 @@ func NewNamed(obj TypeObject) *Type {
t := newType(TFORW)
t.sym = obj.Sym()
t.nod = obj
+ if t.sym.Pkg == ShapePkg {
+ t.SetIsShape(true)
+ t.SetHasShape(true)
+ }
return t
}
@@ -2182,3 +2186,5 @@ var (
)
var SimType [NTYPE]Kind
+
+var ShapePkg = NewPkg(".shape", ".shape")
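For illustration (standalone stand-ins): the NewNamed hook above encodes an invariant, namely that any named type whose symbol lives in the .shape package carries the shape flags from creation, so no caller has to remember to set them:

    package main

    import "fmt"

    type pkg struct{ path string }

    var shapePkg = &pkg{".shape"}

    type namedType struct {
        pkg      *pkg
        isShape  bool
        hasShape bool
    }

    // newNamed mirrors the invariant added above: membership in the shape
    // package implies the shape flags.
    func newNamed(p *pkg) *namedType {
        t := &namedType{pkg: p}
        if t.pkg == shapePkg {
            t.isShape = true
            t.hasShape = true
        }
        return t
    }

    func main() {
        fmt.Println(newNamed(shapePkg).isShape)     // true
        fmt.Println(newNamed(&pkg{"main"}).isShape) // false
    }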
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
index b2938b84da..6914e6c89f 100644
--- a/src/cmd/compile/internal/types2/api.go
+++ b/src/cmd/compile/internal/types2/api.go
@@ -108,6 +108,11 @@ type ImporterFrom interface {
// A Config specifies the configuration for type checking.
// The zero value for Config is a ready-to-use default configuration.
type Config struct {
+ // Environment is the environment used for resolving global
+ // identifiers. If nil, the type checker will initialize this
+ // field with a newly created environment.
+ Environment *Environment
+
// GoVersion describes the accepted Go language version. The string
// must follow the format "go%d.%d" (e.g. "go1.12") or it must be
// empty; an empty string indicates the latest language version.
diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go
index 039a6c0e5e..cd5a61332a 100644
--- a/src/cmd/compile/internal/types2/api_test.go
+++ b/src/cmd/compile/internal/types2/api_test.go
@@ -145,6 +145,7 @@ func TestValuesInfo(t *testing.T) {
{`package f7b; var _ = -1e-2000i`, `-1e-2000i`, `complex128`, `(0 + 0i)`},
{`package g0; const (a = len([iota]int{}); b; c); const _ = c`, `c`, `int`, `2`}, // issue #22341
+ {`package g1; var(j int32; s int; n = 1.0<<s == j)`, `1.0`, `int32`, `1`}, // issue #48422
}
for _, test := range tests {
@@ -1645,6 +1646,48 @@ func TestIdentical_issue15173(t *testing.T) {
}
}
+func TestIdenticalUnions(t *testing.T) {
+ tname := NewTypeName(nopos, nil, "myInt", nil)
+ myInt := NewNamed(tname, Typ[Int], nil)
+ tmap := map[string]*Term{
+ "int": NewTerm(false, Typ[Int]),
+ "~int": NewTerm(true, Typ[Int]),
+ "string": NewTerm(false, Typ[String]),
+ "~string": NewTerm(true, Typ[String]),
+ "myInt": NewTerm(false, myInt),
+ }
+ makeUnion := func(s string) *Union {
+ parts := strings.Split(s, "|")
+ var terms []*Term
+ for _, p := range parts {
+ term := tmap[p]
+ if term == nil {
+ t.Fatalf("missing term %q", p)
+ }
+ terms = append(terms, term)
+ }
+ return NewUnion(terms)
+ }
+ for _, test := range []struct {
+ x, y string
+ want bool
+ }{
+ // These tests are just sanity checks. The tests for type sets and
+ // interfaces provide much more test coverage.
+ {"int|~int", "~int", true},
+ {"myInt|~int", "~int", true},
+ {"int|string", "string|int", true},
+ {"int|int|string", "string|int", true},
+ {"myInt|string", "int|string", false},
+ } {
+ x := makeUnion(test.x)
+ y := makeUnion(test.y)
+ if got := Identical(x, y); got != test.want {
+ t.Errorf("Identical(%v, %v) = %t", test.x, test.y, got)
+ }
+ }
+}
+
func TestIssue15305(t *testing.T) {
const src = "package p; func f() int16; var _ = f(undef)"
f, err := parseSrc("issue15305.go", src)
@@ -1871,7 +1914,7 @@ func TestInstantiate(t *testing.T) {
// type T should have one type parameter
T := pkg.Scope().Lookup("T").Type().(*Named)
- if n := T.TParams().Len(); n != 1 {
+ if n := T.TypeParams().Len(); n != 1 {
t.Fatalf("expected 1 type parameter; found %d", n)
}
diff --git a/src/cmd/compile/internal/types2/assignments.go b/src/cmd/compile/internal/types2/assignments.go
index 6184fc2ea5..29d63cf819 100644
--- a/src/cmd/compile/internal/types2/assignments.go
+++ b/src/cmd/compile/internal/types2/assignments.go
@@ -68,7 +68,7 @@ func (check *Checker) assignment(x *operand, T Type, context string) {
// x.typ is typed
// A generic (non-instantiated) function value cannot be assigned to a variable.
- if sig := asSignature(x.typ); sig != nil && sig.TParams().Len() > 0 {
+ if sig := asSignature(x.typ); sig != nil && sig.TypeParams().Len() > 0 {
check.errorf(x, "cannot use generic function %s without instantiation in %s", x, context)
}
diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go
index e3844d5163..3b8d85859a 100644
--- a/src/cmd/compile/internal/types2/builtins.go
+++ b/src/cmd/compile/internal/types2/builtins.go
@@ -826,7 +826,7 @@ func (check *Checker) applyTypeFunc(f func(Type) Type, x Type) Type {
// type param is placed in the current package so export/import
// works as expected.
tpar := NewTypeName(nopos, check.pkg, "<type parameter>", nil)
- ptyp := check.NewTypeParam(tpar, NewInterfaceType(nil, []Type{NewUnion(terms)})) // assigns type to tpar as a side-effect
+ ptyp := check.newTypeParam(tpar, NewInterfaceType(nil, []Type{NewUnion(terms)})) // assigns type to tpar as a side-effect
ptyp.index = tp.index
return ptyp
diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go
index 5bf17876c1..ba3bb475a3 100644
--- a/src/cmd/compile/internal/types2/call.go
+++ b/src/cmd/compile/internal/types2/call.go
@@ -30,7 +30,7 @@ func (check *Checker) funcInst(x *operand, inst *syntax.IndexExpr) {
// check number of type arguments (got) vs number of type parameters (want)
sig := x.typ.(*Signature)
- got, want := len(targs), sig.TParams().Len()
+ got, want := len(targs), sig.TypeParams().Len()
if !useConstraintTypeInference && got != want || got > want {
check.errorf(xlist[got-1], "got %d type arguments but want %d", got, want)
x.mode = invalid
@@ -41,7 +41,7 @@ func (check *Checker) funcInst(x *operand, inst *syntax.IndexExpr) {
// if we don't have enough type arguments, try type inference
inferred := false
if got < want {
- targs = check.infer(inst.Pos(), sig.TParams().list(), targs, nil, nil, true)
+ targs = check.infer(inst.Pos(), sig.TypeParams().list(), targs, nil, nil, true)
if targs == nil {
// error was already reported
x.mode = invalid
@@ -61,7 +61,7 @@ func (check *Checker) funcInst(x *operand, inst *syntax.IndexExpr) {
// instantiate function signature
res := check.instantiate(x.Pos(), sig, targs, poslist).(*Signature)
- assert(res.TParams().Len() == 0) // signature is not generic anymore
+ assert(res.TypeParams().Len() == 0) // signature is not generic anymore
if inferred {
check.recordInferred(inst, targs, res)
}
@@ -166,7 +166,7 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
assert(len(targs) == len(xlist))
// check number of type arguments (got) vs number of type parameters (want)
- got, want := len(targs), sig.TParams().Len()
+ got, want := len(targs), sig.TypeParams().Len()
if got > want {
check.errorf(xlist[want], "got %d type arguments but want %d", got, want)
check.use(call.ArgList...)
@@ -200,7 +200,7 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
// if type inference failed, a parametrized result must be invalidated
// (operands cannot have a parametrized type)
- if x.mode == value && sig.TParams().Len() > 0 && isParameterized(sig.TParams().list(), x.typ) {
+ if x.mode == value && sig.TypeParams().Len() > 0 && isParameterized(sig.TypeParams().list(), x.typ) {
x.mode = invalid
}
@@ -328,7 +328,7 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T
}
// infer type arguments and instantiate signature if necessary
- if sig.TParams().Len() > 0 {
+ if sig.TypeParams().Len() > 0 {
if !check.allowVersion(check.pkg, 1, 18) {
if iexpr, _ := call.Fun.(*syntax.IndexExpr); iexpr != nil {
check.softErrorf(iexpr.Pos(), "function instantiation requires go1.18 or later")
@@ -338,21 +338,21 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T
}
// TODO(gri) provide position information for targs so we can feed
// it to the instantiate call for better error reporting
- targs := check.infer(call.Pos(), sig.TParams().list(), targs, sigParams, args, true)
+ targs := check.infer(call.Pos(), sig.TypeParams().list(), targs, sigParams, args, true)
if targs == nil {
return // error already reported
}
// compute result signature
rsig = check.instantiate(call.Pos(), sig, targs, nil).(*Signature)
- assert(rsig.TParams().Len() == 0) // signature is not generic anymore
+ assert(rsig.TypeParams().Len() == 0) // signature is not generic anymore
check.recordInferred(call, targs, rsig)
// Optimization: Only if the parameter list was adjusted do we
// need to compute it from the adjusted list; otherwise we can
// simply use the result signature's parameter list.
if adjusted {
- sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(sig.TParams().list(), targs), nil).(*Tuple)
+ sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(sig.TypeParams().list(), targs), nil).(*Tuple)
} else {
sigParams = rsig.params
}
@@ -535,7 +535,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) {
// the signature accordingly.
// TODO(gri) factor this code out
sig := m.typ.(*Signature)
- if sig.RParams().Len() > 0 {
+ if sig.RecvTypeParams().Len() > 0 {
// For inference to work, we must use the receiver type
// matching the receiver in the actual method declaration.
// If the method is embedded, the matching receiver is the
@@ -564,7 +564,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) {
// the receiver type arguments here, the receiver must be otherwise invalid
// and an error has been reported elsewhere.
arg := operand{mode: variable, expr: x.expr, typ: recv}
- targs := check.infer(m.pos, sig.RParams().list(), nil, NewTuple(sig.recv), []*operand{&arg}, false /* no error reporting */)
+ targs := check.infer(m.pos, sig.RecvTypeParams().list(), nil, NewTuple(sig.recv), []*operand{&arg}, false /* no error reporting */)
//check.dump("### inferred targs = %s", targs)
if targs == nil {
// We may reach here if there were other errors (see issue #40056).
@@ -574,7 +574,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) {
// (If we modify m, some tests will fail; possibly because the m is in use.)
// TODO(gri) investigate and provide a correct explanation here
copy := *m
- copy.typ = check.subst(e.Pos(), m.typ, makeSubstMap(sig.RParams().list(), targs), nil)
+ copy.typ = check.subst(e.Pos(), m.typ, makeSubstMap(sig.RecvTypeParams().list(), targs), nil)
obj = &copy
}
// TODO(gri) we also need to do substitution for parameterized interface methods
diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go
index 4226b4de82..24a05e6b37 100644
--- a/src/cmd/compile/internal/types2/check.go
+++ b/src/cmd/compile/internal/types2/check.go
@@ -86,7 +86,6 @@ type Checker struct {
nextID uint64 // unique Id for type parameters (first valid Id is 1)
objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
- typMap map[string]*Named // maps an instantiated named type hash to a *Named type
// pkgPathMap maps package names to the set of distinct import paths we've
// seen for that name, anywhere in the import graph. It is used for
@@ -171,6 +170,11 @@ func NewChecker(conf *Config, pkg *Package, info *Info) *Checker {
conf = new(Config)
}
+ // make sure we have an environment
+ if conf.Environment == nil {
+ conf.Environment = NewEnvironment()
+ }
+
// make sure we have an info struct
if info == nil {
info = new(Info)
@@ -188,7 +192,6 @@ func NewChecker(conf *Config, pkg *Package, info *Info) *Checker {
version: version,
objMap: make(map[Object]*declInfo),
impMap: make(map[importKey]*Package),
- typMap: make(map[string]*Named),
}
}
diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go
index cd97080824..26e050511e 100644
--- a/src/cmd/compile/internal/types2/decl.go
+++ b/src/cmd/compile/internal/types2/decl.go
@@ -317,7 +317,7 @@ func (check *Checker) validType(typ Type, path []Object) typeInfo {
}
case *Named:
- t.expand(check.typMap)
+ t.resolve(check.conf.Environment)
// don't touch the type if it is from a different package or the Universe scope
// (doing so would lead to a race condition - was issue #35049)
@@ -592,13 +592,13 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named
named.underlying = under(named)
// If the RHS is a type parameter, it must be from this type declaration.
- if tpar, _ := named.underlying.(*TypeParam); tpar != nil && tparamIndex(named.TParams().list(), tpar) < 0 {
+ if tpar, _ := named.underlying.(*TypeParam); tpar != nil && tparamIndex(named.TypeParams().list(), tpar) < 0 {
check.errorf(tdecl.Type, "cannot use function type parameter %s as RHS in type declaration", tpar)
named.underlying = Typ[Invalid]
}
}
-func (check *Checker) collectTypeParams(dst **TParamList, list []*syntax.Field) {
+func (check *Checker) collectTypeParams(dst **TypeParamList, list []*syntax.Field) {
tparams := make([]*TypeParam, len(list))
// Declare type parameters up-front.
@@ -648,7 +648,7 @@ func (check *Checker) declareTypeParam(name *syntax.Name) *TypeParam {
// constraints to make sure we don't rely on them if they
// are not properly set yet.
tname := NewTypeName(name.Pos(), check.pkg, name.Value, nil)
- tpar := check.NewTypeParam(tname, Typ[Invalid]) // assigns type to tname as a side-effect
+ tpar := check.newTypeParam(tname, Typ[Invalid]) // assigns type to tname as a side-effect
check.declare(check.scope, name, tname, check.scope.pos) // TODO(gri) check scope position
return tpar
}
@@ -715,7 +715,7 @@ func (check *Checker) collectMethods(obj *TypeName) {
}
if base != nil {
- base.load() // TODO(mdempsky): Probably unnecessary.
+ base.resolve(nil) // TODO(mdempsky): Probably unnecessary.
base.methods = append(base.methods, m)
}
}
diff --git a/src/cmd/compile/internal/types2/environment.go b/src/cmd/compile/internal/types2/environment.go
new file mode 100644
index 0000000000..fe9a3099fe
--- /dev/null
+++ b/src/cmd/compile/internal/types2/environment.go
@@ -0,0 +1,81 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package types2
+
+import (
+ "bytes"
+ "strings"
+ "sync"
+)
+
+// An Environment is an opaque type checking environment. It may be used to
+// share identical type instances across type-checked packages or calls to
+// Instantiate.
+//
+// It is safe for concurrent use.
+type Environment struct {
+ mu sync.Mutex
+ typeMap map[string]*Named // type hash -> instance
+ nextID int // next unique ID
+ seen map[*Named]int // assigned unique IDs
+}
+
+// NewEnvironment creates a new Environment.
+func NewEnvironment() *Environment {
+ return &Environment{
+ typeMap: make(map[string]*Named),
+ seen: make(map[*Named]int),
+ }
+}
+
+// TypeHash returns a string representation of typ, which can be used as an exact
+// type hash: types that are identical produce identical string representations.
+// If typ is a *Named type and targs is not empty, typ is printed as if it were
+// instantiated with targs. The result is guaranteed to not contain blanks (" ").
+func (env *Environment) TypeHash(typ Type, targs []Type) string {
+ assert(env != nil)
+ assert(typ != nil)
+ var buf bytes.Buffer
+
+ h := newTypeHasher(&buf, env)
+ if named, _ := typ.(*Named); named != nil && len(targs) > 0 {
+ // Don't use WriteType because we need to use the provided targs
+ // and not any targs that might already be with the *Named type.
+ h.typePrefix(named)
+ h.typeName(named.obj)
+ h.typeList(targs)
+ } else {
+ assert(targs == nil)
+ h.typ(typ)
+ }
+
+ return strings.Replace(buf.String(), " ", "#", -1) // ReplaceAll is not available in Go1.4
+}
+
+// typeForHash returns the recorded type for the type hash h, if it exists.
+// If no type exists for h and n is non-nil, n is recorded for h.
+func (env *Environment) typeForHash(h string, n *Named) *Named {
+ env.mu.Lock()
+ defer env.mu.Unlock()
+ if existing := env.typeMap[h]; existing != nil {
+ return existing
+ }
+ if n != nil {
+ env.typeMap[h] = n
+ }
+ return n
+}
+
+// idForType returns a unique ID for the pointer n.
+func (env *Environment) idForType(n *Named) int {
+ env.mu.Lock()
+ defer env.mu.Unlock()
+ id, ok := env.seen[n]
+ if !ok {
+ id = env.nextID
+ env.seen[n] = id
+ env.nextID++
+ }
+ return id
+}
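For illustration (standalone stand-ins, not the types2 API): the contract of typeForHash is first-writer-wins, so concurrent instantiations that compute the same hash all converge on one canonical *Named. A minimal sketch:

    package main

    import (
        "fmt"
        "sync"
    )

    type named struct{ name string }

    type env struct {
        mu      sync.Mutex
        typeMap map[string]*named
    }

    // typeForHash mirrors the contract above: return the existing instance
    // for hash h if there is one; otherwise record n (when non-nil) and
    // return it.
    func (e *env) typeForHash(h string, n *named) *named {
        e.mu.Lock()
        defer e.mu.Unlock()
        if existing := e.typeMap[h]; existing != nil {
            return existing
        }
        if n != nil {
            e.typeMap[h] = n
        }
        return n
    }

    func main() {
        e := &env{typeMap: map[string]*named{}}
        a := &named{"T[int] instance 1"}
        b := &named{"T[int] instance 2"}
        fmt.Println(e.typeForHash("T[int]", a) == a) // true: a is recorded
        fmt.Println(e.typeForHash("T[int]", b) == a) // true: a wins; b is dropped
    }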
diff --git a/src/cmd/compile/internal/types2/errors.go b/src/cmd/compile/internal/types2/errors.go
index a68273271b..ea43fab178 100644
--- a/src/cmd/compile/internal/types2/errors.go
+++ b/src/cmd/compile/internal/types2/errors.go
@@ -246,7 +246,7 @@ func stripAnnotations(s string) string {
var b bytes.Buffer
for _, r := range s {
// strip subscript digits
- if r != instanceMarker && !('â‚€' <= r && r < 'â‚€'+10) { // 'â‚€' == U+2080
+ if r < 'â‚€' || 'â‚€'+10 <= r { // 'â‚€' == U+2080
b.WriteRune(r)
}
}
diff --git a/src/cmd/compile/internal/types2/errors_test.go b/src/cmd/compile/internal/types2/errors_test.go
index e1f0e83fc9..72a2ce3655 100644
--- a/src/cmd/compile/internal/types2/errors_test.go
+++ b/src/cmd/compile/internal/types2/errors_test.go
@@ -35,7 +35,6 @@ func TestStripAnnotations(t *testing.T) {
{"foo", "foo"},
{"fooâ‚€", "foo"},
{"foo(Tâ‚€)", "foo(T)"},
- {"#foo(Tâ‚€)", "foo(T)"},
} {
got := stripAnnotations(test.in)
if got != test.want {
diff --git a/src/cmd/compile/internal/types2/index.go b/src/cmd/compile/internal/types2/index.go
index febfd21ea3..848a70dea8 100644
--- a/src/cmd/compile/internal/types2/index.go
+++ b/src/cmd/compile/internal/types2/index.go
@@ -34,7 +34,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
return false
case value:
- if sig := asSignature(x.typ); sig != nil && sig.TParams().Len() > 0 {
+ if sig := asSignature(x.typ); sig != nil && sig.TypeParams().Len() > 0 {
// function instantiation
return true
}
diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go
index bb7270b346..c2a8155dc7 100644
--- a/src/cmd/compile/internal/types2/infer.go
+++ b/src/cmd/compile/internal/types2/infer.go
@@ -562,7 +562,7 @@ func (w *cycleFinder) typ(typ Type) {
w.typ(t.elem)
case *Named:
- for _, tpar := range t.TArgs().list() {
+ for _, tpar := range t.TypeArgs().list() {
w.typ(tpar)
}
diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go
index c882699d1d..7a9279943c 100644
--- a/src/cmd/compile/internal/types2/instantiate.go
+++ b/src/cmd/compile/internal/types2/instantiate.go
@@ -13,21 +13,6 @@ import (
"fmt"
)
-// An Environment is an opaque type checking environment. It may be used to
-// share identical type instances across type checked packages or calls to
-// Instantiate.
-type Environment struct {
- // For now, Environment just hides a Checker.
- // Eventually, we strive to remove the need for a checker.
- check *Checker
-}
-
-// NewEnvironment returns a new Environment, initialized with the given
-// Checker, or nil.
-func NewEnvironment(check *Checker) *Environment {
- return &Environment{check}
-}
-
// Instantiate instantiates the type typ with the given type arguments targs.
// typ must be a *Named or a *Signature type, and its number of type parameters
// must match the number of provided type arguments. The result is a new,
@@ -46,22 +31,18 @@ func NewEnvironment(check *Checker) *Environment {
// TODO(rfindley): change this function to also return an error if lengths of
// tparams and targs do not match.
func Instantiate(env *Environment, typ Type, targs []Type, validate bool) (Type, error) {
- var check *Checker
- if env != nil {
- check = env.check
- }
- inst := check.instance(nopos, typ, targs)
+ inst := (*Checker)(nil).instance(nopos, typ, targs, env)
var err error
if validate {
var tparams []*TypeParam
switch t := typ.(type) {
case *Named:
- tparams = t.TParams().list()
+ tparams = t.TypeParams().list()
case *Signature:
- tparams = t.TParams().list()
+ tparams = t.TypeParams().list()
}
- if i, err := check.verify(nopos, tparams, targs); err != nil {
+ if i, err := (*Checker)(nil).verify(nopos, tparams, targs); err != nil {
return inst, ArgumentError{i, err}
}
}
@@ -90,7 +71,7 @@ func (check *Checker) instantiate(pos syntax.Pos, typ Type, targs []Type, posLis
}()
}
- inst := check.instance(pos, typ, targs)
+ inst := check.instance(pos, typ, targs, check.conf.Environment)
assert(len(posList) <= len(targs))
check.later(func() {
@@ -99,9 +80,9 @@ func (check *Checker) instantiate(pos syntax.Pos, typ Type, targs []Type, posLis
var tparams []*TypeParam
switch t := typ.(type) {
case *Named:
- tparams = t.TParams().list()
+ tparams = t.TypeParams().list()
case *Signature:
- tparams = t.TParams().list()
+ tparams = t.TypeParams().list()
}
// Avoid duplicate errors; instantiate will have complained if tparams
// and targs do not have the same length.
@@ -122,35 +103,40 @@ func (check *Checker) instantiate(pos syntax.Pos, typ Type, targs []Type, posLis
// instance creates a type or function instance using the given original type
// typ and arguments targs. For Named types the resulting instance will be
// unexpanded.
-func (check *Checker) instance(pos syntax.Pos, typ Type, targs []Type) Type {
+func (check *Checker) instance(pos syntax.Pos, typ Type, targs []Type, env *Environment) Type {
switch t := typ.(type) {
case *Named:
- h := typeHash(t, targs)
- if check != nil {
- // typ may already have been instantiated with identical type arguments.
- // In that case, re-use the existing instance.
- if named := check.typMap[h]; named != nil {
+ var h string
+ if env != nil {
+ h = env.TypeHash(t, targs)
+ // typ may already have been instantiated with identical type arguments. In
+ // that case, re-use the existing instance.
+ if named := env.typeForHash(h, nil); named != nil {
return named
}
}
tname := NewTypeName(pos, t.obj.pkg, t.obj.name, nil)
- named := check.newNamed(tname, t, nil, nil, nil) // methods and tparams are set when named is loaded
+ named := check.newNamed(tname, t, nil, nil, nil) // methods and tparams are set when named is resolved
named.targs = NewTypeList(targs)
- named.instPos = &pos
- if check != nil {
- check.typMap[h] = named
+ named.resolver = func(env *Environment, n *Named) (*TypeParamList, Type, []*Func) {
+ return expandNamed(env, n, pos)
+ }
+ if env != nil {
+ // It's possible that we've lost a race to add named to the environment.
+ // In this case, use whichever instance is recorded in the environment.
+ named = env.typeForHash(h, named)
}
return named
case *Signature:
- tparams := t.TParams()
+ tparams := t.TypeParams()
if !check.validateTArgLen(pos, tparams.Len(), len(targs)) {
return Typ[Invalid]
}
if tparams.Len() == 0 {
return typ // nothing to do (minor optimization)
}
- sig := check.subst(pos, typ, makeSubstMap(tparams.list(), targs), nil).(*Signature)
+ sig := check.subst(pos, typ, makeSubstMap(tparams.list(), targs), env).(*Signature)
// If the signature doesn't use its type parameters, subst
// will not make a copy. In that case, make a copy now (so
// we can set tparams to nil w/o causing side-effects).
diff --git a/src/cmd/compile/internal/types2/instantiate_test.go b/src/cmd/compile/internal/types2/instantiate_test.go
new file mode 100644
index 0000000000..69a26491cb
--- /dev/null
+++ b/src/cmd/compile/internal/types2/instantiate_test.go
@@ -0,0 +1,62 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package types2_test
+
+import (
+ . "cmd/compile/internal/types2"
+ "testing"
+)
+
+func TestInstantiateEquality(t *testing.T) {
+ const src = genericPkg + "p; type T[P any] int"
+ pkg, err := pkgFor(".", src, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ T := pkg.Scope().Lookup("T").Type().(*Named)
+ // Instantiating the same type twice should result in pointer-equivalent
+ // instances.
+ env := NewEnvironment()
+ res1, err := Instantiate(env, T, []Type{Typ[Int]}, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res2, err := Instantiate(env, T, []Type{Typ[Int]}, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res1 != res2 {
+ t.Errorf("first instance (%s) not pointer-equivalent to second instance (%s)", res1, res2)
+ }
+}
+func TestInstantiateNonEquality(t *testing.T) {
+ const src = genericPkg + "p; type T[P any] int"
+ pkg1, err := pkgFor(".", src, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pkg2, err := pkgFor(".", src, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // We consider T1 and T2 to be distinct types, so their instances should not
+ // be deduplicated by the environment.
+ T1 := pkg1.Scope().Lookup("T").Type().(*Named)
+ T2 := pkg2.Scope().Lookup("T").Type().(*Named)
+ env := NewEnvironment()
+ res1, err := Instantiate(env, T1, []Type{Typ[Int]}, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res2, err := Instantiate(env, T2, []Type{Typ[Int]}, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res1 == res2 {
+ t.Errorf("instance from pkg1 (%s) is pointer-equivalent to instance from pkg2 (%s)", res1, res2)
+ }
+ if Identical(res1, res2) {
+ t.Errorf("instance from pkg1 (%s) is identical to instance from pkg2 (%s)", res1, res2)
+ }
+}
diff --git a/src/cmd/compile/internal/types2/interface.go b/src/cmd/compile/internal/types2/interface.go
index e57158d2d5..340df51524 100644
--- a/src/cmd/compile/internal/types2/interface.go
+++ b/src/cmd/compile/internal/types2/interface.go
@@ -11,6 +11,7 @@ import "cmd/compile/internal/syntax"
// An Interface represents an interface type.
type Interface struct {
+ check *Checker // for error reporting; nil once type set is computed
obj *TypeName // corresponding declared object; or nil (for better error messages)
methods []*Func // ordered list of explicitly declared methods
embeddeds []Type // ordered list of explicitly embedded elements
@@ -21,7 +22,7 @@ type Interface struct {
}
// typeSet returns the type set for interface t.
-func (t *Interface) typeSet() *_TypeSet { return computeInterfaceTypeSet(nil, nopos, t) }
+func (t *Interface) typeSet() *_TypeSet { return computeInterfaceTypeSet(t.check, nopos, t) }
// emptyInterface represents the empty interface
var emptyInterface = Interface{complete: true, tset: &topTypeSet}
@@ -198,7 +199,7 @@ func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType
}
// All methods and embedded elements for this interface are collected;
- // i.e., this interface is may be used in a type set computation.
+ // i.e., this interface may be used in a type set computation.
ityp.complete = true
if len(ityp.methods) == 0 && len(ityp.embeddeds) == 0 {
@@ -214,7 +215,15 @@ func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType
// Compute type set with a non-nil *Checker as soon as possible
// to report any errors. Subsequent uses of type sets will use
// this computed type set and won't need to pass in a *Checker.
- check.later(func() { computeInterfaceTypeSet(check, iface.Pos(), ityp) })
+ //
+ // Pin the checker to the interface type in the interim, in case the type set
+ // must be used before delayed funcs are processed (see issue #48234).
+ // TODO(rfindley): clean up use of *Checker with computeInterfaceTypeSet
+ ityp.check = check
+ check.later(func() {
+ computeInterfaceTypeSet(check, iface.Pos(), ityp)
+ ityp.check = nil
+ })
}
func flattenUnion(list []syntax.Expr, x syntax.Expr) []syntax.Expr {
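For illustration (hypothetical names): the pin-then-clear pattern above keeps the checker reachable only until the delayed type-set computation runs, so an early caller (the issue #48234 case) still succeeds and the checker is not retained afterwards:

    package main

    import "fmt"

    type checker struct{ delayed []func() }

    type iface struct {
        check *checker // pinned until the delayed computation runs
        tset  string
    }

    // typeSet mirrors the accessor above: it may be called before the
    // delayed func runs, and then still has the pinned checker available.
    func (t *iface) typeSet() string {
        if t.tset == "" {
            t.tset = fmt.Sprintf("computed (checker pinned: %t)", t.check != nil)
        }
        return t.tset
    }

    func main() {
        c := &checker{}
        it := &iface{check: c}
        c.delayed = append(c.delayed, func() {
            it.typeSet()
            it.check = nil // unpin once the type set is computed
        })
        fmt.Println(it.typeSet())    // early use, before delayed funcs run
        for _, f := range c.delayed {
            f()
        }
        fmt.Println(it.check == nil) // true: checker no longer retained
    }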
diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go
index d0718e51e2..0e7a2b70e2 100644
--- a/src/cmd/compile/internal/types2/lookup.go
+++ b/src/cmd/compile/internal/types2/lookup.go
@@ -122,7 +122,7 @@ func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
seen[named] = true
// look for a matching attached method
- named.load()
+ named.resolve(nil)
if i, m := lookupMethod(named.methods, pkg, name); m != nil {
// potential match
// caution: method may not have a proper signature yet
@@ -321,10 +321,10 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// both methods must have the same number of type parameters
ftyp := f.typ.(*Signature)
mtyp := m.typ.(*Signature)
- if ftyp.TParams().Len() != mtyp.TParams().Len() {
+ if ftyp.TypeParams().Len() != mtyp.TypeParams().Len() {
return m, f
}
- if !acceptMethodTypeParams && ftyp.TParams().Len() > 0 {
+ if !acceptMethodTypeParams && ftyp.TypeParams().Len() > 0 {
panic("method with type parameters")
}
@@ -334,7 +334,7 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// TODO(gri) is this always correct? what about type bounds?
// (Alternative is to rename/subst type parameters and compare.)
u := newUnifier(true)
- u.x.init(ftyp.TParams().list())
+ u.x.init(ftyp.TypeParams().list())
if !u.unify(ftyp, mtyp) {
return m, f
}
@@ -373,10 +373,10 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// both methods must have the same number of type parameters
ftyp := f.typ.(*Signature)
mtyp := m.typ.(*Signature)
- if ftyp.TParams().Len() != mtyp.TParams().Len() {
+ if ftyp.TypeParams().Len() != mtyp.TypeParams().Len() {
return m, f
}
- if !acceptMethodTypeParams && ftyp.TParams().Len() > 0 {
+ if !acceptMethodTypeParams && ftyp.TypeParams().Len() > 0 {
panic("method with type parameters")
}
@@ -387,17 +387,17 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// In order to compare the signatures, substitute the receiver
// type parameters of ftyp with V's instantiation type arguments.
// This lazily instantiates the signature of method f.
- if Vn != nil && Vn.TParams().Len() > 0 {
+ if Vn != nil && Vn.TypeParams().Len() > 0 {
// Be careful: The number of type arguments may not match
// the number of receiver parameters. If so, an error was
// reported earlier but the length discrepancy is still
// here. Exit early in this case to prevent an assertion
// failure in makeSubstMap.
// TODO(gri) Can we avoid this check by fixing the lengths?
- if len(ftyp.RParams().list()) != Vn.targs.Len() {
+ if len(ftyp.RecvTypeParams().list()) != Vn.targs.Len() {
return
}
- ftyp = check.subst(nopos, ftyp, makeSubstMap(ftyp.RParams().list(), Vn.targs.list()), nil).(*Signature)
+ ftyp = check.subst(nopos, ftyp, makeSubstMap(ftyp.RecvTypeParams().list(), Vn.targs.list()), nil).(*Signature)
}
// If the methods have type parameters we don't care whether they
@@ -406,7 +406,7 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// TODO(gri) is this always correct? what about type bounds?
// (Alternative is to rename/subst type parameters and compare.)
u := newUnifier(true)
- if ftyp.TParams().Len() > 0 {
+ if ftyp.TypeParams().Len() > 0 {
// We reach here only if we accept method type parameters.
// In this case, unification must consider any receiver
// and method type parameters as "free" type parameters.
@@ -416,9 +416,9 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// unimplemented call so that we test this code if we
// enable method type parameters.
unimplemented()
- u.x.init(append(ftyp.RParams().list(), ftyp.TParams().list()...))
+ u.x.init(append(ftyp.RecvTypeParams().list(), ftyp.TypeParams().list()...))
} else {
- u.x.init(ftyp.RParams().list())
+ u.x.init(ftyp.RecvTypeParams().list())
}
if !u.unify(ftyp, mtyp) {
return m, f
diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go
index a76e69fcf1..c844012e39 100644
--- a/src/cmd/compile/internal/types2/named.go
+++ b/src/cmd/compile/internal/types2/named.go
@@ -12,18 +12,18 @@ import (
// A Named represents a named (defined) type.
type Named struct {
check *Checker
- info typeInfo // for cycle detection
- obj *TypeName // corresponding declared object for declared types; placeholder for instantiated types
- orig *Named // original, uninstantiated type
- fromRHS Type // type (on RHS of declaration) this *Named type is derived from (for cycle reporting)
- underlying Type // possibly a *Named during setup; never a *Named once set up completely
- instPos *syntax.Pos // position information for lazy instantiation, or nil
- tparams *TParamList // type parameters, or nil
- targs *TypeList // type arguments (after instantiation), or nil
- methods []*Func // methods declared for this type (not the method set of this type); signatures are type-checked lazily
-
- resolve func(*Named) ([]*TypeParam, Type, []*Func)
- once sync.Once
+ info typeInfo // for cycle detection
+ obj *TypeName // corresponding declared object for declared types; placeholder for instantiated types
+ orig *Named // original, uninstantiated type
+ fromRHS Type // type (on RHS of declaration) this *Named type is derived from (for cycle reporting)
+ underlying Type // possibly a *Named during setup; never a *Named once set up completely
+ tparams *TypeParamList // type parameters, or nil
+ targs *TypeList // type arguments (after instantiation), or nil
+ methods []*Func // methods declared for this type (not the method set of this type); signatures are type-checked lazily
+
+ // resolver may be provided to lazily resolve type parameters, underlying, and methods.
+ resolver func(*Environment, *Named) (tparams *TypeParamList, underlying Type, methods []*Func)
+ once sync.Once // ensures that tparams, underlying, and methods are resolved before accessing
}
// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
@@ -36,49 +36,28 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
return (*Checker)(nil).newNamed(obj, nil, underlying, nil, methods)
}
-func (t *Named) load() *Named {
- // If t is an instantiated type, it derives its methods and tparams from its
- // base type. Since we expect type parameters and methods to be set after a
- // call to load, we must load the base and copy here.
- //
- // underlying is set when t is expanded.
- //
- // By convention, a type instance is loaded iff its tparams are set.
- if t.targs.Len() > 0 && t.tparams == nil {
- t.orig.load()
- t.tparams = t.orig.tparams
- t.methods = t.orig.methods
- }
- if t.resolve == nil {
+func (t *Named) resolve(env *Environment) *Named {
+ if t.resolver == nil {
return t
}
t.once.Do(func() {
- // TODO(mdempsky): Since we're passing t to resolve anyway
+ // TODO(mdempsky): Since we're passing t to the resolver anyway
// (necessary because types2 expects the receiver type for methods
// on defined interface types to be the Named rather than the
// underlying Interface), maybe it should just handle calling
- // SetTParams, SetUnderlying, and AddMethod instead? Those
- // methods would need to support reentrant calls though. It would
+ // SetTypeParams, SetUnderlying, and AddMethod instead? Those
+ // methods would need to support reentrant calls though. It would
// also make the API more future-proof towards further extensions
- // (like SetTParams).
-
- tparams, underlying, methods := t.resolve(t)
-
- switch underlying.(type) {
- case nil, *Named:
- panic("invalid underlying type")
- }
-
- t.tparams = bindTParams(tparams)
- t.underlying = underlying
- t.methods = methods
+ // (like SetTypeParams).
+ t.tparams, t.underlying, t.methods = t.resolver(env, t)
+ t.fromRHS = t.underlying // for cycle detection
})
return t
}
// newNamed is like NewNamed but with a *Checker receiver and additional orig argument.
-func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tparams *TParamList, methods []*Func) *Named {
+func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tparams *TypeParamList, methods []*Func) *Named {
typ := &Named{check: check, obj: obj, orig: orig, fromRHS: underlying, underlying: underlying, tparams: tparams, methods: methods}
if typ.orig == nil {
typ.orig = typ
@@ -119,21 +98,21 @@ func (t *Named) Orig() *Named { return t.orig }
// TODO(gri) Come up with a better representation and API to distinguish
// between parameterized instantiated and non-instantiated types.
-// TParams returns the type parameters of the named type t, or nil.
+// TypeParams returns the type parameters of the named type t, or nil.
// The result is non-nil for an (originally) parameterized type even if it is instantiated.
-func (t *Named) TParams() *TParamList { return t.load().tparams }
+func (t *Named) TypeParams() *TypeParamList { return t.resolve(nil).tparams }
-// SetTParams sets the type parameters of the named type t.
-func (t *Named) SetTParams(tparams []*TypeParam) { t.load().tparams = bindTParams(tparams) }
+// SetTypeParams sets the type parameters of the named type t.
+func (t *Named) SetTypeParams(tparams []*TypeParam) { t.resolve(nil).tparams = bindTParams(tparams) }
-// TArgs returns the type arguments used to instantiate the named type t.
-func (t *Named) TArgs() *TypeList { return t.targs }
+// TypeArgs returns the type arguments used to instantiate the named type t.
+func (t *Named) TypeArgs() *TypeList { return t.targs }
// NumMethods returns the number of explicit methods whose receiver is named type t.
-func (t *Named) NumMethods() int { return len(t.load().methods) }
+func (t *Named) NumMethods() int { return len(t.resolve(nil).methods) }
// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
-func (t *Named) Method(i int) *Func { return t.load().methods[i] }
+func (t *Named) Method(i int) *Func { return t.resolve(nil).methods[i] }
// SetUnderlying sets the underlying type and marks t as complete.
func (t *Named) SetUnderlying(underlying Type) {
@@ -143,18 +122,18 @@ func (t *Named) SetUnderlying(underlying Type) {
if _, ok := underlying.(*Named); ok {
panic("underlying type must not be *Named")
}
- t.load().underlying = underlying
+ t.resolve(nil).underlying = underlying
}
// AddMethod adds method m unless it is already in the method list.
func (t *Named) AddMethod(m *Func) {
- t.load()
+ t.resolve(nil)
if i, _ := lookupMethod(t.methods, m.pkg, m.name); i < 0 {
t.methods = append(t.methods, m)
}
}
-func (t *Named) Underlying() Type { return t.load().expand(nil).underlying }
+func (t *Named) Underlying() Type { return t.resolve(nil).underlying }
func (t *Named) String() string { return TypeString(t, nil) }
// ----------------------------------------------------------------------------
@@ -240,37 +219,36 @@ func (n *Named) setUnderlying(typ Type) {
}
}
-// expand ensures that the underlying type of n is instantiated.
+// expandNamed ensures that the underlying type of n is instantiated.
// The underlying type will be Typ[Invalid] if there was an error.
-func (n *Named) expand(typMap map[string]*Named) *Named {
- if n.instPos != nil {
- // n must be loaded before instantiation, in order to have accurate
- // tparams. This is done implicitly by the call to n.TParams, but making it
- // explicit is harmless: load is idempotent.
- n.load()
- var u Type
- if n.check.validateTArgLen(*n.instPos, n.tparams.Len(), n.targs.Len()) {
- if typMap == nil {
- if n.check != nil {
- typMap = n.check.typMap
- } else {
- // If we're instantiating lazily, we might be outside the scope of a
- // type-checking pass. In that case we won't have a pre-existing
- // typMap, but don't want to create a duplicate of the current instance
- // in the process of expansion.
- h := typeHash(n.orig, n.targs.list())
- typMap = map[string]*Named{h: n}
- }
+func expandNamed(env *Environment, n *Named, instPos syntax.Pos) (*TypeParamList, Type, []*Func) {
+ n.orig.resolve(env)
+
+ var u Type
+ if n.check.validateTArgLen(instPos, n.orig.tparams.Len(), n.targs.Len()) {
+ // TODO(rfindley): handling an optional Checker and Environment here (and
+ // in subst) feels overly complicated. Can we simplify?
+ if env == nil {
+ if n.check != nil {
+ env = n.check.conf.Environment
+ } else {
+ // If we're instantiating lazily, we might be outside the scope of a
+ // type-checking pass. In that case we won't have a pre-existing
+ // environment, but don't want to create a duplicate of the current
+ // instance in the process of expansion.
+ env = NewEnvironment()
}
- u = n.check.subst(*n.instPos, n.orig.underlying, makeSubstMap(n.TParams().list(), n.targs.list()), typMap)
- } else {
- u = Typ[Invalid]
+ h := env.TypeHash(n.orig, n.targs.list())
+ // add the instance to the environment to avoid infinite recursion.
+ // typeForHash may return a different, existing instance, but we
+ // shouldn't return that instance from expandNamed.
+ env.typeForHash(h, n)
}
- n.underlying = u
- n.fromRHS = u
- n.instPos = nil
+ u = n.check.subst(instPos, n.orig.underlying, makeSubstMap(n.orig.tparams.list(), n.targs.list()), env)
+ } else {
+ u = Typ[Invalid]
}
- return n
+ return n.orig.tparams, u, n.orig.methods
}
// safeUnderlying returns the underlying of typ without expanding instances, to
@@ -279,7 +257,7 @@ func (n *Named) expand(typMap map[string]*Named) *Named {
// TODO(rfindley): eliminate this function or give it a better name.
func safeUnderlying(typ Type) Type {
if t, _ := typ.(*Named); t != nil {
- return t.load().underlying
+ return t.resolve(nil).underlying
}
return typ.Underlying()
}
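For illustration (standalone stand-ins): the resolver-plus-sync.Once scheme that named.go adopts guarantees the expensive resolution runs at most once, on first access, whichever accessor triggers it:

    package main

    import (
        "fmt"
        "sync"
    )

    type lazyType struct {
        resolver   func() string
        once       sync.Once
        underlying string
    }

    // resolve mirrors Named.resolve above: if a resolver is present it runs
    // at most once, and every accessor goes through resolve first.
    func (t *lazyType) resolve() *lazyType {
        if t.resolver == nil {
            return t
        }
        t.once.Do(func() { t.underlying = t.resolver() })
        return t
    }

    func main() {
        calls := 0
        t := &lazyType{resolver: func() string {
            calls++
            return "int"
        }}
        fmt.Println(t.resolve().underlying) // int
        fmt.Println(t.resolve().underlying) // int
        fmt.Println(calls)                  // 1: resolver ran exactly once
    }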
diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go
index a3f5f913aa..540cb3f44f 100644
--- a/src/cmd/compile/internal/types2/object.go
+++ b/src/cmd/compile/internal/types2/object.go
@@ -278,9 +278,21 @@ func NewTypeName(pos syntax.Pos, pkg *Package, name string, typ Type) *TypeName
// NewTypeNameLazy returns a new defined type like NewTypeName, but it
// lazily calls resolve to finish constructing the Named object.
-func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, resolve func(named *Named) (tparams []*TypeParam, underlying Type, methods []*Func)) *TypeName {
+func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, load func(named *Named) (tparams []*TypeParam, underlying Type, methods []*Func)) *TypeName {
obj := NewTypeName(pos, pkg, name, nil)
- NewNamed(obj, nil, nil).resolve = resolve
+
+ resolve := func(_ *Environment, t *Named) (*TypeParamList, Type, []*Func) {
+ tparams, underlying, methods := load(t)
+
+ switch underlying.(type) {
+ case nil, *Named:
+ panic(fmt.Sprintf("invalid underlying type %T", t.underlying))
+ }
+
+ return bindTParams(tparams), underlying, methods
+ }
+
+ NewNamed(obj, nil, nil).resolver = resolve
return obj
}
@@ -475,8 +487,8 @@ func writeObject(buf *bytes.Buffer, obj Object, qf Qualifier) {
if _, ok := typ.(*Basic); ok {
return
}
- if named, _ := typ.(*Named); named != nil && named.TParams().Len() > 0 {
- newTypeWriter(buf, qf).tParamList(named.TParams().list())
+ if named, _ := typ.(*Named); named != nil && named.TypeParams().Len() > 0 {
+ newTypeWriter(buf, qf).tParamList(named.TypeParams().list())
}
if tname.IsAlias() {
buf.WriteString(" =")
diff --git a/src/cmd/compile/internal/types2/predicates.go b/src/cmd/compile/internal/types2/predicates.go
index 3ccafef990..74ad3da72c 100644
--- a/src/cmd/compile/internal/types2/predicates.go
+++ b/src/cmd/compile/internal/types2/predicates.go
@@ -21,7 +21,7 @@ func isNamed(typ Type) bool {
func isGeneric(typ Type) bool {
// A parameterized type is only instantiated if it doesn't have an instantiation already.
named, _ := typ.(*Named)
- return named != nil && named.obj != nil && named.targs == nil && named.TParams() != nil
+ return named != nil && named.obj != nil && named.targs == nil && named.TypeParams() != nil
}
func is(typ Type, what BasicInfo) bool {
@@ -220,11 +220,18 @@ func identical(x, y Type, cmpTags bool, p *ifacePair) bool {
// parameter names.
if y, ok := y.(*Signature); ok {
return x.variadic == y.variadic &&
- identicalTParams(x.TParams().list(), y.TParams().list(), cmpTags, p) &&
+ identicalTParams(x.TypeParams().list(), y.TypeParams().list(), cmpTags, p) &&
identical(x.params, y.params, cmpTags, p) &&
identical(x.results, y.results, cmpTags, p)
}
+ case *Union:
+ if y, _ := y.(*Union); y != nil {
+ xset := computeUnionTypeSet(nil, nopos, x)
+ yset := computeUnionTypeSet(nil, nopos, y)
+ return xset.terms.equal(yset.terms)
+ }
+
case *Interface:
// Two interface types are identical if they describe the same type sets.
// With the existing implementation restriction, this simplifies to:
@@ -302,11 +309,8 @@ func identical(x, y Type, cmpTags bool, p *ifacePair) bool {
// Two named types are identical if their type names originate
// in the same type declaration.
if y, ok := y.(*Named); ok {
- x.expand(nil)
- y.expand(nil)
-
- xargs := x.TArgs().list()
- yargs := y.TArgs().list()
+ xargs := x.TypeArgs().list()
+ yargs := y.TypeArgs().list()
if len(xargs) != len(yargs) {
return false
diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go
index a7d0db624c..e3186f5eed 100644
--- a/src/cmd/compile/internal/types2/signature.go
+++ b/src/cmd/compile/internal/types2/signature.go
@@ -19,13 +19,13 @@ type Signature struct {
// and store it in the Func Object) because when type-checking a function
// literal we call the general type checker which returns a general Type.
// We then unpack the *Signature and use the scope for the literal body.
- rparams *TParamList // receiver type parameters from left to right, or nil
- tparams *TParamList // type parameters from left to right, or nil
- scope *Scope // function scope, present for package-local signatures
- recv *Var // nil if not a method
- params *Tuple // (incoming) parameters from left to right; or nil
- results *Tuple // (outgoing) results from left to right; or nil
- variadic bool // true if the last parameter's type is of the form ...T (or string, for append built-in only)
+ rparams *TypeParamList // receiver type parameters from left to right, or nil
+ tparams *TypeParamList // type parameters from left to right, or nil
+ scope *Scope // function scope, present for package-local signatures
+ recv *Var // nil if not a method
+ params *Tuple // (incoming) parameters from left to right; or nil
+ results *Tuple // (outgoing) results from left to right; or nil
+ variadic bool // true if the last parameter's type is of the form ...T (or string, for append built-in only)
}
// NewSignature returns a new function type for the given receiver, parameters,
@@ -53,17 +53,17 @@ func NewSignature(recv *Var, params, results *Tuple, variadic bool) *Signature {
// contain methods whose receiver type is a different interface.
func (s *Signature) Recv() *Var { return s.recv }
-// TParams returns the type parameters of signature s, or nil.
-func (s *Signature) TParams() *TParamList { return s.tparams }
+// TypeParams returns the type parameters of signature s, or nil.
+func (s *Signature) TypeParams() *TypeParamList { return s.tparams }
-// SetTParams sets the type parameters of signature s.
-func (s *Signature) SetTParams(tparams []*TypeParam) { s.tparams = bindTParams(tparams) }
+// SetTypeParams sets the type parameters of signature s.
+func (s *Signature) SetTypeParams(tparams []*TypeParam) { s.tparams = bindTParams(tparams) }
-// RParams returns the receiver type parameters of signature s, or nil.
-func (s *Signature) RParams() *TParamList { return s.rparams }
+// RecvTypeParams returns the receiver type parameters of signature s, or nil.
+func (s *Signature) RecvTypeParams() *TypeParamList { return s.rparams }
-// SetRParams sets the receiver type params of signature s.
-func (s *Signature) SetRParams(rparams []*TypeParam) { s.rparams = bindTParams(rparams) }
+// SetRecvTypeParams sets the receiver type params of signature s.
+func (s *Signature) SetRecvTypeParams(rparams []*TypeParam) { s.rparams = bindTParams(rparams) }
// Params returns the parameters of signature s, or nil.
func (s *Signature) Params() *Tuple { return s.params }
@@ -133,19 +133,19 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []
// again when we type-check the signature.
// TODO(gri) maybe the receiver should be marked as invalid instead?
if recv, _ := check.genericType(rname, false).(*Named); recv != nil {
- recvTParams = recv.TParams().list()
+ recvTParams = recv.TypeParams().list()
}
}
// provide type parameter bounds
// - only do this if we have the right number (otherwise an error is reported elsewhere)
- if sig.RParams().Len() == len(recvTParams) {
+ if sig.RecvTypeParams().Len() == len(recvTParams) {
// We have a list of *TypeNames but we need a list of Types.
- list := make([]Type, sig.RParams().Len())
- for i, t := range sig.RParams().list() {
+ list := make([]Type, sig.RecvTypeParams().Len())
+ for i, t := range sig.RecvTypeParams().list() {
list[i] = t
}
smap := makeSubstMap(recvTParams, list)
- for i, tpar := range sig.RParams().list() {
+ for i, tpar := range sig.RecvTypeParams().list() {
bound := recvTParams[i].bound
// bound is (possibly) parameterized in the context of the
// receiver type declaration. Substitute parameters for the
@@ -210,10 +210,10 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []
var err string
switch T := rtyp.(type) {
case *Named:
- T.expand(nil)
+ T.resolve(check.conf.Environment)
// The receiver type may be an instantiated type referred to
// by an alias (which cannot have receiver parameters for now).
- if T.TArgs() != nil && sig.RParams() == nil {
+ if T.TypeArgs() != nil && sig.RecvTypeParams() == nil {
check.errorf(recv.pos, "cannot define methods on instantiated type %s", recv.typ)
break
}
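
For API clients the rename is mechanical: TParams/SetTParams become TypeParams/SetTypeParams, and RParams/SetRParams become RecvTypeParams/SetRecvTypeParams. A hedged sketch using the exported go/types mirror of this API (which received the same renames); the source string is invented for illustration:

    package main

    import (
        "fmt"
        "go/ast"
        "go/importer"
        "go/parser"
        "go/token"
        "go/types"
        "log"
    )

    const src = `package p
    type Num interface{ ~int | ~float64 }
    func Min[T Num](a, b T) T { if a < b { return a }; return b }`

    func main() {
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            log.Fatal(err)
        }
        conf := types.Config{Importer: importer.Default()}
        pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
        if err != nil {
            log.Fatal(err)
        }
        sig := pkg.Scope().Lookup("Min").Type().(*types.Signature)
        tps := sig.TypeParams() // was TParams
        for i := 0; i < tps.Len(); i++ {
            tp := tps.At(i)
            fmt.Println(tp.Obj().Name(), "constrained by", tp.Constraint())
        }
    }
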
diff --git a/src/cmd/compile/internal/types2/sizeof_test.go b/src/cmd/compile/internal/types2/sizeof_test.go
index 5be369d843..a7f1185fa8 100644
--- a/src/cmd/compile/internal/types2/sizeof_test.go
+++ b/src/cmd/compile/internal/types2/sizeof_test.go
@@ -28,10 +28,10 @@ func TestSizeof(t *testing.T) {
{Tuple{}, 12, 24},
{Signature{}, 28, 56},
{Union{}, 16, 32},
- {Interface{}, 40, 80},
+ {Interface{}, 44, 88},
{Map{}, 16, 32},
{Chan{}, 12, 24},
- {Named{}, 72, 136},
+ {Named{}, 68, 128},
{TypeParam{}, 28, 48},
{term{}, 12, 24},
{top{}, 0, 0},
diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go
index c67538d4f0..87c1d7872b 100644
--- a/src/cmd/compile/internal/types2/subst.go
+++ b/src/cmd/compile/internal/types2/subst.go
@@ -6,10 +6,7 @@
package types2
-import (
- "bytes"
- "cmd/compile/internal/syntax"
-)
+import "cmd/compile/internal/syntax"
type substMap map[*TypeParam]Type
@@ -40,8 +37,8 @@ func (m substMap) lookup(tpar *TypeParam) Type {
// incoming type. If a substitution took place, the result type is different
// from the incoming type.
//
-// If the given typMap is non-nil, it is used in lieu of check.typMap.
-func (check *Checker) subst(pos syntax.Pos, typ Type, smap substMap, typMap map[string]*Named) Type {
+// If the given environment is non-nil, it is used in lieu of check.env.
+func (check *Checker) subst(pos syntax.Pos, typ Type, smap substMap, env *Environment) Type {
if smap.empty() {
return typ
}
@@ -61,27 +58,27 @@ func (check *Checker) subst(pos syntax.Pos, typ Type, smap substMap, typMap map[
if check != nil {
subst.check = check
- if typMap == nil {
- typMap = check.typMap
+ if env == nil {
+ env = check.conf.Environment
}
}
- if typMap == nil {
+ if env == nil {
// If we don't have a *Checker and its global type map,
// use a local version. Besides avoiding duplicate work,
// the type map prevents infinite recursive substitution
// for recursive types (example: type T[P any] *T[P]).
- typMap = make(map[string]*Named)
+ env = NewEnvironment()
}
- subst.typMap = typMap
+ subst.env = env
return subst.typ(typ)
}
type subster struct {
- pos syntax.Pos
- smap substMap
- check *Checker // nil if called via Instantiate
- typMap map[string]*Named
+ pos syntax.Pos
+ smap substMap
+ check *Checker // nil if called via Instantiate
+ env *Environment
}
func (subst *subster) typ(typ Type) Type {
@@ -182,13 +179,19 @@ func (subst *subster) typ(typ Type) Type {
}
}
- if t.TParams().Len() == 0 {
+ // subst is called by expandNamed, so in this function we need to be
+ // careful not to call any methods that would cause t to be expanded: doing
+ // so would result in deadlock.
+ //
+ // So we call t.orig.TypeParams() rather than t.TypeParams() here and
+ // below.
+ if t.orig.TypeParams().Len() == 0 {
dump(">>> %s is not parameterized", t)
return t // type is not parameterized
}
var newTArgs []Type
- assert(t.targs.Len() == t.TParams().Len())
+ assert(t.targs.Len() == t.orig.TypeParams().Len())
// already instantiated
dump(">>> %s already instantiated", t)
@@ -201,7 +204,7 @@ func (subst *subster) typ(typ Type) Type {
if new_targ != targ {
dump(">>> substituted %d targ %s => %s", i, targ, new_targ)
if newTArgs == nil {
- newTArgs = make([]Type, t.TParams().Len())
+ newTArgs = make([]Type, t.orig.TypeParams().Len())
copy(newTArgs, t.targs.list())
}
newTArgs[i] = new_targ
@@ -214,32 +217,29 @@ func (subst *subster) typ(typ Type) Type {
}
// before creating a new named type, check if we have this one already
- h := typeHash(t, newTArgs)
+ h := subst.env.TypeHash(t.orig, newTArgs)
dump(">>> new type hash: %s", h)
- if named, found := subst.typMap[h]; found {
+ if named := subst.env.typeForHash(h, nil); named != nil {
dump(">>> found %s", named)
return named
}
- // Create a new named type and populate typMap to avoid endless recursion.
- // The position used here is irrelevant because validation only occurs on t
- // (we don't call validType on named), but we use subst.pos to help with
- // debugging.
- tname := NewTypeName(subst.pos, t.obj.pkg, t.obj.name, nil)
- t.load()
- // It's ok to provide a nil *Checker because the newly created type
- // doesn't need to be (lazily) expanded; it's expanded below.
- named := (*Checker)(nil).newNamed(tname, t.orig, nil, t.tparams, t.methods) // t is loaded, so tparams and methods are available
- named.targs = NewTypeList(newTArgs)
- subst.typMap[h] = named
- t.expand(subst.typMap) // must happen after typMap update to avoid infinite recursion
-
- // do the substitution
- dump(">>> subst %s with %s (new: %s)", t.underlying, subst.smap, newTArgs)
- named.underlying = subst.typOrNil(t.underlying)
- dump(">>> underlying: %v", named.underlying)
+ t.orig.resolve(subst.env)
+ // Create a new instance and populate the environment to avoid endless
+ // recursion. The position used here is irrelevant because validation only
+ // occurs on t (we don't call validType on named), but we use subst.pos to
+ // help with debugging.
+ named := subst.check.instance(subst.pos, t.orig, newTArgs, subst.env).(*Named)
+ // TODO(rfindley): we probably don't need to resolve here. Investigate if
+ // this can be removed.
+ named.resolve(subst.env)
assert(named.underlying != nil)
- named.fromRHS = named.underlying // for consistency, though no cycle detection is necessary
+
+ // Note that if we were to expose substitution more generally (not just in
+ // the context of a declaration), we'd have to substitute in
+ // named.underlying as well.
+ //
+ // But this is unnecessary for now.
return named
@@ -253,35 +253,6 @@ func (subst *subster) typ(typ Type) Type {
return typ
}
-// typeHash returns a string representation of typ, which can be used as an exact
-// type hash: types that are identical produce identical string representations.
-// If typ is a *Named type and targs is not empty, typ is printed as if it were
-// instantiated with targs.
-func typeHash(typ Type, targs []Type) string {
- assert(typ != nil)
- var buf bytes.Buffer
-
- h := newTypeHasher(&buf)
- if named, _ := typ.(*Named); named != nil && len(targs) > 0 {
- // Don't use WriteType because we need to use the provided targs
- // and not any targs that might already be with the *Named type.
- h.typeName(named.obj)
- h.typeList(targs)
- } else {
- assert(targs == nil)
- h.typ(typ)
- }
-
- if debug {
- // there should be no instance markers in type hashes
- for _, b := range buf.Bytes() {
- assert(b != instanceMarker)
- }
- }
-
- return buf.String()
-}
-
// typOrNil is like typ but if the argument is nil it is replaced with Typ[Invalid].
// A nil type may appear in pathological cases such as type T[P any] []func(_ T([]_))
// where an array/slice element is accessed before it is set up.
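
The environment replaces check.typMap as the memo that stops recursive substitution; the comment in the hunk above names the canonical case. Spelled out as plain Go:

    package p

    // T's underlying type mentions T[P] again, so substituting P := int
    // would recurse forever without the memo: the environment returns
    // the already-created *Named for T[int]'s hash instead.
    type T[P any] *T[P]

    var _ T[int]
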
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43527.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43527.go2
new file mode 100644
index 0000000000..e4bcee51fe
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43527.go2
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+const L = 10
+
+type (
+ _ [L]struct{}
+ _ [A /* ERROR undeclared name A for array length */ ]struct{}
+ _ [B /* ERROR not an expression */ ]struct{}
+ _[A any] struct{}
+
+ B int
+)
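
For contrast with the error cases above, the two well-formed readings the new check distinguishes (a minimal sketch):

    package q

    const N = 4

    type arr [N]struct{}     // N is a constant: array length
    type gen[T any] struct{} // T has a constraint: type parameter list
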
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47887.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47887.go2
new file mode 100644
index 0000000000..4c4fc2fda8
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47887.go2
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Fooer[t any] interface {
+ foo(Barer[t])
+}
+type Barer[t any] interface {
+ bar(Bazer[t])
+}
+type Bazer[t any] interface {
+ Fooer[t]
+ baz(t)
+}
+
+type Int int
+
+func (n Int) baz(int) {}
+func (n Int) foo(b Barer[int]) { b.bar(n) }
+
+type F[t any] interface { f(G[t]) }
+type G[t any] interface { g(H[t]) }
+type H[t any] interface { F[t] }
+
+type T struct{}
+func (n T) f(b G[T]) { b.g(n) }
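
Since the instantiation cycle through Fooer/Barer/Bazer now resolves, Int should also satisfy Bazer[int] (foo covers the embedded Fooer[int], baz covers baz(t)). A plausible extra assertion for these test types, not part of the patch:

    var _ Bazer[int] = Int(0)
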
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47996.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47996.go2
index 56e90942ab..2c4b6610fe 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47996.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47996.go2
@@ -5,4 +5,4 @@
package p
// don't crash
-func T /* ERROR missing */ [P /* ERROR named */ ] m /* ERROR m */ () /* ERROR \) */ { /* ERROR { */ } /* ERROR } */
+func T /* ERROR missing */ [P] /* ERROR missing */ m /* ERROR unexpected */ () /* ERROR \) */ { /* ERROR { */ } /* ERROR } */
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48234.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48234.go2
new file mode 100644
index 0000000000..e069930c42
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48234.go2
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var _ = interface{
+ m()
+ m /* ERROR "duplicate method" */ ()
+}(nil)
diff --git a/src/cmd/compile/internal/types2/type.go b/src/cmd/compile/internal/types2/type.go
index ca5ecdc434..400d6f7128 100644
--- a/src/cmd/compile/internal/types2/type.go
+++ b/src/cmd/compile/internal/types2/type.go
@@ -115,7 +115,7 @@ func asInterface(t Type) *Interface {
func asNamed(t Type) *Named {
e, _ := t.(*Named)
if e != nil {
- e.expand(nil)
+ e.resolve(nil)
}
return e
}
diff --git a/src/cmd/compile/internal/types2/typelists.go b/src/cmd/compile/internal/types2/typelists.go
index f313ea310e..ababe85909 100644
--- a/src/cmd/compile/internal/types2/typelists.go
+++ b/src/cmd/compile/internal/types2/typelists.go
@@ -6,20 +6,20 @@ package types2
import "bytes"
-// TParamList holds a list of type parameters.
-type TParamList struct{ tparams []*TypeParam }
+// TypeParamList holds a list of type parameters.
+type TypeParamList struct{ tparams []*TypeParam }
// Len returns the number of type parameters in the list.
// It is safe to call on a nil receiver.
-func (l *TParamList) Len() int { return len(l.list()) }
+func (l *TypeParamList) Len() int { return len(l.list()) }
// At returns the i'th type parameter in the list.
-func (l *TParamList) At(i int) *TypeParam { return l.tparams[i] }
+func (l *TypeParamList) At(i int) *TypeParam { return l.tparams[i] }
// list is for internal use where we expect a []*TypeParam.
// TODO(rfindley): list should probably be eliminated: we can pass around a
-// TParamList instead.
-func (l *TParamList) list() []*TypeParam {
+// TypeParamList instead.
+func (l *TypeParamList) list() []*TypeParam {
if l == nil {
return nil
}
@@ -66,7 +66,7 @@ func (l *TypeList) String() string {
// ----------------------------------------------------------------------------
// Implementation
-func bindTParams(list []*TypeParam) *TParamList {
+func bindTParams(list []*TypeParam) *TypeParamList {
if len(list) == 0 {
return nil
}
@@ -76,5 +76,5 @@ func bindTParams(list []*TypeParam) *TParamList {
}
typ.index = i
}
- return &TParamList{tparams: list}
+ return &TypeParamList{tparams: list}
}
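
Len is deliberately nil-safe while At is not, so iteration goes through Len. A hedged sketch against the exported go/types mirror of TypeParamList:

    package main

    import (
        "fmt"
        "go/types"
    )

    // printTypeParams uses only Len and At; Len tolerates a nil list,
    // so passing nil prints nothing rather than panicking.
    func printTypeParams(tps *types.TypeParamList) {
        for i := 0; i < tps.Len(); i++ {
            tp := tps.At(i)
            fmt.Printf("%s (index %d)\n", tp.Obj().Name(), tp.Index())
        }
    }

    func main() {
        printTypeParams(nil) // no output, no panic
    }
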
diff --git a/src/cmd/compile/internal/types2/typeparam.go b/src/cmd/compile/internal/types2/typeparam.go
index 445337fee8..505596f571 100644
--- a/src/cmd/compile/internal/types2/typeparam.go
+++ b/src/cmd/compile/internal/types2/typeparam.go
@@ -29,18 +29,22 @@ type TypeParam struct {
func (t *TypeParam) Obj() *TypeName { return t.obj }
// NewTypeParam returns a new TypeParam. Type parameters may be set on a Named
-// or Signature type by calling SetTParams. Setting a type parameter on more
+// or Signature type by calling SetTypeParams. Setting a type parameter on more
// than one type will result in a panic.
//
-// The bound argument can be nil, and set later via SetBound.
-func (check *Checker) NewTypeParam(obj *TypeName, bound Type) *TypeParam {
+// The constraint argument can be nil, and set later via SetConstraint.
+func NewTypeParam(obj *TypeName, constraint Type) *TypeParam {
+ return (*Checker)(nil).newTypeParam(obj, constraint)
+}
+
+func (check *Checker) newTypeParam(obj *TypeName, constraint Type) *TypeParam {
// Always increment lastID, even if it is not used.
id := nextID()
if check != nil {
check.nextID++
id = check.nextID
}
- typ := &TypeParam{check: check, id: id, obj: obj, index: -1, bound: bound}
+ typ := &TypeParam{check: check, id: id, obj: obj, index: -1, bound: constraint}
if obj.typ == nil {
obj.typ = typ
}
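
NewTypeParam is now exported and threads through a nil *Checker; the constraint may be nil at creation and attached later via SetConstraint. A hedged sketch using the go/types mirror of the constructor:

    package main

    import (
        "fmt"
        "go/token"
        "go/types"
    )

    func main() {
        // Create a free-standing type parameter P with a nil constraint,
        // then attach the constraint afterwards.
        obj := types.NewTypeName(token.NoPos, nil, "P", nil)
        tp := types.NewTypeParam(obj, nil)
        tp.SetConstraint(types.Universe.Lookup("any").Type())
        fmt.Println(tp, "constrained by", tp.Constraint())
    }
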
diff --git a/src/cmd/compile/internal/types2/typestring.go b/src/cmd/compile/internal/types2/typestring.go
index 6083955306..bdafcf883d 100644
--- a/src/cmd/compile/internal/types2/typestring.go
+++ b/src/cmd/compile/internal/types2/typestring.go
@@ -8,7 +8,7 @@ package types2
import (
"bytes"
- "fmt"
+ "strconv"
"unicode/utf8"
)
@@ -63,32 +63,45 @@ func WriteSignature(buf *bytes.Buffer, sig *Signature, qf Qualifier) {
newTypeWriter(buf, qf).signature(sig)
}
-// instanceMarker is the prefix for an instantiated type in unexpanded form.
-const instanceMarker = '#'
-
type typeWriter struct {
buf *bytes.Buffer
seen map[Type]bool
qf Qualifier
- hash bool
+ env *Environment // if non-nil, we are type hashing
}
func newTypeWriter(buf *bytes.Buffer, qf Qualifier) *typeWriter {
- return &typeWriter{buf, make(map[Type]bool), qf, false}
+ return &typeWriter{buf, make(map[Type]bool), qf, nil}
}
-func newTypeHasher(buf *bytes.Buffer) *typeWriter {
- return &typeWriter{buf, make(map[Type]bool), nil, true}
+func newTypeHasher(buf *bytes.Buffer, env *Environment) *typeWriter {
+ assert(env != nil)
+ return &typeWriter{buf, make(map[Type]bool), nil, env}
+}
+
+func (w *typeWriter) byte(b byte) {
+ if w.env != nil {
+ if b == ' ' {
+ b = '#'
+ }
+ w.buf.WriteByte(b)
+ return
+ }
+ w.buf.WriteByte(b)
+ if b == ',' || b == ';' {
+ w.buf.WriteByte(' ')
+ }
+}
+
+func (w *typeWriter) string(s string) {
+ w.buf.WriteString(s)
}
-func (w *typeWriter) byte(b byte) { w.buf.WriteByte(b) }
-func (w *typeWriter) string(s string) { w.buf.WriteString(s) }
-func (w *typeWriter) writef(format string, args ...interface{}) { fmt.Fprintf(w.buf, format, args...) }
func (w *typeWriter) error(msg string) {
- if w.hash {
+ if w.env != nil {
panic(msg)
}
- w.string("<" + msg + ">")
+ w.buf.WriteString("<" + msg + ">")
}
func (w *typeWriter) typ(typ Type) {
@@ -115,7 +128,9 @@ func (w *typeWriter) typ(typ Type) {
w.string(t.name)
case *Array:
- w.writef("[%d]", t.len)
+ w.byte('[')
+ w.string(strconv.FormatInt(t.len, 10))
+ w.byte(']')
w.typ(t.elem)
case *Slice:
@@ -126,7 +141,7 @@ func (w *typeWriter) typ(typ Type) {
w.string("struct{")
for i, f := range t.fields {
if i > 0 {
- w.string("; ")
+ w.byte(';')
}
// This doesn't do the right thing for embedded type
// aliases where we should print the alias name, not
@@ -137,7 +152,11 @@ func (w *typeWriter) typ(typ Type) {
}
w.typ(f.typ)
if tag := t.Tag(i); tag != "" {
- w.writef(" %q", tag)
+ w.byte(' ')
+ // TODO(gri) If tag contains blanks, replacing them with '#'
+ // in Environment.TypeHash may produce another tag
+ // accidentally.
+ w.string(strconv.Quote(tag))
}
}
w.byte('}')
@@ -175,7 +194,7 @@ func (w *typeWriter) typ(typ Type) {
first := true
for _, m := range t.methods {
if !first {
- w.string("; ")
+ w.byte(';')
}
first = false
w.string(m.name)
@@ -183,7 +202,7 @@ func (w *typeWriter) typ(typ Type) {
}
for _, typ := range t.embeddeds {
if !first {
- w.string("; ")
+ w.byte(';')
}
first = false
w.typ(typ)
@@ -223,20 +242,14 @@ func (w *typeWriter) typ(typ Type) {
}
case *Named:
- // Instance markers indicate unexpanded instantiated
- // types. Write them to aid debugging, but don't write
- // them when we need an instance hash: whether a type
- // is fully expanded or not doesn't matter for identity.
- if !w.hash && t.instPos != nil {
- w.byte(instanceMarker)
- }
+ w.typePrefix(t)
w.typeName(t.obj)
if t.targs != nil {
// instantiated type
w.typeList(t.targs.list())
- } else if !w.hash && t.TParams().Len() != 0 { // For type hashing, don't need to format the TParams
+ } else if w.env == nil && t.TypeParams().Len() != 0 { // For type hashing, we don't need to format the TypeParams
// parameterized type
- w.tParamList(t.TParams().list())
+ w.tParamList(t.TypeParams().list())
}
case *TypeParam:
@@ -263,11 +276,20 @@ func (w *typeWriter) typ(typ Type) {
}
}
+// If w.env is non-nil, typePrefix writes a unique prefix for the named type t
+// based on the types already observed by w.env. If w.env is nil, it does
+// nothing.
+func (w *typeWriter) typePrefix(t *Named) {
+ if w.env != nil {
+ w.string(strconv.Itoa(w.env.idForType(t)))
+ }
+}
+
func (w *typeWriter) typeList(list []Type) {
w.byte('[')
for i, typ := range list {
if i > 0 {
- w.string(", ")
+ w.byte(',')
}
w.typ(typ)
}
@@ -291,7 +313,7 @@ func (w *typeWriter) tParamList(list []*TypeParam) {
w.byte(' ')
w.typ(prev)
}
- w.string(", ")
+ w.byte(',')
}
prev = tpar.bound
w.typ(tpar)
@@ -308,31 +330,6 @@ func (w *typeWriter) typeName(obj *TypeName) {
writePackage(w.buf, obj.pkg, w.qf)
}
w.string(obj.name)
-
- if w.hash {
- // For local defined types, use the (original!) TypeName's scope
- // numbers to disambiguate.
- if typ, _ := obj.typ.(*Named); typ != nil {
- // TODO(gri) Figure out why typ.orig != typ.orig.orig sometimes
- // and whether the loop can iterate more than twice.
- // (It seems somehow connected to instance types.)
- for typ.orig != typ {
- typ = typ.orig
- }
- w.writeScopeNumbers(typ.obj.parent)
- }
- }
-}
-
-// writeScopeNumbers writes the number sequence for this scope to buf
-// in the form ".i.j.k" where i, j, k, etc. stand for scope numbers.
-// If a scope is nil or has no parent (such as a package scope), nothing
-// is written.
-func (w *typeWriter) writeScopeNumbers(s *Scope) {
- if s != nil && s.number > 0 {
- w.writeScopeNumbers(s.parent)
- w.writef(".%d", s.number)
- }
}
func (w *typeWriter) tuple(tup *Tuple, variadic bool) {
@@ -340,10 +337,10 @@ func (w *typeWriter) tuple(tup *Tuple, variadic bool) {
if tup != nil {
for i, v := range tup.vars {
if i > 0 {
- w.string(", ")
+ w.byte(',')
}
// parameter names are ignored for type identity and thus type hashes
- if !w.hash && v.name != "" {
+ if w.env == nil && v.name != "" {
w.string(v.name)
w.byte(' ')
}
@@ -371,8 +368,8 @@ func (w *typeWriter) tuple(tup *Tuple, variadic bool) {
}
func (w *typeWriter) signature(sig *Signature) {
- if sig.TParams().Len() != 0 {
- w.tParamList(sig.TParams().list())
+ if sig.TypeParams().Len() != 0 {
+ w.tParamList(sig.TypeParams().list())
}
w.tuple(sig.params, sig.variadic)
@@ -384,7 +381,7 @@ func (w *typeWriter) signature(sig *Signature) {
}
w.byte(' ')
- if n == 1 && (w.hash || sig.results.vars[0].name == "") {
+ if n == 1 && (w.env != nil || sig.results.vars[0].name == "") {
// single unnamed result (if type hashing, name must be ignored)
w.typ(sig.results.vars[0].typ)
return
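
The byte method above is the heart of the change: when hashing (w.env != nil) a written blank becomes '#', and the pretty-printing space after ',' and ';' is dropped, so a hash can never collide with a printed type string. A standalone sketch of just that rule:

    package main

    import (
        "bytes"
        "fmt"
    )

    // writeByte mirrors typeWriter.byte from the patch: hashing maps
    // blanks to '#'; pretty-printing adds a space after ',' and ';'.
    func writeByte(buf *bytes.Buffer, b byte, hashing bool) {
        if hashing {
            if b == ' ' {
                b = '#'
            }
            buf.WriteByte(b)
            return
        }
        buf.WriteByte(b)
        if b == ',' || b == ';' {
            buf.WriteByte(' ')
        }
    }

    func main() {
        var pretty, hash bytes.Buffer
        for _, b := range []byte("[T,U]") {
            writeByte(&pretty, b, false)
            writeByte(&hash, b, true)
        }
        fmt.Println(pretty.String()) // [T, U]
        fmt.Println(hash.String())   // [T,U]
    }
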
diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go
index f3db3bbba9..5aacb94a60 100644
--- a/src/cmd/compile/internal/types2/typexpr.go
+++ b/src/cmd/compile/internal/types2/typexpr.go
@@ -428,6 +428,14 @@ func (check *Checker) instantiatedType(x syntax.Expr, targsx []syntax.Expr, def
// and returns the constant length >= 0, or a value < 0
// to indicate an error (and thus an unknown length).
func (check *Checker) arrayLength(e syntax.Expr) int64 {
+ // If e is an undeclared identifier, the array declaration might be an
+ // attempt at a parameterized type declaration with missing constraint.
+ // Provide a better error message than just "undeclared name: X".
+ if name, _ := e.(*syntax.Name); name != nil && check.lookup(name.Value) == nil {
+ check.errorf(name, "undeclared name %s for array length", name.Value)
+ return -1
+ }
+
var x operand
check.expr(&x, e)
if x.mode != constant_ {
@@ -436,6 +444,7 @@ func (check *Checker) arrayLength(e syntax.Expr) int64 {
}
return -1
}
+
if isUntyped(x.typ) || isInteger(x.typ) {
if val := constant.ToInt(x.val); val.Kind() == constant.Int {
if representableConst(val, check, Typ[Int], nil) {
@@ -447,6 +456,7 @@ func (check *Checker) arrayLength(e syntax.Expr) int64 {
}
}
}
+
check.errorf(&x, "array length %s must be integer", &x)
return -1
}
diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go
index a1e5b3679b..bb69f0d27b 100644
--- a/src/cmd/compile/internal/types2/unify.go
+++ b/src/cmd/compile/internal/types2/unify.go
@@ -428,9 +428,6 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
case *Named:
if y, ok := y.(*Named); ok {
- x.expand(nil)
- y.expand(nil)
-
xargs := x.targs.list()
yargs := y.targs.list()
diff --git a/src/cmd/compile/internal/types2/universe.go b/src/cmd/compile/internal/types2/universe.go
index a615b4c876..af3ab97325 100644
--- a/src/cmd/compile/internal/types2/universe.go
+++ b/src/cmd/compile/internal/types2/universe.go
@@ -88,7 +88,7 @@ func defPredeclaredTypes() {
res := NewVar(nopos, nil, "", Typ[String])
sig := NewSignature(nil, nil, NewTuple(res), false)
err := NewFunc(nopos, nil, "Error", sig)
- ityp := &Interface{obj, []*Func{err}, nil, nil, true, nil}
+ ityp := &Interface{nil, obj, []*Func{err}, nil, nil, true, nil}
computeInterfaceTypeSet(nil, nopos, ityp) // prevent races due to lazy computation of tset
typ := NewNamed(obj, ityp, nil)
sig.recv = NewVar(nopos, nil, "", typ)
@@ -99,7 +99,7 @@ func defPredeclaredTypes() {
{
obj := NewTypeName(nopos, nil, "comparable", nil)
obj.setColor(black)
- ityp := &Interface{obj, nil, nil, nil, true, &_TypeSet{true, nil, allTermlist}}
+ ityp := &Interface{nil, obj, nil, nil, nil, true, &_TypeSet{true, nil, allTermlist}}
NewNamed(obj, ityp, nil)
def(obj)
}
diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go
index d701d545de..5d69fc3868 100644
--- a/src/cmd/compile/internal/walk/convert.go
+++ b/src/cmd/compile/internal/walk/convert.go
@@ -25,9 +25,6 @@ func walkConv(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
return n.X
}
if n.Op() == ir.OCONVNOP && ir.ShouldCheckPtr(ir.CurFunc, 1) {
- if n.Type().IsPtr() && n.X.Type().IsUnsafePtr() { // unsafe.Pointer to *T
- return walkCheckPtrAlignment(n, init, nil)
- }
if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() { // uintptr to unsafe.Pointer
return walkCheckPtrArithmetic(n, init)
}
@@ -414,41 +411,6 @@ func byteindex(n ir.Node) ir.Node {
return n
}
-func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, se *ir.SliceExpr) ir.Node {
- if !n.Type().IsPtr() {
- base.Fatalf("expected pointer type: %v", n.Type())
- }
- elem := n.Type().Elem()
- var count ir.Node
- if se != nil {
- count = se.Max
- }
- if count != nil {
- if !elem.IsArray() {
- base.Fatalf("expected array type: %v", elem)
- }
- elem = elem.Elem()
- }
-
- size := elem.Size()
- if elem.Alignment() == 1 && (size == 0 || size == 1 && count == nil) {
- return n
- }
-
- if count == nil {
- count = ir.NewInt(1)
- }
-
- n.X = cheapExpr(n.X, init)
- checkPtrCall := mkcall("checkptrAlignment", nil, init, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]), reflectdata.TypePtr(elem), typecheck.Conv(count, types.Types[types.TUINTPTR]))
- if se != nil {
- se.CheckPtrCall = checkPtrCall
- } else {
- init.Append(checkPtrCall)
- }
- return n
-}
-
func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
// Calling cheapExpr(n, init) below leads to a recursive call to
// walkExpr, which leads us back here again. Use n.Checkptr to
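
For reference, the shape of conversion the deleted helper used to instrument: unsafe.Pointer to *T under checkptr mode (e.g. -gcflags=all=-d=checkptr, or implicitly with -race). The conversion itself is unchanged; within this patch only the compiler-inserted runtime.checkptrAlignment call goes away:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        var x uint64 = 0x0102030405060708
        // unsafe.Pointer -> *T: the pattern walkCheckPtrAlignment
        // previously rewrote to also call runtime.checkptrAlignment.
        p := (*uint32)(unsafe.Pointer(&x))
        fmt.Printf("%#x\n", *p)
    }
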
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index ed2d68539d..e5bf6cf0b5 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -807,15 +807,7 @@ func walkSend(n *ir.SendStmt, init *ir.Nodes) ir.Node {
// walkSlice walks an OSLICE, OSLICEARR, OSLICESTR, OSLICE3, or OSLICE3ARR node.
func walkSlice(n *ir.SliceExpr, init *ir.Nodes) ir.Node {
-
- checkSlice := ir.ShouldCheckPtr(ir.CurFunc, 1) && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr()
- if checkSlice {
- conv := n.X.(*ir.ConvExpr)
- conv.X = walkExpr(conv.X, init)
- } else {
- n.X = walkExpr(n.X, init)
- }
-
+ n.X = walkExpr(n.X, init)
n.Low = walkExpr(n.Low, init)
if n.Low != nil && ir.IsZero(n.Low) {
// Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k].
@@ -823,9 +815,6 @@ func walkSlice(n *ir.SliceExpr, init *ir.Nodes) ir.Node {
}
n.High = walkExpr(n.High, init)
n.Max = walkExpr(n.Max, init)
- if checkSlice {
- n.X = walkCheckPtrAlignment(n.X.(*ir.ConvExpr), init, n)
- }
if n.Op().IsSlice3() {
if n.Max != nil && n.Max.Op() == ir.OCAP && ir.SameSafeExpr(n.X, n.Max.(*ir.UnaryExpr).X) {
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
index 4de8858f26..7ac1f75c8f 100644
--- a/src/cmd/compile/internal/walk/order.go
+++ b/src/cmd/compile/internal/walk/order.go
@@ -941,6 +941,12 @@ func (o *orderState) stmt(n ir.Node) {
if colas {
if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n {
init = init[1:]
+
+ // iimport may have added a default initialization assignment,
+ // due to how it handles ODCL statements.
+ if len(init) > 0 && init[0].Op() == ir.OAS && init[0].(*ir.AssignStmt).X == n {
+ init = init[1:]
+ }
}
dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
ncas.PtrInit().Append(dcl)
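
The construct affected is a ":=" declaration in a select case: when the enclosing body comes back through iimport (e.g. for inlining), the declaration's init list may hold both the ODCL and a synthesized zeroing assignment, and order now strips both. A runnable example of the construct itself, a sketch only:

    package main

    import "fmt"

    func main() {
        ch := make(chan int, 1)
        ch <- 42
        select {
        case v := <-ch: // colas: v is declared by the case itself
            fmt.Println(v)
        }
    }
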
diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go
index 4581bca3df..f09e916546 100644
--- a/src/cmd/compile/internal/walk/stmt.go
+++ b/src/cmd/compile/internal/walk/stmt.go
@@ -136,6 +136,14 @@ func walkStmt(n ir.Node) ir.Node {
case ir.OTAILCALL:
n := n.(*ir.TailCallStmt)
+
+ var init ir.Nodes
+ n.Call.X = walkExpr(n.Call.X, &init)
+
+ if len(init) > 0 {
+ init.Append(n)
+ return ir.NewBlockStmt(n.Pos(), init)
+ }
return n
case ir.OINLMARK:
diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go
index 0b2ca3fdbb..765051c944 100644
--- a/src/cmd/compile/internal/wasm/ssa.go
+++ b/src/cmd/compile/internal/wasm/ssa.go
@@ -88,13 +88,7 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
case ssa.BlockRet:
s.Prog(obj.ARET)
- case ssa.BlockRetJmp:
- p := s.Prog(obj.ARET)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = b.Aux.(*obj.LSym)
-
- case ssa.BlockExit:
+ case ssa.BlockExit, ssa.BlockRetJmp:
case ssa.BlockDefer:
p := s.Prog(wasm.AGet)
@@ -122,7 +116,7 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
- case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall:
+ case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall, ssa.OpWasmLoweredTailCall:
s.PrepareCall(v)
if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == ir.Syms.Deferreturn {
// The runtime needs to inject jumps to
@@ -141,6 +135,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: sym}
p.Pos = v.Pos
+ if v.Op == ssa.OpWasmLoweredTailCall {
+ p.As = obj.ARET
+ }
} else {
getValue64(s, v.Args[0])
p := s.Prog(obj.ACALL)
diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go
index a06fdbcb71..32e29f347b 100644
--- a/src/cmd/compile/internal/x86/ssa.go
+++ b/src/cmd/compile/internal/x86/ssa.go
@@ -752,6 +752,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter:
s.Call(v)
+ case ssa.Op386CALLtail:
+ s.TailCall(v)
case ssa.Op386NEGL,
ssa.Op386BSWAPL,
ssa.Op386NOTL:
@@ -892,14 +894,9 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
- case ssa.BlockExit:
+ case ssa.BlockExit, ssa.BlockRetJmp:
case ssa.BlockRet:
s.Prog(obj.ARET)
- case ssa.BlockRetJmp:
- p := s.Prog(obj.AJMP)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = b.Aux.(*obj.LSym)
case ssa.Block386EQF:
s.CombJump(b, next, &eqfJumps)