author    Russ Cox <rsc@golang.org>  2020-12-23 01:05:16 -0500
committer Russ Cox <rsc@golang.org>  2020-12-23 06:39:43 +0000
commit    e4895ab4c0eb44de6ddc5dc8d860a827b20d2781 (patch)
tree      a35d0c242a2b2788ccdbdecf5c02e52b7b24f639 /src/cmd/compile/internal/gc
parent    01fd2d05c8b7bfc083977ca73123a5541b289737 (diff)
download  go-e4895ab4c0eb44de6ddc5dc8d860a827b20d2781.tar.gz
          go-e4895ab4c0eb44de6ddc5dc8d860a827b20d2781.zip
[dev.regabi] cmd/compile: split out package walk [generated]
[git-generate]

cd src/cmd/compile/internal/gc

rf '
    # Late addition to package ir.
    mv closuredebugruntimecheck ClosureDebugRuntimeCheck
    mv hasemptycvars IsTrivialClosure
    mv ClosureDebugRuntimeCheck IsTrivialClosure func.go
    mv func.go cmd/compile/internal/ir

    # Late addition to package reflectdata.
    mv markTypeUsedInInterface MarkTypeUsedInInterface
    mv markUsedIfaceMethod MarkUsedIfaceMethod
    mv MarkTypeUsedInInterface MarkUsedIfaceMethod reflect.go
    mv reflect.go cmd/compile/internal/reflectdata

    # Late addition to package staticdata.
    mv litsym InitConst
    mv InitConst data.go
    mv data.go cmd/compile/internal/staticdata

    # Extract staticinit out of walk into its own package.
    mv InitEntry InitPlan InitSchedule InitSchedule.append InitSchedule.staticInit \
        InitSchedule.tryStaticInit InitSchedule.staticcopy \
        InitSchedule.staticassign InitSchedule.initplan InitSchedule.addvalue \
        statuniqgen staticname stataddr anySideEffects getlit isvaluelit \
        sched.go
    mv InitSchedule.initplans InitSchedule.Plans
    mv InitSchedule.inittemps InitSchedule.Temps
    mv InitSchedule.out InitSchedule.Out
    mv InitSchedule.staticInit InitSchedule.StaticInit
    mv InitSchedule.staticassign InitSchedule.StaticAssign
    mv InitSchedule Schedule
    mv InitPlan Plan
    mv InitEntry Entry
    mv anySideEffects AnySideEffects
    mv staticname StaticName
    mv stataddr StaticLoc
    mv sched.go cmd/compile/internal/staticinit

    # Export API and unexport non-API.
    mv transformclosure Closure
    mv walk Walk
    mv Order orderState
    mv swt.go switch.go
    mv racewalk.go race.go
    mv closure.go order.go range.go select.go switch.go race.go \
        sinit.go subr.go walk.go \
        cmd/compile/internal/walk
'

: # Update format test.
cd ../../
go install cmd/compile/... cmd/internal/archive
go test -u || go test -u

rm -rf ../../../pkg/darwin_amd64/cmd

Change-Id: I11c7a45f74d4a9e963da15c080e1018caaa99c05
Reviewed-on: https://go-review.googlesource.com/c/go/+/279478
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
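For orientation before the file-by-file diff, the sketch below collects the renamed entry points in one hypothetical helper. The names walk.Walk, walk.Closure, staticinit.Schedule, Plans, Temps, StaticInit, and Out all come from the rf script above; the helper itself and its call shape are illustrative only, not code from this CL.

package gc

import (
	"cmd/compile/internal/ir"
	"cmd/compile/internal/staticinit"
	"cmd/compile/internal/walk"
)

// scheduleAll is a hypothetical helper showing the post-CL spellings:
// Schedule replaces InitSchedule, and the exported Plans, Temps,
// StaticInit, and Out replace initplans, inittemps, staticInit, and out.
func scheduleAll(fns []*ir.Func, assigns []ir.Node) []ir.Node {
	for _, fn := range fns {
		if fn.OClosure != nil {
			walk.Closure(fn) // was transformclosure(fn)
		}
		walk.Walk(fn) // was walk(fn)
	}
	s := staticinit.Schedule{
		Plans: make(map[ir.Node]*staticinit.Plan), // was initplans/*InitPlan
		Temps: make(map[ir.Node]*ir.Name),         // was inittemps
	}
	for _, n := range assigns {
		s.StaticInit(n) // was s.staticInit(n)
	}
	return s.Out // was s.out
}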
Diffstat (limited to 'src/cmd/compile/internal/gc')
-rw-r--r--  src/cmd/compile/internal/gc/closure.go     218
-rw-r--r--  src/cmd/compile/internal/gc/compile.go       3
-rw-r--r--  src/cmd/compile/internal/gc/initorder.go    11
-rw-r--r--  src/cmd/compile/internal/gc/main.go          3
-rw-r--r--  src/cmd/compile/internal/gc/obj.go          57
-rw-r--r--  src/cmd/compile/internal/gc/order.go      1489
-rw-r--r--  src/cmd/compile/internal/gc/racewalk.go     48
-rw-r--r--  src/cmd/compile/internal/gc/range.go       495
-rw-r--r--  src/cmd/compile/internal/gc/select.go      297
-rw-r--r--  src/cmd/compile/internal/gc/sinit.go      1156
-rw-r--r--  src/cmd/compile/internal/gc/subr.go        337
-rw-r--r--  src/cmd/compile/internal/gc/swt.go         549
-rw-r--r--  src/cmd/compile/internal/gc/walk.go       4039
13 files changed, 10 insertions, 8692 deletions
diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go
deleted file mode 100644
index 4679b6535b..0000000000
--- a/src/cmd/compile/internal/gc/closure.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/typecheck"
- "cmd/compile/internal/types"
- "cmd/internal/src"
-)
-
-// transformclosure is called in a separate phase after escape analysis.
-// It transforms closure bodies to properly reference captured variables.
-func transformclosure(fn *ir.Func) {
- lno := base.Pos
- base.Pos = fn.Pos()
-
- if fn.ClosureCalled() {
- // If the closure is directly called, we transform it to a plain function call
- // with variables passed as args. This avoids allocation of a closure object.
- // Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
- // will complete the transformation later.
- // For illustration, the following closure:
- // func(a int) {
- // println(byval)
- // byref++
- // }(42)
- // becomes:
- // func(byval int, &byref *int, a int) {
- // println(byval)
- // (*&byref)++
- // }(byval, &byref, 42)
-
- // f is ONAME of the actual function.
- f := fn.Nname
-
- // We are going to insert captured variables before input args.
- var params []*types.Field
- var decls []*ir.Name
- for _, v := range fn.ClosureVars {
- if !v.Byval() {
- // If v of type T is captured by reference,
- // we introduce function param &v *T
- // and v remains PAUTOHEAP with &v heapaddr
- // (accesses will implicitly deref &v).
- addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name))
- addr.SetType(types.NewPtr(v.Type()))
- v.Heapaddr = addr
- v = addr
- }
-
- v.Class_ = ir.PPARAM
- decls = append(decls, v)
-
- fld := types.NewField(src.NoXPos, v.Sym(), v.Type())
- fld.Nname = v
- params = append(params, fld)
- }
-
- if len(params) > 0 {
- // Prepend params and decls.
- f.Type().Params().SetFields(append(params, f.Type().Params().FieldSlice()...))
- fn.Dcl = append(decls, fn.Dcl...)
- }
-
- types.CalcSize(f.Type())
- fn.SetType(f.Type()) // update type of ODCLFUNC
- } else {
- // The closure is not called, so it is going to stay as closure.
- var body []ir.Node
- offset := int64(types.PtrSize)
- for _, v := range fn.ClosureVars {
- // cv refers to the field inside of closure OSTRUCTLIT.
- typ := v.Type()
- if !v.Byval() {
- typ = types.NewPtr(typ)
- }
- offset = types.Rnd(offset, int64(typ.Align))
- cr := ir.NewClosureRead(typ, offset)
- offset += typ.Width
-
- if v.Byval() && v.Type().Width <= int64(2*types.PtrSize) {
- // If it is a small variable captured by value, downgrade it to PAUTO.
- v.Class_ = ir.PAUTO
- fn.Dcl = append(fn.Dcl, v)
- body = append(body, ir.NewAssignStmt(base.Pos, v, cr))
- } else {
- // Declare variable holding addresses taken from closure
- // and initialize in entry prologue.
- addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name))
- addr.SetType(types.NewPtr(v.Type()))
- addr.Class_ = ir.PAUTO
- addr.SetUsed(true)
- addr.Curfn = fn
- fn.Dcl = append(fn.Dcl, addr)
- v.Heapaddr = addr
- var src ir.Node = cr
- if v.Byval() {
- src = typecheck.NodAddr(cr)
- }
- body = append(body, ir.NewAssignStmt(base.Pos, addr, src))
- }
- }
-
- if len(body) > 0 {
- typecheck.Stmts(body)
- fn.Enter.Set(body)
- fn.SetNeedctxt(true)
- }
- }
-
- base.Pos = lno
-}
-
-// hasemptycvars reports whether closure clo has an
-// empty list of captured vars.
-func hasemptycvars(clo *ir.ClosureExpr) bool {
- return len(clo.Func.ClosureVars) == 0
-}
-
-// closuredebugruntimecheck applies boilerplate checks for debug flags
-// and for compiling the runtime.
-func closuredebugruntimecheck(clo *ir.ClosureExpr) {
- if base.Debug.Closure > 0 {
- if clo.Esc() == ir.EscHeap {
- base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func.ClosureVars)
- } else {
- base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
- }
- }
- if base.Flag.CompilingRuntime && clo.Esc() == ir.EscHeap {
- base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
- }
-}
-
-func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
- fn := clo.Func
-
- // If no closure vars, don't bother wrapping.
- if hasemptycvars(clo) {
- if base.Debug.Closure > 0 {
- base.WarnfAt(clo.Pos(), "closure converted to global")
- }
- return fn.Nname
- }
- closuredebugruntimecheck(clo)
-
- typ := typecheck.ClosureType(clo)
-
- clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil)
- clos.SetEsc(clo.Esc())
- clos.List.Set(append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, fn.Nname)}, fn.ClosureEnter...))
-
- addr := typecheck.NodAddr(clos)
- addr.SetEsc(clo.Esc())
-
- // Force type conversion from *struct to the func type.
- cfn := typecheck.ConvNop(addr, clo.Type())
-
- // non-escaping temp to use, if any.
- if x := clo.Prealloc; x != nil {
- if !types.Identical(typ, x.Type()) {
- panic("closure type does not match order's assigned type")
- }
- addr.Alloc = x
- clo.Prealloc = nil
- }
-
- return walkexpr(cfn, init)
-}
-
-func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
- // Create closure in the form of a composite literal.
- // For x.M with receiver (x) type T, the generated code looks like:
- //
- // clos = &struct{F uintptr; R T}{T.M·f, x}
- //
- // Like walkclosure above.
-
- if n.X.Type().IsInterface() {
- // Trigger panic for method on nil interface now.
- // Otherwise it happens in the wrapper and is confusing.
- n.X = cheapexpr(n.X, init)
- n.X = walkexpr(n.X, nil)
-
- tab := typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X))
-
- c := ir.NewUnaryExpr(base.Pos, ir.OCHECKNIL, tab)
- c.SetTypecheck(1)
- init.Append(c)
- }
-
- typ := typecheck.PartialCallType(n)
-
- clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil)
- clos.SetEsc(n.Esc())
- clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, n.Func.Nname), n.X}
-
- addr := typecheck.NodAddr(clos)
- addr.SetEsc(n.Esc())
-
- // Force type conversion from *struct to the func type.
- cfn := typecheck.ConvNop(addr, n.Type())
-
- // non-escaping temp to use, if any.
- if x := n.Prealloc; x != nil {
- if !types.Identical(typ, x.Type()) {
- panic("partial call type does not match order's assigned type")
- }
- addr.Alloc = x
- n.Prealloc = nil
- }
-
- return walkexpr(cfn, init)
-}
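The by-value/by-reference split that the deleted transformclosure lowers is visible from ordinary Go. The standalone program below (illustrative, not compiler code) mirrors the comment's example: byval is never reassigned after the closure is made, so the compiler may capture it as a copy, while byref is mutated and must travel through the hidden "&byref *int" pointer; the language semantics are identical either way.

package main

import "fmt"

func main() {
	byval := 1 // never reassigned after capture: eligible for by-value capture
	byref := 10

	f := func(a int) {
		fmt.Println(byval, a) // reads the (possibly copied) captured value
		byref++               // must go through the hidden &byref *int
	}
	f(42)

	fmt.Println(byref) // 11: the increment is visible outside the closure
}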
diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go
index c2a6a9e327..926b2dee95 100644
--- a/src/cmd/compile/internal/gc/compile.go
+++ b/src/cmd/compile/internal/gc/compile.go
@@ -17,6 +17,7 @@ import (
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
+ "cmd/compile/internal/walk"
)
// "Portable" code generation.
@@ -61,7 +62,7 @@ func compile(fn *ir.Func) {
ssagen.InitLSym(fn, true)
errorsBefore := base.Errors()
- walk(fn)
+ walk.Walk(fn)
if base.Errors() > errorsBefore {
return
}
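The guard around the new walk.Walk call is a stock compiler-phase pattern: snapshot the error count, run the phase, and skip downstream work if new errors appeared. A distilled sketch, where runPhase is a hypothetical wrapper around the real base.Errors call this diff uses:

// runPhase is a hypothetical helper reporting whether a phase
// completed without adding new compiler errors.
func runPhase(fn *ir.Func, phase func(*ir.Func)) bool {
	errorsBefore := base.Errors()
	phase(fn)
	return base.Errors() == errorsBefore
}

// e.g. inside compile():
//	if !runPhase(fn, walk.Walk) {
//		return // don't lower broken IR to SSA
//	}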
diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go
index 5caa2e769f..4ac468fb4e 100644
--- a/src/cmd/compile/internal/gc/initorder.go
+++ b/src/cmd/compile/internal/gc/initorder.go
@@ -11,6 +11,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/staticinit"
)
// Package initialization
@@ -77,9 +78,9 @@ type InitOrder struct {
// corresponding list of statements to include in the init() function
// body.
func initOrder(l []ir.Node) []ir.Node {
- s := InitSchedule{
- initplans: make(map[ir.Node]*InitPlan),
- inittemps: make(map[ir.Node]*ir.Name),
+ s := staticinit.Schedule{
+ Plans: make(map[ir.Node]*staticinit.Plan),
+ Temps: make(map[ir.Node]*ir.Name),
}
o := InitOrder{
blocking: make(map[ir.Node][]ir.Node),
@@ -91,7 +92,7 @@ func initOrder(l []ir.Node) []ir.Node {
switch n.Op() {
case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
o.processAssign(n)
- o.flushReady(s.staticInit)
+ o.flushReady(s.StaticInit)
case ir.ODCLCONST, ir.ODCLFUNC, ir.ODCLTYPE:
// nop
default:
@@ -124,7 +125,7 @@ func initOrder(l []ir.Node) []ir.Node {
base.Fatalf("expected empty map: %v", o.blocking)
}
- return s.out
+ return s.Out
}
func (o *InitOrder) processAssign(n ir.Node) {
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 2a8012b462..aeb58a3310 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -22,6 +22,7 @@ import (
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
+ "cmd/compile/internal/walk"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/objabi"
@@ -268,7 +269,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
n := n.(*ir.Func)
if n.OClosure != nil {
ir.CurFunc = n
- transformclosure(n)
+ walk.Closure(n)
}
}
}
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index f159256da6..0ab3a8dad4 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -18,7 +18,6 @@ import (
"cmd/internal/objabi"
"encoding/json"
"fmt"
- "go/constant"
)
// These modes say which kind of object file to generate.
@@ -261,62 +260,6 @@ func addGCLocals() {
}
}
-// litsym writes the static literal c to n.
-// Neither n nor c is modified.
-func litsym(n *ir.Name, noff int64, c ir.Node, wid int) {
- if n.Op() != ir.ONAME {
- base.Fatalf("litsym n op %v", n.Op())
- }
- if n.Sym() == nil {
- base.Fatalf("litsym nil n sym")
- }
- if c.Op() == ir.ONIL {
- return
- }
- if c.Op() != ir.OLITERAL {
- base.Fatalf("litsym c op %v", c.Op())
- }
- s := n.Sym().Linksym()
- switch u := c.Val(); u.Kind() {
- case constant.Bool:
- i := int64(obj.Bool2int(constant.BoolVal(u)))
- s.WriteInt(base.Ctxt, noff, wid, i)
-
- case constant.Int:
- s.WriteInt(base.Ctxt, noff, wid, ir.IntVal(c.Type(), u))
-
- case constant.Float:
- f, _ := constant.Float64Val(u)
- switch c.Type().Kind() {
- case types.TFLOAT32:
- s.WriteFloat32(base.Ctxt, noff, float32(f))
- case types.TFLOAT64:
- s.WriteFloat64(base.Ctxt, noff, f)
- }
-
- case constant.Complex:
- re, _ := constant.Float64Val(constant.Real(u))
- im, _ := constant.Float64Val(constant.Imag(u))
- switch c.Type().Kind() {
- case types.TCOMPLEX64:
- s.WriteFloat32(base.Ctxt, noff, float32(re))
- s.WriteFloat32(base.Ctxt, noff+4, float32(im))
- case types.TCOMPLEX128:
- s.WriteFloat64(base.Ctxt, noff, re)
- s.WriteFloat64(base.Ctxt, noff+8, im)
- }
-
- case constant.String:
- i := constant.StringVal(u)
- symdata := staticdata.StringSym(n.Pos(), i)
- s.WriteAddr(base.Ctxt, noff, types.PtrSize, symdata, 0)
- s.WriteInt(base.Ctxt, noff+int64(types.PtrSize), types.PtrSize, int64(len(i)))
-
- default:
- base.Fatalf("litsym unhandled OLITERAL %v", c)
- }
-}
-
func ggloblnod(nam ir.Node) {
s := nam.Sym().Linksym()
s.Gotype = reflectdata.TypeSym(nam.Type()).Linksym()
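litsym's move to staticdata.InitConst does not change its core shape: a switch on go/constant value kinds that picks a serialization. The standalone sketch below keeps only that shape; the fmt calls stand in for the compiler's LSym WriteInt/WriteFloat*/WriteAddr methods, and everything else uses the real go/constant API.

package main

import (
	"fmt"
	"go/constant"
)

// emit mirrors the kind switch of litsym (now staticdata.InitConst);
// the prints stand in for the compiler's symbol writes.
func emit(v constant.Value) {
	switch v.Kind() {
	case constant.Bool:
		fmt.Println("bool :", constant.BoolVal(v))
	case constant.Int:
		i, _ := constant.Int64Val(v)
		fmt.Println("int  :", i)
	case constant.Float:
		f, _ := constant.Float64Val(v)
		fmt.Println("float:", f)
	case constant.Complex:
		re, _ := constant.Float64Val(constant.Real(v))
		im, _ := constant.Float64Val(constant.Imag(v))
		fmt.Println("cplx :", re, im)
	case constant.String:
		s := constant.StringVal(v)
		fmt.Println("str  :", len(s), s) // pointer+length, like WriteAddr+WriteInt
	default:
		panic("unhandled constant kind")
	}
}

func main() {
	emit(constant.MakeBool(true))
	emit(constant.MakeInt64(42))
	emit(constant.MakeFloat64(3.5))
	emit(constant.MakeImag(constant.MakeFloat64(2)))
	emit(constant.MakeString("hi"))
}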
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
deleted file mode 100644
index d1c5bb04a1..0000000000
--- a/src/cmd/compile/internal/gc/order.go
+++ /dev/null
@@ -1,1489 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/escape"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/reflectdata"
- "cmd/compile/internal/typecheck"
- "cmd/compile/internal/types"
- "cmd/internal/src"
- "fmt"
-)
-
-// Rewrite tree to use separate statements to enforce
-// order of evaluation. Makes walk easier, because it
-// can (after this runs) reorder at will within an expression.
-//
-// Rewrite m[k] op= r into m[k] = m[k] op r if op is / or %.
-//
-// Introduce temporaries as needed by runtime routines.
-// For example, the map runtime routines take the map key
-// by reference, so make sure all map keys are addressable
-// by copying them to temporaries as needed.
-// The same is true for channel operations.
-//
-// Arrange that map index expressions only appear in direct
-// assignments x = m[k] or m[k] = x, never in larger expressions.
-//
-// Arrange that receive expressions only appear in direct assignments
-// x = <-c or as standalone statements <-c, never in larger expressions.
-
-// TODO(rsc): The temporary introduction during multiple assignments
-// should be moved into this file, so that the temporaries can be cleaned
-// and so that conversions implicit in the OAS2FUNC and OAS2RECV
-// nodes can be made explicit and then have their temporaries cleaned.
-
-// TODO(rsc): Goto and multilevel break/continue can jump over
-// inserted VARKILL annotations. Work out a way to handle these.
-// The current implementation is safe, in that it will execute correctly.
-// But it won't reuse temporaries as aggressively as it might, and
-// it can result in unnecessary zeroing of those variables in the function
-// prologue.
-
-// Order holds state during the ordering process.
-type Order struct {
- out []ir.Node // list of generated statements
- temp []*ir.Name // stack of temporary variables
- free map[string][]*ir.Name // free list of unused temporaries, by type.LongString().
- edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
-}
-
-// Order rewrites fn.Nbody to apply the ordering constraints
-// described in the comment at the top of the file.
-func order(fn *ir.Func) {
- if base.Flag.W > 1 {
- s := fmt.Sprintf("\nbefore order %v", fn.Sym())
- ir.DumpList(s, fn.Body)
- }
-
- orderBlock(&fn.Body, map[string][]*ir.Name{})
-}
-
-// append typechecks stmt and appends it to out.
-func (o *Order) append(stmt ir.Node) {
- o.out = append(o.out, typecheck.Stmt(stmt))
-}
-
-// newTemp allocates a new temporary with the given type,
-// pushes it onto the temp stack, and returns it.
-// If clear is true, newTemp emits code to zero the temporary.
-func (o *Order) newTemp(t *types.Type, clear bool) *ir.Name {
- var v *ir.Name
- // Note: LongString is close to the type equality we want,
- // but not exactly. We still need to double-check with types.Identical.
- key := t.LongString()
- a := o.free[key]
- for i, n := range a {
- if types.Identical(t, n.Type()) {
- v = a[i]
- a[i] = a[len(a)-1]
- a = a[:len(a)-1]
- o.free[key] = a
- break
- }
- }
- if v == nil {
- v = typecheck.Temp(t)
- }
- if clear {
- o.append(ir.NewAssignStmt(base.Pos, v, nil))
- }
-
- o.temp = append(o.temp, v)
- return v
-}
-
-// copyExpr behaves like newTemp but also emits
-// code to initialize the temporary to the value n.
-func (o *Order) copyExpr(n ir.Node) ir.Node {
- return o.copyExpr1(n, false)
-}
-
-// copyExprClear is like copyExpr but clears the temp before assignment.
-// It is provided for use when the evaluation of tmp = n turns into
-// a function call that is passed a pointer to the temporary as the output space.
-// If the call blocks before tmp has been written,
-// the garbage collector will still treat the temporary as live,
-// so we must zero it before entering that call.
-// Today, this only happens for channel receive operations.
-// (The other candidate would be map access, but map access
-// returns a pointer to the result data instead of taking a pointer
-// to be filled in.)
-func (o *Order) copyExprClear(n ir.Node) *ir.Name {
- return o.copyExpr1(n, true)
-}
-
-func (o *Order) copyExpr1(n ir.Node, clear bool) *ir.Name {
- t := n.Type()
- v := o.newTemp(t, clear)
- o.append(ir.NewAssignStmt(base.Pos, v, n))
- return v
-}
-
-// cheapExpr returns a cheap version of n.
-// The definition of cheap is that n is a variable or constant.
-// If not, cheapExpr allocates a new tmp, emits tmp = n,
-// and then returns tmp.
-func (o *Order) cheapExpr(n ir.Node) ir.Node {
- if n == nil {
- return nil
- }
-
- switch n.Op() {
- case ir.ONAME, ir.OLITERAL, ir.ONIL:
- return n
- case ir.OLEN, ir.OCAP:
- n := n.(*ir.UnaryExpr)
- l := o.cheapExpr(n.X)
- if l == n.X {
- return n
- }
- a := ir.SepCopy(n).(*ir.UnaryExpr)
- a.X = l
- return typecheck.Expr(a)
- }
-
- return o.copyExpr(n)
-}
-
-// safeExpr returns a safe version of n.
-// The definition of safe is that n can appear multiple times
-// without violating the semantics of the original program,
-// and that assigning to the safe version has the same effect
-// as assigning to the original n.
-//
-// The intended use is to apply to x when rewriting x += y into x = x + y.
-func (o *Order) safeExpr(n ir.Node) ir.Node {
- switch n.Op() {
- case ir.ONAME, ir.OLITERAL, ir.ONIL:
- return n
-
- case ir.OLEN, ir.OCAP:
- n := n.(*ir.UnaryExpr)
- l := o.safeExpr(n.X)
- if l == n.X {
- return n
- }
- a := ir.SepCopy(n).(*ir.UnaryExpr)
- a.X = l
- return typecheck.Expr(a)
-
- case ir.ODOT:
- n := n.(*ir.SelectorExpr)
- l := o.safeExpr(n.X)
- if l == n.X {
- return n
- }
- a := ir.SepCopy(n).(*ir.SelectorExpr)
- a.X = l
- return typecheck.Expr(a)
-
- case ir.ODOTPTR:
- n := n.(*ir.SelectorExpr)
- l := o.cheapExpr(n.X)
- if l == n.X {
- return n
- }
- a := ir.SepCopy(n).(*ir.SelectorExpr)
- a.X = l
- return typecheck.Expr(a)
-
- case ir.ODEREF:
- n := n.(*ir.StarExpr)
- l := o.cheapExpr(n.X)
- if l == n.X {
- return n
- }
- a := ir.SepCopy(n).(*ir.StarExpr)
- a.X = l
- return typecheck.Expr(a)
-
- case ir.OINDEX, ir.OINDEXMAP:
- n := n.(*ir.IndexExpr)
- var l ir.Node
- if n.X.Type().IsArray() {
- l = o.safeExpr(n.X)
- } else {
- l = o.cheapExpr(n.X)
- }
- r := o.cheapExpr(n.Index)
- if l == n.X && r == n.Index {
- return n
- }
- a := ir.SepCopy(n).(*ir.IndexExpr)
- a.X = l
- a.Index = r
- return typecheck.Expr(a)
-
- default:
- base.Fatalf("order.safeExpr %v", n.Op())
- return nil // not reached
- }
-}
-
-// isaddrokay reports whether it is okay to pass n's address to runtime routines.
-// Taking the address of a variable makes the liveness and optimization analyses
-// lose track of where the variable's lifetime ends. To avoid hurting the analyses
-// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
-// because we emit explicit VARKILL instructions marking the end of those
-// temporaries' lifetimes.
-func isaddrokay(n ir.Node) bool {
- return ir.IsAssignable(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class_ == ir.PEXTERN || ir.IsAutoTmp(n))
-}
-
-// addrTemp ensures that n is okay to pass by address to runtime routines.
-// If the original argument n is not okay, addrTemp creates a tmp, emits
-// tmp = n, and then returns tmp.
-// The result of addrTemp MUST be assigned back to n, e.g.
-// n.Left = o.addrTemp(n.Left)
-func (o *Order) addrTemp(n ir.Node) ir.Node {
- if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
- // TODO: expand this to all static composite literal nodes?
- n = typecheck.DefaultLit(n, nil)
- types.CalcSize(n.Type())
- vstat := readonlystaticname(n.Type())
- var s InitSchedule
- s.staticassign(vstat, 0, n, n.Type())
- if s.out != nil {
- base.Fatalf("staticassign of const generated code: %+v", n)
- }
- vstat = typecheck.Expr(vstat).(*ir.Name)
- return vstat
- }
- if isaddrokay(n) {
- return n
- }
- return o.copyExpr(n)
-}
-
-// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
-// It should only be used for map runtime calls which have *_fast* versions.
-func (o *Order) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
- // Most map calls need to take the address of the key.
- // Exception: map*_fast* calls. See golang.org/issue/19015.
- if mapfast(t) == mapslow {
- return o.addrTemp(n)
- }
- return n
-}
-
-// mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP
-// in n to avoid string allocations for keys in map lookups.
-// Returns a bool that signals if a modification was made.
-//
-// For:
-// x = m[string(k)]
-// x = m[T1{... Tn{..., string(k), ...}]
-// where k is []byte, T1 to Tn is a nesting of struct and array literals,
-// the allocation of backing bytes for the string can be avoided
-// by reusing the []byte backing array. These are special cases
-// for avoiding allocations when converting byte slices to strings.
-// It would be nice to handle these generally, but because
-// []byte keys are not allowed in maps, the use of string(k)
-// comes up in important cases in practice. See issue 3512.
-func mapKeyReplaceStrConv(n ir.Node) bool {
- var replaced bool
- switch n.Op() {
- case ir.OBYTES2STR:
- n := n.(*ir.ConvExpr)
- n.SetOp(ir.OBYTES2STRTMP)
- replaced = true
- case ir.OSTRUCTLIT:
- n := n.(*ir.CompLitExpr)
- for _, elem := range n.List {
- elem := elem.(*ir.StructKeyExpr)
- if mapKeyReplaceStrConv(elem.Value) {
- replaced = true
- }
- }
- case ir.OARRAYLIT:
- n := n.(*ir.CompLitExpr)
- for _, elem := range n.List {
- if elem.Op() == ir.OKEY {
- elem = elem.(*ir.KeyExpr).Value
- }
- if mapKeyReplaceStrConv(elem) {
- replaced = true
- }
- }
- }
- return replaced
-}
-
-type ordermarker int
-
-// markTemp returns the top of the temporary variable stack.
-func (o *Order) markTemp() ordermarker {
- return ordermarker(len(o.temp))
-}
-
-// popTemp pops temporaries off the stack until reaching the mark,
-// which must have been returned by markTemp.
-func (o *Order) popTemp(mark ordermarker) {
- for _, n := range o.temp[mark:] {
- key := n.Type().LongString()
- o.free[key] = append(o.free[key], n)
- }
- o.temp = o.temp[:mark]
-}
-
-// cleanTempNoPop emits VARKILL instructions to *out
-// for each temporary above the mark on the temporary stack.
-// It does not pop the temporaries from the stack.
-func (o *Order) cleanTempNoPop(mark ordermarker) []ir.Node {
- var out []ir.Node
- for i := len(o.temp) - 1; i >= int(mark); i-- {
- n := o.temp[i]
- out = append(out, typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARKILL, n)))
- }
- return out
-}
-
-// cleanTemp emits VARKILL instructions for each temporary above the
-// mark on the temporary stack and removes them from the stack.
-func (o *Order) cleanTemp(top ordermarker) {
- o.out = append(o.out, o.cleanTempNoPop(top)...)
- o.popTemp(top)
-}
-
-// stmtList orders each of the statements in the list.
-func (o *Order) stmtList(l ir.Nodes) {
- s := l
- for i := range s {
- orderMakeSliceCopy(s[i:])
- o.stmt(s[i])
- }
-}
-
-// orderMakeSliceCopy matches the pattern:
-// m = OMAKESLICE([]T, x); OCOPY(m, s)
-// and rewrites it to:
-// m = OMAKESLICECOPY([]T, x, s); nil
-func orderMakeSliceCopy(s []ir.Node) {
- if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
- return
- }
- if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
- return
- }
-
- as := s[0].(*ir.AssignStmt)
- cp := s[1].(*ir.BinaryExpr)
- if as.Y == nil || as.Y.Op() != ir.OMAKESLICE || ir.IsBlank(as.X) ||
- as.X.Op() != ir.ONAME || cp.X.Op() != ir.ONAME || cp.Y.Op() != ir.ONAME ||
- as.X.Name() != cp.X.Name() || cp.X.Name() == cp.Y.Name() {
- // The line above this one is correct with the differing equality operators:
- // we want as.X and cp.X to be the same name,
- // but we want the initial data to be coming from a different name.
- return
- }
-
- mk := as.Y.(*ir.MakeExpr)
- if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil {
- return
- }
- mk.SetOp(ir.OMAKESLICECOPY)
- mk.Cap = cp.Y
- // Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
- mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y))
- as.Y = typecheck.Expr(mk)
- s[1] = nil // remove separate copy call
-}
-
-// edge inserts coverage instrumentation for libfuzzer.
-func (o *Order) edge() {
- if base.Debug.Libfuzzer == 0 {
- return
- }
-
- // Create a new uint8 counter to be allocated in section
- // __libfuzzer_extra_counters.
- counter := staticname(types.Types[types.TUINT8])
- counter.Name().SetLibfuzzerExtraCounter(true)
-
- // counter += 1
- incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(1))
- o.append(incr)
-}
-
-// orderBlock orders the block of statements in n into a new slice,
-// and then replaces the old slice in n with the new slice.
-// free is a map that can be used to obtain temporary variables by type.
-func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
- var order Order
- order.free = free
- mark := order.markTemp()
- order.edge()
- order.stmtList(*n)
- order.cleanTemp(mark)
- n.Set(order.out)
-}
-
-// exprInPlace orders the side effects in *np and
-// leaves them as the init list of the final *np.
-// The result of exprInPlace MUST be assigned back to n, e.g.
-// n.Left = o.exprInPlace(n.Left)
-func (o *Order) exprInPlace(n ir.Node) ir.Node {
- var order Order
- order.free = o.free
- n = order.expr(n, nil)
- n = ir.InitExpr(order.out, n)
-
- // insert new temporaries from order
- // at head of outer list.
- o.temp = append(o.temp, order.temp...)
- return n
-}
-
-// orderStmtInPlace orders the side effects of the single statement *np
-// and replaces it with the resulting statement list.
-// The result of orderStmtInPlace MUST be assigned back to n, e.g.
-// n.Left = orderStmtInPlace(n.Left)
-// free is a map that can be used to obtain temporary variables by type.
-func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
- var order Order
- order.free = free
- mark := order.markTemp()
- order.stmt(n)
- order.cleanTemp(mark)
- return ir.NewBlockStmt(src.NoXPos, order.out)
-}
-
-// init moves n's init list to o.out.
-func (o *Order) init(n ir.Node) {
- if ir.MayBeShared(n) {
- // For concurrency safety, don't mutate potentially shared nodes.
- // First, ensure that no work is required here.
- if len(n.Init()) > 0 {
- base.Fatalf("order.init shared node with ninit")
- }
- return
- }
- o.stmtList(n.Init())
- n.PtrInit().Set(nil)
-}
-
-// call orders the call expression n.
-// n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
-func (o *Order) call(nn ir.Node) {
- if len(nn.Init()) > 0 {
- // Caller should have already called o.init(nn).
- base.Fatalf("%v with unexpected ninit", nn.Op())
- }
-
- // Builtin functions.
- if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLMETH && nn.Op() != ir.OCALLINTER {
- switch n := nn.(type) {
- default:
- base.Fatalf("unexpected call: %+v", n)
- case *ir.UnaryExpr:
- n.X = o.expr(n.X, nil)
- case *ir.ConvExpr:
- n.X = o.expr(n.X, nil)
- case *ir.BinaryExpr:
- n.X = o.expr(n.X, nil)
- n.Y = o.expr(n.Y, nil)
- case *ir.MakeExpr:
- n.Len = o.expr(n.Len, nil)
- n.Cap = o.expr(n.Cap, nil)
- case *ir.CallExpr:
- o.exprList(n.Args)
- }
- return
- }
-
- n := nn.(*ir.CallExpr)
- typecheck.FixVariadicCall(n)
- n.X = o.expr(n.X, nil)
- o.exprList(n.Args)
-
- if n.Op() == ir.OCALLINTER {
- return
- }
- keepAlive := func(arg ir.Node) {
- // If the argument is really a pointer being converted to uintptr,
- // arrange for the pointer to be kept alive until the call returns,
- // by copying it into a temp and marking that temp
- // still alive when we pop the temp stack.
- if arg.Op() == ir.OCONVNOP {
- arg := arg.(*ir.ConvExpr)
- if arg.X.Type().IsUnsafePtr() {
- x := o.copyExpr(arg.X)
- arg.X = x
- x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable
- n.Body.Append(typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARLIVE, x)))
- }
- }
- }
-
- // Check for "unsafe-uintptr" tag provided by escape analysis.
- for i, param := range n.X.Type().Params().FieldSlice() {
- if param.Note == escape.UnsafeUintptrNote || param.Note == escape.UintptrEscapesNote {
- if arg := n.Args[i]; arg.Op() == ir.OSLICELIT {
- arg := arg.(*ir.CompLitExpr)
- for _, elt := range arg.List {
- keepAlive(elt)
- }
- } else {
- keepAlive(arg)
- }
- }
- }
-}
-
-// mapAssign appends n to o.out, introducing temporaries
-// to make sure that all map assignments have the form m[k] = x.
-// (Note: expr has already been called on n, so we know k is addressable.)
-//
-// If n is the multiple assignment form ..., m[k], ... = ..., x, ..., the rewrite is
-// t1 = m
-// t2 = k
-// ...., t3, ... = ..., x, ...
-// t1[t2] = t3
-//
-// The temporaries t1, t2 are needed in case the ... being assigned
-// contain m or k. They are usually unnecessary, but in the unnecessary
-// cases they are also typically registerizable, so not much harm done.
-// And this only applies to the multiple-assignment form.
-// We could do a more precise analysis if needed, like in walk.go.
-func (o *Order) mapAssign(n ir.Node) {
- switch n.Op() {
- default:
- base.Fatalf("order.mapAssign %v", n.Op())
-
- case ir.OAS:
- n := n.(*ir.AssignStmt)
- if n.X.Op() == ir.OINDEXMAP {
- n.Y = o.safeMapRHS(n.Y)
- }
- o.out = append(o.out, n)
- case ir.OASOP:
- n := n.(*ir.AssignOpStmt)
- if n.X.Op() == ir.OINDEXMAP {
- n.Y = o.safeMapRHS(n.Y)
- }
- o.out = append(o.out, n)
-
- case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC:
- n := n.(*ir.AssignListStmt)
- var post []ir.Node
- for i, m := range n.Lhs {
- switch {
- case m.Op() == ir.OINDEXMAP:
- m := m.(*ir.IndexExpr)
- if !ir.IsAutoTmp(m.X) {
- m.X = o.copyExpr(m.X)
- }
- if !ir.IsAutoTmp(m.Index) {
- m.Index = o.copyExpr(m.Index)
- }
- fallthrough
- case base.Flag.Cfg.Instrumenting && n.Op() == ir.OAS2FUNC && !ir.IsBlank(m):
- t := o.newTemp(m.Type(), false)
- n.Lhs[i] = t
- a := ir.NewAssignStmt(base.Pos, m, t)
- post = append(post, typecheck.Stmt(a))
- }
- }
-
- o.out = append(o.out, n)
- o.out = append(o.out, post...)
- }
-}
-
-func (o *Order) safeMapRHS(r ir.Node) ir.Node {
- // Make sure we evaluate the RHS before starting the map insert.
- // We need to make sure the RHS won't panic. See issue 22881.
- if r.Op() == ir.OAPPEND {
- r := r.(*ir.CallExpr)
- s := r.Args[1:]
- for i, n := range s {
- s[i] = o.cheapExpr(n)
- }
- return r
- }
- return o.cheapExpr(r)
-}
-
-// stmt orders the statement n, appending to o.out.
-// Temporaries created during the statement are cleaned
-// up using VARKILL instructions as possible.
-func (o *Order) stmt(n ir.Node) {
- if n == nil {
- return
- }
-
- lno := ir.SetPos(n)
- o.init(n)
-
- switch n.Op() {
- default:
- base.Fatalf("order.stmt %v", n.Op())
-
- case ir.OVARKILL, ir.OVARLIVE, ir.OINLMARK:
- o.out = append(o.out, n)
-
- case ir.OAS:
- n := n.(*ir.AssignStmt)
- t := o.markTemp()
- n.X = o.expr(n.X, nil)
- n.Y = o.expr(n.Y, n.X)
- o.mapAssign(n)
- o.cleanTemp(t)
-
- case ir.OASOP:
- n := n.(*ir.AssignOpStmt)
- t := o.markTemp()
- n.X = o.expr(n.X, nil)
- n.Y = o.expr(n.Y, nil)
-
- if base.Flag.Cfg.Instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) {
- // Rewrite m[k] op= r into m[k] = m[k] op r so
- // that we can ensure that if op panics
- // because r is zero, the panic happens before
- // the map assignment.
- // DeepCopy is a big hammer here, but safeExpr
- // makes sure there is nothing too deep being copied.
- l1 := o.safeExpr(n.X)
- l2 := ir.DeepCopy(src.NoXPos, l1)
- if l2.Op() == ir.OINDEXMAP {
- l2 := l2.(*ir.IndexExpr)
- l2.Assigned = false
- }
- l2 = o.copyExpr(l2)
- r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil)
- as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r))
- o.mapAssign(as)
- o.cleanTemp(t)
- return
- }
-
- o.mapAssign(n)
- o.cleanTemp(t)
-
- case ir.OAS2:
- n := n.(*ir.AssignListStmt)
- t := o.markTemp()
- o.exprList(n.Lhs)
- o.exprList(n.Rhs)
- o.mapAssign(n)
- o.cleanTemp(t)
-
- // Special: avoid copy of func call n.Right
- case ir.OAS2FUNC:
- n := n.(*ir.AssignListStmt)
- t := o.markTemp()
- o.exprList(n.Lhs)
- o.init(n.Rhs[0])
- o.call(n.Rhs[0])
- o.as2(n)
- o.cleanTemp(t)
-
- // Special: use temporary variables to hold result,
- // so that runtime can take address of temporary.
- // No temporary for blank assignment.
- //
- // OAS2MAPR: make sure key is addressable if needed,
- // and make sure OINDEXMAP is not copied out.
- case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
- n := n.(*ir.AssignListStmt)
- t := o.markTemp()
- o.exprList(n.Lhs)
-
- switch r := n.Rhs[0]; r.Op() {
- case ir.ODOTTYPE2:
- r := r.(*ir.TypeAssertExpr)
- r.X = o.expr(r.X, nil)
- case ir.ORECV:
- r := r.(*ir.UnaryExpr)
- r.X = o.expr(r.X, nil)
- case ir.OINDEXMAP:
- r := r.(*ir.IndexExpr)
- r.X = o.expr(r.X, nil)
- r.Index = o.expr(r.Index, nil)
- // See similar conversion for OINDEXMAP below.
- _ = mapKeyReplaceStrConv(r.Index)
- r.Index = o.mapKeyTemp(r.X.Type(), r.Index)
- default:
- base.Fatalf("order.stmt: %v", r.Op())
- }
-
- o.okAs2(n)
- o.cleanTemp(t)
-
- // Special: does not save n onto out.
- case ir.OBLOCK:
- n := n.(*ir.BlockStmt)
- o.stmtList(n.List)
-
- // Special: n->left is not an expression; save as is.
- case ir.OBREAK,
- ir.OCONTINUE,
- ir.ODCL,
- ir.ODCLCONST,
- ir.ODCLTYPE,
- ir.OFALL,
- ir.OGOTO,
- ir.OLABEL,
- ir.ORETJMP:
- o.out = append(o.out, n)
-
- // Special: handle call arguments.
- case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
- n := n.(*ir.CallExpr)
- t := o.markTemp()
- o.call(n)
- o.out = append(o.out, n)
- o.cleanTemp(t)
-
- case ir.OCLOSE, ir.ORECV:
- n := n.(*ir.UnaryExpr)
- t := o.markTemp()
- n.X = o.expr(n.X, nil)
- o.out = append(o.out, n)
- o.cleanTemp(t)
-
- case ir.OCOPY:
- n := n.(*ir.BinaryExpr)
- t := o.markTemp()
- n.X = o.expr(n.X, nil)
- n.Y = o.expr(n.Y, nil)
- o.out = append(o.out, n)
- o.cleanTemp(t)
-
- case ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
- n := n.(*ir.CallExpr)
- t := o.markTemp()
- o.exprList(n.Args)
- o.out = append(o.out, n)
- o.cleanTemp(t)
-
- // Special: order arguments to inner call but not call itself.
- case ir.ODEFER, ir.OGO:
- n := n.(*ir.GoDeferStmt)
- t := o.markTemp()
- o.init(n.Call)
- o.call(n.Call)
- o.out = append(o.out, n)
- o.cleanTemp(t)
-
- case ir.ODELETE:
- n := n.(*ir.CallExpr)
- t := o.markTemp()
- n.Args[0] = o.expr(n.Args[0], nil)
- n.Args[1] = o.expr(n.Args[1], nil)
- n.Args[1] = o.mapKeyTemp(n.Args[0].Type(), n.Args[1])
- o.out = append(o.out, n)
- o.cleanTemp(t)
-
- // Clean temporaries from condition evaluation at
- // beginning of loop body and after for statement.
- case ir.OFOR:
- n := n.(*ir.ForStmt)
- t := o.markTemp()
- n.Cond = o.exprInPlace(n.Cond)
- n.Body.Prepend(o.cleanTempNoPop(t)...)
- orderBlock(&n.Body, o.free)
- n.Post = orderStmtInPlace(n.Post, o.free)
- o.out = append(o.out, n)
- o.cleanTemp(t)
-
- // Clean temporaries from condition at
- // beginning of both branches.
- case ir.OIF:
- n := n.(*ir.IfStmt)
- t := o.markTemp()
- n.Cond = o.exprInPlace(n.Cond)
- n.Body.Prepend(o.cleanTempNoPop(t)...)
- n.Else.Prepend(o.cleanTempNoPop(t)...)
- o.popTemp(t)
- orderBlock(&n.Body, o.free)
- orderBlock(&n.Else, o.free)
- o.out = append(o.out, n)
-
- // Special: argument will be converted to interface using convT2E
- // so make sure it is an addressable temporary.
- case ir.OPANIC:
- n := n.(*ir.UnaryExpr)
- t := o.markTemp()
- n.X = o.expr(n.X, nil)
- if !n.X.Type().IsInterface() {
- n.X = o.addrTemp(n.X)
- }
- o.out = append(o.out, n)
- o.cleanTemp(t)
-
- case ir.ORANGE:
- // n.Right is the expression being ranged over.
- // order it, and then make a copy if we need one.
- // We almost always do, to ensure that we don't
- // see any value changes made during the loop.
- // Usually the copy is cheap (e.g., array pointer,
- // chan, slice, string are all tiny).
- // The exception is ranging over an array value
- // (not a slice, not a pointer to array),
- // which must make a copy to avoid seeing updates made during
- // the range body. Ranging over an array value is uncommon though.
-
- // Mark []byte(str) range expression to reuse string backing storage.
- // It is safe because the storage cannot be mutated.
- n := n.(*ir.RangeStmt)
- if n.X.Op() == ir.OSTR2BYTES {
- n.X.(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP)
- }
-
- t := o.markTemp()
- n.X = o.expr(n.X, nil)
-
- orderBody := true
- switch n.Type().Kind() {
- default:
- base.Fatalf("order.stmt range %v", n.Type())
-
- case types.TARRAY, types.TSLICE:
- if len(n.Vars) < 2 || ir.IsBlank(n.Vars[1]) {
- // for i := range x will only use x once, to compute len(x).
- // No need to copy it.
- break
- }
- fallthrough
-
- case types.TCHAN, types.TSTRING:
- // chan, string, slice, array ranges use value multiple times.
- // make copy.
- r := n.X
-
- if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
- r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r)
- r.SetType(types.Types[types.TSTRING])
- r = typecheck.Expr(r)
- }
-
- n.X = o.copyExpr(r)
-
- case types.TMAP:
- if isMapClear(n) {
- // Preserve the body of the map clear pattern so it can
- // be detected during walk. The loop body will not be used
- // when optimizing away the range loop to a runtime call.
- orderBody = false
- break
- }
-
- // copy the map value in case it is a map literal.
- // TODO(rsc): Make tmp = literal expressions reuse tmp.
- // For maps tmp is just one word so it hardly matters.
- r := n.X
- n.X = o.copyExpr(r)
-
- // n.Prealloc is the temp for the iterator.
- // hiter contains pointers and needs to be zeroed.
- n.Prealloc = o.newTemp(reflectdata.MapIterType(n.Type()), true)
- }
- o.exprListInPlace(n.Vars)
- if orderBody {
- orderBlock(&n.Body, o.free)
- }
- o.out = append(o.out, n)
- o.cleanTemp(t)
-
- case ir.ORETURN:
- n := n.(*ir.ReturnStmt)
- o.exprList(n.Results)
- o.out = append(o.out, n)
-
- // Special: clean case temporaries in each block entry.
- // Select must enter one of its blocks, so there is no
- // need for a cleaning at the end.
- // Doubly special: evaluation order for select is stricter
- // than ordinary expressions. Even something like p.c
- // has to be hoisted into a temporary, so that it cannot be
- // reordered after the channel evaluation for a different
- // case (if p were nil, then the timing of the fault would
- // give this away).
- case ir.OSELECT:
- n := n.(*ir.SelectStmt)
- t := o.markTemp()
- for _, ncas := range n.Cases {
- ncas := ncas.(*ir.CaseStmt)
- r := ncas.Comm
- ir.SetPos(ncas)
-
- // Append any new body prologue to ninit.
- // The next loop will insert ninit into nbody.
- if len(ncas.Init()) != 0 {
- base.Fatalf("order select ninit")
- }
- if r == nil {
- continue
- }
- switch r.Op() {
- default:
- ir.Dump("select case", r)
- base.Fatalf("unknown op in select %v", r.Op())
-
- case ir.OSELRECV2:
- // case x, ok = <-c
- r := r.(*ir.AssignListStmt)
- recv := r.Rhs[0].(*ir.UnaryExpr)
- recv.X = o.expr(recv.X, nil)
- if !ir.IsAutoTmp(recv.X) {
- recv.X = o.copyExpr(recv.X)
- }
- init := *r.PtrInit()
- r.PtrInit().Set(nil)
-
- colas := r.Def
- do := func(i int, t *types.Type) {
- n := r.Lhs[i]
- if ir.IsBlank(n) {
- return
- }
- // If this is case x := <-ch or case x, y := <-ch, the case has
- // the ODCL nodes to declare x and y. We want to delay that
- // declaration (and possible allocation) until inside the case body.
- // Delete the ODCL nodes here and recreate them inside the body below.
- if colas {
- if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n {
- init = init[1:]
- }
- dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n))
- ncas.PtrInit().Append(dcl)
- }
- tmp := o.newTemp(t, t.HasPointers())
- as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, n, typecheck.Conv(tmp, n.Type())))
- ncas.PtrInit().Append(as)
- r.Lhs[i] = tmp
- }
- do(0, recv.X.Type().Elem())
- do(1, types.Types[types.TBOOL])
- if len(init) != 0 {
- ir.DumpList("ninit", r.Init())
- base.Fatalf("ninit on select recv")
- }
- orderBlock(ncas.PtrInit(), o.free)
-
- case ir.OSEND:
- r := r.(*ir.SendStmt)
- if len(r.Init()) != 0 {
- ir.DumpList("ninit", r.Init())
- base.Fatalf("ninit on select send")
- }
-
- // case c <- x
- // r->left is c, r->right is x, both are always evaluated.
- r.Chan = o.expr(r.Chan, nil)
-
- if !ir.IsAutoTmp(r.Chan) {
- r.Chan = o.copyExpr(r.Chan)
- }
- r.Value = o.expr(r.Value, nil)
- if !ir.IsAutoTmp(r.Value) {
- r.Value = o.copyExpr(r.Value)
- }
- }
- }
- // Now that we have accumulated all the temporaries, clean them.
- // Also insert any ninit queued during the previous loop.
- // (The temporary cleaning must follow that ninit work.)
- for _, cas := range n.Cases {
- cas := cas.(*ir.CaseStmt)
- orderBlock(&cas.Body, o.free)
- cas.Body.Prepend(o.cleanTempNoPop(t)...)
-
- // TODO(mdempsky): Is this actually necessary?
- // walkselect appears to walk Ninit.
- cas.Body.Prepend(cas.Init()...)
- cas.PtrInit().Set(nil)
- }
-
- o.out = append(o.out, n)
- o.popTemp(t)
-
- // Special: value being sent is passed as a pointer; make it addressable.
- case ir.OSEND:
- n := n.(*ir.SendStmt)
- t := o.markTemp()
- n.Chan = o.expr(n.Chan, nil)
- n.Value = o.expr(n.Value, nil)
- if base.Flag.Cfg.Instrumenting {
- // Force copying to the stack so that (chan T)(nil) <- x
- // is still instrumented as a read of x.
- n.Value = o.copyExpr(n.Value)
- } else {
- n.Value = o.addrTemp(n.Value)
- }
- o.out = append(o.out, n)
- o.cleanTemp(t)
-
- // TODO(rsc): Clean temporaries more aggressively.
- // Note that because walkswitch will rewrite some of the
- // switch into a binary search, this is not as easy as it looks.
- // (If we ran that code here we could invoke order.stmt on
- // the if-else chain instead.)
- // For now just clean all the temporaries at the end.
- // In practice that's fine.
- case ir.OSWITCH:
- n := n.(*ir.SwitchStmt)
- if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
- // Add empty "default:" case for instrumentation.
- n.Cases.Append(ir.NewCaseStmt(base.Pos, nil, nil))
- }
-
- t := o.markTemp()
- n.Tag = o.expr(n.Tag, nil)
- for _, ncas := range n.Cases {
- ncas := ncas.(*ir.CaseStmt)
- o.exprListInPlace(ncas.List)
- orderBlock(&ncas.Body, o.free)
- }
-
- o.out = append(o.out, n)
- o.cleanTemp(t)
- }
-
- base.Pos = lno
-}
-
-func hasDefaultCase(n *ir.SwitchStmt) bool {
- for _, ncas := range n.Cases {
- ncas := ncas.(*ir.CaseStmt)
- if len(ncas.List) == 0 {
- return true
- }
- }
- return false
-}
-
-// exprList orders the expression list l into o.
-func (o *Order) exprList(l ir.Nodes) {
- s := l
- for i := range s {
- s[i] = o.expr(s[i], nil)
- }
-}
-
-// exprListInPlace orders the expression list l but saves
-// the side effects on the individual expression ninit lists.
-func (o *Order) exprListInPlace(l ir.Nodes) {
- s := l
- for i := range s {
- s[i] = o.exprInPlace(s[i])
- }
-}
-
-func (o *Order) exprNoLHS(n ir.Node) ir.Node {
- return o.expr(n, nil)
-}
-
-// expr orders a single expression, appending side
-// effects to o.out as needed.
-// If this is part of an assignment lhs = *np, lhs is given.
-// Otherwise lhs == nil. (When lhs != nil it may be possible
-// to avoid copying the result of the expression to a temporary.)
-// The result of expr MUST be assigned back to n, e.g.
-// n.Left = o.expr(n.Left, lhs)
-func (o *Order) expr(n, lhs ir.Node) ir.Node {
- if n == nil {
- return n
- }
- lno := ir.SetPos(n)
- n = o.expr1(n, lhs)
- base.Pos = lno
- return n
-}
-
-func (o *Order) expr1(n, lhs ir.Node) ir.Node {
- o.init(n)
-
- switch n.Op() {
- default:
- if o.edit == nil {
- o.edit = o.exprNoLHS // create closure once
- }
- ir.EditChildren(n, o.edit)
- return n
-
- // Addition of strings turns into a function call.
- // Allocate a temporary to hold the strings.
- // Fewer than 5 strings use direct runtime helpers.
- case ir.OADDSTR:
- n := n.(*ir.AddStringExpr)
- o.exprList(n.List)
-
- if len(n.List) > 5 {
- t := types.NewArray(types.Types[types.TSTRING], int64(len(n.List)))
- n.Prealloc = o.newTemp(t, false)
- }
-
- // Mark string(byteSlice) arguments to reuse byteSlice backing
- // buffer during conversion. String concatenation does not
- // memorize the strings for later use, so it is safe.
- // However, we can do it only if there is at least one non-empty string literal.
- // Otherwise if all other arguments are empty strings,
- // concatstrings will return the reference to the temp string
- // to the caller.
- hasbyte := false
-
- haslit := false
- for _, n1 := range n.List {
- hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
- haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0
- }
-
- if haslit && hasbyte {
- for _, n2 := range n.List {
- if n2.Op() == ir.OBYTES2STR {
- n2 := n2.(*ir.ConvExpr)
- n2.SetOp(ir.OBYTES2STRTMP)
- }
- }
- }
- return n
-
- case ir.OINDEXMAP:
- n := n.(*ir.IndexExpr)
- n.X = o.expr(n.X, nil)
- n.Index = o.expr(n.Index, nil)
- needCopy := false
-
- if !n.Assigned {
- // Enforce that any []byte slices we are not copying
- // can not be changed before the map index by forcing
- // the map index to happen immediately following the
- // conversions. See copyExpr a few lines below.
- needCopy = mapKeyReplaceStrConv(n.Index)
-
- if base.Flag.Cfg.Instrumenting {
- // Race detector needs the copy.
- needCopy = true
- }
- }
-
- // key must be addressable
- n.Index = o.mapKeyTemp(n.X.Type(), n.Index)
- if needCopy {
- return o.copyExpr(n)
- }
- return n
-
- // concrete type (not interface) argument might need an addressable
- // temporary to pass to the runtime conversion routine.
- case ir.OCONVIFACE:
- n := n.(*ir.ConvExpr)
- n.X = o.expr(n.X, nil)
- if n.X.Type().IsInterface() {
- return n
- }
- if _, needsaddr := convFuncName(n.X.Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
- // Need a temp if we need to pass the address to the conversion function.
- // We also process static composite literal node here, making a named static global
- // whose address we can put directly in an interface (see OCONVIFACE case in walk).
- n.X = o.addrTemp(n.X)
- }
- return n
-
- case ir.OCONVNOP:
- n := n.(*ir.ConvExpr)
- if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER || n.X.Op() == ir.OCALLMETH) {
- call := n.X.(*ir.CallExpr)
- // When reordering unsafe.Pointer(f()) into a separate
- // statement, the conversion and function call must stay
- // together. See golang.org/issue/15329.
- o.init(call)
- o.call(call)
- if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
- return o.copyExpr(n)
- }
- } else {
- n.X = o.expr(n.X, nil)
- }
- return n
-
- case ir.OANDAND, ir.OOROR:
- // ... = LHS && RHS
- //
- // var r bool
- // r = LHS
- // if r { // or !r, for OROR
- // r = RHS
- // }
- // ... = r
-
- n := n.(*ir.LogicalExpr)
- r := o.newTemp(n.Type(), false)
-
- // Evaluate left-hand side.
- lhs := o.expr(n.X, nil)
- o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, lhs)))
-
- // Evaluate right-hand side, save generated code.
- saveout := o.out
- o.out = nil
- t := o.markTemp()
- o.edge()
- rhs := o.expr(n.Y, nil)
- o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs)))
- o.cleanTemp(t)
- gen := o.out
- o.out = saveout
-
- // If left-hand side doesn't cause a short-circuit, issue right-hand side.
- nif := ir.NewIfStmt(base.Pos, r, nil, nil)
- if n.Op() == ir.OANDAND {
- nif.Body.Set(gen)
- } else {
- nif.Else.Set(gen)
- }
- o.out = append(o.out, nif)
- return r
-
- case ir.OCALLFUNC,
- ir.OCALLINTER,
- ir.OCALLMETH,
- ir.OCAP,
- ir.OCOMPLEX,
- ir.OCOPY,
- ir.OIMAG,
- ir.OLEN,
- ir.OMAKECHAN,
- ir.OMAKEMAP,
- ir.OMAKESLICE,
- ir.OMAKESLICECOPY,
- ir.ONEW,
- ir.OREAL,
- ir.ORECOVER,
- ir.OSTR2BYTES,
- ir.OSTR2BYTESTMP,
- ir.OSTR2RUNES:
-
- if isRuneCount(n) {
- // len([]rune(s)) is rewritten to runtime.countrunes(s) later.
- conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr)
- conv.X = o.expr(conv.X, nil)
- } else {
- o.call(n)
- }
-
- if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
- return o.copyExpr(n)
- }
- return n
-
- case ir.OAPPEND:
- // Check for append(x, make([]T, y)...) .
- n := n.(*ir.CallExpr)
- if isAppendOfMake(n) {
- n.Args[0] = o.expr(n.Args[0], nil) // order x
- mk := n.Args[1].(*ir.MakeExpr)
- mk.Len = o.expr(mk.Len, nil) // order y
- } else {
- o.exprList(n.Args)
- }
-
- if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.Args[0]) {
- return o.copyExpr(n)
- }
- return n
-
- case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
- n := n.(*ir.SliceExpr)
- n.X = o.expr(n.X, nil)
- low, high, max := n.SliceBounds()
- low = o.expr(low, nil)
- low = o.cheapExpr(low)
- high = o.expr(high, nil)
- high = o.cheapExpr(high)
- max = o.expr(max, nil)
- max = o.cheapExpr(max)
- n.SetSliceBounds(low, high, max)
- if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) {
- return o.copyExpr(n)
- }
- return n
-
- case ir.OCLOSURE:
- n := n.(*ir.ClosureExpr)
- if n.Transient() && len(n.Func.ClosureVars) > 0 {
- n.Prealloc = o.newTemp(typecheck.ClosureType(n), false)
- }
- return n
-
- case ir.OCALLPART:
- n := n.(*ir.CallPartExpr)
- n.X = o.expr(n.X, nil)
- if n.Transient() {
- t := typecheck.PartialCallType(n)
- n.Prealloc = o.newTemp(t, false)
- }
- return n
-
- case ir.OSLICELIT:
- n := n.(*ir.CompLitExpr)
- o.exprList(n.List)
- if n.Transient() {
- t := types.NewArray(n.Type().Elem(), n.Len)
- n.Prealloc = o.newTemp(t, false)
- }
- return n
-
- case ir.ODOTTYPE, ir.ODOTTYPE2:
- n := n.(*ir.TypeAssertExpr)
- n.X = o.expr(n.X, nil)
- if !types.IsDirectIface(n.Type()) || base.Flag.Cfg.Instrumenting {
- return o.copyExprClear(n)
- }
- return n
-
- case ir.ORECV:
- n := n.(*ir.UnaryExpr)
- n.X = o.expr(n.X, nil)
- return o.copyExprClear(n)
-
- case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
- n := n.(*ir.BinaryExpr)
- n.X = o.expr(n.X, nil)
- n.Y = o.expr(n.Y, nil)
-
- t := n.X.Type()
- switch {
- case t.IsString():
- // Mark string(byteSlice) arguments to reuse byteSlice backing
- // buffer during conversion. String comparison does not
- // memorize the strings for later use, so it is safe.
- if n.X.Op() == ir.OBYTES2STR {
- n.X.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
- }
- if n.Y.Op() == ir.OBYTES2STR {
- n.Y.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
- }
-
- case t.IsStruct() || t.IsArray():
- // for complex comparisons, we need both args to be
- // addressable so we can pass them to the runtime.
- n.X = o.addrTemp(n.X)
- n.Y = o.addrTemp(n.Y)
- }
- return n
-
- case ir.OMAPLIT:
- // Order map by converting:
- // map[int]int{
- // a(): b(),
- // c(): d(),
- // e(): f(),
- // }
- // to
- // m := map[int]int{}
- // m[a()] = b()
- // m[c()] = d()
- // m[e()] = f()
- // Then order the result.
- // Without this special case, order would otherwise compute all
- // the keys and values before storing any of them to the map.
- // See issue 26552.
- n := n.(*ir.CompLitExpr)
- entries := n.List
- statics := entries[:0]
- var dynamics []*ir.KeyExpr
- for _, r := range entries {
- r := r.(*ir.KeyExpr)
-
- if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
- dynamics = append(dynamics, r)
- continue
- }
-
- // Recursively ordering some static entries can change them to dynamic;
- // e.g., OCONVIFACE nodes. See #31777.
- r = o.expr(r, nil).(*ir.KeyExpr)
- if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
- dynamics = append(dynamics, r)
- continue
- }
-
- statics = append(statics, r)
- }
- n.List.Set(statics)
-
- if len(dynamics) == 0 {
- return n
- }
-
- // Emit the creation of the map (with all its static entries).
- m := o.newTemp(n.Type(), false)
- as := ir.NewAssignStmt(base.Pos, m, n)
- typecheck.Stmt(as)
- o.stmt(as)
-
- // Emit eval+insert of dynamic entries, one at a time.
- for _, r := range dynamics {
- as := ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, r.Key), r.Value)
- typecheck.Stmt(as) // Note: this converts the OINDEX to an OINDEXMAP
- o.stmt(as)
- }
- return m
- }
-
- // No return - type-assertions above. Each case must return for itself.
-}
-
-// as2 orders OAS2XXXX nodes. It creates temporaries to ensure left-to-right assignment.
-// The caller should order the right-hand side of the assignment before calling order.as2.
-// It rewrites,
-// a, b, a = ...
-// as
-// tmp1, tmp2, tmp3 = ...
-// a, b, a = tmp1, tmp2, tmp3
-// This is necessary to ensure left to right assignment order.
-func (o *Order) as2(n *ir.AssignListStmt) {
- tmplist := []ir.Node{}
- left := []ir.Node{}
- for ni, l := range n.Lhs {
- if !ir.IsBlank(l) {
- tmp := o.newTemp(l.Type(), l.Type().HasPointers())
- n.Lhs[ni] = tmp
- tmplist = append(tmplist, tmp)
- left = append(left, l)
- }
- }
-
- o.out = append(o.out, n)
-
- as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
- as.Lhs.Set(left)
- as.Rhs.Set(tmplist)
- o.stmt(typecheck.Stmt(as))
-}
-
-// okAs2 orders OAS2XXX with ok.
-// Just like as2, this also adds temporaries to ensure left-to-right assignment.
-func (o *Order) okAs2(n *ir.AssignListStmt) {
- var tmp1, tmp2 ir.Node
- if !ir.IsBlank(n.Lhs[0]) {
- typ := n.Rhs[0].Type()
- tmp1 = o.newTemp(typ, typ.HasPointers())
- }
-
- if !ir.IsBlank(n.Lhs[1]) {
- tmp2 = o.newTemp(types.Types[types.TBOOL], false)
- }
-
- o.out = append(o.out, n)
-
- if tmp1 != nil {
- r := ir.NewAssignStmt(base.Pos, n.Lhs[0], tmp1)
- o.mapAssign(typecheck.Stmt(r))
- n.Lhs[0] = tmp1
- }
- if tmp2 != nil {
- r := ir.NewAssignStmt(base.Pos, n.Lhs[1], typecheck.Conv(tmp2, n.Lhs[1].Type()))
- o.mapAssign(typecheck.Stmt(r))
- n.Lhs[1] = tmp2
- }
-}
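The rewrite described at the top of order.go for m[k] op= r is observable from user code when op can panic. This standalone program (not compiler code) shows why the order matters: the division is evaluated, as if the statement were written m["k"] = m["k"] / zero, so the panic fires before the map store and the entry keeps its old value.

package main

import "fmt"

func main() {
	m := map[string]int{"k": 10}
	zero := 0

	defer func() {
		fmt.Println("recovered:", recover()) // runtime error: integer divide by zero
		fmt.Println(`m["k"] =`, m["k"])      // still 10: the store never ran
	}()

	// Rewritten by order to m["k"] = m["k"] / zero, so the
	// divide-by-zero panic happens before the map assignment.
	m["k"] /= zero
}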
diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go
deleted file mode 100644
index c52bf1479b..0000000000
--- a/src/cmd/compile/internal/gc/racewalk.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/ssagen"
- "cmd/compile/internal/types"
- "cmd/internal/src"
- "cmd/internal/sys"
-)
-
-func instrument(fn *ir.Func) {
- if fn.Pragma&ir.Norace != 0 || (fn.Sym().Linksym() != nil && fn.Sym().Linksym().ABIWrapper()) {
- return
- }
-
- if !base.Flag.Race || !base.Compiling(base.NoRacePkgs) {
- fn.SetInstrumentBody(true)
- }
-
- if base.Flag.Race {
- lno := base.Pos
- base.Pos = src.NoXPos
-
- if ssagen.Arch.LinkArch.Arch.Family != sys.AMD64 {
- fn.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
- fn.Exit.Append(mkcall("racefuncexit", nil, nil))
- } else {
-
- // nodpc is the PC of the caller as extracted by
- // getcallerpc. We use -widthptr(FP) for x86.
- // This only works for amd64. This will not
- // work on arm or others that might support
- // race in the future.
- nodpc := ir.RegFP.CloneName()
- nodpc.SetType(types.Types[types.TUINTPTR])
- nodpc.SetFrameOffset(int64(-types.PtrSize))
- fn.Dcl = append(fn.Dcl, nodpc)
- fn.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
- fn.Exit.Append(mkcall("racefuncexit", nil, nil))
- }
- base.Pos = lno
- }
-}
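
For orientation, the effect instrument has on a function built with -race can be sketched at the source level. The runtime calls below are shown as comments because the real ones are prepended to fn.Enter and appended to fn.Exit as IR, never as source:

    package sketch

    // add sketches the instrumented shape of a function under -race.
    func add(p *int, v int) {
    	// runtime·racefuncenter(callerpc) // from fn.Enter
    	*p += v // loads and stores get race checks later, during walk
    	// runtime·racefuncexit() // from fn.Exit
    }
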
diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go
deleted file mode 100644
index 2b2178a8bd..0000000000
--- a/src/cmd/compile/internal/gc/range.go
+++ /dev/null
@@ -1,495 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/reflectdata"
- "cmd/compile/internal/ssagen"
- "cmd/compile/internal/typecheck"
- "cmd/compile/internal/types"
- "cmd/internal/sys"
- "unicode/utf8"
-)
-
-func cheapComputableIndex(width int64) bool {
- switch ssagen.Arch.LinkArch.Family {
- // MIPS does not have R+R addressing
- // Arm64 may lack ability to generate this code in our assembler,
- // but the architecture supports it.
- case sys.PPC64, sys.S390X:
- return width == 1
- case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
- switch width {
- case 1, 2, 4, 8:
- return true
- }
- }
- return false
-}
-
-// walkrange transforms various forms of ORANGE into
-// simpler forms. The result must be assigned back to n.
-// Node n may also be modified in place, and may also be
-// the returned node.
-func walkrange(nrange *ir.RangeStmt) ir.Node {
- if isMapClear(nrange) {
- m := nrange.X
- lno := ir.SetPos(m)
- n := mapClear(m)
- base.Pos = lno
- return n
- }
-
- nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil)
- nfor.SetInit(nrange.Init())
- nfor.Label = nrange.Label
-
- // variable name conventions:
- // ohv1, hv1, hv2: hidden (old) val 1, 2
- // ha, hit: hidden aggregate, iterator
- // hn, hp: hidden len, pointer
- // hb: hidden bool
- // a, v1, v2: not hidden aggregate, val 1, 2
-
- t := nrange.Type()
-
- a := nrange.X
- lno := ir.SetPos(a)
-
- var v1, v2 ir.Node
- l := len(nrange.Vars)
- if l > 0 {
- v1 = nrange.Vars[0]
- }
-
- if l > 1 {
- v2 = nrange.Vars[1]
- }
-
- if ir.IsBlank(v2) {
- v2 = nil
- }
-
- if ir.IsBlank(v1) && v2 == nil {
- v1 = nil
- }
-
- if v1 == nil && v2 != nil {
- base.Fatalf("walkrange: v2 != nil while v1 == nil")
- }
-
- var ifGuard *ir.IfStmt
-
- var body []ir.Node
- var init []ir.Node
- switch t.Kind() {
- default:
- base.Fatalf("walkrange")
-
- case types.TARRAY, types.TSLICE:
- if nn := arrayClear(nrange, v1, v2, a); nn != nil {
- base.Pos = lno
- return nn
- }
-
- // order.stmt arranged for a copy of the array/slice variable if needed.
- ha := a
-
- hv1 := typecheck.Temp(types.Types[types.TINT])
- hn := typecheck.Temp(types.Types[types.TINT])
-
- init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
- init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)))
-
- nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
- nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1)))
-
- // for range ha { body }
- if v1 == nil {
- break
- }
-
- // for v1 := range ha { body }
- if v2 == nil {
- body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}
- break
- }
-
- // for v1, v2 := range ha { body }
- if cheapComputableIndex(nrange.Type().Elem().Width) {
- // v1, v2 = hv1, ha[hv1]
- tmp := ir.NewIndexExpr(base.Pos, ha, hv1)
- tmp.SetBounded(true)
- // Use OAS2 to correctly handle assignments
- // of the form "v1, a[v1] := range".
- a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
- a.Lhs = []ir.Node{v1, v2}
- a.Rhs = []ir.Node{hv1, tmp}
- body = []ir.Node{a}
- break
- }
-
- // TODO(austin): OFORUNTIL is a strange beast, but is
- // necessary for expressing the control flow we need
- // while also making "break" and "continue" work. It
- // would be nice to just lower ORANGE during SSA, but
- // racewalk needs to see many of the operations
- // involved in ORANGE's implementation. If racewalk
- // moves into SSA, consider moving ORANGE into SSA and
- // eliminating OFORUNTIL.
-
- // TODO(austin): OFORUNTIL inhibits bounds-check
- // elimination on the index variable (see #20711).
- // Enhance the prove pass to understand this.
- ifGuard = ir.NewIfStmt(base.Pos, nil, nil, nil)
- ifGuard.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
- nfor.SetOp(ir.OFORUNTIL)
-
- hp := typecheck.Temp(types.NewPtr(nrange.Type().Elem()))
- tmp := ir.NewIndexExpr(base.Pos, ha, ir.NewInt(0))
- tmp.SetBounded(true)
- init = append(init, ir.NewAssignStmt(base.Pos, hp, typecheck.NodAddr(tmp)))
-
- // Use OAS2 to correctly handle assignments
- // of the form "v1, a[v1] := range".
- a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
- a.Lhs = []ir.Node{v1, v2}
- a.Rhs = []ir.Node{hv1, ir.NewStarExpr(base.Pos, hp)}
- body = append(body, a)
-
- // Advance pointer as part of the late increment.
- //
- // This runs *after* the condition check, so we know
- // advancing the pointer is safe and won't go past the
- // end of the allocation.
- as := ir.NewAssignStmt(base.Pos, hp, addptr(hp, t.Elem().Width))
- nfor.Late = []ir.Node{typecheck.Stmt(as)}
-
- case types.TMAP:
- // order.stmt allocated the iterator for us.
-	// We use a only once, so no copy is needed.
- ha := a
-
- hit := nrange.Prealloc
- th := hit.Type()
- keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
- elemsym := th.Field(1).Sym // ditto
-
- fn := typecheck.LookupRuntime("mapiterinit")
-
- fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th)
- init = append(init, mkcall1(fn, nil, nil, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit)))
- nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())
-
- fn = typecheck.LookupRuntime("mapiternext")
- fn = typecheck.SubstArgTypes(fn, th)
- nfor.Post = mkcall1(fn, nil, nil, typecheck.NodAddr(hit))
-
- key := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym))
- if v1 == nil {
- body = nil
- } else if v2 == nil {
- body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, key)}
- } else {
- elem := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym))
- a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
- a.Lhs = []ir.Node{v1, v2}
- a.Rhs = []ir.Node{key, elem}
- body = []ir.Node{a}
- }
-
- case types.TCHAN:
- // order.stmt arranged for a copy of the channel variable.
- ha := a
-
- hv1 := typecheck.Temp(t.Elem())
- hv1.SetTypecheck(1)
- if t.Elem().HasPointers() {
- init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
- }
- hb := typecheck.Temp(types.Types[types.TBOOL])
-
- nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(false))
- a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, nil, nil)
- a.SetTypecheck(1)
- a.Lhs = []ir.Node{hv1, hb}
- a.Rhs = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)}
- *nfor.Cond.PtrInit() = []ir.Node{a}
- if v1 == nil {
- body = nil
- } else {
- body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}
- }
- // Zero hv1. This prevents hv1 from being the sole, inaccessible
- // reference to an otherwise GC-able value during the next channel receive.
- // See issue 15281.
- body = append(body, ir.NewAssignStmt(base.Pos, hv1, nil))
-
- case types.TSTRING:
- // Transform string range statements like "for v1, v2 = range a" into
- //
- // ha := a
- // for hv1 := 0; hv1 < len(ha); {
- // hv1t := hv1
- // hv2 := rune(ha[hv1])
- // if hv2 < utf8.RuneSelf {
- // hv1++
- // } else {
- // hv2, hv1 = decoderune(ha, hv1)
- // }
- // v1, v2 = hv1t, hv2
- // // original body
- // }
-
- // order.stmt arranged for a copy of the string variable.
- ha := a
-
- hv1 := typecheck.Temp(types.Types[types.TINT])
- hv1t := typecheck.Temp(types.Types[types.TINT])
- hv2 := typecheck.Temp(types.RuneType)
-
- // hv1 := 0
- init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
-
- // hv1 < len(ha)
- nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))
-
- if v1 != nil {
- // hv1t = hv1
- body = append(body, ir.NewAssignStmt(base.Pos, hv1t, hv1))
- }
-
- // hv2 := rune(ha[hv1])
- nind := ir.NewIndexExpr(base.Pos, ha, hv1)
- nind.SetBounded(true)
- body = append(body, ir.NewAssignStmt(base.Pos, hv2, typecheck.Conv(nind, types.RuneType)))
-
- // if hv2 < utf8.RuneSelf
- nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
- nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, ir.NewInt(utf8.RuneSelf))
-
- // hv1++
- nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1)))}
-
- // } else {
- eif := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
- nif.Else = []ir.Node{eif}
-
- // hv2, hv1 = decoderune(ha, hv1)
- eif.Lhs = []ir.Node{hv2, hv1}
- fn := typecheck.LookupRuntime("decoderune")
- eif.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, ha, hv1)}
-
- body = append(body, nif)
-
- if v1 != nil {
- if v2 != nil {
- // v1, v2 = hv1t, hv2
- a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
- a.Lhs = []ir.Node{v1, v2}
- a.Rhs = []ir.Node{hv1t, hv2}
- body = append(body, a)
- } else {
- // v1 = hv1t
- body = append(body, ir.NewAssignStmt(base.Pos, v1, hv1t))
- }
- }
- }
-
- typecheck.Stmts(init)
-
- if ifGuard != nil {
- ifGuard.PtrInit().Append(init...)
- ifGuard = typecheck.Stmt(ifGuard).(*ir.IfStmt)
- } else {
- nfor.PtrInit().Append(init...)
- }
-
- typecheck.Stmts(nfor.Cond.Init())
-
- nfor.Cond = typecheck.Expr(nfor.Cond)
- nfor.Cond = typecheck.DefaultLit(nfor.Cond, nil)
- nfor.Post = typecheck.Stmt(nfor.Post)
- typecheck.Stmts(body)
- nfor.Body.Append(body...)
- nfor.Body.Append(nrange.Body...)
-
- var n ir.Node = nfor
- if ifGuard != nil {
- ifGuard.Body = []ir.Node{n}
- n = ifGuard
- }
-
- n = walkstmt(n)
-
- base.Pos = lno
- return n
-}
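
Written back out as source, the slice case of walkrange produces roughly the shape below. The ha/hn/hv1 names follow the hidden-variable convention documented at the top of walkrange; this is a sketch, not literal compiler output:

    package sketch

    // sum shows approximately what "for _, v := range a" lowers to.
    func sum(a []int) int {
    	s := 0
    	ha := a       // copy of the aggregate, arranged by order.stmt
    	hn := len(ha) // hidden length, evaluated once
    	for hv1 := 0; hv1 < hn; hv1++ {
    		v := ha[hv1] // marked bounded: no bounds check
    		s += v
    	}
    	return s
    }
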
-
-// isMapClear checks if n is of the form:
-//
-// for k := range m {
-// delete(m, k)
-// }
-//
-// where == for keys of map m is reflexive.
-func isMapClear(n *ir.RangeStmt) bool {
- if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
- return false
- }
-
- if n.Op() != ir.ORANGE || n.Type().Kind() != types.TMAP || len(n.Vars) != 1 {
- return false
- }
-
- k := n.Vars[0]
- if k == nil || ir.IsBlank(k) {
- return false
- }
-
- // Require k to be a new variable name.
- if !ir.DeclaredBy(k, n) {
- return false
- }
-
- if len(n.Body) != 1 {
- return false
- }
-
- stmt := n.Body[0] // only stmt in body
- if stmt == nil || stmt.Op() != ir.ODELETE {
- return false
- }
-
- m := n.X
- if delete := stmt.(*ir.CallExpr); !ir.SameSafeExpr(delete.Args[0], m) || !ir.SameSafeExpr(delete.Args[1], k) {
- return false
- }
-
-	// Keys where equality is not reflexive cannot be deleted from maps.
- if !types.IsReflexive(m.Type().Key()) {
- return false
- }
-
- return true
-}
-
-// mapClear constructs a call to runtime.mapclear for the map m.
-func mapClear(m ir.Node) ir.Node {
- t := m.Type()
-
- // instantiate mapclear(typ *type, hmap map[any]any)
- fn := typecheck.LookupRuntime("mapclear")
- fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
- n := mkcall1(fn, nil, nil, reflectdata.TypePtr(t), m)
- return walkstmt(typecheck.Stmt(n))
-}
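
isMapClear and mapClear together form a pattern match and replace. Assuming -N is off and instrumentation is disabled (the early-out checks above), the loop below compiles to one runtime.mapclear call instead of an element-by-element deletion:

    package sketch

    // reset is exactly the shape isMapClear recognizes.
    func reset(m map[string]int) {
    	for k := range m {
    		delete(m, k)
    	}
    }
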
-
-// arrayClear lowers the loop into runtime·memclr if possible, for
-// fast zeroing of slices and arrays (issue 5373).
-// Look for instances of
-//
-// for i := range a {
-// a[i] = zero
-// }
-//
-// in which the evaluation of a is side-effect-free.
-//
-// Parameters are as in walkrange: "for v1, v2 = range a".
-func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
- if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
- return nil
- }
-
- if v1 == nil || v2 != nil {
- return nil
- }
-
- if len(loop.Body) != 1 || loop.Body[0] == nil {
- return nil
- }
-
- stmt1 := loop.Body[0] // only stmt in body
- if stmt1.Op() != ir.OAS {
- return nil
- }
- stmt := stmt1.(*ir.AssignStmt)
- if stmt.X.Op() != ir.OINDEX {
- return nil
- }
- lhs := stmt.X.(*ir.IndexExpr)
-
- if !ir.SameSafeExpr(lhs.X, a) || !ir.SameSafeExpr(lhs.Index, v1) {
- return nil
- }
-
- elemsize := loop.Type().Elem().Width
- if elemsize <= 0 || !ir.IsZero(stmt.Y) {
- return nil
- }
-
- // Convert to
- // if len(a) != 0 {
- // hp = &a[0]
- // hn = len(a)*sizeof(elem(a))
- // memclr{NoHeap,Has}Pointers(hp, hn)
- // i = len(a) - 1
- // }
- n := ir.NewIfStmt(base.Pos, nil, nil, nil)
- n.Body.Set(nil)
- n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(0))
-
- // hp = &a[0]
- hp := typecheck.Temp(types.Types[types.TUNSAFEPTR])
-
- ix := ir.NewIndexExpr(base.Pos, a, ir.NewInt(0))
- ix.SetBounded(true)
- addr := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])
- n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr))
-
- // hn = len(a) * sizeof(elem(a))
- hn := typecheck.Temp(types.Types[types.TUINTPTR])
- mul := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(elemsize)), types.Types[types.TUINTPTR])
- n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul))
-
- var fn ir.Node
- if a.Type().Elem().HasPointers() {
- // memclrHasPointers(hp, hn)
- ir.CurFunc.SetWBPos(stmt.Pos())
- fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
- } else {
- // memclrNoHeapPointers(hp, hn)
- fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
- }
-
- n.Body.Append(fn)
-
- // i = len(a) - 1
- v1 = ir.NewAssignStmt(base.Pos, v1, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(1)))
-
- n.Body.Append(v1)
-
- n.Cond = typecheck.Expr(n.Cond)
- n.Cond = typecheck.DefaultLit(n.Cond, nil)
- typecheck.Stmts(n.Body)
- return walkstmt(n)
-}
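
arrayClear is the analogous rewrite for zeroing loops. Under the same conditions, the loop below is replaced by the guarded memclr sequence built above, roughly "if len(b) != 0 { memclrNoHeapPointers(&b[0], n); i = len(b) - 1 }":

    package sketch

    // zero is the shape arrayClear recognizes: a range loop whose only
    // statement stores a zero value through the index variable.
    func zero(b []byte) {
    	for i := range b {
    		b[i] = 0
    	}
    }
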
-
-// addptr returns (*T)(uintptr(p) + n).
-func addptr(p ir.Node, n int64) ir.Node {
- t := p.Type()
-
- p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
- p.SetType(types.Types[types.TUINTPTR])
-
- p = ir.NewBinaryExpr(base.Pos, ir.OADD, p, ir.NewInt(n))
-
- p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
- p.SetType(t)
-
- return p
-}
diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go
deleted file mode 100644
index 51bb1e5355..0000000000
--- a/src/cmd/compile/internal/gc/select.go
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/typecheck"
- "cmd/compile/internal/types"
-)
-
-func walkselect(sel *ir.SelectStmt) {
- lno := ir.SetPos(sel)
- if len(sel.Compiled) != 0 {
- base.Fatalf("double walkselect")
- }
-
- init := sel.Init()
- sel.PtrInit().Set(nil)
-
- init = append(init, walkselectcases(sel.Cases)...)
- sel.Cases = ir.Nodes{}
-
- sel.Compiled.Set(init)
- walkstmtlist(sel.Compiled)
-
- base.Pos = lno
-}
-
-func walkselectcases(cases ir.Nodes) []ir.Node {
- ncas := len(cases)
- sellineno := base.Pos
-
- // optimization: zero-case select
- if ncas == 0 {
- return []ir.Node{mkcall("block", nil, nil)}
- }
-
- // optimization: one-case select: single op.
- if ncas == 1 {
- cas := cases[0].(*ir.CaseStmt)
- ir.SetPos(cas)
- l := cas.Init()
- if cas.Comm != nil { // not default:
- n := cas.Comm
- l = append(l, n.Init()...)
- n.PtrInit().Set(nil)
- switch n.Op() {
- default:
- base.Fatalf("select %v", n.Op())
-
- case ir.OSEND:
- // already ok
-
- case ir.OSELRECV2:
- r := n.(*ir.AssignListStmt)
- if ir.IsBlank(r.Lhs[0]) && ir.IsBlank(r.Lhs[1]) {
- n = r.Rhs[0]
- break
- }
- r.SetOp(ir.OAS2RECV)
- }
-
- l = append(l, n)
- }
-
- l = append(l, cas.Body...)
- l = append(l, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil))
- return l
- }
-
- // convert case value arguments to addresses.
- // this rewrite is used by both the general code and the next optimization.
- var dflt *ir.CaseStmt
- for _, cas := range cases {
- cas := cas.(*ir.CaseStmt)
- ir.SetPos(cas)
- n := cas.Comm
- if n == nil {
- dflt = cas
- continue
- }
- switch n.Op() {
- case ir.OSEND:
- n := n.(*ir.SendStmt)
- n.Value = typecheck.NodAddr(n.Value)
- n.Value = typecheck.Expr(n.Value)
-
- case ir.OSELRECV2:
- n := n.(*ir.AssignListStmt)
- if !ir.IsBlank(n.Lhs[0]) {
- n.Lhs[0] = typecheck.NodAddr(n.Lhs[0])
- n.Lhs[0] = typecheck.Expr(n.Lhs[0])
- }
- }
- }
-
- // optimization: two-case select but one is default: single non-blocking op.
- if ncas == 2 && dflt != nil {
- cas := cases[0].(*ir.CaseStmt)
- if cas == dflt {
- cas = cases[1].(*ir.CaseStmt)
- }
-
- n := cas.Comm
- ir.SetPos(n)
- r := ir.NewIfStmt(base.Pos, nil, nil, nil)
- r.PtrInit().Set(cas.Init())
- var call ir.Node
- switch n.Op() {
- default:
- base.Fatalf("select %v", n.Op())
-
- case ir.OSEND:
- // if selectnbsend(c, v) { body } else { default body }
- n := n.(*ir.SendStmt)
- ch := n.Chan
- call = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Value)
-
- case ir.OSELRECV2:
- n := n.(*ir.AssignListStmt)
- recv := n.Rhs[0].(*ir.UnaryExpr)
- ch := recv.X
- elem := n.Lhs[0]
- if ir.IsBlank(elem) {
- elem = typecheck.NodNil()
- }
- if ir.IsBlank(n.Lhs[1]) {
- // if selectnbrecv(&v, c) { body } else { default body }
- call = mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch)
- } else {
- // TODO(cuonglm): make this use selectnbrecv()
- // if selectnbrecv2(&v, &received, c) { body } else { default body }
- receivedp := typecheck.Expr(typecheck.NodAddr(n.Lhs[1]))
- call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch)
- }
- }
-
- r.Cond = typecheck.Expr(call)
- r.Body.Set(cas.Body)
- r.Else.Set(append(dflt.Init(), dflt.Body...))
- return []ir.Node{r, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)}
- }
-
- if dflt != nil {
- ncas--
- }
- casorder := make([]*ir.CaseStmt, ncas)
- nsends, nrecvs := 0, 0
-
- var init []ir.Node
-
- // generate sel-struct
- base.Pos = sellineno
- selv := typecheck.Temp(types.NewArray(scasetype(), int64(ncas)))
- init = append(init, typecheck.Stmt(ir.NewAssignStmt(base.Pos, selv, nil)))
-
- // No initialization for order; runtime.selectgo is responsible for that.
- order := typecheck.Temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
-
- var pc0, pcs ir.Node
- if base.Flag.Race {
- pcs = typecheck.Temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
- pc0 = typecheck.Expr(typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(0))))
- } else {
- pc0 = typecheck.NodNil()
- }
-
- // register cases
- for _, cas := range cases {
- cas := cas.(*ir.CaseStmt)
- ir.SetPos(cas)
-
- init = append(init, cas.Init()...)
- cas.PtrInit().Set(nil)
-
- n := cas.Comm
- if n == nil { // default:
- continue
- }
-
- var i int
- var c, elem ir.Node
- switch n.Op() {
- default:
- base.Fatalf("select %v", n.Op())
- case ir.OSEND:
- n := n.(*ir.SendStmt)
- i = nsends
- nsends++
- c = n.Chan
- elem = n.Value
- case ir.OSELRECV2:
- n := n.(*ir.AssignListStmt)
- nrecvs++
- i = ncas - nrecvs
- recv := n.Rhs[0].(*ir.UnaryExpr)
- c = recv.X
- elem = n.Lhs[0]
- }
-
- casorder[i] = cas
-
- setField := func(f string, val ir.Node) {
- r := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, ir.NewIndexExpr(base.Pos, selv, ir.NewInt(int64(i))), typecheck.Lookup(f)), val)
- init = append(init, typecheck.Stmt(r))
- }
-
- c = typecheck.ConvNop(c, types.Types[types.TUNSAFEPTR])
- setField("c", c)
- if !ir.IsBlank(elem) {
- elem = typecheck.ConvNop(elem, types.Types[types.TUNSAFEPTR])
- setField("elem", elem)
- }
-
- // TODO(mdempsky): There should be a cleaner way to
- // handle this.
- if base.Flag.Race {
- r := mkcall("selectsetpc", nil, nil, typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(int64(i)))))
- init = append(init, r)
- }
- }
- if nsends+nrecvs != ncas {
- base.Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
- }
-
- // run the select
- base.Pos = sellineno
- chosen := typecheck.Temp(types.Types[types.TINT])
- recvOK := typecheck.Temp(types.Types[types.TBOOL])
- r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
- r.Lhs = []ir.Node{chosen, recvOK}
- fn := typecheck.LookupRuntime("selectgo")
- r.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(int64(nsends)), ir.NewInt(int64(nrecvs)), ir.NewBool(dflt == nil))}
- init = append(init, typecheck.Stmt(r))
-
- // selv and order are no longer alive after selectgo.
- init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, selv))
- init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, order))
- if base.Flag.Race {
- init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, pcs))
- }
-
- // dispatch cases
- dispatch := func(cond ir.Node, cas *ir.CaseStmt) {
- cond = typecheck.Expr(cond)
- cond = typecheck.DefaultLit(cond, nil)
-
- r := ir.NewIfStmt(base.Pos, cond, nil, nil)
-
- if n := cas.Comm; n != nil && n.Op() == ir.OSELRECV2 {
- n := n.(*ir.AssignListStmt)
- if !ir.IsBlank(n.Lhs[1]) {
- x := ir.NewAssignStmt(base.Pos, n.Lhs[1], recvOK)
- r.Body.Append(typecheck.Stmt(x))
- }
- }
-
- r.Body.Append(cas.Body.Take()...)
- r.Body.Append(ir.NewBranchStmt(base.Pos, ir.OBREAK, nil))
- init = append(init, r)
- }
-
- if dflt != nil {
- ir.SetPos(dflt)
- dispatch(ir.NewBinaryExpr(base.Pos, ir.OLT, chosen, ir.NewInt(0)), dflt)
- }
- for i, cas := range casorder {
- ir.SetPos(cas)
- dispatch(ir.NewBinaryExpr(base.Pos, ir.OEQ, chosen, ir.NewInt(int64(i))), cas)
- }
-
- return init
-}
-
-// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
-func bytePtrToIndex(n ir.Node, i int64) ir.Node {
- s := typecheck.NodAddr(ir.NewIndexExpr(base.Pos, n, ir.NewInt(i)))
- t := types.NewPtr(types.Types[types.TUINT8])
- return typecheck.ConvNop(s, t)
-}
-
-var scase *types.Type
-
-// Keep in sync with src/runtime/select.go.
-func scasetype() *types.Type {
- if scase == nil {
- scase = typecheck.NewStructType([]*ir.Field{
- ir.NewField(base.Pos, typecheck.Lookup("c"), nil, types.Types[types.TUNSAFEPTR]),
- ir.NewField(base.Pos, typecheck.Lookup("elem"), nil, types.Types[types.TUNSAFEPTR]),
- })
- scase.SetNoalg(true)
- }
- return scase
-}
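
The two-case-plus-default optimization in walkselectcases has a simple source-level reading. A select like the one below never reaches selectgo; it becomes a single non-blocking runtime call, roughly "if selectnbsend(ch, &v) { ... } else { ... }":

    package sketch

    // trySend performs a non-blocking send.
    func trySend(ch chan<- int, v int) bool {
    	select {
    	case ch <- v:
    		return true
    	default:
    		return false
    	}
    }
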
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
deleted file mode 100644
index 337b67af46..0000000000
--- a/src/cmd/compile/internal/gc/sinit.go
+++ /dev/null
@@ -1,1156 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/reflectdata"
- "cmd/compile/internal/staticdata"
- "cmd/compile/internal/typecheck"
- "cmd/compile/internal/types"
- "cmd/internal/obj"
- "fmt"
-)
-
-type InitEntry struct {
- Xoffset int64 // struct, array only
- Expr ir.Node // bytes of run-time computed expressions
-}
-
-type InitPlan struct {
- E []InitEntry
-}
-
-// An InitSchedule is used to decompose assignment statements into
-// static and dynamic initialization parts. Static initializations are
-// handled by populating variables' linker symbol data, while dynamic
-// initializations are accumulated to be executed in order.
-type InitSchedule struct {
- // out is the ordered list of dynamic initialization
- // statements.
- out []ir.Node
-
- initplans map[ir.Node]*InitPlan
- inittemps map[ir.Node]*ir.Name
-}
-
-func (s *InitSchedule) append(n ir.Node) {
- s.out = append(s.out, n)
-}
-
-// staticInit adds an initialization statement n to the schedule.
-func (s *InitSchedule) staticInit(n ir.Node) {
- if !s.tryStaticInit(n) {
- if base.Flag.Percent != 0 {
- ir.Dump("nonstatic", n)
- }
- s.append(n)
- }
-}
-
-// tryStaticInit attempts to statically execute an initialization
-// statement and reports whether it succeeded.
-func (s *InitSchedule) tryStaticInit(nn ir.Node) bool {
- // Only worry about simple "l = r" assignments. Multiple
- // variable/expression OAS2 assignments have already been
- // replaced by multiple simple OAS assignments, and the other
- // OAS2* assignments mostly necessitate dynamic execution
- // anyway.
- if nn.Op() != ir.OAS {
- return false
- }
- n := nn.(*ir.AssignStmt)
- if ir.IsBlank(n.X) && !anySideEffects(n.Y) {
- // Discard.
- return true
- }
- lno := ir.SetPos(n)
- defer func() { base.Pos = lno }()
- nam := n.X.(*ir.Name)
- return s.staticassign(nam, 0, n.Y, nam.Type())
-}
-
-// staticcopy is like staticassign, but copies an already
-// initialized value r.
-func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool {
- if rn.Class_ == ir.PFUNC {
- // TODO if roff != 0 { panic }
- staticdata.InitFunc(l, loff, rn)
- return true
- }
- if rn.Class_ != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg {
- return false
- }
- if rn.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
- return false
- }
- if rn.Defn.Op() != ir.OAS {
- return false
- }
- if rn.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675)
- return false
- }
- orig := rn
- r := rn.Defn.(*ir.AssignStmt).Y
-
- for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) {
- r = r.(*ir.ConvExpr).X
- }
-
- switch r.Op() {
- case ir.OMETHEXPR:
- r = r.(*ir.MethodExpr).FuncName()
- fallthrough
- case ir.ONAME:
- r := r.(*ir.Name)
- if s.staticcopy(l, loff, r, typ) {
- return true
- }
- // We may have skipped past one or more OCONVNOPs, so
- // use conv to ensure r is assignable to l (#13263).
- dst := ir.Node(l)
- if loff != 0 || !types.Identical(typ, l.Type()) {
- dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ)
- }
- s.append(ir.NewAssignStmt(base.Pos, dst, typecheck.Conv(r, typ)))
- return true
-
- case ir.ONIL:
- return true
-
- case ir.OLITERAL:
- if ir.IsZero(r) {
- return true
- }
- litsym(l, loff, r, int(typ.Width))
- return true
-
- case ir.OADDR:
- r := r.(*ir.AddrExpr)
- if a := r.X; a.Op() == ir.ONAME {
- a := a.(*ir.Name)
- staticdata.InitAddr(l, loff, a, 0)
- return true
- }
-
- case ir.OPTRLIT:
- r := r.(*ir.AddrExpr)
- switch r.X.Op() {
- case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT:
- // copy pointer
- staticdata.InitAddr(l, loff, s.inittemps[r], 0)
- return true
- }
-
- case ir.OSLICELIT:
- r := r.(*ir.CompLitExpr)
- // copy slice
- staticdata.InitSlice(l, loff, s.inittemps[r], r.Len)
- return true
-
- case ir.OARRAYLIT, ir.OSTRUCTLIT:
- r := r.(*ir.CompLitExpr)
- p := s.initplans[r]
- for i := range p.E {
- e := &p.E[i]
- typ := e.Expr.Type()
- if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
- litsym(l, loff+e.Xoffset, e.Expr, int(typ.Width))
- continue
- }
- x := e.Expr
- if x.Op() == ir.OMETHEXPR {
- x = x.(*ir.MethodExpr).FuncName()
- }
- if x.Op() == ir.ONAME && s.staticcopy(l, loff+e.Xoffset, x.(*ir.Name), typ) {
- continue
- }
- // Requires computation, but we're
- // copying someone else's computation.
- ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ)
- rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ)
- ir.SetPos(rr)
- s.append(ir.NewAssignStmt(base.Pos, ll, rr))
- }
-
- return true
- }
-
- return false
-}
-
-func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool {
- for r.Op() == ir.OCONVNOP {
- r = r.(*ir.ConvExpr).X
- }
-
- switch r.Op() {
- case ir.ONAME:
- r := r.(*ir.Name)
- return s.staticcopy(l, loff, r, typ)
-
- case ir.OMETHEXPR:
- r := r.(*ir.MethodExpr)
- return s.staticcopy(l, loff, r.FuncName(), typ)
-
- case ir.ONIL:
- return true
-
- case ir.OLITERAL:
- if ir.IsZero(r) {
- return true
- }
- litsym(l, loff, r, int(typ.Width))
- return true
-
- case ir.OADDR:
- r := r.(*ir.AddrExpr)
- if name, offset, ok := stataddr(r.X); ok {
- staticdata.InitAddr(l, loff, name, offset)
- return true
- }
- fallthrough
-
- case ir.OPTRLIT:
- r := r.(*ir.AddrExpr)
- switch r.X.Op() {
- case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT:
- // Init pointer.
- a := staticname(r.X.Type())
-
- s.inittemps[r] = a
- staticdata.InitAddr(l, loff, a, 0)
-
- // Init underlying literal.
- if !s.staticassign(a, 0, r.X, a.Type()) {
- s.append(ir.NewAssignStmt(base.Pos, a, r.X))
- }
- return true
- }
-		// dump("not static ptrlit", r)
-
- case ir.OSTR2BYTES:
- r := r.(*ir.ConvExpr)
- if l.Class_ == ir.PEXTERN && r.X.Op() == ir.OLITERAL {
- sval := ir.StringVal(r.X)
- staticdata.InitSliceBytes(l, loff, sval)
- return true
- }
-
- case ir.OSLICELIT:
- r := r.(*ir.CompLitExpr)
- s.initplan(r)
- // Init slice.
- ta := types.NewArray(r.Type().Elem(), r.Len)
- ta.SetNoalg(true)
- a := staticname(ta)
- s.inittemps[r] = a
- staticdata.InitSlice(l, loff, a, r.Len)
- // Fall through to init underlying array.
- l = a
- loff = 0
- fallthrough
-
- case ir.OARRAYLIT, ir.OSTRUCTLIT:
- r := r.(*ir.CompLitExpr)
- s.initplan(r)
-
- p := s.initplans[r]
- for i := range p.E {
- e := &p.E[i]
- if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
- litsym(l, loff+e.Xoffset, e.Expr, int(e.Expr.Type().Width))
- continue
- }
- ir.SetPos(e.Expr)
- if !s.staticassign(l, loff+e.Xoffset, e.Expr, e.Expr.Type()) {
- a := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, e.Expr.Type())
- s.append(ir.NewAssignStmt(base.Pos, a, e.Expr))
- }
- }
-
- return true
-
- case ir.OMAPLIT:
- break
-
- case ir.OCLOSURE:
- r := r.(*ir.ClosureExpr)
- if hasemptycvars(r) {
- if base.Debug.Closure > 0 {
- base.WarnfAt(r.Pos(), "closure converted to global")
- }
- // Closures with no captured variables are globals,
- // so the assignment can be done at link time.
- // TODO if roff != 0 { panic }
- staticdata.InitFunc(l, loff, r.Func.Nname)
- return true
- }
- closuredebugruntimecheck(r)
-
- case ir.OCONVIFACE:
- // This logic is mirrored in isStaticCompositeLiteral.
- // If you change something here, change it there, and vice versa.
-
- // Determine the underlying concrete type and value we are converting from.
- r := r.(*ir.ConvExpr)
- val := ir.Node(r)
- for val.Op() == ir.OCONVIFACE {
- val = val.(*ir.ConvExpr).X
- }
-
- if val.Type().IsInterface() {
- // val is an interface type.
- // If val is nil, we can statically initialize l;
-		// both words are zero and so there is no work to do, so report success.
- // If val is non-nil, we have no concrete type to record,
- // and we won't be able to statically initialize its value, so report failure.
- return val.Op() == ir.ONIL
- }
-
- markTypeUsedInInterface(val.Type(), l.Sym().Linksym())
-
- var itab *ir.AddrExpr
- if typ.IsEmptyInterface() {
- itab = reflectdata.TypePtr(val.Type())
- } else {
- itab = reflectdata.ITabAddr(val.Type(), typ)
- }
-
- // Create a copy of l to modify while we emit data.
-
- // Emit itab, advance offset.
- staticdata.InitAddr(l, loff, itab.X.(*ir.Name), 0)
-
- // Emit data.
- if types.IsDirectIface(val.Type()) {
- if val.Op() == ir.ONIL {
- // Nil is zero, nothing to do.
- return true
- }
- // Copy val directly into n.
- ir.SetPos(val)
- if !s.staticassign(l, loff+int64(types.PtrSize), val, val.Type()) {
- a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(types.PtrSize), val.Type())
- s.append(ir.NewAssignStmt(base.Pos, a, val))
- }
- } else {
- // Construct temp to hold val, write pointer to temp into n.
- a := staticname(val.Type())
- s.inittemps[val] = a
- if !s.staticassign(a, 0, val, val.Type()) {
- s.append(ir.NewAssignStmt(base.Pos, a, val))
- }
- staticdata.InitAddr(l, loff+int64(types.PtrSize), a, 0)
- }
-
- return true
- }
-
-	// dump("not static", r)
- return false
-}
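
The OCONVIFACE case is what allows a package-level interface variable with a constant concrete value to be initialized entirely at link time. A minimal example of data this schedule can lay out statically, with no init code generated:

    package sketch

    // Both words of e are filled in statically: the first word points at
    // the type descriptor for int, and the data word points at a static
    // temp holding 42 (int is not pointer-shaped, so the value goes
    // through a temp, per the non-direct-iface branch above).
    var e interface{} = 42
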
-
-// initContext is the context in which static data is populated.
-// It is either in an init function or in any other function.
-// Static data populated in an init function will be written either
-// zero times (as a readonly, static data symbol) or
-// one time (during init function execution).
-// Either way, there is no opportunity for races or further modification,
-// so the data can be written to a (possibly readonly) data symbol.
-// Static data populated in any other function needs to be local to
-// that function to allow multiple instances of that function
-// to execute concurrently without clobbering each others' data.
-type initContext uint8
-
-const (
- inInitFunction initContext = iota
- inNonInitFunction
-)
-
-func (c initContext) String() string {
- if c == inInitFunction {
- return "inInitFunction"
- }
- return "inNonInitFunction"
-}
-
-// from here down is the walk analysis
-// of composite literals.
-// most of the work is to generate
-// data statements for the constant
-// part of the composite literal.
-
-var statuniqgen int // name generator for static temps
-
-// staticname returns a name backed by a (writable) static data symbol.
-// Use readonlystaticname for read-only node.
-func staticname(t *types.Type) *ir.Name {
- // Don't use lookupN; it interns the resulting string, but these are all unique.
- n := typecheck.NewName(typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
- statuniqgen++
- typecheck.Declare(n, ir.PEXTERN)
- n.SetType(t)
- n.Sym().Linksym().Set(obj.AttrLocal, true)
- return n
-}
-
-// readonlystaticname returns a name backed by a read-only static data symbol.
-func readonlystaticname(t *types.Type) *ir.Name {
- n := staticname(t)
- n.MarkReadonly()
- n.Sym().Linksym().Set(obj.AttrContentAddressable, true)
- return n
-}
-
-func isSimpleName(nn ir.Node) bool {
- if nn.Op() != ir.ONAME {
- return false
- }
- n := nn.(*ir.Name)
- return n.Class_ != ir.PAUTOHEAP && n.Class_ != ir.PEXTERN
-}
-
-func litas(l ir.Node, r ir.Node, init *ir.Nodes) {
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, r))
-}
-
-// initGenType is a bitmap indicating the types of generation that will occur for a static value.
-type initGenType uint8
-
-const (
- initDynamic initGenType = 1 << iota // contains some dynamic values, for which init code will be generated
- initConst // contains some constant values, which may be written into data symbols
-)
-
-// getdyn calculates the initGenType for n.
-// If top is false, getdyn is recursing.
-func getdyn(n ir.Node, top bool) initGenType {
- switch n.Op() {
- default:
- if ir.IsConstNode(n) {
- return initConst
- }
- return initDynamic
-
- case ir.OSLICELIT:
- n := n.(*ir.CompLitExpr)
- if !top {
- return initDynamic
- }
- if n.Len/4 > int64(len(n.List)) {
- // <25% of entries have explicit values.
- // Very rough estimation, it takes 4 bytes of instructions
- // to initialize 1 byte of result. So don't use a static
- // initializer if the dynamic initialization code would be
- // smaller than the static value.
- // See issue 23780.
- return initDynamic
- }
-
- case ir.OARRAYLIT, ir.OSTRUCTLIT:
- }
- lit := n.(*ir.CompLitExpr)
-
- var mode initGenType
- for _, n1 := range lit.List {
- switch n1.Op() {
- case ir.OKEY:
- n1 = n1.(*ir.KeyExpr).Value
- case ir.OSTRUCTKEY:
- n1 = n1.(*ir.StructKeyExpr).Value
- }
- mode |= getdyn(n1, false)
- if mode == initDynamic|initConst {
- break
- }
- }
- return mode
-}
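
The slice heuristic in getdyn (issue 23780) is easy to trigger from source. With fewer than a quarter of the entries explicit, the literal is classified initDynamic and initialized by generated code rather than a static array:

    package sketch

    // Len is 100 but only one entry is explicit (100/4 > 1), so getdyn
    // reports initDynamic: no static backing array is emitted.
    var sparse = []int{99: 1}

    // Every entry is explicit, so the constant part qualifies as
    // initConst and can live in a read-only symbol.
    var dense = []int{1, 2, 3, 4}
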
-
-// isStaticCompositeLiteral reports whether n is a compile-time constant.
-func isStaticCompositeLiteral(n ir.Node) bool {
- switch n.Op() {
- case ir.OSLICELIT:
- return false
- case ir.OARRAYLIT:
- n := n.(*ir.CompLitExpr)
- for _, r := range n.List {
- if r.Op() == ir.OKEY {
- r = r.(*ir.KeyExpr).Value
- }
- if !isStaticCompositeLiteral(r) {
- return false
- }
- }
- return true
- case ir.OSTRUCTLIT:
- n := n.(*ir.CompLitExpr)
- for _, r := range n.List {
- r := r.(*ir.StructKeyExpr)
- if !isStaticCompositeLiteral(r.Value) {
- return false
- }
- }
- return true
- case ir.OLITERAL, ir.ONIL:
- return true
- case ir.OCONVIFACE:
- // See staticassign's OCONVIFACE case for comments.
- n := n.(*ir.ConvExpr)
- val := ir.Node(n)
- for val.Op() == ir.OCONVIFACE {
- val = val.(*ir.ConvExpr).X
- }
- if val.Type().IsInterface() {
- return val.Op() == ir.ONIL
- }
- if types.IsDirectIface(val.Type()) && val.Op() == ir.ONIL {
- return true
- }
- return isStaticCompositeLiteral(val)
- }
- return false
-}
-
-// initKind is a kind of static initialization: static, dynamic, or local.
-// Static initialization represents literals and
-// literal components of composite literals.
-// Dynamic initialization represents non-literals and
-// non-literal components of composite literals.
-// LocalCode initialization represents initialization
-// that occurs purely in generated code local to the function of use.
-// Initialization code is sometimes generated in passes,
-// first static then dynamic.
-type initKind uint8
-
-const (
- initKindStatic initKind = iota + 1
- initKindDynamic
- initKindLocalCode
-)
-
-// fixedlit handles struct, array, and slice literals.
-// TODO: expand documentation.
-func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
- isBlank := var_ == ir.BlankNode
- var splitnode func(ir.Node) (a ir.Node, value ir.Node)
- switch n.Op() {
- case ir.OARRAYLIT, ir.OSLICELIT:
- var k int64
- splitnode = func(r ir.Node) (ir.Node, ir.Node) {
- if r.Op() == ir.OKEY {
- kv := r.(*ir.KeyExpr)
- k = typecheck.IndexConst(kv.Key)
- if k < 0 {
- base.Fatalf("fixedlit: invalid index %v", kv.Key)
- }
- r = kv.Value
- }
- a := ir.NewIndexExpr(base.Pos, var_, ir.NewInt(k))
- k++
- if isBlank {
- return ir.BlankNode, r
- }
- return a, r
- }
- case ir.OSTRUCTLIT:
- splitnode = func(rn ir.Node) (ir.Node, ir.Node) {
- r := rn.(*ir.StructKeyExpr)
- if r.Field.IsBlank() || isBlank {
- return ir.BlankNode, r.Value
- }
- ir.SetPos(r)
- return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Field), r.Value
- }
- default:
- base.Fatalf("fixedlit bad op: %v", n.Op())
- }
-
- for _, r := range n.List {
- a, value := splitnode(r)
- if a == ir.BlankNode && !anySideEffects(value) {
- // Discard.
- continue
- }
-
- switch value.Op() {
- case ir.OSLICELIT:
- value := value.(*ir.CompLitExpr)
- if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) {
- slicelit(ctxt, value, a, init)
- continue
- }
-
- case ir.OARRAYLIT, ir.OSTRUCTLIT:
- value := value.(*ir.CompLitExpr)
- fixedlit(ctxt, kind, value, a, init)
- continue
- }
-
- islit := ir.IsConstNode(value)
- if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
- continue
- }
-
- // build list of assignments: var[index] = expr
- ir.SetPos(a)
- as := ir.NewAssignStmt(base.Pos, a, value)
- as = typecheck.Stmt(as).(*ir.AssignStmt)
- switch kind {
- case initKindStatic:
- genAsStatic(as)
- case initKindDynamic, initKindLocalCode:
- a = orderStmtInPlace(as, map[string][]*ir.Name{})
- a = walkstmt(a)
- init.Append(a)
- default:
- base.Fatalf("fixedlit: bad kind %d", kind)
- }
-
- }
-}
-
-func isSmallSliceLit(n *ir.CompLitExpr) bool {
- if n.Op() != ir.OSLICELIT {
- return false
- }
-
- return n.Type().Elem().Width == 0 || n.Len <= ir.MaxSmallArraySize/n.Type().Elem().Width
-}
-
-func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
-	// make an array type corresponding to the number of elements we have
- t := types.NewArray(n.Type().Elem(), n.Len)
- types.CalcSize(t)
-
- if ctxt == inNonInitFunction {
- // put everything into static array
- vstat := staticname(t)
-
- fixedlit(ctxt, initKindStatic, n, vstat, init)
- fixedlit(ctxt, initKindDynamic, n, vstat, init)
-
- // copy static to slice
- var_ = typecheck.AssignExpr(var_)
- name, offset, ok := stataddr(var_)
- if !ok || name.Class_ != ir.PEXTERN {
- base.Fatalf("slicelit: %v", var_)
- }
- staticdata.InitSlice(name, offset, vstat, t.NumElem())
- return
- }
-
- // recipe for var = []t{...}
- // 1. make a static array
- // var vstat [...]t
- // 2. assign (data statements) the constant part
- // vstat = constpart{}
- // 3. make an auto pointer to array and allocate heap to it
- // var vauto *[...]t = new([...]t)
- // 4. copy the static array to the auto array
- // *vauto = vstat
- // 5. for each dynamic part assign to the array
- // vauto[i] = dynamic part
- // 6. assign slice of allocated heap to var
- // var = vauto[:]
- //
- // an optimization is done if there is no constant part
- // 3. var vauto *[...]t = new([...]t)
- // 5. vauto[i] = dynamic part
- // 6. var = vauto[:]
-
- // if the literal contains constants,
- // make static initialized array (1),(2)
- var vstat ir.Node
-
- mode := getdyn(n, true)
- if mode&initConst != 0 && !isSmallSliceLit(n) {
- if ctxt == inInitFunction {
- vstat = readonlystaticname(t)
- } else {
- vstat = staticname(t)
- }
- fixedlit(ctxt, initKindStatic, n, vstat, init)
- }
-
- // make new auto *array (3 declare)
- vauto := typecheck.Temp(types.NewPtr(t))
-
- // set auto to point at new temp or heap (3 assign)
- var a ir.Node
- if x := n.Prealloc; x != nil {
- // temp allocated during order.go for dddarg
- if !types.Identical(t, x.Type()) {
- panic("dotdotdot base type does not match order's assigned type")
- }
-
- if vstat == nil {
- a = ir.NewAssignStmt(base.Pos, x, nil)
- a = typecheck.Stmt(a)
- init.Append(a) // zero new temp
- } else {
- // Declare that we're about to initialize all of x.
- // (Which happens at the *vauto = vstat below.)
- init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, x))
- }
-
- a = typecheck.NodAddr(x)
- } else if n.Esc() == ir.EscNone {
- a = typecheck.Temp(t)
- if vstat == nil {
- a = ir.NewAssignStmt(base.Pos, typecheck.Temp(t), nil)
- a = typecheck.Stmt(a)
- init.Append(a) // zero new temp
- a = a.(*ir.AssignStmt).X
- } else {
- init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, a))
- }
-
- a = typecheck.NodAddr(a)
- } else {
- a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t))
- }
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, vauto, a))
-
- if vstat != nil {
- // copy static to heap (4)
- a = ir.NewStarExpr(base.Pos, vauto)
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, a, vstat))
- }
-
- // put dynamics into array (5)
- var index int64
- for _, value := range n.List {
- if value.Op() == ir.OKEY {
- kv := value.(*ir.KeyExpr)
- index = typecheck.IndexConst(kv.Key)
- if index < 0 {
- base.Fatalf("slicelit: invalid index %v", kv.Key)
- }
- value = kv.Value
- }
- a := ir.NewIndexExpr(base.Pos, vauto, ir.NewInt(index))
- a.SetBounded(true)
- index++
-
- // TODO need to check bounds?
-
- switch value.Op() {
- case ir.OSLICELIT:
- break
-
- case ir.OARRAYLIT, ir.OSTRUCTLIT:
- value := value.(*ir.CompLitExpr)
- k := initKindDynamic
- if vstat == nil {
- // Generate both static and dynamic initializations.
- // See issue #31987.
- k = initKindLocalCode
- }
- fixedlit(ctxt, k, value, a, init)
- continue
- }
-
- if vstat != nil && ir.IsConstNode(value) { // already set by copy from static value
- continue
- }
-
- // build list of vauto[c] = expr
- ir.SetPos(value)
- as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, a, value))
- as = orderStmtInPlace(as, map[string][]*ir.Name{})
- as = walkstmt(as)
- init.Append(as)
- }
-
- // make slice out of heap (6)
- a = ir.NewAssignStmt(base.Pos, var_, ir.NewSliceExpr(base.Pos, ir.OSLICE, vauto))
-
- a = typecheck.Stmt(a)
- a = orderStmtInPlace(a, map[string][]*ir.Name{})
- a = walkstmt(a)
- init.Append(a)
-}
-
-func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
- // make the map var
- a := ir.NewCallExpr(base.Pos, ir.OMAKE, nil, nil)
- a.SetEsc(n.Esc())
- a.Args = []ir.Node{ir.TypeNode(n.Type()), ir.NewInt(int64(len(n.List)))}
- litas(m, a, init)
-
- entries := n.List
-
- // The order pass already removed any dynamic (runtime-computed) entries.
- // All remaining entries are static. Double-check that.
- for _, r := range entries {
- r := r.(*ir.KeyExpr)
- if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
- base.Fatalf("maplit: entry is not a literal: %v", r)
- }
- }
-
- if len(entries) > 25 {
- // For a large number of entries, put them in an array and loop.
-
- // build types [count]Tindex and [count]Tvalue
- tk := types.NewArray(n.Type().Key(), int64(len(entries)))
- te := types.NewArray(n.Type().Elem(), int64(len(entries)))
-
- tk.SetNoalg(true)
- te.SetNoalg(true)
-
- types.CalcSize(tk)
- types.CalcSize(te)
-
- // make and initialize static arrays
- vstatk := readonlystaticname(tk)
- vstate := readonlystaticname(te)
-
- datak := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
- datae := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
- for _, r := range entries {
- r := r.(*ir.KeyExpr)
- datak.List.Append(r.Key)
- datae.List.Append(r.Value)
- }
- fixedlit(inInitFunction, initKindStatic, datak, vstatk, init)
- fixedlit(inInitFunction, initKindStatic, datae, vstate, init)
-
- // loop adding structure elements to map
- // for i = 0; i < len(vstatk); i++ {
- // map[vstatk[i]] = vstate[i]
- // }
- i := typecheck.Temp(types.Types[types.TINT])
- rhs := ir.NewIndexExpr(base.Pos, vstate, i)
- rhs.SetBounded(true)
-
- kidx := ir.NewIndexExpr(base.Pos, vstatk, i)
- kidx.SetBounded(true)
- lhs := ir.NewIndexExpr(base.Pos, m, kidx)
-
- zero := ir.NewAssignStmt(base.Pos, i, ir.NewInt(0))
- cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(tk.NumElem()))
- incr := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1)))
- body := ir.NewAssignStmt(base.Pos, lhs, rhs)
-
- loop := ir.NewForStmt(base.Pos, nil, cond, incr, nil)
- loop.Body = []ir.Node{body}
- *loop.PtrInit() = []ir.Node{zero}
-
- appendWalkStmt(init, loop)
- return
- }
- // For a small number of entries, just add them directly.
-
- // Build list of var[c] = expr.
- // Use temporaries so that mapassign1 can have addressable key, elem.
- // TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
- tmpkey := typecheck.Temp(m.Type().Key())
- tmpelem := typecheck.Temp(m.Type().Elem())
-
- for _, r := range entries {
- r := r.(*ir.KeyExpr)
- index, elem := r.Key, r.Value
-
- ir.SetPos(index)
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpkey, index))
-
- ir.SetPos(elem)
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpelem, elem))
-
- ir.SetPos(tmpelem)
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, tmpkey), tmpelem))
- }
-
- appendWalkStmt(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, tmpkey))
- appendWalkStmt(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, tmpelem))
-}
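
maplit's 25-entry threshold picks between the two strategies above. Both sides of the cutoff, as source:

    package sketch

    // Two entries (at most 25): each pair is copied through the
    // tmpkey/tmpelem temporaries so mapassign sees addressable operands.
    var small = map[string]int{"a": 1, "b": 2}

    // A literal with more than 25 entries would instead be laid out as
    // two read-only arrays (keys and elements) and inserted via a
    // generated loop.
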
-
-func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
- t := n.Type()
- switch n.Op() {
- default:
- base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n)
-
- case ir.ONAME:
- n := n.(*ir.Name)
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, n))
-
- case ir.OMETHEXPR:
- n := n.(*ir.MethodExpr)
- anylit(n.FuncName(), var_, init)
-
- case ir.OPTRLIT:
- n := n.(*ir.AddrExpr)
- if !t.IsPtr() {
- base.Fatalf("anylit: not ptr")
- }
-
- var r ir.Node
- if n.Alloc != nil {
-		// n.Alloc is a stack temporary used as backing store.
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n.Alloc, nil)) // zero backing store, just in case (#18410)
- r = typecheck.NodAddr(n.Alloc)
- } else {
- r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.X.Type()))
- r.SetEsc(n.Esc())
- }
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, r))
-
- var_ = ir.NewStarExpr(base.Pos, var_)
- var_ = typecheck.AssignExpr(var_)
- anylit(n.X, var_, init)
-
- case ir.OSTRUCTLIT, ir.OARRAYLIT:
- n := n.(*ir.CompLitExpr)
- if !t.IsStruct() && !t.IsArray() {
- base.Fatalf("anylit: not struct/array")
- }
-
- if isSimpleName(var_) && len(n.List) > 4 {
- // lay out static data
- vstat := readonlystaticname(t)
-
- ctxt := inInitFunction
- if n.Op() == ir.OARRAYLIT {
- ctxt = inNonInitFunction
- }
- fixedlit(ctxt, initKindStatic, n, vstat, init)
-
- // copy static to var
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, vstat))
-
- // add expressions to automatic
- fixedlit(inInitFunction, initKindDynamic, n, var_, init)
- break
- }
-
- var components int64
- if n.Op() == ir.OARRAYLIT {
- components = t.NumElem()
- } else {
- components = int64(t.NumFields())
- }
-	// initialization of an array or struct with unspecified components (missing fields or elements)
- if isSimpleName(var_) || int64(len(n.List)) < components {
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil))
- }
-
- fixedlit(inInitFunction, initKindLocalCode, n, var_, init)
-
- case ir.OSLICELIT:
- n := n.(*ir.CompLitExpr)
- slicelit(inInitFunction, n, var_, init)
-
- case ir.OMAPLIT:
- n := n.(*ir.CompLitExpr)
- if !t.IsMap() {
- base.Fatalf("anylit: not map")
- }
- maplit(n, var_, init)
- }
-}
-
-// oaslit handles special composite literal assignments.
-// It returns true if n's effects have been added to init,
-// in which case n should be dropped from the program by the caller.
-func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool {
- if n.X == nil || n.Y == nil {
- // not a special composite literal assignment
- return false
- }
- if n.X.Type() == nil || n.Y.Type() == nil {
- // not a special composite literal assignment
- return false
- }
- if !isSimpleName(n.X) {
- // not a special composite literal assignment
- return false
- }
- if !types.Identical(n.X.Type(), n.Y.Type()) {
- // not a special composite literal assignment
- return false
- }
-
- switch n.Y.Op() {
- default:
- // not a special composite literal assignment
- return false
-
- case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
- if refersToCommonName(n.X, n.Y) {
- // not a special composite literal assignment
- return false
- }
- anylit(n.Y, n.X, init)
- }
-
- return true
-}
-
-func getlit(lit ir.Node) int {
- if ir.IsSmallIntConst(lit) {
- return int(ir.Int64Val(lit))
- }
- return -1
-}
-
-// stataddr returns the static address of n, if n has one; otherwise it reports ok == false.
-func stataddr(n ir.Node) (name *ir.Name, offset int64, ok bool) {
- if n == nil {
- return nil, 0, false
- }
-
- switch n.Op() {
- case ir.ONAME:
- n := n.(*ir.Name)
- return n, 0, true
-
- case ir.OMETHEXPR:
- n := n.(*ir.MethodExpr)
- return stataddr(n.FuncName())
-
- case ir.ODOT:
- n := n.(*ir.SelectorExpr)
- if name, offset, ok = stataddr(n.X); !ok {
- break
- }
- offset += n.Offset
- return name, offset, true
-
- case ir.OINDEX:
- n := n.(*ir.IndexExpr)
- if n.X.Type().IsSlice() {
- break
- }
- if name, offset, ok = stataddr(n.X); !ok {
- break
- }
- l := getlit(n.Index)
- if l < 0 {
- break
- }
-
- // Check for overflow.
- if n.Type().Width != 0 && types.MaxWidth/n.Type().Width <= int64(l) {
- break
- }
- offset += int64(l) * n.Type().Width
- return name, offset, true
- }
-
- return nil, 0, false
-}
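
stataddr's offset arithmetic flattens nested selectors and constant indexes into a single (symbol, offset) pair. A worked example, assuming the usual 4-byte int32 layout:

    package sketch

    var g struct {
    	pad [8]byte
    	x   [4]int32
    }

    // stataddr resolves &g.x[2] to the pair (g, 16):
    // offset of x within g is 8, plus 2 * 4 bytes per int32 = 8 more.
    var p = &g.x[2]
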
-
-func (s *InitSchedule) initplan(n ir.Node) {
- if s.initplans[n] != nil {
- return
- }
- p := new(InitPlan)
- s.initplans[n] = p
- switch n.Op() {
- default:
- base.Fatalf("initplan")
-
- case ir.OARRAYLIT, ir.OSLICELIT:
- n := n.(*ir.CompLitExpr)
- var k int64
- for _, a := range n.List {
- if a.Op() == ir.OKEY {
- kv := a.(*ir.KeyExpr)
- k = typecheck.IndexConst(kv.Key)
- if k < 0 {
- base.Fatalf("initplan arraylit: invalid index %v", kv.Key)
- }
- a = kv.Value
- }
- s.addvalue(p, k*n.Type().Elem().Width, a)
- k++
- }
-
- case ir.OSTRUCTLIT:
- n := n.(*ir.CompLitExpr)
- for _, a := range n.List {
- if a.Op() != ir.OSTRUCTKEY {
- base.Fatalf("initplan structlit")
- }
- a := a.(*ir.StructKeyExpr)
- if a.Field.IsBlank() {
- continue
- }
- s.addvalue(p, a.Offset, a.Value)
- }
-
- case ir.OMAPLIT:
- n := n.(*ir.CompLitExpr)
- for _, a := range n.List {
- if a.Op() != ir.OKEY {
- base.Fatalf("initplan maplit")
- }
- a := a.(*ir.KeyExpr)
- s.addvalue(p, -1, a.Value)
- }
- }
-}
-
-func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n ir.Node) {
- // special case: zero can be dropped entirely
- if ir.IsZero(n) {
- return
- }
-
- // special case: inline struct and array (not slice) literals
- if isvaluelit(n) {
- s.initplan(n)
- q := s.initplans[n]
- for _, qe := range q.E {
- // qe is a copy; we are not modifying entries in q.E
- qe.Xoffset += xoffset
- p.E = append(p.E, qe)
- }
- return
- }
-
- // add to plan
- p.E = append(p.E, InitEntry{Xoffset: xoffset, Expr: n})
-}
-
-func isvaluelit(n ir.Node) bool {
- return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT
-}
-
-func genAsStatic(as *ir.AssignStmt) {
- if as.X.Type() == nil {
- base.Fatalf("genAsStatic as.Left not typechecked")
- }
-
- name, offset, ok := stataddr(as.X)
- if !ok || (name.Class_ != ir.PEXTERN && as.X != ir.BlankNode) {
- base.Fatalf("genAsStatic: lhs %v", as.X)
- }
-
- switch r := as.Y; r.Op() {
- case ir.OLITERAL:
- litsym(name, offset, r, int(r.Type().Width))
- return
- case ir.OMETHEXPR:
- r := r.(*ir.MethodExpr)
- staticdata.InitFunc(name, offset, r.FuncName())
- return
- case ir.ONAME:
- r := r.(*ir.Name)
- if r.Offset_ != 0 {
- base.Fatalf("genAsStatic %+v", as)
- }
- if r.Class_ == ir.PFUNC {
- staticdata.InitFunc(name, offset, r)
- return
- }
- }
- base.Fatalf("genAsStatic: rhs %v", as.Y)
-}
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
deleted file mode 100644
index 17bbd1c3a2..0000000000
--- a/src/cmd/compile/internal/gc/subr.go
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/ssagen"
- "cmd/compile/internal/typecheck"
- "cmd/compile/internal/types"
- "cmd/internal/src"
- "fmt"
-)
-
-// backingArrayPtrLen extracts the pointer and length from a slice or string.
-// This constructs two nodes referring to n, so n must be a cheapexpr.
-func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
- var init ir.Nodes
- c := cheapexpr(n, &init)
- if c != n || len(init) != 0 {
- base.Fatalf("backingArrayPtrLen not cheap: %v", n)
- }
- ptr = ir.NewUnaryExpr(base.Pos, ir.OSPTR, n)
- if n.Type().IsString() {
- ptr.SetType(types.Types[types.TUINT8].PtrTo())
- } else {
- ptr.SetType(n.Type().Elem().PtrTo())
- }
- length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n)
- length.SetType(types.Types[types.TINT])
- return ptr, length
-}
-
-// updateHasCall checks whether expression n contains any function
-// calls and sets the n.HasCall flag if so.
-func updateHasCall(n ir.Node) {
- if n == nil {
- return
- }
- n.SetHasCall(calcHasCall(n))
-}
-
-func calcHasCall(n ir.Node) bool {
- if len(n.Init()) != 0 {
- // TODO(mdempsky): This seems overly conservative.
- return true
- }
-
- switch n.Op() {
- default:
- base.Fatalf("calcHasCall %+v", n)
- panic("unreachable")
-
- case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE, ir.ONAMEOFFSET:
- if n.HasCall() {
- base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n)
- }
- return false
- case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
- return true
- case ir.OANDAND, ir.OOROR:
- // hard with instrumented code
- n := n.(*ir.LogicalExpr)
- if base.Flag.Cfg.Instrumenting {
- return true
- }
- return n.X.HasCall() || n.Y.HasCall()
- case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
- ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD:
-		// These ops might panic; make sure they are done
-		// before we start marshaling args for a call. See issue 16760.
- return true
-
- // When using soft-float, these ops might be rewritten to function calls
- // so we ensure they are evaluated first.
- case ir.OADD, ir.OSUB, ir.OMUL:
- n := n.(*ir.BinaryExpr)
- if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) {
- return true
- }
- return n.X.HasCall() || n.Y.HasCall()
- case ir.ONEG:
- n := n.(*ir.UnaryExpr)
- if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) {
- return true
- }
- return n.X.HasCall()
- case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
- n := n.(*ir.BinaryExpr)
- if ssagen.Arch.SoftFloat && (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()]) {
- return true
- }
- return n.X.HasCall() || n.Y.HasCall()
- case ir.OCONV:
- n := n.(*ir.ConvExpr)
- if ssagen.Arch.SoftFloat && ((types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) || (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()])) {
- return true
- }
- return n.X.HasCall()
-
- case ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOPY, ir.OCOMPLEX, ir.OEFACE:
- n := n.(*ir.BinaryExpr)
- return n.X.HasCall() || n.Y.HasCall()
-
- case ir.OAS:
- n := n.(*ir.AssignStmt)
- return n.X.HasCall() || n.Y != nil && n.Y.HasCall()
-
- case ir.OADDR:
- n := n.(*ir.AddrExpr)
- return n.X.HasCall()
- case ir.OPAREN:
- n := n.(*ir.ParenExpr)
- return n.X.HasCall()
- case ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.ORECV,
- ir.OALIGNOF, ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.ONEW,
- ir.OOFFSETOF, ir.OPANIC, ir.OREAL, ir.OSIZEOF,
- ir.OCHECKNIL, ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.ONEWOBJ, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE:
- n := n.(*ir.UnaryExpr)
- return n.X.HasCall()
- case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
- n := n.(*ir.SelectorExpr)
- return n.X.HasCall()
-
- case ir.OGETG, ir.OCLOSUREREAD, ir.OMETHEXPR:
- return false
-
- // TODO(rsc): These look wrong in various ways but are what calcHasCall has always done.
- case ir.OADDSTR:
- // TODO(rsc): This used to check left and right, which are not part of OADDSTR.
- return false
- case ir.OBLOCK:
- // TODO(rsc): Surely the block's statements matter.
- return false
- case ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.OBYTES2STRTMP, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2BYTESTMP, ir.OSTR2RUNES, ir.ORUNESTR:
- // TODO(rsc): Some conversions are themselves calls, no?
- n := n.(*ir.ConvExpr)
- return n.X.HasCall()
- case ir.ODOTTYPE2:
- // TODO(rsc): Shouldn't this be up with ODOTTYPE above?
- n := n.(*ir.TypeAssertExpr)
- return n.X.HasCall()
- case ir.OSLICEHEADER:
- // TODO(rsc): What about len and cap?
- n := n.(*ir.SliceHeaderExpr)
- return n.Ptr.HasCall()
- case ir.OAS2DOTTYPE, ir.OAS2FUNC:
- // TODO(rsc): Surely we need to check List and Rlist.
- return false
- }
-}
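
The "might panic" cases above (OINDEX, ODIV, ...) encode the ordering that issue 16760 protects: the gc compiler ensures a panic from such an operand fires before any call in the same expression starts marshaling arguments. A minimal illustration of the observable behavior:

    package main

    import "fmt"

    func f() int {
        fmt.Println("f ran") // must never print below
        return 0
    }

    func main() {
        defer func() { fmt.Println("recovered:", recover()) }()
        var xs []int
        _ = xs[0] + f() // xs[0] panics before f is called
    }
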
-
-func badtype(op ir.Op, tl, tr *types.Type) {
- var s string
- if tl != nil {
- s += fmt.Sprintf("\n\t%v", tl)
- }
- if tr != nil {
- s += fmt.Sprintf("\n\t%v", tr)
- }
-
- // common mistake: *struct and *interface.
- if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
- if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
- s += "\n\t(*struct vs *interface)"
- } else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
- s += "\n\t(*interface vs *struct)"
- }
- }
-
- base.Errorf("illegal types for operand: %v%s", op, s)
-}
-
-// brcom returns !(op).
-// For example, brcom(==) is !=.
-func brcom(op ir.Op) ir.Op {
- switch op {
- case ir.OEQ:
- return ir.ONE
- case ir.ONE:
- return ir.OEQ
- case ir.OLT:
- return ir.OGE
- case ir.OGT:
- return ir.OLE
- case ir.OLE:
- return ir.OGT
- case ir.OGE:
- return ir.OLT
- }
- base.Fatalf("brcom: no com for %v\n", op)
- return op
-}
-
-// brrev returns reverse(op).
- // For example, brrev(<) is >.
-func brrev(op ir.Op) ir.Op {
- switch op {
- case ir.OEQ:
- return ir.OEQ
- case ir.ONE:
- return ir.ONE
- case ir.OLT:
- return ir.OGT
- case ir.OGT:
- return ir.OLT
- case ir.OLE:
- return ir.OGE
- case ir.OGE:
- return ir.OLE
- }
- base.Fatalf("brrev: no rev for %v\n", op)
- return op
-}
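
Since brcom and brrev are easy to confuse: brcom negates the result of the comparison, while brrev swaps its operands. Both identities for <, checked over a small range (illustrative only):

    package main

    import "fmt"

    func main() {
        for a := 0; a < 3; a++ {
            for b := 0; b < 3; b++ {
                fmt.Println(!(a < b) == (a >= b)) // brcom(<) is >=
                fmt.Println((a < b) == (b > a))   // brrev(<) is >
            }
        }
    }
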
-
- // safeexpr returns a side-effect-free n, appending any side effects to init.
- // The result is assignable if n is.
-func safeexpr(n ir.Node, init *ir.Nodes) ir.Node {
- if n == nil {
- return nil
- }
-
- if len(n.Init()) != 0 {
- walkstmtlist(n.Init())
- init.Append(n.PtrInit().Take()...)
- }
-
- switch n.Op() {
- case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
- return n
-
- case ir.OLEN, ir.OCAP:
- n := n.(*ir.UnaryExpr)
- l := safeexpr(n.X, init)
- if l == n.X {
- return n
- }
- a := ir.Copy(n).(*ir.UnaryExpr)
- a.X = l
- return walkexpr(typecheck.Expr(a), init)
-
- case ir.ODOT, ir.ODOTPTR:
- n := n.(*ir.SelectorExpr)
- l := safeexpr(n.X, init)
- if l == n.X {
- return n
- }
- a := ir.Copy(n).(*ir.SelectorExpr)
- a.X = l
- return walkexpr(typecheck.Expr(a), init)
-
- case ir.ODEREF:
- n := n.(*ir.StarExpr)
- l := safeexpr(n.X, init)
- if l == n.X {
- return n
- }
- a := ir.Copy(n).(*ir.StarExpr)
- a.X = l
- return walkexpr(typecheck.Expr(a), init)
-
- case ir.OINDEX, ir.OINDEXMAP:
- n := n.(*ir.IndexExpr)
- l := safeexpr(n.X, init)
- r := safeexpr(n.Index, init)
- if l == n.X && r == n.Index {
- return n
- }
- a := ir.Copy(n).(*ir.IndexExpr)
- a.X = l
- a.Index = r
- return walkexpr(typecheck.Expr(a), init)
-
- case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
- n := n.(*ir.CompLitExpr)
- if isStaticCompositeLiteral(n) {
- return n
- }
- }
-
- // make a copy; must not be used as an lvalue
- if ir.IsAssignable(n) {
- base.Fatalf("missing lvalue case in safeexpr: %v", n)
- }
- return cheapexpr(n, init)
-}
-
-func copyexpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
- l := typecheck.Temp(t)
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, n))
- return l
-}
-
- // cheapexpr returns a side-effect-free and cheap n, appending side effects to init.
- // The result may not be assignable.
-func cheapexpr(n ir.Node, init *ir.Nodes) ir.Node {
- switch n.Op() {
- case ir.ONAME, ir.OLITERAL, ir.ONIL:
- return n
- }
-
- return copyexpr(n, n.Type(), init)
-}
-
-// itabType loads the _type field from a runtime.itab struct.
-func itabType(itab ir.Node) ir.Node {
- typ := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil)
- typ.SetType(types.NewPtr(types.Types[types.TUINT8]))
- typ.SetTypecheck(1)
- typ.Offset = int64(types.PtrSize) // offset of _type in runtime.itab
- typ.SetBounded(true) // guaranteed not to fault
- return typ
-}
-
-// ifaceData loads the data field from an interface.
-// The concrete type must be known to have type t.
-// It follows the pointer if !isdirectiface(t).
-func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
- if t.IsInterface() {
- base.Fatalf("ifaceData interface: %v", t)
- }
- ptr := ir.NewUnaryExpr(pos, ir.OIDATA, n)
- if types.IsDirectIface(t) {
- ptr.SetType(t)
- ptr.SetTypecheck(1)
- return ptr
- }
- ptr.SetType(types.NewPtr(t))
- ptr.SetTypecheck(1)
- ind := ir.NewStarExpr(pos, ptr)
- ind.SetType(t)
- ind.SetTypecheck(1)
- ind.SetBounded(true)
- return ind
-}
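
itabType and ifaceData hard-code the runtime's interface layout. A sketch of the structs they address, abridged to just the fields the helpers above rely on (see runtime/runtime2.go for the real definitions):

    package sketch

    import "unsafe"

    type iface struct {
        tab  unsafe.Pointer // *itab; this word is loaded by OITAB
        data unsafe.Pointer // this word is loaded by OIDATA
    }

    type itab struct {
        inter unsafe.Pointer // *interfacetype
        _type unsafe.Pointer // read by itabType at offset PtrSize
        hash  uint32         // at offset 2*PtrSize; used by type switches
        // ... method pointers follow
    }
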
diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go
deleted file mode 100644
index 9ffa8b67bb..0000000000
--- a/src/cmd/compile/internal/gc/swt.go
+++ /dev/null
@@ -1,549 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/typecheck"
- "cmd/compile/internal/types"
- "cmd/internal/src"
- "go/constant"
- "go/token"
- "sort"
-)
-
-// walkswitch walks a switch statement.
-func walkswitch(sw *ir.SwitchStmt) {
- // Guard against double walk, see #25776.
- if len(sw.Cases) == 0 && len(sw.Compiled) > 0 {
- return // Was fatal, but eliminating every possible source of double-walking is hard
- }
-
- if sw.Tag != nil && sw.Tag.Op() == ir.OTYPESW {
- walkTypeSwitch(sw)
- } else {
- walkExprSwitch(sw)
- }
-}
-
-// walkExprSwitch generates an AST implementing sw. sw is an
-// expression switch.
-func walkExprSwitch(sw *ir.SwitchStmt) {
- lno := ir.SetPos(sw)
-
- cond := sw.Tag
- sw.Tag = nil
-
- // convert switch {...} to switch true {...}
- if cond == nil {
- cond = ir.NewBool(true)
- cond = typecheck.Expr(cond)
- cond = typecheck.DefaultLit(cond, nil)
- }
-
- // Given "switch string(byteslice)",
- // with all cases being side-effect free,
- // use a zero-cost alias of the byte slice.
- // Do this before calling walkexpr on cond,
- // because walkexpr will lower the string
- // conversion into a runtime call.
- // See issue 24937 for more discussion.
- if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
- cond := cond.(*ir.ConvExpr)
- cond.SetOp(ir.OBYTES2STRTMP)
- }
-
- cond = walkexpr(cond, sw.PtrInit())
- if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL {
- cond = copyexpr(cond, cond.Type(), &sw.Compiled)
- }
-
- base.Pos = lno
-
- s := exprSwitch{
- exprname: cond,
- }
-
- var defaultGoto ir.Node
- var body ir.Nodes
- for _, ncase := range sw.Cases {
- ncase := ncase.(*ir.CaseStmt)
- label := typecheck.AutoLabel(".s")
- jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label)
-
- // Process case dispatch.
- if len(ncase.List) == 0 {
- if defaultGoto != nil {
- base.Fatalf("duplicate default case not detected during typechecking")
- }
- defaultGoto = jmp
- }
-
- for _, n1 := range ncase.List {
- s.Add(ncase.Pos(), n1, jmp)
- }
-
- // Process body.
- body.Append(ir.NewLabelStmt(ncase.Pos(), label))
- body.Append(ncase.Body...)
- if fall, pos := endsInFallthrough(ncase.Body); !fall {
- br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
- br.SetPos(pos)
- body.Append(br)
- }
- }
- sw.Cases.Set(nil)
-
- if defaultGoto == nil {
- br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
- br.SetPos(br.Pos().WithNotStmt())
- defaultGoto = br
- }
-
- s.Emit(&sw.Compiled)
- sw.Compiled.Append(defaultGoto)
- sw.Compiled.Append(body.Take()...)
- walkstmtlist(sw.Compiled)
-}
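
The OBYTES2STRTMP rewrite above is user-visible as a missing allocation: when every case is a constant, string(b) only aliases the byte slice for the duration of the comparisons. A sketch:

    package main

    import "fmt"

    // method's string(b) conversion does not allocate: all cases are
    // constants, so the compiler aliases b as a temporary string.
    func method(b []byte) int {
        switch string(b) {
        case "GET":
            return 0
        case "PUT":
            return 1
        }
        return -1
    }

    func main() {
        fmt.Println(method([]byte("GET"))) // 0
    }
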
-
-// An exprSwitch walks an expression switch.
-type exprSwitch struct {
- exprname ir.Node // value being switched on
-
- done ir.Nodes
- clauses []exprClause
-}
-
-type exprClause struct {
- pos src.XPos
- lo, hi ir.Node
- jmp ir.Node
-}
-
-func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) {
- c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
- if types.IsOrdered[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL {
- s.clauses = append(s.clauses, c)
- return
- }
-
- s.flush()
- s.clauses = append(s.clauses, c)
- s.flush()
-}
-
-func (s *exprSwitch) Emit(out *ir.Nodes) {
- s.flush()
- out.Append(s.done.Take()...)
-}
-
-func (s *exprSwitch) flush() {
- cc := s.clauses
- s.clauses = nil
- if len(cc) == 0 {
- return
- }
-
- // Caution: If len(cc) == 1, then cc[0] might not be an OLITERAL.
- // The code below is structured to implicitly handle this case
- // (e.g., sort.Slice doesn't need to invoke the less function
- // when there's only a single slice element).
-
- if s.exprname.Type().IsString() && len(cc) >= 2 {
- // Sort strings by length and then by value. It is
- // much cheaper to compare lengths than values, and
- // all we need here is consistency. We respect this
- // sorting below.
- sort.Slice(cc, func(i, j int) bool {
- si := ir.StringVal(cc[i].lo)
- sj := ir.StringVal(cc[j].lo)
- if len(si) != len(sj) {
- return len(si) < len(sj)
- }
- return si < sj
- })
-
- // runLen returns the string length associated with a
- // particular run of exprClauses.
- runLen := func(run []exprClause) int64 { return int64(len(ir.StringVal(run[0].lo))) }
-
- // Collapse runs of consecutive strings with the same length.
- var runs [][]exprClause
- start := 0
- for i := 1; i < len(cc); i++ {
- if runLen(cc[start:]) != runLen(cc[i:]) {
- runs = append(runs, cc[start:i])
- start = i
- }
- }
- runs = append(runs, cc[start:])
-
- // Perform two-level binary search.
- binarySearch(len(runs), &s.done,
- func(i int) ir.Node {
- return ir.NewBinaryExpr(base.Pos, ir.OLE, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), ir.NewInt(runLen(runs[i-1])))
- },
- func(i int, nif *ir.IfStmt) {
- run := runs[i]
- nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), ir.NewInt(runLen(run)))
- s.search(run, &nif.Body)
- },
- )
- return
- }
-
- sort.Slice(cc, func(i, j int) bool {
- return constant.Compare(cc[i].lo.Val(), token.LSS, cc[j].lo.Val())
- })
-
- // Merge consecutive integer cases.
- if s.exprname.Type().IsInteger() {
- merged := cc[:1]
- for _, c := range cc[1:] {
- last := &merged[len(merged)-1]
- if last.jmp == c.jmp && ir.Int64Val(last.hi)+1 == ir.Int64Val(c.lo) {
- last.hi = c.lo
- } else {
- merged = append(merged, c)
- }
- }
- cc = merged
- }
-
- s.search(cc, &s.done)
-}
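
Concretely, for string cases "a", "bc", "de" the emitted code first dispatches on length and only compares values within a same-length run. A hand-expanded analogue of that shape (the real output is nested ifs with gotos; with four or more runs the length tests themselves become a binary search):

    package main

    import "fmt"

    func dispatch(s string) int {
        switch {
        case len(s) == 1 && s == "a":
            return 0
        case len(s) == 2 && s == "bc":
            return 1
        case len(s) == 2 && s == "de":
            return 2
        }
        return -1 // default
    }

    func main() {
        fmt.Println(dispatch("a"), dispatch("de"), dispatch("zz")) // 0 2 -1
    }
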
-
-func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
- binarySearch(len(cc), out,
- func(i int) ir.Node {
- return ir.NewBinaryExpr(base.Pos, ir.OLE, s.exprname, cc[i-1].hi)
- },
- func(i int, nif *ir.IfStmt) {
- c := &cc[i]
- nif.Cond = c.test(s.exprname)
- nif.Body = []ir.Node{c.jmp}
- },
- )
-}
-
-func (c *exprClause) test(exprname ir.Node) ir.Node {
- // Integer range.
- if c.hi != c.lo {
- low := ir.NewBinaryExpr(c.pos, ir.OGE, exprname, c.lo)
- high := ir.NewBinaryExpr(c.pos, ir.OLE, exprname, c.hi)
- return ir.NewLogicalExpr(c.pos, ir.OANDAND, low, high)
- }
-
- // Optimize "switch true { ...}" and "switch false { ... }".
- if ir.IsConst(exprname, constant.Bool) && !c.lo.Type().IsInterface() {
- if ir.BoolVal(exprname) {
- return c.lo
- } else {
- return ir.NewUnaryExpr(c.pos, ir.ONOT, c.lo)
- }
- }
-
- return ir.NewBinaryExpr(c.pos, ir.OEQ, exprname, c.lo)
-}
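
Together, the merge loop in flush and exprClause.test collapse a run of consecutive integer cases with a common target into a single range check. The two functions below are equivalent; the second is the shape emitted for the first:

    package main

    import "fmt"

    func viaSwitch(x int) bool {
        switch x {
        case 1, 2, 3, 4:
            return true
        }
        return false
    }

    // viaRange is the merged form: OGE && OLE on the run's endpoints.
    func viaRange(x int) bool { return 1 <= x && x <= 4 }

    func main() {
        for x := 0; x <= 5; x++ {
            fmt.Println(viaSwitch(x) == viaRange(x)) // always true
        }
    }
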
-
-func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool {
- // In theory, we could be more aggressive, allowing any
- // side-effect-free expressions in cases, but it's a bit
- // tricky because some of that information is unavailable due
- // to the introduction of temporaries during order.
- // Restricting to constants is simple and probably powerful
- // enough.
-
- for _, ncase := range sw.Cases {
- ncase := ncase.(*ir.CaseStmt)
- for _, v := range ncase.List {
- if v.Op() != ir.OLITERAL {
- return false
- }
- }
- }
- return true
-}
-
-// endsInFallthrough reports whether stmts ends with a "fallthrough" statement.
-func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) {
- // Search backwards for the index of the fallthrough
- // statement. Do not assume it'll be in the last
- // position, since in some cases (e.g. when the statement
- // list contains autotmp_ variables), one or more OVARKILL
- // nodes will be at the end of the list.
-
- i := len(stmts) - 1
- for i >= 0 && stmts[i].Op() == ir.OVARKILL {
- i--
- }
- if i < 0 {
- return false, src.NoXPos
- }
- return stmts[i].Op() == ir.OFALL, stmts[i].Pos()
-}
-
-// walkTypeSwitch generates an AST that implements sw, where sw is a
-// type switch.
-func walkTypeSwitch(sw *ir.SwitchStmt) {
- var s typeSwitch
- s.facename = sw.Tag.(*ir.TypeSwitchGuard).X
- sw.Tag = nil
-
- s.facename = walkexpr(s.facename, sw.PtrInit())
- s.facename = copyexpr(s.facename, s.facename.Type(), &sw.Compiled)
- s.okname = typecheck.Temp(types.Types[types.TBOOL])
-
- // Get interface descriptor word.
- // For empty interfaces this will be the type.
- // For non-empty interfaces this will be the itab.
- itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s.facename)
-
- // For empty interfaces, do:
- // if e._type == nil {
- // do nil case if it exists, otherwise default
- // }
- // h := e._type.hash
- // Use a similar strategy for non-empty interfaces.
- ifNil := ir.NewIfStmt(base.Pos, nil, nil, nil)
- ifNil.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, itab, typecheck.NodNil())
- base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check.
- ifNil.Cond = typecheck.Expr(ifNil.Cond)
- ifNil.Cond = typecheck.DefaultLit(ifNil.Cond, nil)
- // ifNil.Nbody assigned at end.
- sw.Compiled.Append(ifNil)
-
- // Load hash from type or itab.
- dotHash := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil)
- dotHash.SetType(types.Types[types.TUINT32])
- dotHash.SetTypecheck(1)
- if s.facename.Type().IsEmptyInterface() {
- dotHash.Offset = int64(2 * types.PtrSize) // offset of hash in runtime._type
- } else {
- dotHash.Offset = int64(2 * types.PtrSize) // offset of hash in runtime.itab
- }
- dotHash.SetBounded(true) // guaranteed not to fault
- s.hashname = copyexpr(dotHash, dotHash.Type(), &sw.Compiled)
-
- br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
- var defaultGoto, nilGoto ir.Node
- var body ir.Nodes
- for _, ncase := range sw.Cases {
- ncase := ncase.(*ir.CaseStmt)
- var caseVar ir.Node
- if len(ncase.Vars) != 0 {
- caseVar = ncase.Vars[0]
- }
-
- // For single-type cases with an interface type,
- // we initialize the case variable as part of the type assertion.
- // In other cases, we initialize it in the body.
- var singleType *types.Type
- if len(ncase.List) == 1 && ncase.List[0].Op() == ir.OTYPE {
- singleType = ncase.List[0].Type()
- }
- caseVarInitialized := false
-
- label := typecheck.AutoLabel(".s")
- jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label)
-
- if len(ncase.List) == 0 { // default:
- if defaultGoto != nil {
- base.Fatalf("duplicate default case not detected during typechecking")
- }
- defaultGoto = jmp
- }
-
- for _, n1 := range ncase.List {
- if ir.IsNil(n1) { // case nil:
- if nilGoto != nil {
- base.Fatalf("duplicate nil case not detected during typechecking")
- }
- nilGoto = jmp
- continue
- }
-
- if singleType != nil && singleType.IsInterface() {
- s.Add(ncase.Pos(), n1.Type(), caseVar, jmp)
- caseVarInitialized = true
- } else {
- s.Add(ncase.Pos(), n1.Type(), nil, jmp)
- }
- }
-
- body.Append(ir.NewLabelStmt(ncase.Pos(), label))
- if caseVar != nil && !caseVarInitialized {
- val := s.facename
- if singleType != nil {
- // We have a single concrete type. Extract the data.
- if singleType.IsInterface() {
- base.Fatalf("singleType interface should have been handled in Add")
- }
- val = ifaceData(ncase.Pos(), s.facename, singleType)
- }
- l := []ir.Node{
- ir.NewDecl(ncase.Pos(), ir.ODCL, caseVar),
- ir.NewAssignStmt(ncase.Pos(), caseVar, val),
- }
- typecheck.Stmts(l)
- body.Append(l...)
- }
- body.Append(ncase.Body...)
- body.Append(br)
- }
- sw.Cases.Set(nil)
-
- if defaultGoto == nil {
- defaultGoto = br
- }
- if nilGoto == nil {
- nilGoto = defaultGoto
- }
- ifNil.Body = []ir.Node{nilGoto}
-
- s.Emit(&sw.Compiled)
- sw.Compiled.Append(defaultGoto)
- sw.Compiled.Append(body.Take()...)
-
- walkstmtlist(sw.Compiled)
-}
-
-// A typeSwitch walks a type switch.
-type typeSwitch struct {
- // Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
- facename ir.Node // value being type-switched on
- hashname ir.Node // type hash of the value being type-switched on
- okname ir.Node // boolean used for comma-ok type assertions
-
- done ir.Nodes
- clauses []typeClause
-}
-
-type typeClause struct {
- hash uint32
- body ir.Nodes
-}
-
-func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) {
- var body ir.Nodes
- if caseVar != nil {
- l := []ir.Node{
- ir.NewDecl(pos, ir.ODCL, caseVar),
- ir.NewAssignStmt(pos, caseVar, nil),
- }
- typecheck.Stmts(l)
- body.Append(l...)
- } else {
- caseVar = ir.BlankNode
- }
-
- // cv, ok = iface.(type)
- as := ir.NewAssignListStmt(pos, ir.OAS2, nil, nil)
- as.Lhs = []ir.Node{caseVar, s.okname} // cv, ok =
- dot := ir.NewTypeAssertExpr(pos, s.facename, nil)
- dot.SetType(typ) // iface.(type)
- as.Rhs = []ir.Node{dot}
- appendWalkStmt(&body, as)
-
- // if ok { goto label }
- nif := ir.NewIfStmt(pos, nil, nil, nil)
- nif.Cond = s.okname
- nif.Body = []ir.Node{jmp}
- body.Append(nif)
-
- if !typ.IsInterface() {
- s.clauses = append(s.clauses, typeClause{
- hash: types.TypeHash(typ),
- body: body,
- })
- return
- }
-
- s.flush()
- s.done.Append(body.Take()...)
-}
-
-func (s *typeSwitch) Emit(out *ir.Nodes) {
- s.flush()
- out.Append(s.done.Take()...)
-}
-
-func (s *typeSwitch) flush() {
- cc := s.clauses
- s.clauses = nil
- if len(cc) == 0 {
- return
- }
-
- sort.Slice(cc, func(i, j int) bool { return cc[i].hash < cc[j].hash })
-
- // Combine adjacent cases with the same hash.
- merged := cc[:1]
- for _, c := range cc[1:] {
- last := &merged[len(merged)-1]
- if last.hash == c.hash {
- last.body.Append(c.body.Take()...)
- } else {
- merged = append(merged, c)
- }
- }
- cc = merged
-
- binarySearch(len(cc), &s.done,
- func(i int) ir.Node {
- return ir.NewBinaryExpr(base.Pos, ir.OLE, s.hashname, ir.NewInt(int64(cc[i-1].hash)))
- },
- func(i int, nif *ir.IfStmt) {
- // TODO(mdempsky): Omit hash equality check if
- // there's only one type.
- c := cc[i]
- nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashname, ir.NewInt(int64(c.hash)))
- nif.Body.Append(c.body.Take()...)
- },
- )
-}
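
Putting typeSwitch.Add and flush together: the emitted dispatch loads the type hash once, searches the sorted case hashes, and still performs a comma-ok assertion at each leaf, since hash equality alone cannot rule out collisions. A run-time analogue with made-up hash values (100 and 200 stand in for types.TypeHash results):

    package main

    import "fmt"

    func dispatch(i interface{}, hash uint32) string {
        const hInt, hStr = 100, 200 // assumed, sorted case hashes
        if hash == hInt {
            if _, ok := i.(int); ok { // the leaf still asserts
                return "int"
            }
        } else if hash == hStr {
            if _, ok := i.(string); ok {
                return "string"
            }
        }
        return "default"
    }

    func main() {
        fmt.Println(dispatch(42, 100), dispatch("x", 200), dispatch(1.5, 7))
    }
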
-
-// binarySearch constructs a binary search tree for handling n cases,
-// and appends it to out. It's used for efficiently implementing
-// switch statements.
-//
-// less(i) should return a boolean expression. If it evaluates true,
-// then cases before i will be tested; otherwise, cases i and later.
-//
- // leaf(i, nif) should set up nif (an OIF node) to test case i. In
- // particular, it should set nif.Cond and nif.Body.
-func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif *ir.IfStmt)) {
- const binarySearchMin = 4 // minimum number of cases for binary search
-
- var do func(lo, hi int, out *ir.Nodes)
- do = func(lo, hi int, out *ir.Nodes) {
- n := hi - lo
- if n < binarySearchMin {
- for i := lo; i < hi; i++ {
- nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
- leaf(i, nif)
- base.Pos = base.Pos.WithNotStmt()
- nif.Cond = typecheck.Expr(nif.Cond)
- nif.Cond = typecheck.DefaultLit(nif.Cond, nil)
- out.Append(nif)
- out = &nif.Else
- }
- return
- }
-
- half := lo + n/2
- nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
- nif.Cond = less(half)
- base.Pos = base.Pos.WithNotStmt()
- nif.Cond = typecheck.Expr(nif.Cond)
- nif.Cond = typecheck.DefaultLit(nif.Cond, nil)
- do(lo, half, &nif.Body)
- do(half, hi, &nif.Else)
- out.Append(nif)
- }
-
- do(0, n, out)
-}
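
A run-time analogue of the tree binarySearch emits, for intuition: below binarySearchMin the chained else-ifs amount to a linear scan, and above it the less(i) test splits the cases at the midpoint. This version searches data instead of generating code:

    package main

    import "fmt"

    const binarySearchMin = 4

    func search(cases []int, x int) int {
        var do func(lo, hi int) int
        do = func(lo, hi int) int {
            if hi-lo < binarySearchMin {
                for i := lo; i < hi; i++ { // like the chained else-ifs
                    if x == cases[i] {
                        return i
                    }
                }
                return -1
            }
            half := lo + (hi-lo)/2
            if x <= cases[half-1] { // the less(i) test
                return do(lo, half)
            }
            return do(half, hi)
        }
        return do(0, len(cases))
    }

    func main() {
        cc := []int{3, 7, 9, 12, 15}
        fmt.Println(search(cc, 9), search(cc, 4)) // 2 -1
    }
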
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
deleted file mode 100644
index f86dbba2c9..0000000000
--- a/src/cmd/compile/internal/gc/walk.go
+++ /dev/null
@@ -1,4039 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/escape"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/reflectdata"
- "cmd/compile/internal/ssagen"
- "cmd/compile/internal/staticdata"
- "cmd/compile/internal/typecheck"
- "cmd/compile/internal/types"
- "cmd/internal/obj"
- "cmd/internal/objabi"
- "cmd/internal/src"
- "cmd/internal/sys"
- "encoding/binary"
- "errors"
- "fmt"
- "go/constant"
- "go/token"
- "strings"
-)
-
- // This constant is known to the runtime.
-const tmpstringbufsize = 32
-const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
-
-func walk(fn *ir.Func) {
- ir.CurFunc = fn
- errorsBefore := base.Errors()
- order(fn)
- if base.Errors() > errorsBefore {
- return
- }
-
- if base.Flag.W != 0 {
- s := fmt.Sprintf("\nbefore walk %v", ir.CurFunc.Sym())
- ir.DumpList(s, ir.CurFunc.Body)
- }
-
- lno := base.Pos
-
- // Final typecheck for any unused variables.
- for i, ln := range fn.Dcl {
- if ln.Op() == ir.ONAME && (ln.Class_ == ir.PAUTO || ln.Class_ == ir.PAUTOHEAP) {
- ln = typecheck.AssignExpr(ln).(*ir.Name)
- fn.Dcl[i] = ln
- }
- }
-
- // Propagate the used flag for typeswitch variables up to the NONAME in its definition.
- for _, ln := range fn.Dcl {
- if ln.Op() == ir.ONAME && (ln.Class_ == ir.PAUTO || ln.Class_ == ir.PAUTOHEAP) && ln.Defn != nil && ln.Defn.Op() == ir.OTYPESW && ln.Used() {
- ln.Defn.(*ir.TypeSwitchGuard).Used = true
- }
- }
-
- for _, ln := range fn.Dcl {
- if ln.Op() != ir.ONAME || (ln.Class_ != ir.PAUTO && ln.Class_ != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Used() {
- continue
- }
- if defn, ok := ln.Defn.(*ir.TypeSwitchGuard); ok {
- if defn.Used {
- continue
- }
- base.ErrorfAt(defn.Tag.Pos(), "%v declared but not used", ln.Sym())
- defn.Used = true // suppress repeats
- } else {
- base.ErrorfAt(ln.Pos(), "%v declared but not used", ln.Sym())
- }
- }
-
- base.Pos = lno
- if base.Errors() > errorsBefore {
- return
- }
- walkstmtlist(ir.CurFunc.Body)
- if base.Flag.W != 0 {
- s := fmt.Sprintf("after walk %v", ir.CurFunc.Sym())
- ir.DumpList(s, ir.CurFunc.Body)
- }
-
- zeroResults()
- heapmoves()
- if base.Flag.W != 0 && len(ir.CurFunc.Enter) > 0 {
- s := fmt.Sprintf("enter %v", ir.CurFunc.Sym())
- ir.DumpList(s, ir.CurFunc.Enter)
- }
-
- if base.Flag.Cfg.Instrumenting {
- instrument(fn)
- }
-}
-
-func walkstmtlist(s []ir.Node) {
- for i := range s {
- s[i] = walkstmt(s[i])
- }
-}
-
-func paramoutheap(fn *ir.Func) bool {
- for _, ln := range fn.Dcl {
- switch ln.Class_ {
- case ir.PPARAMOUT:
- if ir.IsParamStackCopy(ln) || ln.Addrtaken() {
- return true
- }
-
- case ir.PAUTO:
- // stop early - parameters are over
- return false
- }
- }
-
- return false
-}
-
-// The result of walkstmt MUST be assigned back to n, e.g.
-// n.Left = walkstmt(n.Left)
-func walkstmt(n ir.Node) ir.Node {
- if n == nil {
- return n
- }
-
- ir.SetPos(n)
-
- walkstmtlist(n.Init())
-
- switch n.Op() {
- default:
- if n.Op() == ir.ONAME {
- n := n.(*ir.Name)
- base.Errorf("%v is not a top level statement", n.Sym())
- } else {
- base.Errorf("%v is not a top level statement", n.Op())
- }
- ir.Dump("nottop", n)
- return n
-
- case ir.OAS,
- ir.OASOP,
- ir.OAS2,
- ir.OAS2DOTTYPE,
- ir.OAS2RECV,
- ir.OAS2FUNC,
- ir.OAS2MAPR,
- ir.OCLOSE,
- ir.OCOPY,
- ir.OCALLMETH,
- ir.OCALLINTER,
- ir.OCALL,
- ir.OCALLFUNC,
- ir.ODELETE,
- ir.OSEND,
- ir.OPRINT,
- ir.OPRINTN,
- ir.OPANIC,
- ir.ORECOVER,
- ir.OGETG:
- if n.Typecheck() == 0 {
- base.Fatalf("missing typecheck: %+v", n)
- }
- init := n.Init()
- n.PtrInit().Set(nil)
- n = walkexpr(n, &init)
- if n.Op() == ir.ONAME {
- // The walk of copy rewrote it to a statement list and a temp for the length.
- // Throw away the temp to avoid plain values as statements.
- n = ir.NewBlockStmt(n.Pos(), init)
- init.Set(nil)
- }
- if len(init) > 0 {
- switch n.Op() {
- case ir.OAS, ir.OAS2, ir.OBLOCK:
- n.PtrInit().Prepend(init...)
-
- default:
- init.Append(n)
- n = ir.NewBlockStmt(n.Pos(), init)
- }
- }
- return n
-
- // special case for a receive where we throw away
- // the value received.
- case ir.ORECV:
- n := n.(*ir.UnaryExpr)
- if n.Typecheck() == 0 {
- base.Fatalf("missing typecheck: %+v", n)
- }
- init := n.Init()
- n.PtrInit().Set(nil)
-
- n.X = walkexpr(n.X, &init)
- call := walkexpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, typecheck.NodNil()), &init)
- return ir.InitExpr(init, call)
-
- case ir.OBREAK,
- ir.OCONTINUE,
- ir.OFALL,
- ir.OGOTO,
- ir.OLABEL,
- ir.ODCLCONST,
- ir.ODCLTYPE,
- ir.OCHECKNIL,
- ir.OVARDEF,
- ir.OVARKILL,
- ir.OVARLIVE:
- return n
-
- case ir.ODCL:
- n := n.(*ir.Decl)
- v := n.X.(*ir.Name)
- if v.Class_ == ir.PAUTOHEAP {
- if base.Flag.CompilingRuntime {
- base.Errorf("%v escapes to heap, not allowed in runtime", v)
- }
- nn := ir.NewAssignStmt(base.Pos, v.Name().Heapaddr, callnew(v.Type()))
- nn.Def = true
- return walkstmt(typecheck.Stmt(nn))
- }
- return n
-
- case ir.OBLOCK:
- n := n.(*ir.BlockStmt)
- walkstmtlist(n.List)
- return n
-
- case ir.OCASE:
- base.Errorf("case statement out of place")
- panic("unreachable")
-
- case ir.ODEFER:
- n := n.(*ir.GoDeferStmt)
- ir.CurFunc.SetHasDefer(true)
- ir.CurFunc.NumDefers++
- if ir.CurFunc.NumDefers > maxOpenDefers {
- // Don't allow open-coded defers if there are more than
- // 8 defers in the function, since we use a single
- // byte to record active defers.
- ir.CurFunc.SetOpenCodedDeferDisallowed(true)
- }
- if n.Esc() != ir.EscNever {
- // If n.Esc is not EscNever, then this defer occurs in a loop,
- // so open-coded defers cannot be used in this function.
- ir.CurFunc.SetOpenCodedDeferDisallowed(true)
- }
- fallthrough
- case ir.OGO:
- n := n.(*ir.GoDeferStmt)
- var init ir.Nodes
- switch call := n.Call; call.Op() {
- case ir.OPRINT, ir.OPRINTN:
- call := call.(*ir.CallExpr)
- n.Call = wrapCall(call, &init)
-
- case ir.ODELETE:
- call := call.(*ir.CallExpr)
- if mapfast(call.Args[0].Type()) == mapslow {
- n.Call = wrapCall(call, &init)
- } else {
- n.Call = walkexpr(call, &init)
- }
-
- case ir.OCOPY:
- call := call.(*ir.BinaryExpr)
- n.Call = copyany(call, &init, true)
-
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
- call := call.(*ir.CallExpr)
- if len(call.Body) > 0 {
- n.Call = wrapCall(call, &init)
- } else {
- n.Call = walkexpr(call, &init)
- }
-
- default:
- n.Call = walkexpr(call, &init)
- }
- if len(init) > 0 {
- init.Append(n)
- return ir.NewBlockStmt(n.Pos(), init)
- }
- return n
-
- case ir.OFOR, ir.OFORUNTIL:
- n := n.(*ir.ForStmt)
- if n.Cond != nil {
- walkstmtlist(n.Cond.Init())
- init := n.Cond.Init()
- n.Cond.PtrInit().Set(nil)
- n.Cond = walkexpr(n.Cond, &init)
- n.Cond = ir.InitExpr(init, n.Cond)
- }
-
- n.Post = walkstmt(n.Post)
- if n.Op() == ir.OFORUNTIL {
- walkstmtlist(n.Late)
- }
- walkstmtlist(n.Body)
- return n
-
- case ir.OIF:
- n := n.(*ir.IfStmt)
- n.Cond = walkexpr(n.Cond, n.PtrInit())
- walkstmtlist(n.Body)
- walkstmtlist(n.Else)
- return n
-
- case ir.ORETURN:
- n := n.(*ir.ReturnStmt)
- ir.CurFunc.NumReturns++
- if len(n.Results) == 0 {
- return n
- }
- if (ir.HasNamedResults(ir.CurFunc) && len(n.Results) > 1) || paramoutheap(ir.CurFunc) {
- // assign to the function out parameters,
- // so that ascompatee can fix up conflicts
- var rl []ir.Node
-
- for _, ln := range ir.CurFunc.Dcl {
- cl := ln.Class_
- if cl == ir.PAUTO || cl == ir.PAUTOHEAP {
- break
- }
- if cl == ir.PPARAMOUT {
- var ln ir.Node = ln
- if ir.IsParamStackCopy(ln) {
- ln = walkexpr(typecheck.Expr(ir.NewStarExpr(base.Pos, ln.Name().Heapaddr)), nil)
- }
- rl = append(rl, ln)
- }
- }
-
- if got, want := len(n.Results), len(rl); got != want {
- // order should have rewritten multi-value function calls
- // with explicit OAS2FUNC nodes.
- base.Fatalf("expected %v return arguments, have %v", want, got)
- }
-
- // move function calls out, to make ascompatee's job easier.
- walkexprlistsafe(n.Results, n.PtrInit())
-
- n.Results.Set(ascompatee(n.Op(), rl, n.Results, n.PtrInit()))
- return n
- }
- walkexprlist(n.Results, n.PtrInit())
-
- // For each return parameter (lhs), assign the corresponding result (rhs).
- lhs := ir.CurFunc.Type().Results()
- rhs := n.Results
- res := make([]ir.Node, lhs.NumFields())
- for i, nl := range lhs.FieldSlice() {
- nname := ir.AsNode(nl.Nname)
- if ir.IsParamHeapCopy(nname) {
- nname = nname.Name().Stackcopy
- }
- a := ir.NewAssignStmt(base.Pos, nname, rhs[i])
- res[i] = convas(a, n.PtrInit())
- }
- n.Results.Set(res)
- return n
-
- case ir.ORETJMP:
- n := n.(*ir.BranchStmt)
- return n
-
- case ir.OINLMARK:
- n := n.(*ir.InlineMarkStmt)
- return n
-
- case ir.OSELECT:
- n := n.(*ir.SelectStmt)
- walkselect(n)
- return n
-
- case ir.OSWITCH:
- n := n.(*ir.SwitchStmt)
- walkswitch(n)
- return n
-
- case ir.ORANGE:
- n := n.(*ir.RangeStmt)
- return walkrange(n)
- }
-
- // No return! Each case must return (or panic),
- // to avoid confusion about what gets returned
- // in the presence of type assertions.
-}
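
The ODEFER bookkeeping above has a user-visible performance edge: open-coded defers survive only while a function has at most 8 of them (maxOpenDefers) and none in a loop. Both disqualifying patterns, for reference:

    package main

    import "fmt"

    func inLoop() {
        for i := 0; i < 3; i++ {
            defer fmt.Println("loop defer", i) // loop: open-coding disallowed
        }
    }

    func tooMany() {
        defer fmt.Println(1)
        defer fmt.Println(2)
        defer fmt.Println(3)
        defer fmt.Println(4)
        defer fmt.Println(5)
        defer fmt.Println(6)
        defer fmt.Println(7)
        defer fmt.Println(8)
        defer fmt.Println(9) // ninth defer: past the single-byte bitmap
    }

    func main() {
        inLoop()
        tooMany()
    }
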
-
- // walkexprlist walks the whole tree of the body of each
- // expression or simple statement: the types of expressions are
- // calculated, compile-time constants are evaluated, and complex
- // side effects like statements are appended to init.
-func walkexprlist(s []ir.Node, init *ir.Nodes) {
- for i := range s {
- s[i] = walkexpr(s[i], init)
- }
-}
-
-func walkexprlistsafe(s []ir.Node, init *ir.Nodes) {
- for i, n := range s {
- s[i] = safeexpr(n, init)
- s[i] = walkexpr(s[i], init)
- }
-}
-
-func walkexprlistcheap(s []ir.Node, init *ir.Nodes) {
- for i, n := range s {
- s[i] = cheapexpr(n, init)
- s[i] = walkexpr(s[i], init)
- }
-}
-
-// convFuncName builds the runtime function name for interface conversion.
-// It also reports whether the function expects the data by address.
-// Not all names are possible. For example, we never generate convE2E or convE2I.
-func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
- tkind := to.Tie()
- switch from.Tie() {
- case 'I':
- if tkind == 'I' {
- return "convI2I", false
- }
- case 'T':
- switch {
- case from.Size() == 2 && from.Align == 2:
- return "convT16", false
- case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
- return "convT32", false
- case from.Size() == 8 && from.Align == types.Types[types.TUINT64].Align && !from.HasPointers():
- return "convT64", false
- }
- if sc := from.SoleComponent(); sc != nil {
- switch {
- case sc.IsString():
- return "convTstring", false
- case sc.IsSlice():
- return "convTslice", false
- }
- }
-
- switch tkind {
- case 'E':
- if !from.HasPointers() {
- return "convT2Enoptr", true
- }
- return "convT2E", true
- case 'I':
- if !from.HasPointers() {
- return "convT2Inoptr", true
- }
- return "convT2I", true
- }
- }
- base.Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
- panic("unreachable")
-}
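
Reading convFuncName forward, each value-to-interface conversion site picks one runtime entry point. A few concrete instances under the rules above (which case actually fires can still be preempted by the allocation-free paths in walkexpr below, and the exact set of specializations varies across Go releases):

    package main

    var sink interface{}

    type myErr struct{ s string }

    func (myErr) Error() string { return "boom" }

    func main() {
        sink = uint16(1)  // convT16: 2 bytes, 2-byte aligned
        sink = float32(1) // convT32: 4 bytes, 4-byte aligned, no pointers
        sink = "hi"       // convTstring: sole component is a string
        sink = []int{1}   // convTslice: sole component is a slice
        sink = [4]int64{} // convT2Enoptr: no pointers, passed by address
        var err error = myErr{} // convT2I: has pointers, itab word filled in
        _ = err
    }
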
-
-// The result of walkexpr MUST be assigned back to n, e.g.
-// n.Left = walkexpr(n.Left, init)
-func walkexpr(n ir.Node, init *ir.Nodes) ir.Node {
- if n == nil {
- return n
- }
-
- // Eagerly compute the sizes of all expressions for the back end.
- if n.Type() != nil && !n.Type().WidthCalculated() {
- switch n.Type().Kind() {
- case types.TBLANK, types.TNIL, types.TIDEAL:
- default:
- types.CheckSize(n.Type())
- }
- }
-
- if init == n.PtrInit() {
- // not okay to use n.Init() when walking n,
- // because we might replace n with some other node
- // and would lose the init list.
- base.Fatalf("walkexpr init == &n->ninit")
- }
-
- if len(n.Init()) != 0 {
- walkstmtlist(n.Init())
- init.Append(n.PtrInit().Take()...)
- }
-
- lno := ir.SetPos(n)
-
- if base.Flag.LowerW > 1 {
- ir.Dump("before walk expr", n)
- }
-
- if n.Typecheck() != 1 {
- base.Fatalf("missed typecheck: %+v", n)
- }
-
- if n.Type().IsUntyped() {
- base.Fatalf("expression has untyped type: %+v", n)
- }
-
- if n.Op() == ir.ONAME && n.(*ir.Name).Class_ == ir.PAUTOHEAP {
- n := n.(*ir.Name)
- nn := ir.NewStarExpr(base.Pos, n.Name().Heapaddr)
- nn.X.MarkNonNil()
- return walkexpr(typecheck.Expr(nn), init)
- }
-
- n = walkexpr1(n, init)
-
- // Expressions that are constant at run time but not
- // considered const by the language spec are not turned into
- // constants until walk. For example, if n is y%1 == 0, the
- // walk of y%1 may have replaced it by 0.
- // Check whether n with its updated args is itself now a constant.
- t := n.Type()
- n = typecheck.EvalConst(n)
- if n.Type() != t {
- base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type())
- }
- if n.Op() == ir.OLITERAL {
- n = typecheck.Expr(n)
- // Emit string symbol now to avoid emitting
- // any concurrently during the backend.
- if v := n.Val(); v.Kind() == constant.String {
- _ = staticdata.StringSym(n.Pos(), constant.StringVal(v))
- }
- }
-
- updateHasCall(n)
-
- if base.Flag.LowerW != 0 && n != nil {
- ir.Dump("after walk expr", n)
- }
-
- base.Pos = lno
- return n
-}
-
-func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
- switch n.Op() {
- default:
- ir.Dump("walk", n)
- base.Fatalf("walkexpr: switch 1 unknown op %+v", n.Op())
- panic("unreachable")
-
- case ir.ONONAME, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR:
- return n
-
- case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
- // TODO(mdempsky): Just return n; see discussion on CL 38655.
- // Perhaps refactor to use Node.mayBeShared for these instead.
- // If these return early, make sure to still call
- // stringsym for constant strings.
- return n
-
- case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA:
- n := n.(*ir.UnaryExpr)
- n.X = walkexpr(n.X, init)
- return n
-
- case ir.ODOTMETH, ir.ODOTINTER:
- n := n.(*ir.SelectorExpr)
- n.X = walkexpr(n.X, init)
- return n
-
- case ir.OADDR:
- n := n.(*ir.AddrExpr)
- n.X = walkexpr(n.X, init)
- return n
-
- case ir.ODEREF:
- n := n.(*ir.StarExpr)
- n.X = walkexpr(n.X, init)
- return n
-
- case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH:
- n := n.(*ir.BinaryExpr)
- n.X = walkexpr(n.X, init)
- n.Y = walkexpr(n.Y, init)
- return n
-
- case ir.ODOT, ir.ODOTPTR:
- n := n.(*ir.SelectorExpr)
- usefield(n)
- n.X = walkexpr(n.X, init)
- return n
-
- case ir.ODOTTYPE, ir.ODOTTYPE2:
- n := n.(*ir.TypeAssertExpr)
- n.X = walkexpr(n.X, init)
- // Set up interface type addresses for back end.
- n.Ntype = reflectdata.TypePtr(n.Type())
- if n.Op() == ir.ODOTTYPE {
- n.Ntype.(*ir.AddrExpr).Alloc = reflectdata.TypePtr(n.X.Type())
- }
- if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() {
- n.Itab = []ir.Node{reflectdata.ITabAddr(n.Type(), n.X.Type())}
- }
- return n
-
- case ir.OLEN, ir.OCAP:
- n := n.(*ir.UnaryExpr)
- if isRuneCount(n) {
- // Replace len([]rune(string)) with runtime.countrunes(string).
- return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING]))
- }
-
- n.X = walkexpr(n.X, init)
-
- // replace len(*[10]int) with 10.
- // delayed until now to preserve side effects.
- t := n.X.Type()
-
- if t.IsPtr() {
- t = t.Elem()
- }
- if t.IsArray() {
- safeexpr(n.X, init)
- con := typecheck.OrigInt(n, t.NumElem())
- con.SetTypecheck(1)
- return con
- }
- return n
-
- case ir.OCOMPLEX:
- n := n.(*ir.BinaryExpr)
- n.X = walkexpr(n.X, init)
- n.Y = walkexpr(n.Y, init)
- return n
-
- case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
- n := n.(*ir.BinaryExpr)
- return walkcompare(n, init)
-
- case ir.OANDAND, ir.OOROR:
- n := n.(*ir.LogicalExpr)
- n.X = walkexpr(n.X, init)
-
- // cannot put side effects from n.Y on init,
- // because they cannot run before n.X is checked.
- // Save elsewhere and store on the eventual n.Y.
- var ll ir.Nodes
-
- n.Y = walkexpr(n.Y, &ll)
- n.Y = ir.InitExpr(ll, n.Y)
- return n
-
- case ir.OPRINT, ir.OPRINTN:
- return walkprint(n.(*ir.CallExpr), init)
-
- case ir.OPANIC:
- n := n.(*ir.UnaryExpr)
- return mkcall("gopanic", nil, init, n.X)
-
- case ir.ORECOVER:
- n := n.(*ir.CallExpr)
- return mkcall("gorecover", n.Type(), init, typecheck.NodAddr(ir.RegFP))
-
- case ir.OCLOSUREREAD, ir.OCFUNC:
- return n
-
- case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH:
- n := n.(*ir.CallExpr)
- if n.Op() == ir.OCALLINTER {
- usemethod(n)
- markUsedIfaceMethod(n)
- }
-
- if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE {
- // Transform direct call of a closure to call of a normal function.
- // transformclosure already did all preparation work.
-
- // Prepend captured variables to argument list.
- clo := n.X.(*ir.ClosureExpr)
- n.Args.Prepend(clo.Func.ClosureEnter...)
- clo.Func.ClosureEnter.Set(nil)
-
- // Replace OCLOSURE with ONAME/PFUNC.
- n.X = clo.Func.Nname
-
- // Update type of OCALLFUNC node.
- // Output arguments haven't changed, but their offsets could have.
- if n.X.Type().NumResults() == 1 {
- n.SetType(n.X.Type().Results().Field(0).Type)
- } else {
- n.SetType(n.X.Type().Results())
- }
- }
-
- walkCall(n, init)
- return n
-
- case ir.OAS, ir.OASOP:
- init.Append(n.PtrInit().Take()...)
-
- var left, right ir.Node
- switch n.Op() {
- case ir.OAS:
- n := n.(*ir.AssignStmt)
- left, right = n.X, n.Y
- case ir.OASOP:
- n := n.(*ir.AssignOpStmt)
- left, right = n.X, n.Y
- }
-
- // Recognize m[k] = append(m[k], ...) so we can reuse
- // the mapassign call.
- var mapAppend *ir.CallExpr
- if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND {
- left := left.(*ir.IndexExpr)
- mapAppend = right.(*ir.CallExpr)
- if !ir.SameSafeExpr(left, mapAppend.Args[0]) {
- base.Fatalf("not same expressions: %v != %v", left, mapAppend.Args[0])
- }
- }
-
- left = walkexpr(left, init)
- left = safeexpr(left, init)
- if mapAppend != nil {
- mapAppend.Args[0] = left
- }
-
- if n.Op() == ir.OASOP {
- // Rewrite x op= y into x = x op y.
- n = ir.NewAssignStmt(base.Pos, left, typecheck.Expr(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).AsOp, left, right)))
- } else {
- n.(*ir.AssignStmt).X = left
- }
- as := n.(*ir.AssignStmt)
-
- if oaslit(as, init) {
- return ir.NewBlockStmt(as.Pos(), nil)
- }
-
- if as.Y == nil {
- // TODO(austin): Check all "implicit zeroing"
- return as
- }
-
- if !base.Flag.Cfg.Instrumenting && ir.IsZero(as.Y) {
- return as
- }
-
- switch as.Y.Op() {
- default:
- as.Y = walkexpr(as.Y, init)
-
- case ir.ORECV:
- // x = <-c; as.Left is x, as.Right.Left is c.
- // order.stmt made sure x is addressable.
- recv := as.Y.(*ir.UnaryExpr)
- recv.X = walkexpr(recv.X, init)
-
- n1 := typecheck.NodAddr(as.X)
- r := recv.X // the channel
- return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1)
-
- case ir.OAPPEND:
- // x = append(...)
- call := as.Y.(*ir.CallExpr)
- if call.Type().Elem().NotInHeap() {
- base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", call.Type().Elem())
- }
- var r ir.Node
- switch {
- case isAppendOfMake(call):
- // x = append(y, make([]T, y)...)
- r = extendslice(call, init)
- case call.IsDDD:
- r = appendslice(call, init) // also works for append(slice, string).
- default:
- r = walkappend(call, init, as)
- }
- as.Y = r
- if r.Op() == ir.OAPPEND {
- // Left in place for back end.
- // Do not add a new write barrier.
- // Set up address of type for back end.
- r.(*ir.CallExpr).X = reflectdata.TypePtr(r.Type().Elem())
- return as
- }
- // Otherwise, lowered for race detector.
- // Treat as ordinary assignment.
- }
-
- if as.X != nil && as.Y != nil {
- return convas(as, init)
- }
- return as
-
- case ir.OAS2:
- n := n.(*ir.AssignListStmt)
- init.Append(n.PtrInit().Take()...)
- walkexprlistsafe(n.Lhs, init)
- walkexprlistsafe(n.Rhs, init)
- return ir.NewBlockStmt(src.NoXPos, ascompatee(ir.OAS, n.Lhs, n.Rhs, init))
-
- // a,b,... = fn()
- case ir.OAS2FUNC:
- n := n.(*ir.AssignListStmt)
- init.Append(n.PtrInit().Take()...)
-
- r := n.Rhs[0]
- walkexprlistsafe(n.Lhs, init)
- r = walkexpr(r, init)
-
- if ir.IsIntrinsicCall(r.(*ir.CallExpr)) {
- n.Rhs = []ir.Node{r}
- return n
- }
- init.Append(r)
-
- ll := ascompatet(n.Lhs, r.Type())
- return ir.NewBlockStmt(src.NoXPos, ll)
-
- // x, y = <-c
- // order.stmt made sure x is addressable or blank.
- case ir.OAS2RECV:
- n := n.(*ir.AssignListStmt)
- init.Append(n.PtrInit().Take()...)
-
- r := n.Rhs[0].(*ir.UnaryExpr) // recv
- walkexprlistsafe(n.Lhs, init)
- r.X = walkexpr(r.X, init)
- var n1 ir.Node
- if ir.IsBlank(n.Lhs[0]) {
- n1 = typecheck.NodNil()
- } else {
- n1 = typecheck.NodAddr(n.Lhs[0])
- }
- fn := chanfn("chanrecv2", 2, r.X.Type())
- ok := n.Lhs[1]
- call := mkcall1(fn, types.Types[types.TBOOL], init, r.X, n1)
- return typecheck.Stmt(ir.NewAssignStmt(base.Pos, ok, call))
-
- // a,b = m[i]
- case ir.OAS2MAPR:
- n := n.(*ir.AssignListStmt)
- init.Append(n.PtrInit().Take()...)
-
- r := n.Rhs[0].(*ir.IndexExpr)
- walkexprlistsafe(n.Lhs, init)
- r.X = walkexpr(r.X, init)
- r.Index = walkexpr(r.Index, init)
- t := r.X.Type()
-
- fast := mapfast(t)
- var key ir.Node
- if fast != mapslow {
- // fast versions take key by value
- key = r.Index
- } else {
- // standard version takes key by reference
- // order.expr made sure key is addressable.
- key = typecheck.NodAddr(r.Index)
- }
-
- // from:
- // a,b = m[i]
- // to:
- // var,b = mapaccess2*(t, m, i)
- // a = *var
- a := n.Lhs[0]
-
- var call *ir.CallExpr
- if w := t.Elem().Width; w <= zeroValSize {
- fn := mapfn(mapaccess2[fast], t)
- call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key)
- } else {
- fn := mapfn("mapaccess2_fat", t)
- z := reflectdata.ZeroAddr(w)
- call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key, z)
- }
-
- // mapaccess2* returns a typed bool, but due to spec changes,
- // the boolean result of i.(T) is now untyped, so we make it
- // the same type as the variable on the lhs.
- if ok := n.Lhs[1]; !ir.IsBlank(ok) && ok.Type().IsBoolean() {
- call.Type().Field(1).Type = ok.Type()
- }
- n.Rhs = []ir.Node{call}
- n.SetOp(ir.OAS2FUNC)
-
- // don't generate a = *var if a is _
- if ir.IsBlank(a) {
- return walkexpr(typecheck.Stmt(n), init)
- }
-
- var_ := typecheck.Temp(types.NewPtr(t.Elem()))
- var_.SetTypecheck(1)
- var_.MarkNonNil() // mapaccess always returns a non-nil pointer
-
- n.Lhs[0] = var_
- init.Append(walkexpr(n, init))
-
- as := ir.NewAssignStmt(base.Pos, a, ir.NewStarExpr(base.Pos, var_))
- return walkexpr(typecheck.Stmt(as), init)
-
- case ir.ODELETE:
- n := n.(*ir.CallExpr)
- init.Append(n.PtrInit().Take()...)
- map_ := n.Args[0]
- key := n.Args[1]
- map_ = walkexpr(map_, init)
- key = walkexpr(key, init)
-
- t := map_.Type()
- fast := mapfast(t)
- if fast == mapslow {
- // order.stmt made sure key is addressable.
- key = typecheck.NodAddr(key)
- }
- return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
-
- case ir.OAS2DOTTYPE:
- n := n.(*ir.AssignListStmt)
- walkexprlistsafe(n.Lhs, init)
- n.Rhs[0] = walkexpr(n.Rhs[0], init)
- return n
-
- case ir.OCONVIFACE:
- n := n.(*ir.ConvExpr)
- n.X = walkexpr(n.X, init)
-
- fromType := n.X.Type()
- toType := n.Type()
-
- if !fromType.IsInterface() && !ir.IsBlank(ir.CurFunc.Nname) { // skip unnamed functions (func _())
- markTypeUsedInInterface(fromType, ir.CurFunc.LSym)
- }
-
- // typeword generates the type word of the interface value.
- typeword := func() ir.Node {
- if toType.IsEmptyInterface() {
- return reflectdata.TypePtr(fromType)
- }
- return reflectdata.ITabAddr(fromType, toType)
- }
-
- // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
- if types.IsDirectIface(fromType) {
- l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), n.X)
- l.SetType(toType)
- l.SetTypecheck(n.Typecheck())
- return l
- }
-
- if ir.Names.Staticuint64s == nil {
- ir.Names.Staticuint64s = typecheck.NewName(ir.Pkgs.Runtime.Lookup("staticuint64s"))
- ir.Names.Staticuint64s.Class_ = ir.PEXTERN
- // The actual type is [256]uint64, but we use [256*8]uint8 so we can address
- // individual bytes.
- ir.Names.Staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8))
- ir.Names.Zerobase = typecheck.NewName(ir.Pkgs.Runtime.Lookup("zerobase"))
- ir.Names.Zerobase.Class_ = ir.PEXTERN
- ir.Names.Zerobase.SetType(types.Types[types.TUINTPTR])
- }
-
- // Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
- // by using an existing addressable value identical to n.X
- // or creating one on the stack.
- var value ir.Node
- switch {
- case fromType.Size() == 0:
- // n.X is zero-sized. Use zerobase.
- cheapexpr(n.X, init) // Evaluate n.X for side effects. See issue 19246.
- value = ir.Names.Zerobase
- case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
- // n.X is a bool/byte. Use staticuint64s[n.X * 8] on little-endian
- // and staticuint64s[n.X * 8 + 7] on big-endian.
- n.X = cheapexpr(n.X, init)
- // byteindex widens n.X so that the multiplication doesn't overflow.
- index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n.X), ir.NewInt(3))
- if ssagen.Arch.LinkArch.ByteOrder == binary.BigEndian {
- index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(7))
- }
- xe := ir.NewIndexExpr(base.Pos, ir.Names.Staticuint64s, index)
- xe.SetBounded(true)
- value = xe
- case n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class_ == ir.PEXTERN && n.X.(*ir.Name).Readonly():
- // n.X is a readonly global; use it directly.
- value = n.X
- case !fromType.IsInterface() && n.Esc() == ir.EscNone && fromType.Width <= 1024:
- // n.X does not escape. Use a stack temporary initialized to n.X.
- value = typecheck.Temp(fromType)
- init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n.X)))
- }
-
- if value != nil {
- // Value is identical to n.X.
- // Construct the interface directly: {type/itab, &value}.
- l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), typecheck.Expr(typecheck.NodAddr(value)))
- l.SetType(toType)
- l.SetTypecheck(n.Typecheck())
- return l
- }
-
- // Implement interface to empty interface conversion.
- // tmp = i.itab
- // if tmp != nil {
- // tmp = tmp.type
- // }
- // e = iface{tmp, i.data}
- if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() {
- // Evaluate the input interface.
- c := typecheck.Temp(fromType)
- init.Append(ir.NewAssignStmt(base.Pos, c, n.X))
-
- // Get the itab out of the interface.
- tmp := typecheck.Temp(types.NewPtr(types.Types[types.TUINT8]))
- init.Append(ir.NewAssignStmt(base.Pos, tmp, typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, c))))
-
- // Get the type out of the itab.
- nif := ir.NewIfStmt(base.Pos, typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.ONE, tmp, typecheck.NodNil())), nil, nil)
- nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, tmp, itabType(tmp))}
- init.Append(nif)
-
- // Build the result.
- e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8])))
- e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
- e.SetTypecheck(1)
- return e
- }
-
- fnname, needsaddr := convFuncName(fromType, toType)
-
- if !needsaddr && !fromType.IsInterface() {
- // Use a specialized conversion routine that only returns a data pointer.
- // ptr = convT2X(val)
- // e = iface{typ/tab, ptr}
- fn := typecheck.LookupRuntime(fnname)
- types.CalcSize(fromType)
- fn = typecheck.SubstArgTypes(fn, fromType)
- types.CalcSize(fn.Type())
- call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
- call.Args = []ir.Node{n.X}
- e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), safeexpr(walkexpr(typecheck.Expr(call), init), init))
- e.SetType(toType)
- e.SetTypecheck(1)
- return e
- }
-
- var tab ir.Node
- if fromType.IsInterface() {
- // convI2I
- tab = reflectdata.TypePtr(toType)
- } else {
- // convT2x
- tab = typeword()
- }
-
- v := n.X
- if needsaddr {
- // Types of large or unknown size are passed by reference.
- // order.expr arranged for n.X to be a temporary for all
- // the conversions it could see. Comparison of an interface
- // with a non-interface, especially in a switch on interface value
- // with non-interface cases, is not visible to order.stmt, so we
- // have to fall back on allocating a temp here.
- if !ir.IsAssignable(v) {
- v = copyexpr(v, v.Type(), init)
- }
- v = typecheck.NodAddr(v)
- }
-
- types.CalcSize(fromType)
- fn := typecheck.LookupRuntime(fnname)
- fn = typecheck.SubstArgTypes(fn, fromType, toType)
- types.CalcSize(fn.Type())
- call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
- call.Args = []ir.Node{tab, v}
- return walkexpr(typecheck.Expr(call), init)
-
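
In user terms, the special cases above mean none of these conversions itself allocates: the interface's data word points at runtime.zerobase, into the staticuint64s table, or at the value directly. A sketch (escape analysis must also cooperate for the stack-temporary case):

    package main

    import "fmt"

    func describe(i interface{}) { fmt.Printf("%T\n", i) }

    func main() {
        describe(struct{}{}) // zero-sized: data word is &runtime.zerobase
        describe(true)       // bool: data word points into staticuint64s
        describe(uint8(200)) // one-byte integer: likewise
        describe(&struct{ x int }{}) // pointer-shaped: the conversion just copies two words
    }
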
- case ir.OCONV, ir.OCONVNOP:
- n := n.(*ir.ConvExpr)
- n.X = walkexpr(n.X, init)
- if n.Op() == ir.OCONVNOP && n.Type() == n.X.Type() {
- return n.X
- }
- if n.Op() == ir.OCONVNOP && ir.ShouldCheckPtr(ir.CurFunc, 1) {
- if n.Type().IsPtr() && n.X.Type().IsUnsafePtr() { // unsafe.Pointer to *T
- return walkCheckPtrAlignment(n, init, nil)
- }
- if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() { // uintptr to unsafe.Pointer
- return walkCheckPtrArithmetic(n, init)
- }
- }
- param, result := rtconvfn(n.X.Type(), n.Type())
- if param == types.Txxx {
- return n
- }
- fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result]
- return typecheck.Conv(mkcall(fn, types.Types[result], init, typecheck.Conv(n.X, types.Types[param])), n.Type())
-
- case ir.ODIV, ir.OMOD:
- n := n.(*ir.BinaryExpr)
- n.X = walkexpr(n.X, init)
- n.Y = walkexpr(n.Y, init)
-
- // rewrite complex div into function call.
- et := n.X.Type().Kind()
-
- if types.IsComplex[et] && n.Op() == ir.ODIV {
- t := n.Type()
- call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, typecheck.Conv(n.X, types.Types[types.TCOMPLEX128]), typecheck.Conv(n.Y, types.Types[types.TCOMPLEX128]))
- return typecheck.Conv(call, t)
- }
-
- // Nothing to do for float divisions.
- if types.IsFloat[et] {
- return n
- }
-
- // rewrite 64-bit div and mod on 32-bit architectures.
- // TODO: Remove this code once we can introduce
- // runtime calls late in SSA processing.
- if types.RegSize < 8 && (et == types.TINT64 || et == types.TUINT64) {
- if n.Y.Op() == ir.OLITERAL {
- // Leave div/mod by constant powers of 2 or small 16-bit constants.
- // The SSA backend will handle those.
- switch et {
- case types.TINT64:
- c := ir.Int64Val(n.Y)
- if c < 0 {
- c = -c
- }
- if c != 0 && c&(c-1) == 0 {
- return n
- }
- case types.TUINT64:
- c := ir.Uint64Val(n.Y)
- if c < 1<<16 {
- return n
- }
- if c != 0 && c&(c-1) == 0 {
- return n
- }
- }
- }
- var fn string
- if et == types.TINT64 {
- fn = "int64"
- } else {
- fn = "uint64"
- }
- if n.Op() == ir.ODIV {
- fn += "div"
- } else {
- fn += "mod"
- }
- return mkcall(fn, n.Type(), init, typecheck.Conv(n.X, types.Types[et]), typecheck.Conv(n.Y, types.Types[et]))
- }
- return n
-
- case ir.OINDEX:
- n := n.(*ir.IndexExpr)
- n.X = walkexpr(n.X, init)
-
- // Save the original node for bounds-check elision;
- // if it was an ODIV/OMOD, walk might rewrite it.
- r := n.Index
-
- n.Index = walkexpr(n.Index, init)
-
- // If the range of the index type cannot exceed the static array bound,
- // disable the bounds check.
- if n.Bounded() {
- return n
- }
- t := n.X.Type()
- if t != nil && t.IsPtr() {
- t = t.Elem()
- }
- if t.IsArray() {
- n.SetBounded(bounded(r, t.NumElem()))
- if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) {
- base.Warn("index bounds check elided")
- }
- if ir.IsSmallIntConst(n.Index) && !n.Bounded() {
- base.Errorf("index out of bounds")
- }
- } else if ir.IsConst(n.X, constant.String) {
- n.SetBounded(bounded(r, int64(len(ir.StringVal(n.X)))))
- if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) {
- base.Warn("index bounds check elided")
- }
- if ir.IsSmallIntConst(n.Index) && !n.Bounded() {
- base.Errorf("index out of bounds")
- }
- }
-
- if ir.IsConst(n.Index, constant.Int) {
- if v := n.Index.Val(); constant.Sign(v) < 0 || ir.ConstOverflow(v, types.Types[types.TINT]) {
- base.Errorf("index out of bounds")
- }
- }
- return n
-
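
The bounded() analysis referenced above is what powers the classic masking idiom; building with -gcflags=-m prints the "index bounds check elided" warning emitted by the code. A minimal example:

    package main

    import "fmt"

    // get compiles without a bounds check: i&7 is provably in [0, 8).
    func get(a *[8]int, i int) int {
        return a[i&7]
    }

    func main() {
        fmt.Println(get(&[8]int{0, 1, 2, 3, 4, 5, 6, 7}, 11)) // 3
    }
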
- case ir.OINDEXMAP:
- // Replace m[k] with *map{access1,assign}(maptype, m, &k)
- n := n.(*ir.IndexExpr)
- n.X = walkexpr(n.X, init)
- n.Index = walkexpr(n.Index, init)
- map_ := n.X
- key := n.Index
- t := map_.Type()
- var call *ir.CallExpr
- if n.Assigned {
- // This m[k] expression is on the left-hand side of an assignment.
- fast := mapfast(t)
- if fast == mapslow {
- // standard version takes key by reference.
- // order.expr made sure key is addressable.
- key = typecheck.NodAddr(key)
- }
- call = mkcall1(mapfn(mapassign[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
- } else {
- // m[k] is not the target of an assignment.
- fast := mapfast(t)
- if fast == mapslow {
- // standard version takes key by reference.
- // order.expr made sure key is addressable.
- key = typecheck.NodAddr(key)
- }
-
- if w := t.Elem().Width; w <= zeroValSize {
- call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key)
- } else {
- z := reflectdata.ZeroAddr(w)
- call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key, z)
- }
- }
- call.SetType(types.NewPtr(t.Elem()))
- call.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
- star := ir.NewStarExpr(base.Pos, call)
- star.SetType(t.Elem())
- star.SetTypecheck(1)
- return star
-
- case ir.ORECV:
- base.Fatalf("walkexpr ORECV") // should see inside OAS only
- panic("unreachable")
-
- case ir.OSLICEHEADER:
- n := n.(*ir.SliceHeaderExpr)
- n.Ptr = walkexpr(n.Ptr, init)
- n.LenCap[0] = walkexpr(n.LenCap[0], init)
- n.LenCap[1] = walkexpr(n.LenCap[1], init)
- return n
-
- case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
- n := n.(*ir.SliceExpr)
-
- checkSlice := ir.ShouldCheckPtr(ir.CurFunc, 1) && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr()
- if checkSlice {
- conv := n.X.(*ir.ConvExpr)
- conv.X = walkexpr(conv.X, init)
- } else {
- n.X = walkexpr(n.X, init)
- }
-
- low, high, max := n.SliceBounds()
- low = walkexpr(low, init)
- if low != nil && ir.IsZero(low) {
- // Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k].
- low = nil
- }
- high = walkexpr(high, init)
- max = walkexpr(max, init)
- n.SetSliceBounds(low, high, max)
- if checkSlice {
- n.X = walkCheckPtrAlignment(n.X.(*ir.ConvExpr), init, max)
- }
-
- if n.Op().IsSlice3() {
- if max != nil && max.Op() == ir.OCAP && ir.SameSafeExpr(n.X, max.(*ir.UnaryExpr).X) {
- // Reduce x[i:j:cap(x)] to x[i:j].
- if n.Op() == ir.OSLICE3 {
- n.SetOp(ir.OSLICE)
- } else {
- n.SetOp(ir.OSLICEARR)
- }
- return reduceSlice(n)
- }
- return n
- }
- return reduceSlice(n)
-
- case ir.ONEW:
- n := n.(*ir.UnaryExpr)
- if n.Type().Elem().NotInHeap() {
- base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
- }
- if n.Esc() == ir.EscNone {
- if n.Type().Elem().Width >= ir.MaxImplicitStackVarSize {
- base.Fatalf("large ONEW with EscNone: %v", n)
- }
- r := typecheck.Temp(n.Type().Elem())
- init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, nil))) // zero temp
- return typecheck.Expr(typecheck.NodAddr(r))
- }
- return callnew(n.Type().Elem())
-
- case ir.OADDSTR:
- return addstr(n.(*ir.AddStringExpr), init)
-
- case ir.OAPPEND:
- // order should make sure we only see OAS(node, OAPPEND), which we handle above.
- base.Fatalf("append outside assignment")
- panic("unreachable")
-
- case ir.OCOPY:
- return copyany(n.(*ir.BinaryExpr), init, base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime)
-
- case ir.OCLOSE:
- // cannot use chanfn - closechan takes any, not chan any
- n := n.(*ir.UnaryExpr)
- fn := typecheck.LookupRuntime("closechan")
- fn = typecheck.SubstArgTypes(fn, n.X.Type())
- return mkcall1(fn, nil, init, n.X)
-
- case ir.OMAKECHAN:
- // When size fits into int, use makechan instead of
- // makechan64, which is faster and shorter on 32-bit platforms.
- n := n.(*ir.MakeExpr)
- size := n.Len
- fnname := "makechan64"
- argtype := types.Types[types.TINT64]
-
- // Type checking guarantees that TIDEAL size is positive and fits in an int.
- // The case of size overflow when converting TUINT or TUINTPTR to TINT
- // will be handled by the negative range checks in makechan during runtime.
- if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() {
- fnname = "makechan"
- argtype = types.Types[types.TINT]
- }
-
- return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(size, argtype))
-
- case ir.OMAKEMAP:
- n := n.(*ir.MakeExpr)
- t := n.Type()
- hmapType := reflectdata.MapType(t)
- hint := n.Len
-
- // var h *hmap
- var h ir.Node
- if n.Esc() == ir.EscNone {
- // Allocate hmap on stack.
-
- // var hv hmap
- hv := typecheck.Temp(hmapType)
- init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, hv, nil)))
- // h = &hv
- h = typecheck.NodAddr(hv)
-
- // Allocate one bucket pointed to by hmap.buckets on stack if hint
- // is not larger than BUCKETSIZE. In case hint is larger than
- // BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
-			// Maximum key and elem size is 128 bytes; larger objects
-			// are stored with an indirection. So the maximum bucket size is 2048+eps.
- if !ir.IsConst(hint, constant.Int) ||
- constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
-
- // In case hint is larger than BUCKETSIZE runtime.makemap
- // will allocate the buckets on the heap, see #20184
- //
- // if hint <= BUCKETSIZE {
- // var bv bmap
- // b = &bv
- // h.buckets = b
- // }
-
- nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(reflectdata.BUCKETSIZE)), nil, nil)
- nif.Likely = true
-
- // var bv bmap
- bv := typecheck.Temp(reflectdata.MapBucketType(t))
- nif.Body.Append(ir.NewAssignStmt(base.Pos, bv, nil))
-
- // b = &bv
- b := typecheck.NodAddr(bv)
-
- // h.buckets = b
- bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
- na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), b)
- nif.Body.Append(na)
- appendWalkStmt(init, nif)
- }
- }
-
- if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
-			// Handling make(map[any]any) and
-			// make(map[any]any, hint) where hint <= BUCKETSIZE
-			// specially allows for faster map initialization and
-			// improves binary size by using calls with fewer arguments.
- // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
- // and no buckets will be allocated by makemap. Therefore,
- // no buckets need to be allocated in this code path.
- if n.Esc() == ir.EscNone {
- // Only need to initialize h.hash0 since
- // hmap h has been allocated on the stack already.
- // h.hash0 = fastrand()
- rand := mkcall("fastrand", types.Types[types.TUINT32], init)
- hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
- return typecheck.ConvNop(h, t)
- }
- // Call runtime.makehmap to allocate an
- // hmap on the heap and initialize hmap's hash0 field.
- fn := typecheck.LookupRuntime("makemap_small")
- fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
- return mkcall1(fn, n.Type(), init)
- }
-
- if n.Esc() != ir.EscNone {
- h = typecheck.NodNil()
- }
- // Map initialization with a variable or large hint is
- // more complicated. We therefore generate a call to
- // runtime.makemap to initialize hmap and allocate the
- // map buckets.
-
- // When hint fits into int, use makemap instead of
-		// makemap64, which is faster and shorter on 32-bit platforms.
- fnname := "makemap64"
- argtype := types.Types[types.TINT64]
-
- // Type checking guarantees that TIDEAL hint is positive and fits in an int.
- // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
- // The case of hint overflow when converting TUINT or TUINTPTR to TINT
-		// will be handled by the negative range checks in makemap at runtime.
- if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
- fnname = "makemap"
- argtype = types.Types[types.TINT]
- }
-
- fn := typecheck.LookupRuntime(fnname)
- fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem())
- return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(hint, argtype), h)
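-
-		// Schematically, the three paths above are roughly:
-		//	m := make(map[K]V)       // non-escaping, hint <= BUCKETSIZE:
-		//	  var hv hmap
-		//	  hv.buckets = &bv // one stack-allocated bmap
-		//	  hv.hash0 = fastrand()
-		//	  m = &hv
-		//	m := make(map[K]V)       // escaping, constant hint <= BUCKETSIZE:
-		//	  m = runtime.makemap_small()
-		//	m := make(map[K]V, hint) // variable or large hint:
-		//	  m = runtime.makemap(maptype, int(hint), h) // h is &hv if non-escaping, else nil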
-
- case ir.OMAKESLICE:
- n := n.(*ir.MakeExpr)
- l := n.Len
- r := n.Cap
- if r == nil {
- r = safeexpr(l, init)
- l = r
- }
- t := n.Type()
- if t.Elem().NotInHeap() {
- base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
- }
- if n.Esc() == ir.EscNone {
- if why := escape.HeapAllocReason(n); why != "" {
- base.Fatalf("%v has EscNone, but %v", n, why)
- }
- // var arr [r]T
- // n = arr[:l]
- i := typecheck.IndexConst(r)
- if i < 0 {
- base.Fatalf("walkexpr: invalid index %v", r)
- }
-
- // cap is constrained to [0,2^31) or [0,2^63) depending on whether
-			// we're on a 32-bit or a 64-bit system. So it's safe to do:
- //
- // if uint64(len) > cap {
- // if len < 0 { panicmakeslicelen() }
- // panicmakeslicecap()
- // }
- nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(l, types.Types[types.TUINT64]), ir.NewInt(i)), nil, nil)
- niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, ir.NewInt(0)), nil, nil)
- niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
- nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init))
- init.Append(typecheck.Stmt(nif))
-
- t = types.NewArray(t.Elem(), i) // [r]T
- var_ := typecheck.Temp(t)
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) // zero temp
- r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_) // arr[:l]
- r.SetSliceBounds(nil, l, nil)
- // The conv is necessary in case n.Type is named.
- return walkexpr(typecheck.Expr(typecheck.Conv(r, n.Type())), init)
- }
-
- // n escapes; set up a call to makeslice.
- // When len and cap can fit into int, use makeslice instead of
-		// makeslice64, which is faster and shorter on 32-bit platforms.
-
- len, cap := l, r
-
- fnname := "makeslice64"
- argtype := types.Types[types.TINT64]
-
- // Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
- // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
-		// will be handled by the negative range checks in makeslice at runtime.
- if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) &&
- (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) {
- fnname = "makeslice"
- argtype = types.Types[types.TINT]
- }
-
- m := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
- m.SetType(t)
-
- fn := typecheck.LookupRuntime(fnname)
- m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
- m.Ptr.MarkNonNil()
- m.LenCap = []ir.Node{typecheck.Conv(len, types.Types[types.TINT]), typecheck.Conv(cap, types.Types[types.TINT])}
- return walkexpr(typecheck.Expr(m), init)
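-
-		// Schematically, a non-escaping make with constant cap,
-		//	s := make([]T, l, 10)
-		// becomes
-		//	if uint64(l) > 10 {
-		//		if l < 0 { panicmakeslicelen() }
-		//		panicmakeslicecap()
-		//	}
-		//	var arr [10]T
-		//	s = arr[:l]
-		// while an escaping make becomes a runtime.makeslice call whose
-		// result is wrapped in a slice header with len l and cap c.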
-
- case ir.OMAKESLICECOPY:
- n := n.(*ir.MakeExpr)
- if n.Esc() == ir.EscNone {
- base.Fatalf("OMAKESLICECOPY with EscNone: %v", n)
- }
-
- t := n.Type()
- if t.Elem().NotInHeap() {
- base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
- }
-
- length := typecheck.Conv(n.Len, types.Types[types.TINT])
- copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap)
- copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap)
-
- if !t.Elem().HasPointers() && n.Bounded() {
- // When len(to)==len(from) and elements have no pointers:
- // replace make+copy with runtime.mallocgc+runtime.memmove.
-
- // We do not check for overflow of len(to)*elem.Width here
-			// since len(from) is an already-checked slice capacity
-			// with the same elem.Width for the from slice.
- size := ir.NewBinaryExpr(base.Pos, ir.OMUL, typecheck.Conv(length, types.Types[types.TUINTPTR]), typecheck.Conv(ir.NewInt(t.Elem().Width), types.Types[types.TUINTPTR]))
-
- // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
- fn := typecheck.LookupRuntime("mallocgc")
- sh := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
- sh.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(false))
- sh.Ptr.MarkNonNil()
- sh.LenCap = []ir.Node{length, length}
- sh.SetType(t)
-
- s := typecheck.Temp(t)
- r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh))
- r = walkexpr(r, init)
- init.Append(r)
-
- // instantiate memmove(to *any, frm *any, size uintptr)
- fn = typecheck.LookupRuntime("memmove")
- fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
- ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size)
- init.Append(walkexpr(typecheck.Stmt(ncopy), init))
-
- return s
- }
- // Replace make+copy with runtime.makeslicecopy.
- // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
- fn := typecheck.LookupRuntime("makeslicecopy")
- s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
- s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
- s.Ptr.MarkNonNil()
- s.LenCap = []ir.Node{length, length}
- s.SetType(t)
- return walkexpr(typecheck.Expr(s), init)
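-
-		// Schematically, for the fused form s := make([]T, n); copy(s, from):
-		// when T is pointer-free and n == len(from),
-		//	p := runtime.mallocgc(n*sizeof(T), nil, false)
-		//	runtime.memmove(p, ptr(from), n*sizeof(T))
-		//	s = sliceheader{p, n, n}
-		// and otherwise
-		//	s = sliceheader{runtime.makeslicecopy(T-type, n, len(from), ptr(from)), n, n}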
-
- case ir.ORUNESTR:
- n := n.(*ir.ConvExpr)
- a := typecheck.NodNil()
- if n.Esc() == ir.EscNone {
- t := types.NewArray(types.Types[types.TUINT8], 4)
- a = typecheck.NodAddr(typecheck.Temp(t))
- }
- // intstring(*[4]byte, rune)
- return mkcall("intstring", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TINT64]))
-
- case ir.OBYTES2STR, ir.ORUNES2STR:
- n := n.(*ir.ConvExpr)
- a := typecheck.NodNil()
- if n.Esc() == ir.EscNone {
- // Create temporary buffer for string on stack.
- t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
- a = typecheck.NodAddr(typecheck.Temp(t))
- }
- if n.Op() == ir.ORUNES2STR {
- // slicerunetostring(*[32]byte, []rune) string
- return mkcall("slicerunetostring", n.Type(), init, a, n.X)
- }
- // slicebytetostring(*[32]byte, ptr *byte, n int) string
- n.X = cheapexpr(n.X, init)
- ptr, len := backingArrayPtrLen(n.X)
- return mkcall("slicebytetostring", n.Type(), init, a, ptr, len)
-
- case ir.OBYTES2STRTMP:
- n := n.(*ir.ConvExpr)
- n.X = walkexpr(n.X, init)
- if !base.Flag.Cfg.Instrumenting {
- // Let the backend handle OBYTES2STRTMP directly
- // to avoid a function call to slicebytetostringtmp.
- return n
- }
- // slicebytetostringtmp(ptr *byte, n int) string
- n.X = cheapexpr(n.X, init)
- ptr, len := backingArrayPtrLen(n.X)
- return mkcall("slicebytetostringtmp", n.Type(), init, ptr, len)
-
- case ir.OSTR2BYTES:
- n := n.(*ir.ConvExpr)
- s := n.X
- if ir.IsConst(s, constant.String) {
- sc := ir.StringVal(s)
-
- // Allocate a [n]byte of the right size.
- t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
- var a ir.Node
- if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) {
- a = typecheck.NodAddr(typecheck.Temp(t))
- } else {
- a = callnew(t)
- }
- p := typecheck.Temp(t.PtrTo()) // *[n]byte
- init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, p, a)))
-
- // Copy from the static string data to the [n]byte.
- if len(sc) > 0 {
- as := ir.NewAssignStmt(base.Pos, ir.NewStarExpr(base.Pos, p), ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), t.PtrTo())))
- appendWalkStmt(init, as)
- }
-
- // Slice the [n]byte to a []byte.
- slice := ir.NewSliceExpr(n.Pos(), ir.OSLICEARR, p)
- slice.SetType(n.Type())
- slice.SetTypecheck(1)
- return walkexpr(slice, init)
- }
-
- a := typecheck.NodNil()
- if n.Esc() == ir.EscNone {
- // Create temporary buffer for slice on stack.
- t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
- a = typecheck.NodAddr(typecheck.Temp(t))
- }
-		// stringtoslicebyte(*[32]byte, string) []byte
- return mkcall("stringtoslicebyte", n.Type(), init, a, typecheck.Conv(s, types.Types[types.TSTRING]))
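-
-		// Schematically, the constant-string path above turns
-		//	b := []byte("abc")
-		// into
-		//	var buf [3]byte // allocated with newobject instead if it escapes
-		//	p := &buf
-		//	*p = *(*[3]byte)(pointer to "abc"'s static data)
-		//	b = p[:]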
-
- case ir.OSTR2BYTESTMP:
- // []byte(string) conversion that creates a slice
- // referring to the actual string bytes.
- // This conversion is handled later by the backend and
- // is only for use by internal compiler optimizations
- // that know that the slice won't be mutated.
- // The only such case today is:
- // for i, c := range []byte(string)
- n := n.(*ir.ConvExpr)
- n.X = walkexpr(n.X, init)
- return n
-
- case ir.OSTR2RUNES:
- n := n.(*ir.ConvExpr)
- a := typecheck.NodNil()
- if n.Esc() == ir.EscNone {
- // Create temporary buffer for slice on stack.
- t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize)
- a = typecheck.NodAddr(typecheck.Temp(t))
- }
- // stringtoslicerune(*[32]rune, string) []rune
- return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING]))
-
- case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT:
- if isStaticCompositeLiteral(n) && !ssagen.TypeOK(n.Type()) {
- n := n.(*ir.CompLitExpr) // not OPTRLIT
- // n can be directly represented in the read-only data section.
- // Make direct reference to the static data. See issue 12841.
- vstat := readonlystaticname(n.Type())
- fixedlit(inInitFunction, initKindStatic, n, vstat, init)
- return typecheck.Expr(vstat)
- }
- var_ := typecheck.Temp(n.Type())
- anylit(n, var_, init)
- return var_
-
- case ir.OSEND:
- n := n.(*ir.SendStmt)
- n1 := n.Value
- n1 = typecheck.AssignConv(n1, n.Chan.Type().Elem(), "chan send")
- n1 = walkexpr(n1, init)
- n1 = typecheck.NodAddr(n1)
- return mkcall1(chanfn("chansend1", 2, n.Chan.Type()), nil, init, n.Chan, n1)
-
- case ir.OCLOSURE:
- return walkclosure(n.(*ir.ClosureExpr), init)
-
- case ir.OCALLPART:
- return walkpartialcall(n.(*ir.CallPartExpr), init)
- }
-
- // No return! Each case must return (or panic),
- // to avoid confusion about what gets returned
- // in the presence of type assertions.
-}
-
-// markTypeUsedInInterface marks that type t is converted to an interface.
-// This information is used in the linker in dead method elimination.
-func markTypeUsedInInterface(t *types.Type, from *obj.LSym) {
- tsym := reflectdata.TypeSym(t).Linksym()
- // Emit a marker relocation. The linker will know the type is converted
- // to an interface if "from" is reachable.
- r := obj.Addrel(from)
- r.Sym = tsym
- r.Type = objabi.R_USEIFACE
-}
-
-// markUsedIfaceMethod marks that an interface method is used in the current
-// function. n is an OCALLINTER node.
-func markUsedIfaceMethod(n *ir.CallExpr) {
- dot := n.X.(*ir.SelectorExpr)
- ityp := dot.X.Type()
- tsym := reflectdata.TypeSym(ityp).Linksym()
- r := obj.Addrel(ir.CurFunc.LSym)
- r.Sym = tsym
-	// dot.Offset is the method index * PtrSize (the offset of the code
-	// pointer in the itab).
- midx := dot.Offset / int64(types.PtrSize)
- r.Add = reflectdata.InterfaceMethodOffset(ityp, midx)
- r.Type = objabi.R_USEIFACEMETHOD
-}
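-
-// For example, a conversion such as
-//	var r io.Reader = f // f of type *os.File
-// emits an R_USEIFACE relocation against *os.File's type symbol, and a call
-//	r.Read(buf)
-// emits an R_USEIFACEMETHOD relocation recording which io.Reader method slot
-// is used, so the linker can discard methods that are never reachable
-// through an interface.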
-
-// rtconvfn returns the parameter and result types that will be used by a
-// runtime function to convert from type src to type dst. The runtime function
-// name can be derived from the names of the returned types.
-//
-// If no such function is necessary, it returns (Txxx, Txxx).
-func rtconvfn(src, dst *types.Type) (param, result types.Kind) {
- if ssagen.Arch.SoftFloat {
- return types.Txxx, types.Txxx
- }
-
- switch ssagen.Arch.LinkArch.Family {
- case sys.ARM, sys.MIPS:
- if src.IsFloat() {
- switch dst.Kind() {
- case types.TINT64, types.TUINT64:
- return types.TFLOAT64, dst.Kind()
- }
- }
- if dst.IsFloat() {
- switch src.Kind() {
- case types.TINT64, types.TUINT64:
- return src.Kind(), types.TFLOAT64
- }
- }
-
- case sys.I386:
- if src.IsFloat() {
- switch dst.Kind() {
- case types.TINT64, types.TUINT64:
- return types.TFLOAT64, dst.Kind()
- case types.TUINT32, types.TUINT, types.TUINTPTR:
- return types.TFLOAT64, types.TUINT32
- }
- }
- if dst.IsFloat() {
- switch src.Kind() {
- case types.TINT64, types.TUINT64:
- return src.Kind(), types.TFLOAT64
- case types.TUINT32, types.TUINT, types.TUINTPTR:
- return types.TUINT32, types.TFLOAT64
- }
- }
- }
- return types.Txxx, types.Txxx
-}
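-
-// For example, on ARM a conversion from float64 to uint64 reports
-// (TFLOAT64, TUINT64), from which the caller derives the runtime helper's
-// name (presumably float64touint64 under the scheme above).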
-
-// TODO(josharian): combine this with its caller and simplify
-func reduceSlice(n *ir.SliceExpr) ir.Node {
- low, high, max := n.SliceBounds()
- if high != nil && high.Op() == ir.OLEN && ir.SameSafeExpr(n.X, high.(*ir.UnaryExpr).X) {
- // Reduce x[i:len(x)] to x[i:].
- high = nil
- }
- n.SetSliceBounds(low, high, max)
- if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && low == nil && high == nil {
- // Reduce x[:] to x.
- if base.Debug.Slice > 0 {
- base.Warn("slice: omit slice operation")
- }
- return n.X
- }
- return n
-}
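-
-// For example, reduceSlice rewrites s[i:len(s)] to s[i:] and s[:] to s.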
-
-func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) *ir.AssignStmt {
- // convas will turn map assigns into function calls,
- // making it impossible for reorder3 to work.
- n := ir.NewAssignStmt(base.Pos, l, r)
-
- if l.Op() == ir.OINDEXMAP {
- return n
- }
-
- return convas(n, init)
-}
-
-func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node {
-	// ascompatee handles the assignment of an expression list
-	// to an expression list, as in
-	// expr-list = expr-list
-
- // ensure order of evaluation for function calls
- for i := range nl {
- nl[i] = safeexpr(nl[i], init)
- }
- for i1 := range nr {
- nr[i1] = safeexpr(nr[i1], init)
- }
-
- var nn []*ir.AssignStmt
- i := 0
- for ; i < len(nl); i++ {
- if i >= len(nr) {
- break
- }
- // Do not generate 'x = x' during return. See issue 4014.
- if op == ir.ORETURN && ir.SameSafeExpr(nl[i], nr[i]) {
- continue
- }
- nn = append(nn, ascompatee1(nl[i], nr[i], init))
- }
-
- // cannot happen: caller checked that lists had same length
- if i < len(nl) || i < len(nr) {
- var nln, nrn ir.Nodes
- nln.Set(nl)
- nrn.Set(nr)
- base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), ir.FuncName(ir.CurFunc))
- }
- return reorder3(nn)
-}
-
-// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call.
-func fncall(l ir.Node, rt *types.Type) bool {
- if l.HasCall() || l.Op() == ir.OINDEXMAP {
- return true
- }
- if types.Identical(l.Type(), rt) {
- return false
- }
- // There might be a conversion required, which might involve a runtime call.
- return true
-}
-
-// ascompatet handles the assignment of a function's results
-// to an expression list, as in
-// expr-list = func()
-func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
- if len(nl) != nr.NumFields() {
- base.Fatalf("ascompatet: assignment count mismatch: %d = %d", len(nl), nr.NumFields())
- }
-
- var nn, mm ir.Nodes
- for i, l := range nl {
- if ir.IsBlank(l) {
- continue
- }
- r := nr.Field(i)
-
- // Any assignment to an lvalue that might cause a function call must be
- // deferred until all the returned values have been read.
- if fncall(l, r.Type) {
- tmp := ir.Node(typecheck.Temp(r.Type))
- tmp = typecheck.Expr(tmp)
- a := convas(ir.NewAssignStmt(base.Pos, l, tmp), &mm)
- mm.Append(a)
- l = tmp
- }
-
- res := ir.NewResultExpr(base.Pos, nil, types.BADWIDTH)
- res.Offset = base.Ctxt.FixedFrameSize() + r.Offset
- res.SetType(r.Type)
- res.SetTypecheck(1)
-
- a := convas(ir.NewAssignStmt(base.Pos, l, res), &nn)
- updateHasCall(a)
- if a.HasCall() {
- ir.Dump("ascompatet ucount", a)
- base.Fatalf("ascompatet: too many function calls evaluating parameters")
- }
-
- nn.Append(a)
- }
- return append(nn, mm...)
-}
-
-func walkCall(n *ir.CallExpr, init *ir.Nodes) {
- if len(n.Rargs) != 0 {
- return // already walked
- }
-
- params := n.X.Type().Params()
- args := n.Args
-
- n.X = walkexpr(n.X, init)
- walkexprlist(args, init)
-
- // If this is a method call, add the receiver at the beginning of the args.
- if n.Op() == ir.OCALLMETH {
- withRecv := make([]ir.Node, len(args)+1)
- dot := n.X.(*ir.SelectorExpr)
- withRecv[0] = dot.X
- dot.X = nil
- copy(withRecv[1:], args)
- args = withRecv
- }
-
- // For any argument whose evaluation might require a function call,
- // store that argument into a temporary variable,
-	// to prevent those calls from clobbering arguments already on the stack.
- // When instrumenting, all arguments might require function calls.
- var tempAssigns []ir.Node
- for i, arg := range args {
- updateHasCall(arg)
- // Determine param type.
- var t *types.Type
- if n.Op() == ir.OCALLMETH {
- if i == 0 {
- t = n.X.Type().Recv().Type
- } else {
- t = params.Field(i - 1).Type
- }
- } else {
- t = params.Field(i).Type
- }
- if base.Flag.Cfg.Instrumenting || fncall(arg, t) {
- // make assignment of fncall to tempAt
- tmp := typecheck.Temp(t)
- a := convas(ir.NewAssignStmt(base.Pos, tmp, arg), init)
- tempAssigns = append(tempAssigns, a)
- // replace arg with temp
- args[i] = tmp
- }
- }
-
- n.Args.Set(tempAssigns)
- n.Rargs.Set(args)
-}
-
-// generate code for print
-func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
- // Hoist all the argument evaluation up before the lock.
- walkexprlistcheap(nn.Args, init)
-
- // For println, add " " between elements and "\n" at the end.
- if nn.Op() == ir.OPRINTN {
- s := nn.Args
- t := make([]ir.Node, 0, len(s)*2)
- for i, n := range s {
- if i != 0 {
- t = append(t, ir.NewString(" "))
- }
- t = append(t, n)
- }
- t = append(t, ir.NewString("\n"))
- nn.Args.Set(t)
- }
-
- // Collapse runs of constant strings.
- s := nn.Args
- t := make([]ir.Node, 0, len(s))
- for i := 0; i < len(s); {
- var strs []string
- for i < len(s) && ir.IsConst(s[i], constant.String) {
- strs = append(strs, ir.StringVal(s[i]))
- i++
- }
- if len(strs) > 0 {
- t = append(t, ir.NewString(strings.Join(strs, "")))
- }
- if i < len(s) {
- t = append(t, s[i])
- i++
- }
- }
- nn.Args.Set(t)
-
- calls := []ir.Node{mkcall("printlock", nil, init)}
- for i, n := range nn.Args {
- if n.Op() == ir.OLITERAL {
- if n.Type() == types.UntypedRune {
- n = typecheck.DefaultLit(n, types.RuneType)
- }
-
- switch n.Val().Kind() {
- case constant.Int:
- n = typecheck.DefaultLit(n, types.Types[types.TINT64])
-
- case constant.Float:
- n = typecheck.DefaultLit(n, types.Types[types.TFLOAT64])
- }
- }
-
- if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
- n = typecheck.DefaultLit(n, types.Types[types.TINT64])
- }
- n = typecheck.DefaultLit(n, nil)
- nn.Args[i] = n
- if n.Type() == nil || n.Type().Kind() == types.TFORW {
- continue
- }
-
- var on *ir.Name
- switch n.Type().Kind() {
- case types.TINTER:
- if n.Type().IsEmptyInterface() {
- on = typecheck.LookupRuntime("printeface")
- } else {
- on = typecheck.LookupRuntime("printiface")
- }
- on = typecheck.SubstArgTypes(on, n.Type()) // any-1
- case types.TPTR:
- if n.Type().Elem().NotInHeap() {
- on = typecheck.LookupRuntime("printuintptr")
- n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
- n.SetType(types.Types[types.TUNSAFEPTR])
- n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
- n.SetType(types.Types[types.TUINTPTR])
- break
- }
- fallthrough
- case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
- on = typecheck.LookupRuntime("printpointer")
- on = typecheck.SubstArgTypes(on, n.Type()) // any-1
- case types.TSLICE:
- on = typecheck.LookupRuntime("printslice")
- on = typecheck.SubstArgTypes(on, n.Type()) // any-1
- case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
- if types.IsRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" {
- on = typecheck.LookupRuntime("printhex")
- } else {
- on = typecheck.LookupRuntime("printuint")
- }
- case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64:
- on = typecheck.LookupRuntime("printint")
- case types.TFLOAT32, types.TFLOAT64:
- on = typecheck.LookupRuntime("printfloat")
- case types.TCOMPLEX64, types.TCOMPLEX128:
- on = typecheck.LookupRuntime("printcomplex")
- case types.TBOOL:
- on = typecheck.LookupRuntime("printbool")
- case types.TSTRING:
- cs := ""
- if ir.IsConst(n, constant.String) {
- cs = ir.StringVal(n)
- }
- switch cs {
- case " ":
- on = typecheck.LookupRuntime("printsp")
- case "\n":
- on = typecheck.LookupRuntime("printnl")
- default:
- on = typecheck.LookupRuntime("printstring")
- }
- default:
- badtype(ir.OPRINT, n.Type(), nil)
- continue
- }
-
- r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil)
- if params := on.Type().Params().FieldSlice(); len(params) > 0 {
- t := params[0].Type
- if !types.Identical(t, n.Type()) {
- n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
- n.SetType(t)
- }
- r.Args.Append(n)
- }
- calls = append(calls, r)
- }
-
- calls = append(calls, mkcall("printunlock", nil, init))
-
- typecheck.Stmts(calls)
- walkexprlist(calls, init)
-
- r := ir.NewBlockStmt(base.Pos, nil)
- r.List.Set(calls)
- return walkstmt(typecheck.Stmt(r))
-}
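-
-// Schematically,
-//	println("x:", 42)
-// becomes
-//	printlock()
-//	printstring("x: ") // "x:" and the inserted " " collapsed into one constant
-//	printint(42)
-//	printnl() // from the trailing "\n"
-//	printunlock()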
-
-func callnew(t *types.Type) ir.Node {
- types.CalcSize(t)
- n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, reflectdata.TypePtr(t))
- n.SetType(types.NewPtr(t))
- n.SetTypecheck(1)
- n.MarkNonNil()
- return n
-}
-
-func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt {
- if n.Op() != ir.OAS {
- base.Fatalf("convas: not OAS %v", n.Op())
- }
- defer updateHasCall(n)
-
- n.SetTypecheck(1)
-
- if n.X == nil || n.Y == nil {
- return n
- }
-
- lt := n.X.Type()
- rt := n.Y.Type()
- if lt == nil || rt == nil {
- return n
- }
-
- if ir.IsBlank(n.X) {
- n.Y = typecheck.DefaultLit(n.Y, nil)
- return n
- }
-
- if !types.Identical(lt, rt) {
- n.Y = typecheck.AssignConv(n.Y, lt, "assignment")
- n.Y = walkexpr(n.Y, init)
- }
- types.CalcSize(n.Y.Type())
-
- return n
-}
-
-// reorder3 handles the simultaneous assignment
-//	a,b = c,d
-// from ascompatee: no expression evaluated later may
-// use the new value of an lvalue assigned earlier.
-//
-// Function calls have already been removed.
-func reorder3(all []*ir.AssignStmt) []ir.Node {
- // If a needed expression may be affected by an
- // earlier assignment, make an early copy of that
- // expression and use the copy instead.
- var early []ir.Node
-
- var mapinit ir.Nodes
- for i, n := range all {
- l := n.X
-
- // Save subexpressions needed on left side.
- // Drill through non-dereferences.
- for {
- switch ll := l; ll.Op() {
- case ir.ODOT:
- ll := ll.(*ir.SelectorExpr)
- l = ll.X
- continue
- case ir.OPAREN:
- ll := ll.(*ir.ParenExpr)
- l = ll.X
- continue
- case ir.OINDEX:
- ll := ll.(*ir.IndexExpr)
- if ll.X.Type().IsArray() {
- ll.Index = reorder3save(ll.Index, all, i, &early)
- l = ll.X
- continue
- }
- }
- break
- }
-
- switch l.Op() {
- default:
- base.Fatalf("reorder3 unexpected lvalue %v", l.Op())
-
- case ir.ONAME:
- break
-
- case ir.OINDEX, ir.OINDEXMAP:
- l := l.(*ir.IndexExpr)
- l.X = reorder3save(l.X, all, i, &early)
- l.Index = reorder3save(l.Index, all, i, &early)
- if l.Op() == ir.OINDEXMAP {
- all[i] = convas(all[i], &mapinit)
- }
-
- case ir.ODEREF:
- l := l.(*ir.StarExpr)
- l.X = reorder3save(l.X, all, i, &early)
- case ir.ODOTPTR:
- l := l.(*ir.SelectorExpr)
- l.X = reorder3save(l.X, all, i, &early)
- }
-
- // Save expression on right side.
- all[i].Y = reorder3save(all[i].Y, all, i, &early)
- }
-
- early = append(mapinit, early...)
- for _, as := range all {
- early = append(early, as)
- }
- return early
-}
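-
-// For example, in
-//	i, a[i] = 1, 2
-// the index in a[i] must be the value of i from before the assignment,
-// so reorder3save copies it into a temporary first:
-//	tmp := i
-//	i = 1
-//	a[tmp] = 2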
-
-// If the evaluation of n would be affected by the
-// assignments in all up to but not including the ith assignment,
-// reorder3save copies n into a temporary appended to *early
-// and returns that temporary.
-// The result of reorder3save MUST be assigned back to n, e.g.
-// n.Left = reorder3save(n.Left, all, i, early)
-func reorder3save(n ir.Node, all []*ir.AssignStmt, i int, early *[]ir.Node) ir.Node {
- if !aliased(n, all[:i]) {
- return n
- }
-
- q := ir.Node(typecheck.Temp(n.Type()))
- as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, q, n))
- *early = append(*early, as)
- return q
-}
-
-// aliased reports whether the computation of r might be
-// affected by assignments in all.
-func aliased(r ir.Node, all []*ir.AssignStmt) bool {
- if r == nil {
- return false
- }
-
- // Treat all fields of a struct as referring to the whole struct.
- // We could do better but we would have to keep track of the fields.
- for r.Op() == ir.ODOT {
- r = r.(*ir.SelectorExpr).X
- }
-
- // Look for obvious aliasing: a variable being assigned
- // during the all list and appearing in n.
- // Also record whether there are any writes to addressable
- // memory (either main memory or variables whose addresses
- // have been taken).
- memwrite := false
- for _, as := range all {
- // We can ignore assignments to blank.
- if ir.IsBlank(as.X) {
- continue
- }
-
- lv := ir.OuterValue(as.X)
- if lv.Op() != ir.ONAME {
- memwrite = true
- continue
- }
- l := lv.(*ir.Name)
-
- switch l.Class_ {
- default:
- base.Fatalf("unexpected class: %v, %v", l, l.Class_)
-
- case ir.PAUTOHEAP, ir.PEXTERN:
- memwrite = true
- continue
-
- case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
- if l.Name().Addrtaken() {
- memwrite = true
- continue
- }
-
- if refersToName(l, r) {
- // Direct hit: l appears in r.
- return true
- }
- }
- }
-
- // The variables being written do not appear in r.
- // However, r might refer to computed addresses
- // that are being written.
-
- // If no computed addresses are affected by the writes, no aliasing.
- if !memwrite {
- return false
- }
-
- // If r does not refer to any variables whose addresses have been taken,
- // then the only possible writes to r would be directly to the variables,
- // and we checked those above, so no aliasing problems.
- if !anyAddrTaken(r) {
- return false
- }
-
- // Otherwise, both the writes and r refer to computed memory addresses.
- // Assume that they might conflict.
- return true
-}
-
-// anyAddrTaken reports whether the evaluation of n,
-// which appears on the left side of an assignment,
-// may refer to variables whose addresses have been taken.
-func anyAddrTaken(n ir.Node) bool {
- return ir.Any(n, func(n ir.Node) bool {
- switch n.Op() {
- case ir.ONAME:
- n := n.(*ir.Name)
- return n.Class_ == ir.PEXTERN || n.Class_ == ir.PAUTOHEAP || n.Name().Addrtaken()
-
- case ir.ODOT: // but not ODOTPTR - should have been handled in aliased.
- base.Fatalf("anyAddrTaken unexpected ODOT")
-
- case ir.OADD,
- ir.OAND,
- ir.OANDAND,
- ir.OANDNOT,
- ir.OBITNOT,
- ir.OCONV,
- ir.OCONVIFACE,
- ir.OCONVNOP,
- ir.ODIV,
- ir.ODOTTYPE,
- ir.OLITERAL,
- ir.OLSH,
- ir.OMOD,
- ir.OMUL,
- ir.ONEG,
- ir.ONIL,
- ir.OOR,
- ir.OOROR,
- ir.OPAREN,
- ir.OPLUS,
- ir.ORSH,
- ir.OSUB,
- ir.OXOR:
- return false
- }
- // Be conservative.
- return true
- })
-}
-
-// refersToName reports whether r refers to name.
-func refersToName(name *ir.Name, r ir.Node) bool {
- return ir.Any(r, func(r ir.Node) bool {
- return r.Op() == ir.ONAME && r == name
- })
-}
-
-var stop = errors.New("stop")
-
-// refersToCommonName reports whether any name
-// appears in common between l and r.
-// This is called from sinit.go.
-func refersToCommonName(l ir.Node, r ir.Node) bool {
- if l == nil || r == nil {
- return false
- }
-
- // This could be written elegantly as a Find nested inside a Find:
- //
- // found := ir.Find(l, func(l ir.Node) interface{} {
- // if l.Op() == ir.ONAME {
- // return ir.Find(r, func(r ir.Node) interface{} {
- // if r.Op() == ir.ONAME && l.Name() == r.Name() {
- // return r
- // }
- // return nil
- // })
- // }
- // return nil
- // })
- // return found != nil
- //
- // But that would allocate a new closure for the inner Find
- // for each name found on the left side.
- // It may not matter at all, but the below way of writing it
- // only allocates two closures, not O(|L|) closures.
-
- var doL, doR func(ir.Node) error
- var targetL *ir.Name
- doR = func(r ir.Node) error {
- if r.Op() == ir.ONAME && r.Name() == targetL {
- return stop
- }
- return ir.DoChildren(r, doR)
- }
- doL = func(l ir.Node) error {
- if l.Op() == ir.ONAME {
- l := l.(*ir.Name)
- targetL = l.Name()
- if doR(r) == stop {
- return stop
- }
- }
- return ir.DoChildren(l, doL)
- }
- return doL(l) == stop
-}
-
-// paramstoheap returns code to allocate memory for heap-escaped parameters
-// and to copy non-result parameters' values from the stack.
-func paramstoheap(params *types.Type) []ir.Node {
- var nn []ir.Node
- for _, t := range params.Fields().Slice() {
- v := ir.AsNode(t.Nname)
- if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result
- v = nil
- }
- if v == nil {
- continue
- }
-
- if stackcopy := v.Name().Stackcopy; stackcopy != nil {
- nn = append(nn, walkstmt(ir.NewDecl(base.Pos, ir.ODCL, v)))
- if stackcopy.Class_ == ir.PPARAM {
- nn = append(nn, walkstmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, v, stackcopy))))
- }
- }
- }
-
- return nn
-}
-
-// zeroResults zeros the return values at the start of the function.
-// We need to do this very early in the function. Defer might stop a
-// panic and show the return values as they exist at the time of
-// panic. For precise stacks, the garbage collector assumes results
-// are always live, so we need to zero them before any allocations,
-// even allocations to move params/results to the heap.
-// The generated code is added to Curfn's Enter list.
-func zeroResults() {
- for _, f := range ir.CurFunc.Type().Results().Fields().Slice() {
- v := ir.AsNode(f.Nname)
- if v != nil && v.Name().Heapaddr != nil {
- // The local which points to the return value is the
- // thing that needs zeroing. This is already handled
- // by a Needzero annotation in plive.go:livenessepilogue.
- continue
- }
- if ir.IsParamHeapCopy(v) {
- // TODO(josharian/khr): Investigate whether we can switch to "continue" here,
- // and document more in either case.
- // In the review of CL 114797, Keith wrote (roughly):
- // I don't think the zeroing below matters.
- // The stack return value will never be marked as live anywhere in the function.
- // It is not written to until deferreturn returns.
- v = v.Name().Stackcopy
- }
- // Zero the stack location containing f.
- ir.CurFunc.Enter.Append(ir.NewAssignStmt(ir.CurFunc.Pos(), v, nil))
- }
-}
-
-// returnsfromheap returns code to copy values for heap-escaped parameters
-// back to the stack.
-func returnsfromheap(params *types.Type) []ir.Node {
- var nn []ir.Node
- for _, t := range params.Fields().Slice() {
- v := ir.AsNode(t.Nname)
- if v == nil {
- continue
- }
- if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class_ == ir.PPARAMOUT {
- nn = append(nn, walkstmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, stackcopy, v))))
- }
- }
-
- return nn
-}
-
-// heapmoves generates code to handle migrating heap-escaped parameters
-// between the stack and the heap. The generated code is added to Curfn's
-// Enter and Exit lists.
-func heapmoves() {
- lno := base.Pos
- base.Pos = ir.CurFunc.Pos()
- nn := paramstoheap(ir.CurFunc.Type().Recvs())
- nn = append(nn, paramstoheap(ir.CurFunc.Type().Params())...)
- nn = append(nn, paramstoheap(ir.CurFunc.Type().Results())...)
- ir.CurFunc.Enter.Append(nn...)
- base.Pos = ir.CurFunc.Endlineno
- ir.CurFunc.Exit.Append(returnsfromheap(ir.CurFunc.Type().Results())...)
- base.Pos = lno
-}
-
-func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr {
- if fn.Type() == nil || fn.Type().Kind() != types.TFUNC {
- base.Fatalf("mkcall %v %v", fn, fn.Type())
- }
-
- n := fn.Type().NumParams()
- if n != len(va) {
- base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
- }
-
- call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, va)
- typecheck.Call(call)
- call.SetType(t)
- return walkexpr(call, init).(*ir.CallExpr)
-}
-
-func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
- return vmkcall(typecheck.LookupRuntime(name), t, init, args)
-}
-
-func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
- return vmkcall(fn, t, init, args)
-}
-
-// byteindex converts n, which is byte-sized, to an int used to index into an array.
-// We cannot use conv, because we allow converting bool to int here,
-// which is forbidden in user code.
-func byteindex(n ir.Node) ir.Node {
- // We cannot convert from bool to int directly.
- // While converting from int8 to int is possible, it would yield
- // the wrong result for negative values.
- // Reinterpreting the value as an unsigned byte solves both cases.
- if !types.Identical(n.Type(), types.Types[types.TUINT8]) {
- n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
- n.SetType(types.Types[types.TUINT8])
- n.SetTypecheck(1)
- }
- n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
- n.SetType(types.Types[types.TINT])
- n.SetTypecheck(1)
- return n
-}
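-
-// Schematically, byteindex rewrites n to int(uint8(n)): the no-op conversion
-// to uint8 reinterprets the byte (including a bool) as an unsigned value,
-// and the widening to int then cannot produce a negative index.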
-
-func chanfn(name string, n int, t *types.Type) ir.Node {
- if !t.IsChan() {
- base.Fatalf("chanfn %v", t)
- }
- fn := typecheck.LookupRuntime(name)
- switch n {
- default:
- base.Fatalf("chanfn %d", n)
- case 1:
- fn = typecheck.SubstArgTypes(fn, t.Elem())
- case 2:
- fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
- }
- return fn
-}
-
-func mapfn(name string, t *types.Type) ir.Node {
- if !t.IsMap() {
- base.Fatalf("mapfn %v", t)
- }
- fn := typecheck.LookupRuntime(name)
- fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem())
- return fn
-}
-
-func mapfndel(name string, t *types.Type) ir.Node {
- if !t.IsMap() {
- base.Fatalf("mapfn %v", t)
- }
- fn := typecheck.LookupRuntime(name)
- fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key())
- return fn
-}
-
-const (
- mapslow = iota
- mapfast32
- mapfast32ptr
- mapfast64
- mapfast64ptr
- mapfaststr
- nmapfast
-)
-
-type mapnames [nmapfast]string
-
-func mkmapnames(base string, ptr string) mapnames {
- return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
-}
-
-var mapaccess1 = mkmapnames("mapaccess1", "")
-var mapaccess2 = mkmapnames("mapaccess2", "")
-var mapassign = mkmapnames("mapassign", "ptr")
-var mapdelete = mkmapnames("mapdelete", "")
-
-func mapfast(t *types.Type) int {
- // Check runtime/map.go:maxElemSize before changing.
- if t.Elem().Width > 128 {
- return mapslow
- }
- switch reflectdata.AlgType(t.Key()) {
- case types.AMEM32:
- if !t.Key().HasPointers() {
- return mapfast32
- }
- if types.PtrSize == 4 {
- return mapfast32ptr
- }
- base.Fatalf("small pointer %v", t.Key())
- case types.AMEM64:
- if !t.Key().HasPointers() {
- return mapfast64
- }
- if types.PtrSize == 8 {
- return mapfast64ptr
- }
-		// Two-word object where at least one word is a pointer.
- // Use the slow path.
- case types.ASTRING:
- return mapfaststr
- }
- return mapslow
-}
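-
-// For example, a read of m[k] with m of type map[int32]V (and V at most
-// 128 bytes) uses mapaccess1_fast32, map[string]V uses mapaccess1_faststr,
-// and a map whose element exceeds 128 bytes falls back to the generic
-// mapaccess1.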
-
-func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
- fn := typecheck.LookupRuntime(name)
- fn = typecheck.SubstArgTypes(fn, l, r)
- return fn
-}
-
-func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node {
- c := len(n.List)
-
- if c < 2 {
- base.Fatalf("addstr count %d too small", c)
- }
-
- buf := typecheck.NodNil()
- if n.Esc() == ir.EscNone {
- sz := int64(0)
- for _, n1 := range n.List {
- if n1.Op() == ir.OLITERAL {
- sz += int64(len(ir.StringVal(n1)))
- }
- }
-
- // Don't allocate the buffer if the result won't fit.
- if sz < tmpstringbufsize {
- // Create temporary buffer for result string on stack.
- t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
- buf = typecheck.NodAddr(typecheck.Temp(t))
- }
- }
-
- // build list of string arguments
- args := []ir.Node{buf}
- for _, n2 := range n.List {
- args = append(args, typecheck.Conv(n2, types.Types[types.TSTRING]))
- }
-
- var fn string
- if c <= 5 {
- // small numbers of strings use direct runtime helpers.
- // note: order.expr knows this cutoff too.
- fn = fmt.Sprintf("concatstring%d", c)
- } else {
- // large numbers of strings are passed to the runtime as a slice.
- fn = "concatstrings"
-
- t := types.NewSlice(types.Types[types.TSTRING])
- // args[1:] to skip buf arg
- slice := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(t), args[1:])
- slice.Prealloc = n.Prealloc
- args = []ir.Node{buf, slice}
- slice.SetEsc(ir.EscNone)
- }
-
- cat := typecheck.LookupRuntime(fn)
- r := ir.NewCallExpr(base.Pos, ir.OCALL, cat, nil)
- r.Args.Set(args)
- r1 := typecheck.Expr(r)
- r1 = walkexpr(r1, init)
- r1.SetType(n.Type())
-
- return r1
-}
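-
-// Schematically,
-//	s := a + b + c
-// becomes
-//	s = concatstring3(buf, a, b, c) // buf points at a stack buffer, or is nil
-// while six or more operands use the slice form:
-//	s = concatstrings(buf, []string{a, b, c, d, e, f})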
-
-func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) {
- walkexprlistsafe(n.Args, init)
-
- // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
- // and n are name or literal, but those may index the slice we're
- // modifying here. Fix explicitly.
- ls := n.Args
- for i1, n1 := range ls {
- ls[i1] = cheapexpr(n1, init)
- }
-}
-
-// expand append(l1, l2...) to
-// init {
-// s := l1
-// n := len(s) + len(l2)
-// // Compare as uint so growslice can panic on overflow.
-// if uint(n) > uint(cap(s)) {
-// s = growslice(s, n)
-// }
-// s = s[:n]
-// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
-// }
-// s
-//
-// l2 is allowed to be a string.
-func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
- walkAppendArgs(n, init)
-
- l1 := n.Args[0]
- l2 := n.Args[1]
- l2 = cheapexpr(l2, init)
- n.Args[1] = l2
-
- var nodes ir.Nodes
-
- // var s []T
- s := typecheck.Temp(l1.Type())
- nodes.Append(ir.NewAssignStmt(base.Pos, s, l1)) // s = l1
-
- elemtype := s.Type().Elem()
-
- // n := len(s) + len(l2)
- nn := typecheck.Temp(types.Types[types.TINT])
- nodes.Append(ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), ir.NewUnaryExpr(base.Pos, ir.OLEN, l2))))
-
- // if uint(n) > uint(cap(s))
- nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
- nuint := typecheck.Conv(nn, types.Types[types.TUINT])
- scapuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT])
- nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, scapuint)
-
- // instantiate growslice(typ *type, []any, int) []any
- fn := typecheck.LookupRuntime("growslice")
- fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
-
- // s = growslice(T, s, n)
- nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
- nodes.Append(nif)
-
- // s = s[:n]
- nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s)
- nt.SetSliceBounds(nil, nn, nil)
- nt.SetBounded(true)
- nodes.Append(ir.NewAssignStmt(base.Pos, s, nt))
-
- var ncopy ir.Node
- if elemtype.HasPointers() {
- // copy(s[len(l1):], l2)
- slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s)
- slice.SetType(s.Type())
- slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil)
-
- ir.CurFunc.SetWBPos(n.Pos())
-
- // instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
- fn := typecheck.LookupRuntime("typedslicecopy")
- fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
- ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes))
- ptr2, len2 := backingArrayPtrLen(l2)
- ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.TypePtr(elemtype), ptr1, len1, ptr2, len2)
- } else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime {
- // rely on runtime to instrument:
- // copy(s[len(l1):], l2)
- // l2 can be a slice or string.
- slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s)
- slice.SetType(s.Type())
- slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil)
-
- ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes))
- ptr2, len2 := backingArrayPtrLen(l2)
-
- fn := typecheck.LookupRuntime("slicecopy")
- fn = typecheck.SubstArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem())
- ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, ir.NewInt(elemtype.Width))
- } else {
- // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
- ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1))
- ix.SetBounded(true)
- addr := typecheck.NodAddr(ix)
-
- sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l2)
-
- nwid := cheapexpr(typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, l2), types.Types[types.TUINTPTR]), &nodes)
- nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(elemtype.Width))
-
- // instantiate func memmove(to *any, frm *any, length uintptr)
- fn := typecheck.LookupRuntime("memmove")
- fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
- ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid)
- }
- ln := append(nodes, ncopy)
-
- typecheck.Stmts(ln)
- walkstmtlist(ln)
- init.Append(ln...)
- return s
-}
-
-// isAppendOfMake reports whether n is of the form append(x, make([]T, y)...).
-// isAppendOfMake assumes n has already been typechecked.
-func isAppendOfMake(n ir.Node) bool {
- if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
- return false
- }
-
- if n.Typecheck() == 0 {
- base.Fatalf("missing typecheck: %+v", n)
- }
-
- if n.Op() != ir.OAPPEND {
- return false
- }
- call := n.(*ir.CallExpr)
- if !call.IsDDD || len(call.Args) != 2 || call.Args[1].Op() != ir.OMAKESLICE {
- return false
- }
-
- mk := call.Args[1].(*ir.MakeExpr)
- if mk.Cap != nil {
- return false
- }
-
-	// y must be either an integer constant, or the largest possible
-	// positive value of variable y must fit into a uint.
-
- // typecheck made sure that constant arguments to make are not negative and fit into an int.
-
-	// Overflow of the len argument to make is handled by an explicit check of int(len) < 0 at runtime.
- y := mk.Len
- if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() {
- return false
- }
-
- return true
-}
-
-// extendslice rewrites append(l1, make([]T, l2)...) to
-// init {
-// if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true)
-// } else {
-// panicmakeslicelen()
-// }
-// s := l1
-// n := len(s) + l2
-// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
-// // cap is a positive int and n can become negative when len(s) + l2
-// // overflows int. Interpreting n when negative as uint makes it larger
-// // than cap(s). growslice will check the int n arg and panic if n is
-// // negative. This prevents the overflow from being undetected.
-// if uint(n) > uint(cap(s)) {
-// s = growslice(T, s, n)
-// }
-// s = s[:n]
-// lptr := &l1[0]
-// sptr := &s[0]
-// if lptr == sptr || !T.HasPointers() {
-// // growslice did not clear the whole underlying array (or did not get called)
-// hp := &s[len(l1)]
-// hn := l2 * sizeof(T)
-// memclr(hp, hn)
-// }
-// }
-// s
-func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
- // isAppendOfMake made sure all possible positive values of l2 fit into an uint.
- // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
-	// check of l2 < 0 at runtime, which is generated below.
- l2 := typecheck.Conv(n.Args[1].(*ir.MakeExpr).Len, types.Types[types.TINT])
- l2 = typecheck.Expr(l2)
- n.Args[1] = l2 // walkAppendArgs expects l2 in n.List.Second().
-
- walkAppendArgs(n, init)
-
- l1 := n.Args[0]
- l2 = n.Args[1] // re-read l2, as it may have been updated by walkAppendArgs
-
- var nodes []ir.Node
-
- // if l2 >= 0 (likely happens), do nothing
- nifneg := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGE, l2, ir.NewInt(0)), nil, nil)
- nifneg.Likely = true
-
- // else panicmakeslicelen()
- nifneg.Else = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
- nodes = append(nodes, nifneg)
-
- // s := l1
- s := typecheck.Temp(l1.Type())
- nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, l1))
-
- elemtype := s.Type().Elem()
-
- // n := len(s) + l2
- nn := typecheck.Temp(types.Types[types.TINT])
- nodes = append(nodes, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2)))
-
- // if uint(n) > uint(cap(s))
- nuint := typecheck.Conv(nn, types.Types[types.TUINT])
- capuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT])
- nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, capuint), nil, nil)
-
- // instantiate growslice(typ *type, old []any, newcap int) []any
- fn := typecheck.LookupRuntime("growslice")
- fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
-
- // s = growslice(T, s, n)
- nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
- nodes = append(nodes, nif)
-
- // s = s[:n]
- nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s)
- nt.SetSliceBounds(nil, nn, nil)
- nt.SetBounded(true)
- nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, nt))
-
- // lptr := &l1[0]
- l1ptr := typecheck.Temp(l1.Type().Elem().PtrTo())
- tmp := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l1)
- nodes = append(nodes, ir.NewAssignStmt(base.Pos, l1ptr, tmp))
-
- // sptr := &s[0]
- sptr := typecheck.Temp(elemtype.PtrTo())
- tmp = ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
- nodes = append(nodes, ir.NewAssignStmt(base.Pos, sptr, tmp))
-
- // hp := &s[len(l1)]
- ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1))
- ix.SetBounded(true)
- hp := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])
-
- // hn := l2 * sizeof(elem(s))
- hn := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, l2, ir.NewInt(elemtype.Width)), types.Types[types.TUINTPTR])
-
- clrname := "memclrNoHeapPointers"
- hasPointers := elemtype.HasPointers()
- if hasPointers {
- clrname = "memclrHasPointers"
- ir.CurFunc.SetWBPos(n.Pos())
- }
-
- var clr ir.Nodes
- clrfn := mkcall(clrname, nil, &clr, hp, hn)
- clr.Append(clrfn)
-
- if hasPointers {
- // if l1ptr == sptr
- nifclr := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OEQ, l1ptr, sptr), nil, nil)
- nifclr.Body = clr
- nodes = append(nodes, nifclr)
- } else {
- nodes = append(nodes, clr...)
- }
-
- typecheck.Stmts(nodes)
- walkstmtlist(nodes)
- init.Append(nodes...)
- return s
-}
-
-// Rewrite append(src, x, y, z) so that any side effects in
-// x, y, z (including runtime panics) are evaluated in
-// initialization statements before the append.
-// For normal code generation, stop there and leave the
-// rest to cgen_append.
-//
-// For race detector, expand append(src, a [, b]* ) to
-//
-// init {
-// s := src
-// const argc = len(args) - 1
-// if cap(s) - len(s) < argc {
-// s = growslice(s, len(s)+argc)
-// }
-// n := len(s)
-// s = s[:n+argc]
-// s[n] = a
-// s[n+1] = b
-// ...
-// }
-// s
-func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
- if !ir.SameSafeExpr(dst, n.Args[0]) {
- n.Args[0] = safeexpr(n.Args[0], init)
- n.Args[0] = walkexpr(n.Args[0], init)
- }
- walkexprlistsafe(n.Args[1:], init)
-
- nsrc := n.Args[0]
-
- // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
- // and n are name or literal, but those may index the slice we're
- // modifying here. Fix explicitly.
- // Using cheapexpr also makes sure that the evaluation
-	// of all arguments (and especially any panics) happens
- // before we begin to modify the slice in a visible way.
- ls := n.Args[1:]
- for i, n := range ls {
- n = cheapexpr(n, init)
- if !types.Identical(n.Type(), nsrc.Type().Elem()) {
- n = typecheck.AssignConv(n, nsrc.Type().Elem(), "append")
- n = walkexpr(n, init)
- }
- ls[i] = n
- }
-
- argc := len(n.Args) - 1
- if argc < 1 {
- return nsrc
- }
-
- // General case, with no function calls left as arguments.
- // Leave for gen, except that instrumentation requires old form.
- if !base.Flag.Cfg.Instrumenting || base.Flag.CompilingRuntime {
- return n
- }
-
- var l []ir.Node
-
- ns := typecheck.Temp(nsrc.Type())
- l = append(l, ir.NewAssignStmt(base.Pos, ns, nsrc)) // s = src
-
- na := ir.NewInt(int64(argc)) // const argc
- nif := ir.NewIfStmt(base.Pos, nil, nil, nil) // if cap(s) - len(s) < argc
- nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OCAP, ns), ir.NewUnaryExpr(base.Pos, ir.OLEN, ns)), na)
-
- fn := typecheck.LookupRuntime("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
- fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
-
- nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.TypePtr(ns.Type().Elem()), ns,
- ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))}
-
- l = append(l, nif)
-
- nn := typecheck.Temp(types.Types[types.TINT])
- l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns))) // n = len(s)
-
- slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, ns) // ...s[:n+argc]
- slice.SetSliceBounds(nil, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, na), nil)
- slice.SetBounded(true)
- l = append(l, ir.NewAssignStmt(base.Pos, ns, slice)) // s = s[:n+argc]
-
- ls = n.Args[1:]
- for i, n := range ls {
- ix := ir.NewIndexExpr(base.Pos, ns, nn) // s[n] ...
- ix.SetBounded(true)
- l = append(l, ir.NewAssignStmt(base.Pos, ix, n)) // s[n] = arg
- if i+1 < len(ls) {
- l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, ir.NewInt(1)))) // n = n + 1
- }
- }
-
- typecheck.Stmts(l)
- walkstmtlist(l)
- init.Append(l...)
- return ns
-}
-
-// Lower copy(a, b) to a memmove call or a runtime call.
-//
-// init {
-// n := len(a)
-// if n > len(b) { n = len(b) }
-// if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) }
-// }
-// n;
-//
-// Also works if b is a string.
-//
-func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
- if n.X.Type().Elem().HasPointers() {
- ir.CurFunc.SetWBPos(n.Pos())
- fn := writebarrierfn("typedslicecopy", n.X.Type().Elem(), n.Y.Type().Elem())
- n.X = cheapexpr(n.X, init)
- ptrL, lenL := backingArrayPtrLen(n.X)
- n.Y = cheapexpr(n.Y, init)
- ptrR, lenR := backingArrayPtrLen(n.Y)
- return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR)
- }
-
- if runtimecall {
- // rely on runtime to instrument:
- // copy(n.Left, n.Right)
- // n.Right can be a slice or string.
-
- n.X = cheapexpr(n.X, init)
- ptrL, lenL := backingArrayPtrLen(n.X)
- n.Y = cheapexpr(n.Y, init)
- ptrR, lenR := backingArrayPtrLen(n.Y)
-
- fn := typecheck.LookupRuntime("slicecopy")
- fn = typecheck.SubstArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem())
-
- return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(n.X.Type().Elem().Width))
- }
-
- n.X = walkexpr(n.X, init)
- n.Y = walkexpr(n.Y, init)
- nl := typecheck.Temp(n.X.Type())
- nr := typecheck.Temp(n.Y.Type())
- var l []ir.Node
- l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X))
- l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y))
-
- nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr)
- nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl)
-
- nlen := typecheck.Temp(types.Types[types.TINT])
-
- // n = len(to)
- l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl)))
-
- // if n > len(frm) { n = len(frm) }
- nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
-
- nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))
- nif.Body.Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr)))
- l = append(l, nif)
-
- // if to.ptr != frm.ptr { memmove( ... ) }
- ne := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, nto, nfrm), nil, nil)
- ne.Likely = true
- l = append(l, ne)
-
- fn := typecheck.LookupRuntime("memmove")
- fn = typecheck.SubstArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
- nwid := ir.Node(typecheck.Temp(types.Types[types.TUINTPTR]))
- setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR]))
- ne.Body.Append(setwid)
- nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(nl.Type().Elem().Width))
- call := mkcall1(fn, nil, init, nto, nfrm, nwid)
- ne.Body.Append(call)
-
- typecheck.Stmts(l)
- walkstmtlist(l)
- init.Append(l...)
- return nlen
-}
-
-func eqfor(t *types.Type) (n ir.Node, needsize bool) {
- // Should only arrive here with large memory or
- // a struct/array containing a non-memory field/element.
- // Small memory is handled inline, and single non-memory
- // is handled by walkcompare.
- switch a, _ := types.AlgType(t); a {
- case types.AMEM:
- n := typecheck.LookupRuntime("memequal")
- n = typecheck.SubstArgTypes(n, t, t)
- return n, true
- case types.ASPECIAL:
- sym := reflectdata.TypeSymPrefix(".eq", t)
- n := typecheck.NewName(sym)
- ir.MarkFunc(n)
- n.SetType(typecheck.NewFuncType(nil, []*ir.Field{
- ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
- ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
- }, []*ir.Field{
- ir.NewField(base.Pos, nil, nil, types.Types[types.TBOOL]),
- }))
- return n, false
- }
- base.Fatalf("eqfor %v", t)
- return nil, false
-}
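-
-// For example, comparing two [8]int64 values calls memequal (with the size
-// argument), while comparing values of a struct type containing a string
-// field calls that type's compiler-generated .eq function.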
-
-// The result of walkcompare MUST be assigned back to n, e.g.
-// n.Left = walkcompare(n.Left, init)
-func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
- if n.X.Type().IsInterface() && n.Y.Type().IsInterface() && n.X.Op() != ir.ONIL && n.Y.Op() != ir.ONIL {
- return walkcompareInterface(n, init)
- }
-
- if n.X.Type().IsString() && n.Y.Type().IsString() {
- return walkcompareString(n, init)
- }
-
- n.X = walkexpr(n.X, init)
- n.Y = walkexpr(n.Y, init)
-
- // Given mixed interface/concrete comparison,
- // rewrite into types-equal && data-equal.
- // This is efficient, avoids allocations, and avoids runtime calls.
- if n.X.Type().IsInterface() != n.Y.Type().IsInterface() {
- // Preserve side-effects in case of short-circuiting; see #32187.
- l := cheapexpr(n.X, init)
- r := cheapexpr(n.Y, init)
- // Swap so that l is the interface value and r is the concrete value.
- if n.Y.Type().IsInterface() {
- l, r = r, l
- }
-
- // Handle both == and !=.
- eq := n.Op()
- andor := ir.OOROR
- if eq == ir.OEQ {
- andor = ir.OANDAND
- }
- // Check for types equal.
- // For empty interface, this is:
- // l.tab == type(r)
- // For non-empty interface, this is:
- // l.tab != nil && l.tab._type == type(r)
- var eqtype ir.Node
- tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l)
- rtyp := reflectdata.TypePtr(r.Type())
- if l.Type().IsEmptyInterface() {
- tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
- tab.SetTypecheck(1)
- eqtype = ir.NewBinaryExpr(base.Pos, eq, tab, rtyp)
- } else {
- nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), typecheck.NodNil(), tab)
- match := ir.NewBinaryExpr(base.Pos, eq, itabType(tab), rtyp)
- eqtype = ir.NewLogicalExpr(base.Pos, andor, nonnil, match)
- }
- // Check for data equal.
- eqdata := ir.NewBinaryExpr(base.Pos, eq, ifaceData(n.Pos(), l, r.Type()), r)
- // Put it all together.
- expr := ir.NewLogicalExpr(base.Pos, andor, eqtype, eqdata)
- return finishcompare(n, expr, init)
- }
-
- // Must be comparison of array or struct.
- // Otherwise back end handles it.
- // While we're here, decide whether to
- // inline or call an eq alg.
- t := n.X.Type()
- var inline bool
-
- maxcmpsize := int64(4)
- unalignedLoad := canMergeLoads()
- if unalignedLoad {
- // Keep this low enough to generate less code than a function call.
- maxcmpsize = 2 * int64(ssagen.Arch.LinkArch.RegSize)
- }
-
- switch t.Kind() {
- default:
- if base.Debug.Libfuzzer != 0 && t.IsInteger() {
- n.X = cheapexpr(n.X, init)
- n.Y = cheapexpr(n.Y, init)
-
- // If exactly one comparison operand is
- // constant, invoke the constcmp functions
- // instead, and arrange for the constant
- // operand to be the first argument.
- l, r := n.X, n.Y
- if r.Op() == ir.OLITERAL {
- l, r = r, l
- }
- constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL
-
- var fn string
- var paramType *types.Type
- switch t.Size() {
- case 1:
- fn = "libfuzzerTraceCmp1"
- if constcmp {
- fn = "libfuzzerTraceConstCmp1"
- }
- paramType = types.Types[types.TUINT8]
- case 2:
- fn = "libfuzzerTraceCmp2"
- if constcmp {
- fn = "libfuzzerTraceConstCmp2"
- }
- paramType = types.Types[types.TUINT16]
- case 4:
- fn = "libfuzzerTraceCmp4"
- if constcmp {
- fn = "libfuzzerTraceConstCmp4"
- }
- paramType = types.Types[types.TUINT32]
- case 8:
- fn = "libfuzzerTraceCmp8"
- if constcmp {
- fn = "libfuzzerTraceConstCmp8"
- }
- paramType = types.Types[types.TUINT64]
- default:
- base.Fatalf("unexpected integer size %d for %v", t.Size(), t)
- }
- init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init)))
- }
- return n
- case types.TARRAY:
- // We can compare several elements at once with 2/4/8 byte integer compares
- inline = t.NumElem() <= 1 || (types.IsSimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
- case types.TSTRUCT:
- inline = t.NumComponents(types.IgnoreBlankFields) <= 4
- }
-
- cmpl := n.X
- for cmpl != nil && cmpl.Op() == ir.OCONVNOP {
- cmpl = cmpl.(*ir.ConvExpr).X
- }
- cmpr := n.Y
- for cmpr != nil && cmpr.Op() == ir.OCONVNOP {
- cmpr = cmpr.(*ir.ConvExpr).X
- }
-
- // We chose not to inline. Call the equality function directly.
- if !inline {
- // eq algs take pointers; cmpl and cmpr must be addressable
- if !ir.IsAssignable(cmpl) || !ir.IsAssignable(cmpr) {
- base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
- }
-
- fn, needsize := eqfor(t)
- call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
- call.Args.Append(typecheck.NodAddr(cmpl))
- call.Args.Append(typecheck.NodAddr(cmpr))
- if needsize {
- call.Args.Append(ir.NewInt(t.Width))
- }
- res := ir.Node(call)
- if n.Op() != ir.OEQ {
- res = ir.NewUnaryExpr(base.Pos, ir.ONOT, res)
- }
- return finishcompare(n, res, init)
- }
-
- // inline: build boolean expression comparing element by element
- andor := ir.OANDAND
- if n.Op() == ir.ONE {
- andor = ir.OOROR
- }
- var expr ir.Node
- compare := func(el, er ir.Node) {
- a := ir.NewBinaryExpr(base.Pos, n.Op(), el, er)
- if expr == nil {
- expr = a
- } else {
- expr = ir.NewLogicalExpr(base.Pos, andor, expr, a)
- }
- }
- cmpl = safeexpr(cmpl, init)
- cmpr = safeexpr(cmpr, init)
- if t.IsStruct() {
- for _, f := range t.Fields().Slice() {
- sym := f.Sym
- if sym.IsBlank() {
- continue
- }
- compare(
- ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpl, sym),
- ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpr, sym),
- )
- }
- } else {
- step := int64(1)
- remains := t.NumElem() * t.Elem().Width
- combine64bit := unalignedLoad && types.RegSize == 8 && t.Elem().Width <= 4 && t.Elem().IsInteger()
- combine32bit := unalignedLoad && t.Elem().Width <= 2 && t.Elem().IsInteger()
- combine16bit := unalignedLoad && t.Elem().Width == 1 && t.Elem().IsInteger()
- for i := int64(0); remains > 0; {
- var convType *types.Type
- switch {
- case remains >= 8 && combine64bit:
- convType = types.Types[types.TINT64]
- step = 8 / t.Elem().Width
- case remains >= 4 && combine32bit:
- convType = types.Types[types.TUINT32]
- step = 4 / t.Elem().Width
- case remains >= 2 && combine16bit:
- convType = types.Types[types.TUINT16]
- step = 2 / t.Elem().Width
- default:
- step = 1
- }
- if step == 1 {
- compare(
- ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i)),
- ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i)),
- )
- i++
- remains -= t.Elem().Width
- } else {
- elemType := t.Elem().ToUnsigned()
- cmplw := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i)))
- cmplw = typecheck.Conv(cmplw, elemType) // convert to unsigned
- cmplw = typecheck.Conv(cmplw, convType) // widen
- cmprw := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i)))
- cmprw = typecheck.Conv(cmprw, elemType)
- cmprw = typecheck.Conv(cmprw, convType)
- // For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
- // ssa will generate a single large load.
- for offset := int64(1); offset < step; offset++ {
- lb := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i+offset)))
- lb = typecheck.Conv(lb, elemType)
- lb = typecheck.Conv(lb, convType)
- lb = ir.NewBinaryExpr(base.Pos, ir.OLSH, lb, ir.NewInt(8*t.Elem().Width*offset))
- cmplw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmplw, lb)
- rb := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i+offset)))
- rb = typecheck.Conv(rb, elemType)
- rb = typecheck.Conv(rb, convType)
- rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, ir.NewInt(8*t.Elem().Width*offset))
- cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb)
- }
- compare(cmplw, cmprw)
- i += step
- remains -= step * t.Elem().Width
- }
- }
- }
- if expr == nil {
- expr = ir.NewBool(n.Op() == ir.OEQ)
- // We still need to use cmpl and cmpr, in case they contain
- // an expression which might panic. See issue 23837.
- t := typecheck.Temp(cmpl.Type())
- a1 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpl))
- a2 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpr))
- init.Append(a1, a2)
- }
- return finishcompare(n, expr, init)
-}
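-
-// As a sketch of the inline path above, comparing two values of type
-// struct{ a, b int } expands to roughly
-//
-// l.a == r.a && l.b == r.b // for ==
-// l.a != r.a || l.b != r.b // for !=
-//
-// and a [4]byte array is compared as a single uint32 load on targets
-// where canMergeLoads reports true.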
-
-func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
- // Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
- if n.Op() == ir.OLITERAL && n.Type().IsSigned() && ir.Int64Val(n) < 0 {
- n = copyexpr(n, n.Type(), init)
- }
-
- return typecheck.Conv(n, t)
-}
-
-func walkcompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
- n.Y = cheapexpr(n.Y, init)
- n.X = cheapexpr(n.X, init)
- eqtab, eqdata := reflectdata.EqInterface(n.X, n.Y)
- var cmp ir.Node
- if n.Op() == ir.OEQ {
- cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata)
- } else {
- eqtab.SetOp(ir.ONE)
- cmp = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqtab, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqdata))
- }
- return finishcompare(n, cmp, init)
-}
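-
-// For example, EqInterface above turns i == j for two interface values
-// into roughly i.tab == j.tab && runtime.ifaceeq(i.tab, i.data, j.data)
-// (efaceeq for empty interfaces); i != j negates both halves and joins
-// them with ||.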
-
-func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
- // Rewrite comparisons to short constant strings as length+byte-wise comparisons.
- var cs, ncs ir.Node // const string, non-const string
- switch {
- case ir.IsConst(n.X, constant.String) && ir.IsConst(n.Y, constant.String):
- // ignore; will be constant evaluated
- case ir.IsConst(n.X, constant.String):
- cs = n.X
- ncs = n.Y
- case ir.IsConst(n.Y, constant.String):
- cs = n.Y
- ncs = n.X
- }
- if cs != nil {
- cmp := n.Op()
- // Our comparison below assumes that the non-constant string
- // is on the left hand side, so rewrite "" cmp x to x cmp "".
- // See issue 24817.
- if ir.IsConst(n.X, constant.String) {
- cmp = brrev(cmp)
- }
-
- // maxRewriteLen was chosen empirically.
- // It is the value that minimizes cmd/go file size
- // across most architectures.
- // See the commit description for CL 26758 for details.
- maxRewriteLen := 6
- // Some architectures can load an unaligned byte sequence as one word,
- // so we can cover longer strings with the same amount of code.
- canCombineLoads := canMergeLoads()
- combine64bit := false
- if canCombineLoads {
- // Keep this low enough to generate less code than a function call.
- maxRewriteLen = 2 * ssagen.Arch.LinkArch.RegSize
- combine64bit = ssagen.Arch.LinkArch.RegSize >= 8
- }
-
- var and ir.Op
- switch cmp {
- case ir.OEQ:
- and = ir.OANDAND
- case ir.ONE:
- and = ir.OOROR
- default:
- // Don't do byte-wise comparisons for <, <=, etc.
- // They're fairly complicated.
- // Length-only checks are ok, though.
- maxRewriteLen = 0
- }
- if s := ir.StringVal(cs); len(s) <= maxRewriteLen {
- if len(s) > 0 {
- ncs = safeexpr(ncs, init)
- }
- r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.NewUnaryExpr(base.Pos, ir.OLEN, ncs), ir.NewInt(int64(len(s)))))
- remains := len(s)
- for i := 0; remains > 0; {
- if remains == 1 || !canCombineLoads {
- cb := ir.NewInt(int64(s[i]))
- ncb := ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i)))
- r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb))
- remains--
- i++
- continue
- }
- var step int
- var convType *types.Type
- switch {
- case remains >= 8 && combine64bit:
- convType = types.Types[types.TINT64]
- step = 8
- case remains >= 4:
- convType = types.Types[types.TUINT32]
- step = 4
- case remains >= 2:
- convType = types.Types[types.TUINT16]
- step = 2
- }
- ncsubstr := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i))), convType)
- csubstr := int64(s[i])
- // Calculate large constant from bytes as sequence of shifts and ors.
- // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
- // ssa will combine this into a single large load.
- for offset := 1; offset < step; offset++ {
- b := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i+offset))), convType)
- b = ir.NewBinaryExpr(base.Pos, ir.OLSH, b, ir.NewInt(int64(8*offset)))
- ncsubstr = ir.NewBinaryExpr(base.Pos, ir.OOR, ncsubstr, b)
- csubstr |= int64(s[i+offset]) << uint8(8*offset)
- }
- csubstrPart := ir.NewInt(csubstr)
- // Compare "step" bytes as once
- r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, csubstrPart, ncsubstr))
- remains -= step
- i += step
- }
- return finishcompare(n, r, init)
- }
- }
-
- var r ir.Node
- if n.Op() == ir.OEQ || n.Op() == ir.ONE {
- // prepare for rewrite below
- n.X = cheapexpr(n.X, init)
- n.Y = cheapexpr(n.Y, init)
- eqlen, eqmem := reflectdata.EqString(n.X, n.Y)
- // quick check of len before full compare for == or !=.
- // memequal then tests equality up to length len.
- if n.Op() == ir.OEQ {
- // len(left) == len(right) && memequal(left, right, len)
- r = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqlen, eqmem)
- } else {
- // len(left) != len(right) || !memequal(left, right, len)
- eqlen.SetOp(ir.ONE)
- r = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqlen, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqmem))
- }
- } else {
- // cmpstring(s1, s2) OP 0, where OP is the original comparison operator.
- r = mkcall("cmpstring", types.Types[types.TINT], init, typecheck.Conv(n.X, types.Types[types.TSTRING]), typecheck.Conv(n.Y, types.Types[types.TSTRING]))
- r = ir.NewBinaryExpr(base.Pos, n.Op(), r, ir.NewInt(0))
- }
-
- return finishcompare(n, r, init)
-}
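-
-// For example, s == "ab" becomes roughly
-//
-// len(s) == 2 && s[0] == 'a' && s[1] == 'b'
-//
-// (or a single 2-byte comparison where loads can be merged), while long
-// or non-constant operands fall through to the EqString or cmpstring
-// forms above.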
-
-// The result of finishcompare MUST be assigned back to n, e.g.
-// n.Left = finishcompare(n.Left, r, init)
-func finishcompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node {
- r = typecheck.Expr(r)
- r = typecheck.Conv(r, n.Type())
- r = walkexpr(r, init)
- return r
-}
-
-// bounded reports whether the integer expression n must be in range [0, max).
-func bounded(n ir.Node, max int64) bool {
- if n.Type() == nil || !n.Type().IsInteger() {
- return false
- }
-
- sign := n.Type().IsSigned()
- bits := int32(8 * n.Type().Width)
-
- if ir.IsSmallIntConst(n) {
- v := ir.Int64Val(n)
- return 0 <= v && v < max
- }
-
- switch n.Op() {
- case ir.OAND, ir.OANDNOT:
- n := n.(*ir.BinaryExpr)
- v := int64(-1)
- switch {
- case ir.IsSmallIntConst(n.X):
- v = ir.Int64Val(n.X)
- case ir.IsSmallIntConst(n.Y):
- v = ir.Int64Val(n.Y)
- if n.Op() == ir.OANDNOT {
- v = ^v
- if !sign {
- v &= 1<<uint(bits) - 1
- }
- }
- }
- if 0 <= v && v < max {
- return true
- }
-
- case ir.OMOD:
- n := n.(*ir.BinaryExpr)
- if !sign && ir.IsSmallIntConst(n.Y) {
- v := ir.Int64Val(n.Y)
- if 0 <= v && v <= max {
- return true
- }
- }
-
- case ir.ODIV:
- n := n.(*ir.BinaryExpr)
- if !sign && ir.IsSmallIntConst(n.Y) {
- v := ir.Int64Val(n.Y)
- for bits > 0 && v >= 2 {
- bits--
- v >>= 1
- }
- }
-
- case ir.ORSH:
- n := n.(*ir.BinaryExpr)
- if !sign && ir.IsSmallIntConst(n.Y) {
- v := ir.Int64Val(n.Y)
- if v > int64(bits) {
- return true
- }
- bits -= int32(v)
- }
- }
-
- if !sign && bits <= 62 && 1<<uint(bits) <= max {
- return true
- }
-
- return false
-}
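-
-// For example, bounded(x&3, 8) reports true, as do bounded(x%8, 8) and
-// bounded(x>>60, 16) for unsigned 64-bit x, since each operation
-// provably keeps the result in [0, max).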
-
-// usemethod checks interface method calls for uses of reflect.Type.Method or MethodByName.
-func usemethod(n *ir.CallExpr) {
- t := n.X.Type()
-
- // Looking for either of:
- // Method(int) reflect.Method
- // MethodByName(string) (reflect.Method, bool)
- //
- // TODO(crawshaw): improve precision of match by working out
- // how to check the method name.
- if n := t.NumParams(); n != 1 {
- return
- }
- if n := t.NumResults(); n != 1 && n != 2 {
- return
- }
- p0 := t.Params().Field(0)
- res0 := t.Results().Field(0)
- var res1 *types.Field
- if t.NumResults() == 2 {
- res1 = t.Results().Field(1)
- }
-
- if res1 == nil {
- if p0.Type.Kind() != types.TINT {
- return
- }
- } else {
- if !p0.Type.IsString() {
- return
- }
- if !res1.Type.IsBoolean() {
- return
- }
- }
-
- // Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors
- // (including global variables such as numImports - was issue #19028).
- // Also need to check for reflect package itself (see Issue #38515).
- if s := res0.Type.Sym(); s != nil && s.Name == "Method" && types.IsReflectPkg(s.Pkg) {
- ir.CurFunc.SetReflectMethod(true)
- // The LSym is initialized at this point. We need to set the attribute on the LSym.
- ir.CurFunc.LSym.Set(obj.AttrReflectMethod, true)
- }
-}
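-
-// Setting ReflectMethod tells the linker that this function may look up
-// methods via reflection, so it must be conservative when discarding
-// unreferenced method metadata.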
-
-func usefield(n *ir.SelectorExpr) {
- if objabi.Fieldtrack_enabled == 0 {
- return
- }
-
- switch n.Op() {
- default:
- base.Fatalf("usefield %v", n.Op())
-
- case ir.ODOT, ir.ODOTPTR:
- break
- }
- if n.Sel == nil {
- // No field name. This DOTPTR was built by the compiler for access
- // to runtime data structures. Ignore.
- return
- }
-
- t := n.X.Type()
- if t.IsPtr() {
- t = t.Elem()
- }
- field := n.Selection
- if field == nil {
- base.Fatalf("usefield %v %v without paramfld", n.X.Type(), n.Sel)
- }
- if field.Sym != n.Sel || field.Offset != n.Offset {
- base.Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sel, n.Offset)
- }
- if !strings.Contains(field.Note, "go:\"track\"") {
- return
- }
-
- outer := n.X.Type()
- if outer.IsPtr() {
- outer = outer.Elem()
- }
- if outer.Sym() == nil {
- base.Errorf("tracked field must be in named struct type")
- }
- if !types.IsExported(field.Sym.Name) {
- base.Errorf("tracked field must be exported (upper case)")
- }
-
- sym := reflectdata.TrackSym(outer, field)
- if ir.CurFunc.FieldTrack == nil {
- ir.CurFunc.FieldTrack = make(map[*types.Sym]struct{})
- }
- ir.CurFunc.FieldTrack[sym] = struct{}{}
-}
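-
-// For example, with field tracking enabled, reading t.F in
-//
-// type T struct {
-//  F int `go:"track"`
-// }
-//
-// records the tracking symbol for T.F in the current function's
-// FieldTrack set; the field must be exported and live in a named
-// struct type, as checked above.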
-
-// anySideEffects reports whether n contains any operations that could have observable side effects.
-func anySideEffects(n ir.Node) bool {
- return ir.Any(n, func(n ir.Node) bool {
- switch n.Op() {
- // Assume side effects unless we know otherwise.
- default:
- return true
-
- // No side effects here (arguments are checked separately).
- case ir.ONAME,
- ir.ONONAME,
- ir.OTYPE,
- ir.OPACK,
- ir.OLITERAL,
- ir.ONIL,
- ir.OADD,
- ir.OSUB,
- ir.OOR,
- ir.OXOR,
- ir.OADDSTR,
- ir.OADDR,
- ir.OANDAND,
- ir.OBYTES2STR,
- ir.ORUNES2STR,
- ir.OSTR2BYTES,
- ir.OSTR2RUNES,
- ir.OCAP,
- ir.OCOMPLIT,
- ir.OMAPLIT,
- ir.OSTRUCTLIT,
- ir.OARRAYLIT,
- ir.OSLICELIT,
- ir.OPTRLIT,
- ir.OCONV,
- ir.OCONVIFACE,
- ir.OCONVNOP,
- ir.ODOT,
- ir.OEQ,
- ir.ONE,
- ir.OLT,
- ir.OLE,
- ir.OGT,
- ir.OGE,
- ir.OKEY,
- ir.OSTRUCTKEY,
- ir.OLEN,
- ir.OMUL,
- ir.OLSH,
- ir.ORSH,
- ir.OAND,
- ir.OANDNOT,
- ir.ONEW,
- ir.ONOT,
- ir.OBITNOT,
- ir.OPLUS,
- ir.ONEG,
- ir.OOROR,
- ir.OPAREN,
- ir.ORUNESTR,
- ir.OREAL,
- ir.OIMAG,
- ir.OCOMPLEX:
- return false
-
- // Only possible side effect is division by zero.
- case ir.ODIV, ir.OMOD:
- n := n.(*ir.BinaryExpr)
- if n.Y.Op() != ir.OLITERAL || constant.Sign(n.Y.Val()) == 0 {
- return true
- }
-
- // Only possible side effect is panic on invalid size,
- // but many makechan and makemap calls use size zero, which is definitely OK.
- case ir.OMAKECHAN, ir.OMAKEMAP:
- n := n.(*ir.MakeExpr)
- if !ir.IsConst(n.Len, constant.Int) || constant.Sign(n.Len.Val()) != 0 {
- return true
- }
-
- // Only possible side effect is panic on invalid size.
- // TODO(rsc): Merge with previous case (probably breaks toolstash -cmp).
- case ir.OMAKESLICE, ir.OMAKESLICECOPY:
- return true
- }
- return false
- })
-}
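-
-// For example, anySideEffects reports false for len(x)+1 or x.f == nil,
-// but true for any function call, and true for x/y unless y is a
-// nonzero constant.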
-
-// Rewrite
-// go builtin(x, y, z)
-// into
-// go func(a1, a2, a3) {
-// builtin(a1, a2, a3)
-// }(x, y, z)
-// for print, println, and delete.
-//
-// Rewrite
-// go f(x, y, uintptr(unsafe.Pointer(z)))
-// into
-// go func(a1, a2, a3) {
-// f(a1, a2, uintptr(a3))
-// }(x, y, unsafe.Pointer(z))
-// for calls whose arguments contain unsafe-uintptr conversions.
-
-var wrapCall_prgen int
-
-// The result of wrapCall MUST be assigned back to n, e.g.
-// n.Left = wrapCall(n.Left, init)
-func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
- if len(n.Init()) != 0 {
- walkstmtlist(n.Init())
- init.Append(n.PtrInit().Take()...)
- }
-
- isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER
-
- // Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
- if !isBuiltinCall && n.IsDDD {
- last := len(n.Args) - 1
- if va := n.Args[last]; va.Op() == ir.OSLICELIT {
- va := va.(*ir.CompLitExpr)
- n.Args.Set(append(n.Args[:last], va.List...))
- n.IsDDD = false
- }
- }
-
- // origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion.
- origArgs := make([]ir.Node, len(n.Args))
- var funcArgs []*ir.Field
- for i, arg := range n.Args {
- s := typecheck.LookupNum("a", i)
- if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
- origArgs[i] = arg
- arg = arg.(*ir.ConvExpr).X
- n.Args[i] = arg
- }
- funcArgs = append(funcArgs, ir.NewField(base.Pos, s, nil, arg.Type()))
- }
- t := ir.NewFuncType(base.Pos, nil, funcArgs, nil)
-
- wrapCall_prgen++
- sym := typecheck.LookupNum("wrap·", wrapCall_prgen)
- fn := typecheck.DeclFunc(sym, t)
-
- args := ir.ParamNames(t.Type())
- for i, origArg := range origArgs {
- if origArg == nil {
- continue
- }
- args[i] = ir.NewConvExpr(base.Pos, origArg.Op(), origArg.Type(), args[i])
- }
- call := ir.NewCallExpr(base.Pos, n.Op(), n.X, args)
- if !isBuiltinCall {
- call.SetOp(ir.OCALL)
- call.IsDDD = n.IsDDD
- }
- fn.Body = []ir.Node{call}
-
- typecheck.FinishFuncBody()
-
- typecheck.Func(fn)
- typecheck.Stmts(fn.Body)
- typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
-
- call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.Args)
- return walkexpr(typecheck.Stmt(call), init)
-}
-
-// canMergeLoads reports whether the backend optimization passes for
-// the current architecture can combine adjacent loads into a single
-// larger, possibly unaligned, load. Note that currently the
-// optimizations must be able to handle little endian byte order.
-func canMergeLoads() bool {
- switch ssagen.Arch.LinkArch.Family {
- case sys.ARM64, sys.AMD64, sys.I386, sys.S390X:
- return true
- case sys.PPC64:
- // Load combining only supported on ppc64le.
- return ssagen.Arch.LinkArch.ByteOrder == binary.LittleEndian
- }
- return false
-}
-
-// isRuneCount reports whether n is of the form len([]rune(string)).
-// These are optimized into a call to runtime.countrunes.
-func isRuneCount(n ir.Node) bool {
- return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Op() == ir.OSTR2RUNES
-}
-
-func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Node {
- if !n.Type().IsPtr() {
- base.Fatalf("expected pointer type: %v", n.Type())
- }
- elem := n.Type().Elem()
- if count != nil {
- if !elem.IsArray() {
- base.Fatalf("expected array type: %v", elem)
- }
- elem = elem.Elem()
- }
-
- size := elem.Size()
- if elem.Alignment() == 1 && (size == 0 || size == 1 && count == nil) {
- return n
- }
-
- if count == nil {
- count = ir.NewInt(1)
- }
-
- n.X = cheapexpr(n.X, init)
- init.Append(mkcall("checkptrAlignment", nil, init, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]), reflectdata.TypePtr(elem), typecheck.Conv(count, types.Types[types.TUINTPTR])))
- return n
-}
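-
-// For example, with checkptr instrumentation enabled, the conversion
-// (*T)(p) of an unsafe.Pointer p is preceded by roughly
-//
-// checkptrAlignment(p, typeof(T), 1)
-//
-// where typeof(T) stands in for the type descriptor produced by
-// reflectdata.TypePtr.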
-
-var walkCheckPtrArithmeticMarker byte
-
-func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
- // Calling cheapexpr(n, init) below leads to a recursive call
- // to walkexpr, which leads us back here again. Use n.Opt to
- // prevent infinite loops.
- if opt := n.Opt(); opt == &walkCheckPtrArithmeticMarker {
- return n
- } else if opt != nil {
- // We use n.Opt() here because today it's not used for OCONVNOP. If that changes,
- // there's no guarantee that temporarily replacing it is safe, so just hard fail here.
- base.Fatalf("unexpected Opt: %v", opt)
- }
- n.SetOpt(&walkCheckPtrArithmeticMarker)
- defer n.SetOpt(nil)
-
- // TODO(mdempsky): Make stricter. We only need to exempt
- // reflect.Value.Pointer and reflect.Value.UnsafeAddr.
- switch n.X.Op() {
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
- return n
- }
-
- if n.X.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(n.X) {
- return n
- }
-
- // Find original unsafe.Pointer operands involved in this
- // arithmetic expression.
- //
- // "It is valid both to add and to subtract offsets from a
- // pointer in this way. It is also valid to use &^ to round
- // pointers, usually for alignment."
- var originals []ir.Node
- var walk func(n ir.Node)
- walk = func(n ir.Node) {
- switch n.Op() {
- case ir.OADD:
- n := n.(*ir.BinaryExpr)
- walk(n.X)
- walk(n.Y)
- case ir.OSUB, ir.OANDNOT:
- n := n.(*ir.BinaryExpr)
- walk(n.X)
- case ir.OCONVNOP:
- n := n.(*ir.ConvExpr)
- if n.X.Type().IsUnsafePtr() {
- n.X = cheapexpr(n.X, init)
- originals = append(originals, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]))
- }
- }
- }
- walk(n.X)
-
- cheap := cheapexpr(n, init)
-
- slice := typecheck.MakeDotArgs(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
- slice.SetEsc(ir.EscNone)
-
- init.Append(mkcall("checkptrArithmetic", nil, init, typecheck.ConvNop(cheap, types.Types[types.TUNSAFEPTR]), slice))
- // TODO(khr): Mark backing store of slice as dead. This will allow us to reuse
- // the backing store for multiple calls to checkptrArithmetic.
-
- return cheap
-}
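-
-// For example, the conversion unsafe.Pointer(uintptr(p) + 8) is
-// rewritten to roughly
-//
-// q := unsafe.Pointer(uintptr(p) + 8)
-// checkptrArithmetic(q, []unsafe.Pointer{p})
-//
-// so the runtime can check that q still points into the same object as p.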
-
-// appendWalkStmt typechecks and walks stmt and then appends it to init.
-func appendWalkStmt(init *ir.Nodes, stmt ir.Node) {
- op := stmt.Op()
- n := typecheck.Stmt(stmt)
- if op == ir.OAS || op == ir.OAS2 {
- // If the assignment has side effects, walkexpr will append them
- // directly to init for us, while walkstmt will wrap it in an OBLOCK.
- // We need to append them directly.
- // TODO(rsc): Clean this up.
- n = walkexpr(n, init)
- } else {
- n = walkstmt(n)
- }
- init.Append(n)
-}
-
-// The max number of defers in a function using open-coded defers. We enforce this
-// limit because the deferBits bitmask is currently a single byte (to minimize code size).
-const maxOpenDefers = 8