path: root/src/cmd/compile/internal/gc/plive.go
Diffstat (limited to 'src/cmd/compile/internal/gc/plive.go')
-rw-r--r--  src/cmd/compile/internal/gc/plive.go  134
1 file changed, 35 insertions(+), 99 deletions(-)
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index 7e1c0c1a95..09e5487f60 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -140,24 +140,14 @@ type Liveness struct {
regMaps []liveRegMask
cache progeffectscache
-
- // These are only populated if open-coded defers are being used.
- // List of vars/stack slots storing defer args
- openDeferVars []openDeferVarInfo
- // Map from defer arg OpVarDef to the block where the OpVarDef occurs.
- openDeferVardefToBlockMap map[*Node]*ssa.Block
- // Map of blocks that cannot reach a return or exit (panic)
- nonReturnBlocks map[*ssa.Block]bool
-}
-
-type openDeferVarInfo struct {
- n *Node // Var/stack slot storing a defer arg
- varsIndex int // Index of variable in lv.vars
}
// LivenessMap maps from *ssa.Value to LivenessIndex.
type LivenessMap struct {
vals map[ssa.ID]LivenessIndex
+ // The set of live, pointer-containing variables at the deferreturn
+ // call (only set when open-coded defers are used).
+ deferreturn LivenessIndex
}
func (m *LivenessMap) reset() {
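
Taken together, these first hunks replace the per-function open-coded defer bookkeeping with a single liveness index hung off the map itself. A minimal sketch of the resulting shape, with simplified stand-in types (the get helper and int keys are assumptions; the real map is keyed by ssa.ID):

package liveness

// Simplified stand-ins for the patch's types; get is a
// hypothetical helper, not the compiler's actual accessor.
type LivenessIndex struct {
	stackMapIndex int
	regMapIndex   int
	isUnsafePoint bool
}

var LivenessInvalid = LivenessIndex{stackMapIndex: -1, regMapIndex: -1}

type LivenessMap struct {
	vals map[int]LivenessIndex
	// Liveness at the deferreturn call; only meaningful when
	// open-coded defers are in use.
	deferreturn LivenessIndex
}

// get returns the index recorded for the value with the given ID,
// or LivenessInvalid if none was recorded.
func (m *LivenessMap) get(id int) LivenessIndex {
	if idx, ok := m.vals[id]; ok {
		return idx
	}
	return LivenessInvalid
}
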
@@ -168,6 +158,7 @@ func (m *LivenessMap) reset() {
delete(m.vals, k)
}
}
+ m.deferreturn = LivenessInvalid
}
func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) {
@@ -268,7 +259,7 @@ func (v *varRegVec) AndNot(v1, v2 varRegVec) {
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
func livenessShouldTrack(n *Node) bool {
- return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && types.Haspointers(n.Type)
+ return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Type.HasPointers()
}
// getvariables returns the list of on-stack variables that we need to track
@@ -445,7 +436,7 @@ func (lv *Liveness) regEffects(v *ssa.Value) (uevar, kill liveRegMask) {
case ssa.LocalSlot:
return mask
case *ssa.Register:
- if ptrOnly && !v.Type.HasHeapPointer() {
+ if ptrOnly && !v.Type.HasPointers() {
return mask
}
regs[0] = loc
@@ -460,7 +451,7 @@ func (lv *Liveness) regEffects(v *ssa.Value) (uevar, kill liveRegMask) {
if loc1 == nil {
continue
}
- if ptrOnly && !v.Type.FieldType(i).HasHeapPointer() {
+ if ptrOnly && !v.Type.FieldType(i).HasPointers() {
continue
}
regs[nreg] = loc1.(*ssa.Register)
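
Both register-effect hunks, like the livenessShouldTrack hunk above, swap the old heap-pointer test for the unified HasPointers predicate, so pointer-only masks skip any value whose type carries no pointers. A rough sketch of that filtering, using stand-in types rather than the compiler's liveRegMask machinery:

package liveness

// regMask is a bitmask over machine registers, standing in for
// liveRegMask; bit i set means register i holds a live pointer.
type regMask uint32

type value struct {
	reg         int  // register number, or -1 if not in a register
	hasPointers bool // whether the value's type contains pointers
}

// liveRegs folds the registers of pointer-carrying values into a
// mask, mirroring the ptrOnly filtering in regEffects.
func liveRegs(vals []value) regMask {
	var mask regMask
	for _, v := range vals {
		if v.reg < 0 || !v.hasPointers {
			continue // only pointer-containing values matter to GC
		}
		mask |= 1 << uint(v.reg)
	}
	return mask
}
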
@@ -542,7 +533,7 @@ func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkpt
if cap(lc.be) >= f.NumBlocks() {
lv.be = lc.be[:f.NumBlocks()]
}
- lv.livenessMap = LivenessMap{lc.livenessMap.vals}
+ lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessInvalid}
lc.livenessMap.vals = nil
}
if lv.be == nil {
@@ -577,13 +568,13 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
if t.Align > 0 && off&int64(t.Align-1) != 0 {
Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
}
+ if !t.HasPointers() {
+ // Note: this case ensures that pointers to go:notinheap types
+ // are not considered pointers by garbage collection and stack copying.
+ return
+ }
switch t.Etype {
- case TINT8, TUINT8, TINT16, TUINT16,
- TINT32, TUINT32, TINT64, TUINT64,
- TINT, TUINT, TUINTPTR, TBOOL,
- TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128:
-
case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP:
if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid alignment, %v", t)
@@ -893,58 +884,12 @@ func (lv *Liveness) hasStackMap(v *ssa.Value) bool {
func (lv *Liveness) prologue() {
lv.initcache()
- if lv.fn.Func.HasDefer() && !lv.fn.Func.OpenCodedDeferDisallowed() {
- lv.openDeferVardefToBlockMap = make(map[*Node]*ssa.Block)
- for i, n := range lv.vars {
- if n.Name.OpenDeferSlot() {
- lv.openDeferVars = append(lv.openDeferVars, openDeferVarInfo{n: n, varsIndex: i})
- }
- }
-
- // Find any blocks that cannot reach a return or a BlockExit
- // (panic) -- these must be because of an infinite loop.
- reachesRet := make(map[ssa.ID]bool)
- blockList := make([]*ssa.Block, 0, 256)
-
- for _, b := range lv.f.Blocks {
- if b.Kind == ssa.BlockRet || b.Kind == ssa.BlockRetJmp || b.Kind == ssa.BlockExit {
- blockList = append(blockList, b)
- }
- }
-
- for len(blockList) > 0 {
- b := blockList[0]
- blockList = blockList[1:]
- if reachesRet[b.ID] {
- continue
- }
- reachesRet[b.ID] = true
- for _, e := range b.Preds {
- blockList = append(blockList, e.Block())
- }
- }
-
- lv.nonReturnBlocks = make(map[*ssa.Block]bool)
- for _, b := range lv.f.Blocks {
- if !reachesRet[b.ID] {
- lv.nonReturnBlocks[b] = true
- //fmt.Println("No reach ret", lv.f.Name, b.ID, b.Kind)
- }
- }
- }
-
for _, b := range lv.f.Blocks {
be := lv.blockEffects(b)
// Walk the block instructions backward and update the block
// effects with each instruction's effects.
for j := len(b.Values) - 1; j >= 0; j-- {
- if b.Values[j].Op == ssa.OpVarDef {
- n := b.Values[j].Aux.(*Node)
- if n.Name.OpenDeferSlot() {
- lv.openDeferVardefToBlockMap[n] = b
- }
- }
pos, e := lv.valueEffects(b.Values[j])
regUevar, regKill := lv.regEffects(b.Values[j])
if e&varkill != 0 {
@@ -961,20 +906,6 @@ func (lv *Liveness) prologue() {
}
}
-// markDeferVarsLive marks each variable storing an open-coded defer arg as
-// specially live in block b if the variable definition dominates block b.
-func (lv *Liveness) markDeferVarsLive(b *ssa.Block, newliveout *varRegVec) {
- // Only force computation of dominators if we have a block where we need
- // to specially mark defer args live.
- sdom := lv.f.Sdom()
- for _, info := range lv.openDeferVars {
- defB := lv.openDeferVardefToBlockMap[info.n]
- if sdom.IsAncestorEq(defB, b) {
- newliveout.vars.Set(int32(info.varsIndex))
- }
- }
-}
-
// Solve the liveness dataflow equations.
func (lv *Liveness) solve() {
// These temporary bitvectors exist to avoid successive allocations and
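
For reference, the deleted code worked in two steps: the prologue computed reverse reachability from return and exit blocks to find blocks trapped in infinite loops, and markDeferVarsLive then forced defer arg slots live in those blocks. A standalone sketch of that reverse BFS over a simplified CFG (Block here is a stand-in, not *ssa.Block):

package liveness

// Block is a simplified CFG node; Preds lists predecessors. In
// the removed code these were *ssa.Block values.
type Block struct {
	ID     int
	IsExit bool // return, return-jump, or exit (panic) block
	Preds  []*Block
}

// nonReturnBlocks returns the blocks that cannot reach any
// return or exit block; in a well-formed CFG these can only be
// blocks caught in an infinite loop.
func nonReturnBlocks(blocks []*Block) map[*Block]bool {
	reaches := make(map[int]bool)
	var work []*Block
	for _, b := range blocks {
		if b.IsExit {
			work = append(work, b)
		}
	}
	// Reverse BFS: mark everything that can reach an exit.
	for len(work) > 0 {
		b := work[0]
		work = work[1:]
		if reaches[b.ID] {
			continue
		}
		reaches[b.ID] = true
		work = append(work, b.Preds...)
	}
	out := make(map[*Block]bool)
	for _, b := range blocks {
		if !reaches[b.ID] {
			out[b] = true
		}
	}
	return out
}
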
@@ -1018,23 +949,6 @@ func (lv *Liveness) solve() {
}
}
- if lv.fn.Func.HasDefer() && !lv.fn.Func.OpenCodedDeferDisallowed() &&
- (b.Kind == ssa.BlockExit || lv.nonReturnBlocks[b]) {
- // Open-coded defer args slots must be live
- // everywhere in a function, since a panic can
- // occur (almost) anywhere. Force all appropriate
- // defer arg slots to be live in BlockExit (panic)
- // blocks and in blocks that do not reach a return
- // (because of infinite loop).
- //
- // We are assuming that the defer exit code at
- // BlockReturn/BlockReturnJmp accesses all of the
- // defer args (with pointers), and so keeps them
- // live. This analysis may have to be adjusted if
- // that changes (because of optimizations).
- lv.markDeferVarsLive(b, &newliveout)
- }
-
if !be.liveout.Eq(newliveout) {
change = true
be.liveout.Copy(newliveout)
@@ -1087,6 +1001,17 @@ func (lv *Liveness) epilogue() {
n.Name.SetNeedzero(true)
livedefer.Set(int32(i))
}
+ if n.Name.OpenDeferSlot() {
+ // Open-coded defer arg slots must be live
+ // everywhere in a function, since a panic can
+ // occur (almost) anywhere. Because such a slot
+ // is live everywhere, it must be zeroed on entry.
+ livedefer.Set(int32(i))
+ // It was already marked as Needzero when created.
+ if !n.Name.Needzero() {
+ Fatalf("all pointer-containing defer arg slots should have Needzero set")
+ }
+ }
}
}
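
With the dataflow forcing gone, the epilogue now marks open-coded defer slots directly in the livedefer bitmap, alongside slots kept live by regular defers. A minimal sketch of that marking step, assuming simplified Var flags in place of the compiler's Node/Name bits:

package liveness

// Var is a simplified stand-in for the compiler's per-variable
// flags (n.Name.OpenDeferSlot, n.Name.Needzero).
type Var struct {
	OpenDeferSlot bool
	Needzero      bool
}

// markLiveDefers builds a one-bit-per-variable vector, set for
// every slot that must stay live across the whole function.
func markLiveDefers(vars []*Var) []bool {
	livedefer := make([]bool, len(vars))
	for i, v := range vars {
		if v.OpenDeferSlot {
			// Live (almost) everywhere because a panic may occur
			// anywhere, so the slot must be zeroed on entry; the
			// real code reports a Fatalf if Needzero is unset.
			if !v.Needzero {
				panic("open-coded defer slot without Needzero")
			}
			livedefer[i] = true
		}
	}
	return livedefer
}
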
@@ -1188,6 +1113,17 @@ func (lv *Liveness) epilogue() {
lv.compact(b)
}
+ // If we have an open-coded deferreturn call, make a liveness map for it.
+ if lv.fn.Func.OpenCodedDeferDisallowed() {
+ lv.livenessMap.deferreturn = LivenessInvalid
+ } else {
+ lv.livenessMap.deferreturn = LivenessIndex{
+ stackMapIndex: lv.stackMapSet.add(livedefer),
+ regMapIndex: 0, // entry regMap, containing no live registers
+ isUnsafePoint: false,
+ }
+ }
+
// Done compacting. Throw out the stack map set.
lv.stackMaps = lv.stackMapSet.extractUniqe()
lv.stackMapSet = bvecSet{}
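
The deferreturn index in this last hunk is produced by adding the livedefer bitmap to the function's deduplicating stack-map set. A toy version of that add-and-index pattern (string keys are an assumption; the real bvecSet hashes bit-vector words):

package liveness

import "fmt"

// stackMapSet deduplicates bitmaps and hands out stable indexes,
// loosely modeled on the compiler's bvecSet.
type stackMapSet struct {
	index map[string]int
	maps  [][]bool
}

// add returns the index of bv in the set, inserting it if it has
// not been seen before.
func (s *stackMapSet) add(bv []bool) int {
	key := fmt.Sprint(bv) // toy keying; the real set hashes bvec words
	if i, ok := s.index[key]; ok {
		return i
	}
	if s.index == nil {
		s.index = make(map[string]int)
	}
	i := len(s.maps)
	s.index[key] = i
	s.maps = append(s.maps, bv)
	return i
}
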