author    Keith Randall <khr@golang.org>  2015-08-24 02:16:19 -0700
committer Keith Randall <khr@golang.org>  2015-09-05 06:41:05 +0000
commit    d2107fc98724662c7aa343d8004ed9d391fdeb59 (patch)
tree      c5791bf69022fb5ca145ca63d23c17b8e49ab38c /src/cmd/compile/internal/ssa/stackalloc.go
parent    d052bbd051a76dcfcbc0a0f471072166a9d07d20 (diff)
[dev.ssa] cmd/runtime: generate gc bitmaps for SSA-compiled code
This change is all about leveraging the gc bitmap generation that is
already done by the current compiler. We rearrange how stack allocation
is done so that we generate a variable declaration for each spill. We
also reorganize how args/locals are recorded during SSA. Then we can use
the existing allocauto/defframe to allocate the stack frame and liveness
to make the gc bitmaps.

With this change, stack copying works correctly and we no longer need
hacks in runtime/stack*.go to make tests work. GC is close to working;
it just needs write barriers.

Change-Id: I990fb4e3fbe98850c6be35c3185a1c85d9e1a6ba
Reviewed-on: https://go-review.googlesource.com/13894
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
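A condensed sketch of the new per-value pattern (assembled from the added
lines in the hunks below; the Frontend interface and field names are as
they appear in this diff, not a guaranteed stable API): instead of
tracking a running byte offset n, aligning it, and advancing it past each
value, stackalloc now asks the frontend to declare a fresh auto variable
and records that as the value's stack home. Frame offsets and gc bitmaps
are then produced by the existing allocauto/defframe and liveness passes.

	// Sketch condensed from this patch's additions. fe.Auto declares a
	// frontend-managed auto variable for v's type; later passes assign
	// its frame offset and emit the gc bitmap, so stackalloc no longer
	// computes offsets or f.FrameSize itself.
	n := f.Config.fe.Auto(v.Type)         // new auto variable for this value
	home = setloc(home, v, &LocalSlot{n}) // record it as v's stack home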
Diffstat (limited to 'src/cmd/compile/internal/ssa/stackalloc.go')
-rw-r--r--  src/cmd/compile/internal/ssa/stackalloc.go | 46
1 file changed, 5 insertions(+), 41 deletions(-)
diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go
index 626fb8f369..d60f8d1df2 100644
--- a/src/cmd/compile/internal/ssa/stackalloc.go
+++ b/src/cmd/compile/internal/ssa/stackalloc.go
@@ -18,22 +18,6 @@ func setloc(home []Location, v *Value, loc Location) []Location {
 func stackalloc(f *Func) {
 	home := f.RegAlloc
 
-	// Start with space for callee arguments/returns.
-	var n int64
-	for _, b := range f.Blocks {
-		if b.Kind != BlockCall {
-			continue
-		}
-		v := b.Control
-		if n < v.AuxInt {
-			n = v.AuxInt
-		}
-	}
-	f.Logf("stackalloc: 0-%d for callee arguments/returns\n", n)
-
-	// TODO: group variables by ptr/nonptr, size, etc. Emit ptr vars last
-	// so stackmap is smaller.
-
 	// Assign stack locations to phis first, because we
 	// must also assign the same locations to the phi stores
 	// introduced during regalloc.
@@ -49,10 +33,9 @@ func stackalloc(f *Func) {
 				continue // register-based phi
 			}
 			// stack-based phi
-			n = align(n, v.Type.Alignment())
-			f.Logf("stackalloc: %d-%d for %v\n", n, n+v.Type.Size(), v)
+			n := f.Config.fe.Auto(v.Type)
+			f.Logf("stackalloc: %s: for %v <%v>\n", n, v, v.Type)
 			loc := &LocalSlot{n}
-			n += v.Type.Size()
 			home = setloc(home, v, loc)
 			for _, w := range v.Args {
 				if w.Op != OpStoreReg {
@@ -79,34 +62,15 @@ func stackalloc(f *Func) {
 			if len(v.Args) == 1 && (v.Args[0].Op == OpSP || v.Args[0].Op == OpSB) {
 				continue
 			}
-			n = align(n, v.Type.Alignment())
-			f.Logf("stackalloc: %d-%d for %v\n", n, n+v.Type.Size(), v)
+
+			n := f.Config.fe.Auto(v.Type)
+			f.Logf("stackalloc: %s for %v\n", n, v)
 			loc := &LocalSlot{n}
-			n += v.Type.Size()
 			home = setloc(home, v, loc)
 		}
 	}
 
-	// Finally, allocate space for all autos that we used
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			s, ok := v.Aux.(*AutoSymbol)
-			if !ok || s.Offset >= 0 {
-				continue
-			}
-			t := s.Typ
-			n = align(n, t.Alignment())
-			f.Logf("stackalloc: %d-%d for auto %v\n", n, n+t.Size(), v)
-			s.Offset = n
-			n += t.Size()
-		}
-	}
-
-	n = align(n, f.Config.PtrSize)
-	f.Logf("stackalloc: %d-%d for return address\n", n, n+f.Config.PtrSize)
-	n += f.Config.PtrSize // space for return address. TODO: arch-dependent
 	f.RegAlloc = home
-	f.FrameSize = n
 	// TODO: share stack slots among noninterfering (& gc type compatible) values
 }
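For reference, here is how the stack-based phi case reads after this
patch, condensed into one sketch. The guard inside the inner loop is
inferred from the context lines above and from the comment at the top of
the function ("we must also assign the same locations to the phi stores
introduced during regalloc"), so treat it as illustrative rather than
verbatim source: the phi and every OpStoreReg spill that regalloc
introduced for its arguments must share a single slot, which is why phis
are handled first.

	// Sketch: a stack-based phi and its spill stores share one LocalSlot.
	n := f.Config.fe.Auto(v.Type) // auto variable for the phi's type
	loc := &LocalSlot{n}
	home = setloc(home, v, loc) // the phi lives in the auto slot
	for _, w := range v.Args {
		if w.Op != OpStoreReg {
			continue // inferred guard; see the hunk above
		}
		home = setloc(home, w, loc) // each phi store reuses the same slot
	}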