path: root/src/cmd/compile/internal/ssa/flagalloc.go
blob: 1aa82a39471c8f6a2672605496fc8c5032e40f62
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

// flagalloc allocates the flag register among all the flag-generating
// instructions. Flag values are recomputed if they need to be
// spilled/restored.
func flagalloc(f *Func) {
	// Compute the in-register flag value we want at the end of
	// each block. This is basically a best-effort live variable
	// analysis, so it can be much simpler than a full analysis.
	// TODO: do we really need to keep flag values live across blocks?
	// Could we force the flags register to be unused at basic block
	// boundaries?  Then we wouldn't need this computation.
	end := make([]*Value, f.NumBlocks())
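	// Two reverse sweeps: a single sweep misses demands that reach a
	// predecessor from a successor appearing earlier in the block
	// ordering (e.g. a loop header reached by a back edge), so sweep
	// twice. Since this is best-effort, we don't iterate to a fixed point.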
	for n := 0; n < 2; n++ {
		// Walk blocks backwards. Poor-man's postorder traversal.
		for i := len(f.Blocks) - 1; i >= 0; i-- {
			b := f.Blocks[i]
			// Walk values backwards to figure out what flag
			// value we want in the flag register at the start
			// of the block.
			flag := end[b.ID]
			if b.Control != nil && b.Control.Type.IsFlags() {
				flag = b.Control
			}
			for j := len(b.Values) - 1; j >= 0; j-- {
				v := b.Values[j]
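				// If v is the flag value we want, it is defined here,
				// so it is not live above this point.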
				if v == flag {
					flag = nil
				}
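				// If v clobbers the flags register, whatever flag value
				// is live after v cannot survive across it; drop the demand.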
				if opcodeTable[v.Op].reg.clobbers&f.Config.flagRegMask != 0 {
					flag = nil
				}
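				// If v uses a flag value, that value must be in the
				// flags register immediately before v.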
				for _, a := range v.Args {
					if a.Type.IsFlags() {
						flag = a
					}
				}
			}
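			// If some flag value is still wanted at the start of b,
			// record that each predecessor should leave it live in the
			// flags register at its end.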
			if flag != nil {
				for _, e := range b.Preds {
					p := e.b
					end[p.ID] = flag
				}
			}
		}
	}

	// For blocks which have a flags control value, that's the only value
	// we can leave in the flags register at the end of the block. (There
	// is no place to put a flag regeneration instruction.)
	for _, b := range f.Blocks {
		v := b.Control
		if v != nil && v.Type.IsFlags() && end[b.ID] != v {
			end[b.ID] = nil
		}
		if b.Kind == BlockDefer {
			// Defer blocks internally use/clobber the flags value.
			end[b.ID] = nil
		}
	}

	// Add flag recomputations where they are needed.
	// TODO: Remove original instructions if they are never used.
	var oldSched []*Value
	for _, b := range f.Blocks {
		oldSched = append(oldSched[:0], b.Values...)
		b.Values = b.Values[:0]
		// The current live flag value (the pre-flagalloc copy).
		var flag *Value
		if len(b.Preds) > 0 {
			flag = end[b.Preds[0].b.ID]
			// Note: the following condition depends on the lack of critical edges.
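			// (If b has multiple predecessors, none of them may have
			// another successor, so end[p.ID] was only ever written on
			// b's behalf and all predecessors must agree.)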
			for _, e := range b.Preds[1:] {
				p := e.b
				if end[p.ID] != flag {
					f.Fatalf("live flag in %s's predecessors not consistent", b)
				}
			}
		}
		for _, v := range oldSched {
			if v.Op == OpPhi && v.Type.IsFlags() {
				f.Fatalf("phi of flags not supported: %s", v.LongString())
			}
			// Make sure any flag arg of v is in the flags register.
			// If not, recompute it.
			for i, a := range v.Args {
				if !a.Type.IsFlags() {
					continue
				}
				if a == flag {
					continue
				}
				// Recalculate a
				c := copyFlags(a, b)
				// Update v.
				v.SetArg(i, c)
				// Remember the most-recently computed flag value.
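				// Note: we record the original value a rather than the
				// copy c, so that later uses of a see it as already
				// present in the flags register.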
				flag = a
			}
			// Issue v.
			b.Values = append(b.Values, v)
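			// Track what v leaves in the flags register: a clobber
			// invalidates the current value, and a flag-producing v
			// becomes the new live flag value.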
			if opcodeTable[v.Op].reg.clobbers&f.Config.flagRegMask != 0 {
				flag = nil
			}
			if v.Type.IsFlags() {
				flag = v
			}
		}
		if v := b.Control; v != nil && v != flag && v.Type.IsFlags() {
			// Recalculate control value.
			c := v.copyInto(b)
			b.SetControl(c)
			flag = v
		}
		if v := end[b.ID]; v != nil && v != flag {
			// Need to reissue flag generator for use by
			// subsequent blocks.
			copyFlags(v, b)
			// Note: this flag generator is not properly linked up
			// with the flag users. This breaks the SSA representation.
			// We could fix up the users with another pass, but for now
			// we'll just leave it.  (Regalloc has the same issue for
			// standard regs, and it runs next.)
		}
	}

	// Save live flag state for later.
	for _, b := range f.Blocks {
		b.FlagsLiveAtEnd = end[b.ID] != nil
	}
}

// copyFlags copies v (flag generator) into b, returns the copy.
// If v's arg is also flags, copy recursively.
func copyFlags(v *Value, b *Block) *Value {
	flagsArgs := make(map[int]*Value)
	for i, a := range v.Args {
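		// Tuple-typed args are copied as well, since the flags being
		// regenerated may be one component of a tuple (e.g. a
		// value+flags pair), and the tuple's producer must be
		// regenerated next to its use too.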
		if a.Type.IsFlags() || a.Type.IsTuple() {
			flagsArgs[i] = copyFlags(a, b)
		}
	}
	c := v.copyInto(b)
	for i, a := range flagsArgs {
		c.SetArg(i, a)
	}
	return c
}