author     Josh Bleecher Snyder <josharian@gmail.com>  2017-03-18 10:16:03 -0700
committer  Josh Bleecher Snyder <josharian@gmail.com>  2017-03-19 02:03:44 +0000
commit     872db7998937b310635a99055e066904425559bb (patch)
tree       b560c8840f8b3294956eb72b5f805a25ea874dfc /src
parent     aea3aff66911e31cba9eddd93c02eb591ae483bf (diff)
cmd/compile: add more types to ssa.Types
This reduces the number of calls back into the gc Type routines,
which will help performance in a concurrent backend.
It also reduces the number of callsites
that must be considered in making the transition.

Passes toolstash-check -all. No compiler performance changes.

Updates #15756

Change-Id: Ic7a8f1daac7e01a21658ae61ac118b2a70804117
Reviewed-on: https://go-review.googlesource.com/38340
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
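A condensed sketch of the change, excerpted and simplified from the diff below: ssa.Types gains
cached pointer types (Int32Ptr, UInt32Ptr, IntPtr, UintptrPtr, Float32Ptr, Float64Ptr, BytePtrPtr),
built once in initssaconfig, so SSA construction reads a field on the config instead of calling
back into gc's ptrto at each site:

    // Before: each callsite re-derives *uint8 through the gc type routines.
    ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), slice)

    // After: the pointer type is built once and cached in ssa.Types,
    // so this callsite no longer calls back into the gc package.
    ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)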
Diffstat (limited to 'src')
-rw-r--r--  src/cmd/compile/internal/gc/ssa.go            | 89
-rw-r--r--  src/cmd/compile/internal/ssa/config.go        | 37
-rw-r--r--  src/cmd/compile/internal/ssa/export_test.go   | 37
-rw-r--r--  src/cmd/compile/internal/ssa/gen/MIPS.rules   |  8
-rw-r--r--  src/cmd/compile/internal/ssa/gen/dec.rules    | 24
-rw-r--r--  src/cmd/compile/internal/ssa/gen/dec64.rules  |  8
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteMIPS.go   | 16
-rw-r--r--  src/cmd/compile/internal/ssa/rewritedec.go    | 44
-rw-r--r--  src/cmd/compile/internal/ssa/rewritedec64.go  | 16
-rw-r--r--  src/cmd/compile/internal/ssa/writebarrier.go  |  2
10 files changed, 151 insertions, 130 deletions
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 5946da9f8c..3275dd852f 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -23,21 +23,28 @@ var ssaCache *ssa.Cache
func initssaconfig() {
types := ssa.Types{
- Bool: Types[TBOOL],
- Int8: Types[TINT8],
- Int16: Types[TINT16],
- Int32: Types[TINT32],
- Int64: Types[TINT64],
- UInt8: Types[TUINT8],
- UInt16: Types[TUINT16],
- UInt32: Types[TUINT32],
- UInt64: Types[TUINT64],
- Float32: Types[TFLOAT32],
- Float64: Types[TFLOAT64],
- Int: Types[TINT],
- Uintptr: Types[TUINTPTR],
- String: Types[TSTRING],
- BytePtr: ptrto(Types[TUINT8]),
+ Bool: Types[TBOOL],
+ Int8: Types[TINT8],
+ Int16: Types[TINT16],
+ Int32: Types[TINT32],
+ Int64: Types[TINT64],
+ UInt8: Types[TUINT8],
+ UInt16: Types[TUINT16],
+ UInt32: Types[TUINT32],
+ UInt64: Types[TUINT64],
+ Float32: Types[TFLOAT32],
+ Float64: Types[TFLOAT64],
+ Int: Types[TINT],
+ Uintptr: Types[TUINTPTR],
+ String: Types[TSTRING],
+ BytePtr: ptrto(Types[TUINT8]),
+ Int32Ptr: ptrto(Types[TINT32]),
+ UInt32Ptr: ptrto(Types[TUINT32]),
+ IntPtr: ptrto(Types[TINT]),
+ UintptrPtr: ptrto(Types[TUINTPTR]),
+ Float32Ptr: ptrto(Types[TFLOAT32]),
+ Float64Ptr: ptrto(Types[TFLOAT64]),
+ BytePtrPtr: ptrto(ptrto(Types[TUINT8])),
}
ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types, Ctxt, Debug['N'] == 0)
if thearch.LinkArch.Name == "386" {
@@ -1344,12 +1351,12 @@ func (s *state) expr(n *Node) *ssa.Value {
switch n.Op {
case OARRAYBYTESTRTMP:
slice := s.expr(n.Left)
- ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), slice)
+ ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
case OSTRARRAYBYTETMP:
str := s.expr(n.Left)
- ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), str)
+ ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
len := s.newValue1(ssa.OpStringLen, Types[TINT], str)
return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
case OCFUNC:
@@ -1914,7 +1921,7 @@ func (s *state) expr(n *Node) *ssa.Value {
len := s.newValue1(ssa.OpStringLen, Types[TINT], a)
s.boundsCheck(i, len)
}
- ptrtyp := ptrto(Types[TUINT8])
+ ptrtyp := s.f.Config.Types.BytePtr
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
if Isconst(n.Right, CTINT) {
ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
@@ -2139,7 +2146,7 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
// Tell liveness we're about to build a new slice
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem())
}
- capaddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), int64(array_cap), addr)
+ capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], capaddr, r[2], s.mem())
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, pt, addr, r[0], s.mem())
// load the value we just stored to avoid having to spill it
@@ -2160,7 +2167,7 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
if inplace {
l = s.variable(&lenVar, Types[TINT]) // generates phi for len
nl = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
- lenaddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), int64(array_nel), addr)
+ lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], lenaddr, nl, s.mem())
}
@@ -2515,7 +2522,7 @@ func init() {
// for the backend instead of slicebytetostringtmp calls
// when not instrumenting.
slice := args[0]
- ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), slice)
+ ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
},
@@ -2523,7 +2530,7 @@ func init() {
}
add("runtime", "KeepAlive",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- data := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), args[0])
+ data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, data, s.mem())
return nil
},
@@ -2569,9 +2576,9 @@ func init() {
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(ptrto(Types[TUINT8]), ssa.TypeMem), args[0], s.mem())
+ v := s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(s.f.Config.Types.BytePtr, ssa.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, ptrto(Types[TUINT8]), v)
+ return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
@@ -2995,7 +3002,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
s.nilCheck(itab)
}
itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab
- itab = s.newValue1I(ssa.OpOffPtr, ptrto(Types[TUINTPTR]), itabidx, itab)
+ itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
if k == callNormal {
codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem())
} else {
@@ -3018,7 +3025,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
if k != callNormal {
argStart += int64(2 * Widthptr)
}
- addr := s.constOffPtrSP(ptrto(Types[TUINTPTR]), argStart)
+ addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TUINTPTR], addr, rcvr, s.mem())
}
@@ -3027,9 +3034,9 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
// Write argsize and closure (args to Newproc/Deferproc).
argStart := Ctxt.FixedFrameSize()
argsize := s.constInt32(Types[TUINT32], int32(stksize))
- addr := s.constOffPtrSP(ptrto(Types[TUINT32]), argStart)
+ addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TUINT32], addr, argsize, s.mem())
- addr = s.constOffPtrSP(ptrto(Types[TUINTPTR]), argStart+int64(Widthptr))
+ addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TUINTPTR], addr, closure, s.mem())
stksize += 2 * int64(Widthptr)
}
@@ -3188,7 +3195,7 @@ func (s *state) addr(n *Node, bounded bool) *ssa.Value {
return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
case OCLOSUREVAR:
return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
- s.entryNewValue0(ssa.OpGetClosurePtr, ptrto(Types[TUINT8])))
+ s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
case OCONVNOP:
addr := s.addr(n.Left, bounded)
return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
@@ -3465,22 +3472,22 @@ func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask)
return
}
len := s.newValue1(ssa.OpStringLen, Types[TINT], right)
- lenAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), s.config.IntSize, left)
+ lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.IntSize, left)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], lenAddr, len, s.mem())
case t.IsSlice():
if skip&skipLen == 0 {
len := s.newValue1(ssa.OpSliceLen, Types[TINT], right)
- lenAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), s.config.IntSize, left)
+ lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.IntSize, left)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], lenAddr, len, s.mem())
}
if skip&skipCap == 0 {
cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right)
- capAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), 2*s.config.IntSize, left)
+ capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.IntSize, left)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], capAddr, cap, s.mem())
}
case t.IsInterface():
// itab field doesn't need a write barrier (even though it is a pointer).
- itab := s.newValue1(ssa.OpITab, ptrto(Types[TUINT8]), right)
+ itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TUINTPTR], left, itab, s.mem())
case t.IsStruct():
n := t.NumFields()
@@ -3505,16 +3512,16 @@ func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) {
case t.IsPtrShaped():
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem())
case t.IsString():
- ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), right)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, ptrto(Types[TUINT8]), left, ptr, s.mem())
+ ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
case t.IsSlice():
- ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), right)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, ptrto(Types[TUINT8]), left, ptr, s.mem())
+ ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, right)
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
case t.IsInterface():
// itab field is treated as a scalar.
- idata := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), right)
- idataAddr := s.newValue1I(ssa.OpOffPtr, ptrto(ptrto(Types[TUINT8])), s.config.PtrSize, left)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, ptrto(Types[TUINT8]), idataAddr, idata, s.mem())
+ idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
+ idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, idataAddr, idata, s.mem())
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
@@ -3948,7 +3955,7 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Ty
func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.Left) // input interface
target := s.expr(typename(n.Type)) // target type
- byteptr := ptrto(Types[TUINT8])
+ byteptr := s.f.Config.Types.BytePtr
if n.Type.IsInterface() {
if n.Type.IsEmptyInterface() {
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 7c49abba92..ab541ccafc 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -46,21 +46,28 @@ type (
)
type Types struct {
- Bool Type
- Int8 Type
- Int16 Type
- Int32 Type
- Int64 Type
- UInt8 Type
- UInt16 Type
- UInt32 Type
- UInt64 Type
- Int Type
- Float32 Type
- Float64 Type
- Uintptr Type
- String Type
- BytePtr Type // TODO: use unsafe.Pointer instead?
+ Bool Type
+ Int8 Type
+ Int16 Type
+ Int32 Type
+ Int64 Type
+ UInt8 Type
+ UInt16 Type
+ UInt32 Type
+ UInt64 Type
+ Int Type
+ Float32 Type
+ Float64 Type
+ Uintptr Type
+ String Type
+ BytePtr Type // TODO: use unsafe.Pointer instead?
+ Int32Ptr Type
+ UInt32Ptr Type
+ IntPtr Type
+ UintptrPtr Type
+ Float32Ptr Type
+ Float64Ptr Type
+ BytePtrPtr Type
}
type Logger interface {
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index b04702d279..33e0ffb416 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -102,21 +102,28 @@ func (d DummyFrontend) Debug_checknil() bool { ret
func (d DummyFrontend) Debug_wb() bool { return false }
var dummyTypes = Types{
- Bool: TypeBool,
- Int8: TypeInt8,
- Int16: TypeInt16,
- Int32: TypeInt32,
- Int64: TypeInt64,
- UInt8: TypeUInt8,
- UInt16: TypeUInt16,
- UInt32: TypeUInt32,
- UInt64: TypeUInt64,
- Float32: TypeFloat32,
- Float64: TypeFloat64,
- Int: TypeInt64,
- Uintptr: TypeUInt64,
- String: nil,
- BytePtr: TypeBytePtr,
+ Bool: TypeBool,
+ Int8: TypeInt8,
+ Int16: TypeInt16,
+ Int32: TypeInt32,
+ Int64: TypeInt64,
+ UInt8: TypeUInt8,
+ UInt16: TypeUInt16,
+ UInt32: TypeUInt32,
+ UInt64: TypeUInt64,
+ Float32: TypeFloat32,
+ Float64: TypeFloat64,
+ Int: TypeInt64,
+ Uintptr: TypeUInt64,
+ String: nil,
+ BytePtr: TypeBytePtr,
+ Int32Ptr: TypeInt32.PtrTo(),
+ UInt32Ptr: TypeUInt32.PtrTo(),
+ IntPtr: TypeInt64.PtrTo(),
+ UintptrPtr: TypeUInt64.PtrTo(),
+ Float32Ptr: TypeFloat32.PtrTo(),
+ Float64Ptr: TypeFloat64.PtrTo(),
+ BytePtrPtr: TypeBytePtr.PtrTo(),
}
func (d DummyFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules
index f1ece56474..fe6f182ca4 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules
@@ -393,14 +393,14 @@
// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
(AtomicOr8 ptr val mem) && !config.BigEndian ->
- (LoweredAtomicOr (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr)
+ (LoweredAtomicOr (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr)
(SLL <types.UInt32> (ZeroExt8to32 val)
(SLLconst <types.UInt32> [3]
(ANDconst <types.UInt32> [3] ptr))) mem)
// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8))))
(AtomicAnd8 ptr val mem) && !config.BigEndian ->
- (LoweredAtomicAnd (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr)
+ (LoweredAtomicAnd (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr)
(OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val)
(SLLconst <types.UInt32> [3]
(ANDconst <types.UInt32> [3] ptr)))
@@ -411,7 +411,7 @@
// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
(AtomicOr8 ptr val mem) && config.BigEndian ->
- (LoweredAtomicOr (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr)
+ (LoweredAtomicOr (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr)
(SLL <types.UInt32> (ZeroExt8to32 val)
(SLLconst <types.UInt32> [3]
(ANDconst <types.UInt32> [3]
@@ -419,7 +419,7 @@
// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8))))
(AtomicAnd8 ptr val mem) && config.BigEndian ->
- (LoweredAtomicAnd (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr)
+ (LoweredAtomicAnd (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr)
(OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val)
(SLLconst <types.UInt32> [3]
(ANDconst <types.UInt32> [3]
diff --git a/src/cmd/compile/internal/ssa/gen/dec.rules b/src/cmd/compile/internal/ssa/gen/dec.rules
index 08935d92b4..377edba724 100644
--- a/src/cmd/compile/internal/ssa/gen/dec.rules
+++ b/src/cmd/compile/internal/ssa/gen/dec.rules
@@ -15,24 +15,24 @@
(ComplexMake
(Load <types.Float32> ptr mem)
(Load <types.Float32>
- (OffPtr <types.Float32.PtrTo()> [4] ptr)
+ (OffPtr <types.Float32Ptr> [4] ptr)
mem)
)
(Store {t} dst (ComplexMake real imag) mem) && t.(Type).Size() == 8 ->
(Store {types.Float32}
- (OffPtr <types.Float32.PtrTo()> [4] dst)
+ (OffPtr <types.Float32Ptr> [4] dst)
imag
(Store {types.Float32} dst real mem))
(Load <t> ptr mem) && t.IsComplex() && t.Size() == 16 ->
(ComplexMake
(Load <types.Float64> ptr mem)
(Load <types.Float64>
- (OffPtr <types.Float64.PtrTo()> [8] ptr)
+ (OffPtr <types.Float64Ptr> [8] ptr)
mem)
)
(Store {t} dst (ComplexMake real imag) mem) && t.(Type).Size() == 16 ->
(Store {types.Float64}
- (OffPtr <types.Float64.PtrTo()> [8] dst)
+ (OffPtr <types.Float64Ptr> [8] dst)
imag
(Store {types.Float64} dst real mem))
@@ -44,11 +44,11 @@
(StringMake
(Load <types.BytePtr> ptr mem)
(Load <types.Int>
- (OffPtr <types.Int.PtrTo()> [config.PtrSize] ptr)
+ (OffPtr <types.IntPtr> [config.PtrSize] ptr)
mem))
(Store dst (StringMake ptr len) mem) ->
(Store {types.Int}
- (OffPtr <types.Int.PtrTo()> [config.PtrSize] dst)
+ (OffPtr <types.IntPtr> [config.PtrSize] dst)
len
(Store {types.BytePtr} dst ptr mem))
@@ -61,17 +61,17 @@
(SliceMake
(Load <t.ElemType().PtrTo()> ptr mem)
(Load <types.Int>
- (OffPtr <types.Int.PtrTo()> [config.PtrSize] ptr)
+ (OffPtr <types.IntPtr> [config.PtrSize] ptr)
mem)
(Load <types.Int>
- (OffPtr <types.Int.PtrTo()> [2*config.PtrSize] ptr)
+ (OffPtr <types.IntPtr> [2*config.PtrSize] ptr)
mem))
(Store dst (SliceMake ptr len cap) mem) ->
(Store {types.Int}
- (OffPtr <types.Int.PtrTo()> [2*config.PtrSize] dst)
+ (OffPtr <types.IntPtr> [2*config.PtrSize] dst)
cap
(Store {types.Int}
- (OffPtr <types.Int.PtrTo()> [config.PtrSize] dst)
+ (OffPtr <types.IntPtr> [config.PtrSize] dst)
len
(Store {types.BytePtr} dst ptr mem)))
@@ -83,10 +83,10 @@
(IMake
(Load <types.BytePtr> ptr mem)
(Load <types.BytePtr>
- (OffPtr <types.BytePtr.PtrTo()> [config.PtrSize] ptr)
+ (OffPtr <types.BytePtrPtr> [config.PtrSize] ptr)
mem))
(Store dst (IMake itab data) mem) ->
(Store {types.BytePtr}
- (OffPtr <types.BytePtr.PtrTo()> [config.PtrSize] dst)
+ (OffPtr <types.BytePtrPtr> [config.PtrSize] dst)
data
(Store {types.Uintptr} dst itab mem))
diff --git a/src/cmd/compile/internal/ssa/gen/dec64.rules b/src/cmd/compile/internal/ssa/gen/dec64.rules
index 19f9755b40..ea7b95165f 100644
--- a/src/cmd/compile/internal/ssa/gen/dec64.rules
+++ b/src/cmd/compile/internal/ssa/gen/dec64.rules
@@ -12,23 +12,23 @@
(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && t.IsSigned() ->
(Int64Make
- (Load <types.Int32> (OffPtr <types.Int32.PtrTo()> [4] ptr) mem)
+ (Load <types.Int32> (OffPtr <types.Int32Ptr> [4] ptr) mem)
(Load <types.UInt32> ptr mem))
(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && !t.IsSigned() ->
(Int64Make
- (Load <types.UInt32> (OffPtr <types.UInt32.PtrTo()> [4] ptr) mem)
+ (Load <types.UInt32> (OffPtr <types.UInt32Ptr> [4] ptr) mem)
(Load <types.UInt32> ptr mem))
(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && t.IsSigned() ->
(Int64Make
(Load <types.Int32> ptr mem)
- (Load <types.UInt32> (OffPtr <types.UInt32.PtrTo()> [4] ptr) mem))
+ (Load <types.UInt32> (OffPtr <types.UInt32Ptr> [4] ptr) mem))
(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && !t.IsSigned() ->
(Int64Make
(Load <types.UInt32> ptr mem)
- (Load <types.UInt32> (OffPtr <types.UInt32.PtrTo()> [4] ptr) mem))
+ (Load <types.UInt32> (OffPtr <types.UInt32Ptr> [4] ptr) mem))
(Store {t} dst (Int64Make hi lo) mem) && t.(Type).Size() == 8 && !config.BigEndian ->
(Store {hi.Type}
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go
index a555c58e58..5e70fc4f73 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go
@@ -688,7 +688,7 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
_ = types
// match: (AtomicAnd8 ptr val mem)
// cond: !config.BigEndian
- // result: (LoweredAtomicAnd (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr) (OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] ptr))) (NORconst [0] <types.UInt32> (SLL <types.UInt32> (MOVWconst [0xff]) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))))) mem)
+ // result: (LoweredAtomicAnd (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] ptr))) (NORconst [0] <types.UInt32> (SLL <types.UInt32> (MOVWconst [0xff]) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
@@ -697,7 +697,7 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
break
}
v.reset(OpMIPSLoweredAtomicAnd)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32.PtrTo())
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32Ptr)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
v1.AuxInt = ^3
v0.AddArg(v1)
@@ -740,7 +740,7 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
}
// match: (AtomicAnd8 ptr val mem)
// cond: config.BigEndian
- // result: (LoweredAtomicAnd (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr) (OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))) (NORconst [0] <types.UInt32> (SLL <types.UInt32> (MOVWconst [0xff]) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))))) mem)
+ // result: (LoweredAtomicAnd (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))) (NORconst [0] <types.UInt32> (SLL <types.UInt32> (MOVWconst [0xff]) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
@@ -749,7 +749,7 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
break
}
v.reset(OpMIPSLoweredAtomicAnd)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32.PtrTo())
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32Ptr)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
v1.AuxInt = ^3
v0.AddArg(v1)
@@ -862,7 +862,7 @@ func rewriteValueMIPS_OpAtomicOr8(v *Value) bool {
_ = types
// match: (AtomicOr8 ptr val mem)
// cond: !config.BigEndian
- // result: (LoweredAtomicOr (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr) (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] ptr))) mem)
+ // result: (LoweredAtomicOr (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] ptr))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
@@ -871,7 +871,7 @@ func rewriteValueMIPS_OpAtomicOr8(v *Value) bool {
break
}
v.reset(OpMIPSLoweredAtomicOr)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32.PtrTo())
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32Ptr)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
v1.AuxInt = ^3
v0.AddArg(v1)
@@ -894,7 +894,7 @@ func rewriteValueMIPS_OpAtomicOr8(v *Value) bool {
}
// match: (AtomicOr8 ptr val mem)
// cond: config.BigEndian
- // result: (LoweredAtomicOr (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr) (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))) mem)
+ // result: (LoweredAtomicOr (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
@@ -903,7 +903,7 @@ func rewriteValueMIPS_OpAtomicOr8(v *Value) bool {
break
}
v.reset(OpMIPSLoweredAtomicOr)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32.PtrTo())
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32Ptr)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
v1.AuxInt = ^3
v0.AddArg(v1)
diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go
index 3946dca922..2782316c7e 100644
--- a/src/cmd/compile/internal/ssa/rewritedec.go
+++ b/src/cmd/compile/internal/ssa/rewritedec.go
@@ -112,7 +112,7 @@ func rewriteValuedec_OpLoad(v *Value) bool {
_ = types
// match: (Load <t> ptr mem)
// cond: t.IsComplex() && t.Size() == 8
- // result: (ComplexMake (Load <types.Float32> ptr mem) (Load <types.Float32> (OffPtr <types.Float32.PtrTo()> [4] ptr) mem) )
+ // result: (ComplexMake (Load <types.Float32> ptr mem) (Load <types.Float32> (OffPtr <types.Float32Ptr> [4] ptr) mem) )
for {
t := v.Type
ptr := v.Args[0]
@@ -126,7 +126,7 @@ func rewriteValuedec_OpLoad(v *Value) bool {
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpLoad, types.Float32)
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.Float32.PtrTo())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, types.Float32Ptr)
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
@@ -136,7 +136,7 @@ func rewriteValuedec_OpLoad(v *Value) bool {
}
// match: (Load <t> ptr mem)
// cond: t.IsComplex() && t.Size() == 16
- // result: (ComplexMake (Load <types.Float64> ptr mem) (Load <types.Float64> (OffPtr <types.Float64.PtrTo()> [8] ptr) mem) )
+ // result: (ComplexMake (Load <types.Float64> ptr mem) (Load <types.Float64> (OffPtr <types.Float64Ptr> [8] ptr) mem) )
for {
t := v.Type
ptr := v.Args[0]
@@ -150,7 +150,7 @@ func rewriteValuedec_OpLoad(v *Value) bool {
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpLoad, types.Float64)
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.Float64.PtrTo())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, types.Float64Ptr)
v2.AuxInt = 8
v2.AddArg(ptr)
v1.AddArg(v2)
@@ -160,7 +160,7 @@ func rewriteValuedec_OpLoad(v *Value) bool {
}
// match: (Load <t> ptr mem)
// cond: t.IsString()
- // result: (StringMake (Load <types.BytePtr> ptr mem) (Load <types.Int> (OffPtr <types.Int.PtrTo()> [config.PtrSize] ptr) mem))
+ // result: (StringMake (Load <types.BytePtr> ptr mem) (Load <types.Int> (OffPtr <types.IntPtr> [config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
@@ -174,7 +174,7 @@ func rewriteValuedec_OpLoad(v *Value) bool {
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpLoad, types.Int)
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.Int.PtrTo())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, types.IntPtr)
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
@@ -184,7 +184,7 @@ func rewriteValuedec_OpLoad(v *Value) bool {
}
// match: (Load <t> ptr mem)
// cond: t.IsSlice()
- // result: (SliceMake (Load <t.ElemType().PtrTo()> ptr mem) (Load <types.Int> (OffPtr <types.Int.PtrTo()> [config.PtrSize] ptr) mem) (Load <types.Int> (OffPtr <types.Int.PtrTo()> [2*config.PtrSize] ptr) mem))
+ // result: (SliceMake (Load <t.ElemType().PtrTo()> ptr mem) (Load <types.Int> (OffPtr <types.IntPtr> [config.PtrSize] ptr) mem) (Load <types.Int> (OffPtr <types.IntPtr> [2*config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
@@ -198,14 +198,14 @@ func rewriteValuedec_OpLoad(v *Value) bool {
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpLoad, types.Int)
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.Int.PtrTo())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, types.IntPtr)
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
v3 := b.NewValue0(v.Pos, OpLoad, types.Int)
- v4 := b.NewValue0(v.Pos, OpOffPtr, types.Int.PtrTo())
+ v4 := b.NewValue0(v.Pos, OpOffPtr, types.IntPtr)
v4.AuxInt = 2 * config.PtrSize
v4.AddArg(ptr)
v3.AddArg(v4)
@@ -215,7 +215,7 @@ func rewriteValuedec_OpLoad(v *Value) bool {
}
// match: (Load <t> ptr mem)
// cond: t.IsInterface()
- // result: (IMake (Load <types.BytePtr> ptr mem) (Load <types.BytePtr> (OffPtr <types.BytePtr.PtrTo()> [config.PtrSize] ptr) mem))
+ // result: (IMake (Load <types.BytePtr> ptr mem) (Load <types.BytePtr> (OffPtr <types.BytePtrPtr> [config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
@@ -229,7 +229,7 @@ func rewriteValuedec_OpLoad(v *Value) bool {
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpLoad, types.BytePtr)
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.BytePtr.PtrTo())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, types.BytePtrPtr)
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
@@ -299,7 +299,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
_ = types
// match: (Store {t} dst (ComplexMake real imag) mem)
// cond: t.(Type).Size() == 8
- // result: (Store {types.Float32} (OffPtr <types.Float32.PtrTo()> [4] dst) imag (Store {types.Float32} dst real mem))
+ // result: (Store {types.Float32} (OffPtr <types.Float32Ptr> [4] dst) imag (Store {types.Float32} dst real mem))
for {
t := v.Aux
dst := v.Args[0]
@@ -315,7 +315,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
}
v.reset(OpStore)
v.Aux = types.Float32
- v0 := b.NewValue0(v.Pos, OpOffPtr, types.Float32.PtrTo())
+ v0 := b.NewValue0(v.Pos, OpOffPtr, types.Float32Ptr)
v0.AuxInt = 4
v0.AddArg(dst)
v.AddArg(v0)
@@ -330,7 +330,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
}
// match: (Store {t} dst (ComplexMake real imag) mem)
// cond: t.(Type).Size() == 16
- // result: (Store {types.Float64} (OffPtr <types.Float64.PtrTo()> [8] dst) imag (Store {types.Float64} dst real mem))
+ // result: (Store {types.Float64} (OffPtr <types.Float64Ptr> [8] dst) imag (Store {types.Float64} dst real mem))
for {
t := v.Aux
dst := v.Args[0]
@@ -346,7 +346,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
}
v.reset(OpStore)
v.Aux = types.Float64
- v0 := b.NewValue0(v.Pos, OpOffPtr, types.Float64.PtrTo())
+ v0 := b.NewValue0(v.Pos, OpOffPtr, types.Float64Ptr)
v0.AuxInt = 8
v0.AddArg(dst)
v.AddArg(v0)
@@ -361,7 +361,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
}
// match: (Store dst (StringMake ptr len) mem)
// cond:
- // result: (Store {types.Int} (OffPtr <types.Int.PtrTo()> [config.PtrSize] dst) len (Store {types.BytePtr} dst ptr mem))
+ // result: (Store {types.Int} (OffPtr <types.IntPtr> [config.PtrSize] dst) len (Store {types.BytePtr} dst ptr mem))
for {
dst := v.Args[0]
v_1 := v.Args[1]
@@ -373,7 +373,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
mem := v.Args[2]
v.reset(OpStore)
v.Aux = types.Int
- v0 := b.NewValue0(v.Pos, OpOffPtr, types.Int.PtrTo())
+ v0 := b.NewValue0(v.Pos, OpOffPtr, types.IntPtr)
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
@@ -388,7 +388,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
}
// match: (Store dst (SliceMake ptr len cap) mem)
// cond:
- // result: (Store {types.Int} (OffPtr <types.Int.PtrTo()> [2*config.PtrSize] dst) cap (Store {types.Int} (OffPtr <types.Int.PtrTo()> [config.PtrSize] dst) len (Store {types.BytePtr} dst ptr mem)))
+ // result: (Store {types.Int} (OffPtr <types.IntPtr> [2*config.PtrSize] dst) cap (Store {types.Int} (OffPtr <types.IntPtr> [config.PtrSize] dst) len (Store {types.BytePtr} dst ptr mem)))
for {
dst := v.Args[0]
v_1 := v.Args[1]
@@ -401,14 +401,14 @@ func rewriteValuedec_OpStore(v *Value) bool {
mem := v.Args[2]
v.reset(OpStore)
v.Aux = types.Int
- v0 := b.NewValue0(v.Pos, OpOffPtr, types.Int.PtrTo())
+ v0 := b.NewValue0(v.Pos, OpOffPtr, types.IntPtr)
v0.AuxInt = 2 * config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(cap)
v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
v1.Aux = types.Int
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.Int.PtrTo())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, types.IntPtr)
v2.AuxInt = config.PtrSize
v2.AddArg(dst)
v1.AddArg(v2)
@@ -424,7 +424,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
}
// match: (Store dst (IMake itab data) mem)
// cond:
- // result: (Store {types.BytePtr} (OffPtr <types.BytePtr.PtrTo()> [config.PtrSize] dst) data (Store {types.Uintptr} dst itab mem))
+ // result: (Store {types.BytePtr} (OffPtr <types.BytePtrPtr> [config.PtrSize] dst) data (Store {types.Uintptr} dst itab mem))
for {
dst := v.Args[0]
v_1 := v.Args[1]
@@ -436,7 +436,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
mem := v.Args[2]
v.reset(OpStore)
v.Aux = types.BytePtr
- v0 := b.NewValue0(v.Pos, OpOffPtr, types.BytePtr.PtrTo())
+ v0 := b.NewValue0(v.Pos, OpOffPtr, types.BytePtrPtr)
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go
index 610018c9b6..9e7802d431 100644
--- a/src/cmd/compile/internal/ssa/rewritedec64.go
+++ b/src/cmd/compile/internal/ssa/rewritedec64.go
@@ -847,7 +847,7 @@ func rewriteValuedec64_OpLoad(v *Value) bool {
_ = types
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && !config.BigEndian && t.IsSigned()
- // result: (Int64Make (Load <types.Int32> (OffPtr <types.Int32.PtrTo()> [4] ptr) mem) (Load <types.UInt32> ptr mem))
+ // result: (Int64Make (Load <types.Int32> (OffPtr <types.Int32Ptr> [4] ptr) mem) (Load <types.UInt32> ptr mem))
for {
t := v.Type
ptr := v.Args[0]
@@ -857,7 +857,7 @@ func rewriteValuedec64_OpLoad(v *Value) bool {
}
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Pos, OpLoad, types.Int32)
- v1 := b.NewValue0(v.Pos, OpOffPtr, types.Int32.PtrTo())
+ v1 := b.NewValue0(v.Pos, OpOffPtr, types.Int32Ptr)
v1.AuxInt = 4
v1.AddArg(ptr)
v0.AddArg(v1)
@@ -871,7 +871,7 @@ func rewriteValuedec64_OpLoad(v *Value) bool {
}
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && !config.BigEndian && !t.IsSigned()
- // result: (Int64Make (Load <types.UInt32> (OffPtr <types.UInt32.PtrTo()> [4] ptr) mem) (Load <types.UInt32> ptr mem))
+ // result: (Int64Make (Load <types.UInt32> (OffPtr <types.UInt32Ptr> [4] ptr) mem) (Load <types.UInt32> ptr mem))
for {
t := v.Type
ptr := v.Args[0]
@@ -881,7 +881,7 @@ func rewriteValuedec64_OpLoad(v *Value) bool {
}
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Pos, OpLoad, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpOffPtr, types.UInt32.PtrTo())
+ v1 := b.NewValue0(v.Pos, OpOffPtr, types.UInt32Ptr)
v1.AuxInt = 4
v1.AddArg(ptr)
v0.AddArg(v1)
@@ -895,7 +895,7 @@ func rewriteValuedec64_OpLoad(v *Value) bool {
}
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && config.BigEndian && t.IsSigned()
- // result: (Int64Make (Load <types.Int32> ptr mem) (Load <types.UInt32> (OffPtr <types.UInt32.PtrTo()> [4] ptr) mem))
+ // result: (Int64Make (Load <types.Int32> ptr mem) (Load <types.UInt32> (OffPtr <types.UInt32Ptr> [4] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
@@ -909,7 +909,7 @@ func rewriteValuedec64_OpLoad(v *Value) bool {
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpLoad, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.UInt32.PtrTo())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, types.UInt32Ptr)
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
@@ -919,7 +919,7 @@ func rewriteValuedec64_OpLoad(v *Value) bool {
}
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && config.BigEndian && !t.IsSigned()
- // result: (Int64Make (Load <types.UInt32> ptr mem) (Load <types.UInt32> (OffPtr <types.UInt32.PtrTo()> [4] ptr) mem))
+ // result: (Int64Make (Load <types.UInt32> ptr mem) (Load <types.UInt32> (OffPtr <types.UInt32Ptr> [4] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
@@ -933,7 +933,7 @@ func rewriteValuedec64_OpLoad(v *Value) bool {
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpLoad, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.UInt32.PtrTo())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, types.UInt32Ptr)
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go
index 850a4b886d..a8eac9f5eb 100644
--- a/src/cmd/compile/internal/ssa/writebarrier.go
+++ b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -94,7 +94,7 @@ func writebarrier(f *Func) {
sp = f.Entry.NewValue0(initpos, OpSP, f.Config.Types.Uintptr)
}
wbsym := &ExternSymbol{Typ: f.Config.Types.Bool, Sym: f.fe.Syslook("writeBarrier")}
- wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.Config.Types.UInt32.PtrTo(), wbsym, sb)
+ wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.Config.Types.UInt32Ptr, wbsym, sb)
writebarrierptr = f.fe.Syslook("writebarrierptr")
typedmemmove = f.fe.Syslook("typedmemmove")
typedmemclr = f.fe.Syslook("typedmemclr")