Diffstat (limited to 'src/reflect')
-rw-r--r--  src/reflect/abi.go                |  42
-rw-r--r--  src/reflect/all_test.go           | 133
-rw-r--r--  src/reflect/deepequal.go          |  21
-rw-r--r--  src/reflect/float32reg_generic.go |  21
-rw-r--r--  src/reflect/value.go              |  79
5 files changed, 271 insertions(+), 25 deletions(-)
diff --git a/src/reflect/abi.go b/src/reflect/abi.go
index 9ddde3ae57..2ce7ca2615 100644
--- a/src/reflect/abi.go
+++ b/src/reflect/abi.go
@@ -467,3 +467,45 @@ func newAbiDesc(t *funcType, rcvr *rtype) abiDesc {
out.stackBytes -= retOffset
return abiDesc{in, out, stackCallArgsSize, retOffset, spill, stackPtrs, inRegPtrs, outRegPtrs}
}
+
+// intFromReg loads an argSize sized integer from reg and places it at to.
+//
+// argSize must be non-zero, fit in a register, and be a power of two.
+func intFromReg(r *abi.RegArgs, reg int, argSize uintptr, to unsafe.Pointer) {
+ memmove(to, r.IntRegArgAddr(reg, argSize), argSize)
+}
+
+// intToReg loads an argSize sized integer from from and stores it into reg.
+//
+// argSize must be non-zero, fit in a register, and be a power of two.
+func intToReg(r *abi.RegArgs, reg int, argSize uintptr, from unsafe.Pointer) {
+ memmove(r.IntRegArgAddr(reg, argSize), from, argSize)
+}
+
+// floatFromReg loads a float value from its register representation in r.
+//
+// argSize must be 4 or 8.
+func floatFromReg(r *abi.RegArgs, reg int, argSize uintptr, to unsafe.Pointer) {
+ switch argSize {
+ case 4:
+ *(*float32)(to) = archFloat32FromReg(r.Floats[reg])
+ case 8:
+ *(*float64)(to) = *(*float64)(unsafe.Pointer(&r.Floats[reg]))
+ default:
+ panic("bad argSize")
+ }
+}
+
+// floatToReg stores a float value in its register representation in r.
+//
+// argSize must be 4 or 8.
+func floatToReg(r *abi.RegArgs, reg int, argSize uintptr, from unsafe.Pointer) {
+ switch argSize {
+ case 4:
+ r.Floats[reg] = archFloat32ToReg(*(*float32)(from))
+ case 8:
+ r.Floats[reg] = *(*uint64)(from)
+ default:
+ panic("bad argSize")
+ }
+}
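
For orientation, here is a minimal standalone sketch of what these helpers do; regArgs and its 16-slot layout are illustrative stand-ins for abi.RegArgs, not the real type. An argSize-sized integer is byte-copied into the low bytes of a 64-bit register slot, while a float64 fills the register outright and a float32 goes through the arch-specific conversion shown in floatToReg above:

package main

import (
	"fmt"
	"math"
	"unsafe"
)

// regArgs is a hypothetical stand-in for abi.RegArgs: a bank of
// 64-bit integer and floating-point register slots.
type regArgs struct {
	Ints   [16]uint64
	Floats [16]uint64
}

// intToReg copies the low argSize bytes of *from into register reg,
// mirroring the memmove in the real helper. On little-endian targets
// the low bytes of the slot sit at its start.
func intToReg(r *regArgs, reg int, argSize uintptr, from unsafe.Pointer) {
	to := unsafe.Pointer(&r.Ints[reg])
	copy(unsafe.Slice((*byte)(to), argSize), unsafe.Slice((*byte)(from), argSize))
}

func main() {
	var r regArgs
	v := uint16(0xBEEF)
	intToReg(&r, 0, unsafe.Sizeof(v), unsafe.Pointer(&v))
	fmt.Printf("int reg 0: %#x\n", r.Ints[0]) // 0xbeef on little-endian

	// A float32 is stored as its bit pattern widened to 64 bits
	// (the amd64/arm64 rule from floatToReg above).
	f := float32(1.5)
	r.Floats[0] = uint64(math.Float32bits(f))
	fmt.Printf("float reg 0: %#x\n", r.Floats[0])
}
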
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index 293d036f67..22885c548f 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -905,6 +905,9 @@ var deepEqualTests = []DeepEqualTest{
{error(nil), error(nil), true},
{map[int]string{1: "one", 2: "two"}, map[int]string{2: "two", 1: "one"}, true},
{fn1, fn2, true},
+ {[]byte{1, 2, 3}, []byte{1, 2, 3}, true},
+ {[]MyByte{1, 2, 3}, []MyByte{1, 2, 3}, true},
+ {MyBytes{1, 2, 3}, MyBytes{1, 2, 3}, true},
// Inequalities
{1, 2, false},
@@ -925,6 +928,9 @@ var deepEqualTests = []DeepEqualTest{
{fn1, fn3, false},
{fn3, fn3, false},
{[][]int{{1}}, [][]int{{2}}, false},
+ {&structWithSelfPtr{p: &structWithSelfPtr{s: "a"}}, &structWithSelfPtr{p: &structWithSelfPtr{s: "b"}}, false},
+
+ // Fun with floating point.
{math.NaN(), math.NaN(), false},
{&[1]float64{math.NaN()}, &[1]float64{math.NaN()}, false},
{&[1]float64{math.NaN()}, self{}, true},
@@ -932,7 +938,6 @@ var deepEqualTests = []DeepEqualTest{
{[]float64{math.NaN()}, self{}, true},
{map[float64]float64{math.NaN(): 1}, map[float64]float64{1: 2}, false},
{map[float64]float64{math.NaN(): 1}, self{}, true},
- {&structWithSelfPtr{p: &structWithSelfPtr{s: "a"}}, &structWithSelfPtr{p: &structWithSelfPtr{s: "b"}}, false},
// Nil vs empty: not the same.
{[]int{}, []int(nil), false},
@@ -950,6 +955,9 @@ var deepEqualTests = []DeepEqualTest{
{&[3]interface{}{1, 2, 4}, &[3]interface{}{1, 2, "s"}, false},
{Basic{1, 0.5}, NotBasic{1, 0.5}, false},
{map[uint]string{1: "one", 2: "two"}, map[int]string{2: "two", 1: "one"}, false},
+ {[]byte{1, 2, 3}, []MyByte{1, 2, 3}, false},
+ {[]MyByte{1, 2, 3}, MyBytes{1, 2, 3}, false},
+ {[]byte{1, 2, 3}, MyBytes{1, 2, 3}, false},
// Possible loops.
{&loop1, &loop1, true},
@@ -1049,6 +1057,82 @@ func TestDeepEqualUnexportedMap(t *testing.T) {
}
}
+var deepEqualPerfTests = []struct {
+ x, y interface{}
+}{
+ {x: int8(99), y: int8(99)},
+ {x: []int8{99}, y: []int8{99}},
+ {x: int16(99), y: int16(99)},
+ {x: []int16{99}, y: []int16{99}},
+ {x: int32(99), y: int32(99)},
+ {x: []int32{99}, y: []int32{99}},
+ {x: int64(99), y: int64(99)},
+ {x: []int64{99}, y: []int64{99}},
+ {x: int(999999), y: int(999999)},
+ {x: []int{999999}, y: []int{999999}},
+
+ {x: uint8(99), y: uint8(99)},
+ {x: []uint8{99}, y: []uint8{99}},
+ {x: uint16(99), y: uint16(99)},
+ {x: []uint16{99}, y: []uint16{99}},
+ {x: uint32(99), y: uint32(99)},
+ {x: []uint32{99}, y: []uint32{99}},
+ {x: uint64(99), y: uint64(99)},
+ {x: []uint64{99}, y: []uint64{99}},
+ {x: uint(999999), y: uint(999999)},
+ {x: []uint{999999}, y: []uint{999999}},
+ {x: uintptr(999999), y: uintptr(999999)},
+ {x: []uintptr{999999}, y: []uintptr{999999}},
+
+ {x: float32(1.414), y: float32(1.414)},
+ {x: []float32{1.414}, y: []float32{1.414}},
+ {x: float64(1.414), y: float64(1.414)},
+ {x: []float64{1.414}, y: []float64{1.414}},
+
+ {x: complex64(1.414), y: complex64(1.414)},
+ {x: []complex64{1.414}, y: []complex64{1.414}},
+ {x: complex128(1.414), y: complex128(1.414)},
+ {x: []complex128{1.414}, y: []complex128{1.414}},
+
+ {x: true, y: true},
+ {x: []bool{true}, y: []bool{true}},
+
+ {x: "abcdef", y: "abcdef"},
+ {x: []string{"abcdef"}, y: []string{"abcdef"}},
+
+ {x: []byte("abcdef"), y: []byte("abcdef")},
+ {x: [][]byte{[]byte("abcdef")}, y: [][]byte{[]byte("abcdef")}},
+
+ {x: [6]byte{'a', 'b', 'c', 'a', 'b', 'c'}, y: [6]byte{'a', 'b', 'c', 'a', 'b', 'c'}},
+ {x: [][6]byte{[6]byte{'a', 'b', 'c', 'a', 'b', 'c'}}, y: [][6]byte{[6]byte{'a', 'b', 'c', 'a', 'b', 'c'}}},
+}
+
+func TestDeepEqualAllocs(t *testing.T) {
+ for _, tt := range deepEqualPerfTests {
+ t.Run(ValueOf(tt.x).Type().String(), func(t *testing.T) {
+ got := testing.AllocsPerRun(100, func() {
+ if !DeepEqual(tt.x, tt.y) {
+ t.Errorf("DeepEqual(%v, %v)=false", tt.x, tt.y)
+ }
+ })
+ if int(got) != 0 {
+ t.Errorf("DeepEqual(%v, %v) allocated %d times", tt.x, tt.y, int(got))
+ }
+ })
+ }
+}
+
+func BenchmarkDeepEqual(b *testing.B) {
+ for _, bb := range deepEqualPerfTests {
+ b.Run(ValueOf(bb.x).Type().String(), func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ sink = DeepEqual(bb.x, bb.y)
+ }
+ })
+ }
+}
+
func check2ndField(x interface{}, offs uintptr, t *testing.T) {
s := ValueOf(x)
f := s.Type().Field(1)
@@ -7050,6 +7134,53 @@ func BenchmarkNew(b *testing.B) {
})
}
+func BenchmarkMap(b *testing.B) {
+ type V *int
+ value := ValueOf((V)(nil))
+ stringKeys := []string{}
+ mapOfStrings := map[string]V{}
+ uint64Keys := []uint64{}
+ mapOfUint64s := map[uint64]V{}
+ for i := 0; i < 100; i++ {
+ stringKey := fmt.Sprintf("key%d", i)
+ stringKeys = append(stringKeys, stringKey)
+ mapOfStrings[stringKey] = nil
+
+ uint64Key := uint64(i)
+ uint64Keys = append(uint64Keys, uint64Key)
+ mapOfUint64s[uint64Key] = nil
+ }
+
+ tests := []struct {
+ label string
+ m, keys, value Value
+ }{
+ {"StringKeys", ValueOf(mapOfStrings), ValueOf(stringKeys), value},
+ {"Uint64Keys", ValueOf(mapOfUint64s), ValueOf(uint64Keys), value},
+ }
+
+ for _, tt := range tests {
+ b.Run(tt.label, func(b *testing.B) {
+ b.Run("MapIndex", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for j := tt.keys.Len() - 1; j >= 0; j-- {
+ tt.m.MapIndex(tt.keys.Index(j))
+ }
+ }
+ })
+ b.Run("SetMapIndex", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for j := tt.keys.Len() - 1; j >= 0; j-- {
+ tt.m.SetMapIndex(tt.keys.Index(j), tt.value)
+ }
+ }
+ })
+ })
+ }
+}
+
func TestSwapper(t *testing.T) {
type I int
var a, b, c I
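
The assertion in TestDeepEqualAllocs translates directly to user code: with the kind-specific fast paths added in deepequal.go (the next file in this diff), a DeepEqual call on simple values and byte slices no longer allocates. A small standalone check mirroring the test:

package main

import (
	"fmt"
	"reflect"
	"testing"
)

func main() {
	a, b := []byte("abcdef"), []byte("abcdef")
	allocs := testing.AllocsPerRun(100, func() {
		if !reflect.DeepEqual(a, b) {
			panic("DeepEqual([]byte, []byte) = false")
		}
	})
	fmt.Println("allocs per run:", allocs) // 0 with the fast paths applied
}
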
diff --git a/src/reflect/deepequal.go b/src/reflect/deepequal.go
index d951d8d999..94174dec04 100644
--- a/src/reflect/deepequal.go
+++ b/src/reflect/deepequal.go
@@ -6,7 +6,10 @@
package reflect
-import "unsafe"
+import (
+ "internal/bytealg"
+ "unsafe"
+)
// During deepValueEqual, must keep track of checks that are
// in progress. The comparison algorithm assumes that all
@@ -102,6 +105,10 @@ func deepValueEqual(v1, v2 Value, visited map[visit]bool) bool {
if v1.Pointer() == v2.Pointer() {
return true
}
+ // Special case for []byte, which is common.
+ if v1.Type().Elem().Kind() == Uint8 {
+ return bytealg.Equal(v1.Bytes(), v2.Bytes())
+ }
for i := 0; i < v1.Len(); i++ {
if !deepValueEqual(v1.Index(i), v2.Index(i), visited) {
return false
@@ -149,6 +156,18 @@ func deepValueEqual(v1, v2 Value, visited map[visit]bool) bool {
}
// Can't do better than this:
return false
+ case Int, Int8, Int16, Int32, Int64:
+ return v1.Int() == v2.Int()
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return v1.Uint() == v2.Uint()
+ case String:
+ return v1.String() == v2.String()
+ case Bool:
+ return v1.Bool() == v2.Bool()
+ case Float32, Float64:
+ return v1.Float() == v2.Float()
+ case Complex64, Complex128:
+ return v1.Complex() == v2.Complex()
default:
// Normal equality suffices
return valueInterface(v1, false) == valueInterface(v2, false)
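
Note that the []byte fast path keys on the element kind, so named byte types such as MyByte take it too; DeepEqual still reports false when the two slice types differ, because that check happens before deepValueEqual runs. A short illustration of the test cases added above:

package main

import (
	"fmt"
	"reflect"
)

type MyByte byte

func main() {
	fmt.Println(reflect.DeepEqual([]byte{1, 2, 3}, []byte{1, 2, 3}))     // true
	fmt.Println(reflect.DeepEqual([]MyByte{1, 2, 3}, []MyByte{1, 2, 3})) // true, same fast path
	fmt.Println(reflect.DeepEqual([]byte{1, 2, 3}, []MyByte{1, 2, 3}))   // false: distinct types
}
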
diff --git a/src/reflect/float32reg_generic.go b/src/reflect/float32reg_generic.go
new file mode 100644
index 0000000000..381d458057
--- /dev/null
+++ b/src/reflect/float32reg_generic.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+
+import "unsafe"
+
+// This file implements a straightforward conversion of a float32
+// value into its representation in a register. This conversion
+// applies on amd64 and arm64. It is also selected when a platform
+// has zero floating-point argument registers, but is never called
+// in that case.
+
+func archFloat32FromReg(reg uint64) float32 {
+ i := uint32(reg)
+ return *(*float32)(unsafe.Pointer(&i))
+}
+
+func archFloat32ToReg(val float32) uint64 {
+ return uint64(*(*uint32)(unsafe.Pointer(&val)))
+}
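
These two functions are the unsafe spelling of math.Float32bits and math.Float32frombits. A quick standalone sanity check that the round trip through the 64-bit register representation preserves the value:

package main

import (
	"fmt"
	"math"
)

func main() {
	for _, f := range []float32{1.5, -2.25, float32(math.Inf(1))} {
		reg := uint64(math.Float32bits(f))        // what archFloat32ToReg computes
		back := math.Float32frombits(uint32(reg)) // what archFloat32FromReg computes
		fmt.Printf("%v -> %#x -> %v\n", f, reg, back)
	}
}
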
diff --git a/src/reflect/value.go b/src/reflect/value.go
index 3c2172135e..33b81d7209 100644
--- a/src/reflect/value.go
+++ b/src/reflect/value.go
@@ -508,7 +508,7 @@ func (v Value) call(op string, in []Value) []Value {
// Copy values to "integer registers."
if v.flag&flagIndir != 0 {
offset := add(v.ptr, st.offset, "precomputed value offset")
- memmove(regArgs.IntRegArgAddr(st.ireg, st.size), offset, st.size)
+ intToReg(&regArgs, st.ireg, st.size, offset)
} else {
if st.kind == abiStepPointer {
// Duplicate this pointer in the pointer area of the
@@ -524,7 +524,7 @@ func (v Value) call(op string, in []Value) []Value {
panic("attempted to copy pointer to FP register")
}
offset := add(v.ptr, st.offset, "precomputed value offset")
- memmove(regArgs.FloatRegArgAddr(st.freg, st.size), offset, st.size)
+ floatToReg(&regArgs, st.freg, st.size, offset)
default:
panic("unknown ABI part kind")
}
@@ -610,13 +610,13 @@ func (v Value) call(op string, in []Value) []Value {
switch st.kind {
case abiStepIntReg:
offset := add(s, st.offset, "precomputed value offset")
- memmove(offset, regArgs.IntRegArgAddr(st.ireg, st.size), st.size)
+ intFromReg(&regArgs, st.ireg, st.size, offset)
case abiStepPointer:
s := add(s, st.offset, "precomputed value offset")
*((*unsafe.Pointer)(s)) = regArgs.Ptrs[st.ireg]
case abiStepFloatReg:
offset := add(s, st.offset, "precomputed value offset")
- memmove(offset, regArgs.FloatRegArgAddr(st.freg, st.size), st.size)
+ floatFromReg(&regArgs, st.freg, st.size, offset)
case abiStepStack:
panic("register-based return value has stack component")
default:
@@ -698,13 +698,13 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs
switch st.kind {
case abiStepIntReg:
offset := add(v.ptr, st.offset, "precomputed value offset")
- memmove(offset, regs.IntRegArgAddr(st.ireg, st.size), st.size)
+ intFromReg(regs, st.ireg, st.size, offset)
case abiStepPointer:
s := add(v.ptr, st.offset, "precomputed value offset")
*((*unsafe.Pointer)(s)) = regs.Ptrs[st.ireg]
case abiStepFloatReg:
offset := add(v.ptr, st.offset, "precomputed value offset")
- memmove(offset, regs.FloatRegArgAddr(st.freg, st.size), st.size)
+ floatFromReg(regs, st.freg, st.size, offset)
case abiStepStack:
panic("register-based return value has stack component")
default:
@@ -784,7 +784,7 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs
// Copy values to "integer registers."
if v.flag&flagIndir != 0 {
offset := add(v.ptr, st.offset, "precomputed value offset")
- memmove(regs.IntRegArgAddr(st.ireg, st.size), offset, st.size)
+ intToReg(regs, st.ireg, st.size, offset)
} else {
// Only populate the Ints space on the return path.
// This is safe because out is kept alive until the
@@ -799,7 +799,7 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs
panic("attempted to copy pointer to FP register")
}
offset := add(v.ptr, st.offset, "precomputed value offset")
- memmove(regs.FloatRegArgAddr(st.freg, st.size), offset, st.size)
+ floatToReg(regs, st.freg, st.size, offset)
default:
panic("unknown ABI part kind")
}
@@ -982,9 +982,9 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool, regs *a
methodRegs.Ptrs[mStep.ireg] = *(*unsafe.Pointer)(from)
fallthrough // We need to make sure this ends up in Ints, too.
case abiStepIntReg:
- memmove(methodRegs.IntRegArgAddr(mStep.ireg, mStep.size), from, mStep.size)
+ intToReg(&methodRegs, mStep.ireg, mStep.size, from)
case abiStepFloatReg:
- memmove(methodRegs.FloatRegArgAddr(mStep.freg, mStep.size), from, mStep.size)
+ floatToReg(&methodRegs, mStep.freg, mStep.size, from)
default:
panic("unexpected method step")
}
@@ -1000,9 +1000,9 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool, regs *a
// Do the pointer copy directly so we get a write barrier.
*(*unsafe.Pointer)(to) = valueRegs.Ptrs[vStep.ireg]
case abiStepIntReg:
- memmove(to, valueRegs.IntRegArgAddr(vStep.ireg, vStep.size), vStep.size)
+ intFromReg(valueRegs, vStep.ireg, vStep.size, to)
case abiStepFloatReg:
- memmove(to, valueRegs.FloatRegArgAddr(vStep.freg, vStep.size), vStep.size)
+ floatFromReg(valueRegs, vStep.freg, vStep.size, to)
default:
panic("unexpected value step")
}
@@ -1515,15 +1515,21 @@ func (v Value) MapIndex(key Value) Value {
// considered unexported. This is consistent with the
// behavior for structs, which allow read but not write
// of unexported fields.
- key = key.assignTo("reflect.Value.MapIndex", tt.key, nil)
- var k unsafe.Pointer
- if key.flag&flagIndir != 0 {
- k = key.ptr
+ var e unsafe.Pointer
+ if key.kind() == String && tt.key.Kind() == String && tt.elem.size <= maxValSize {
+ k := *(*string)(key.ptr)
+ e = mapaccess_faststr(v.typ, v.pointer(), k)
} else {
- k = unsafe.Pointer(&key.ptr)
+ key = key.assignTo("reflect.Value.MapIndex", tt.key, nil)
+ var k unsafe.Pointer
+ if key.flag&flagIndir != 0 {
+ k = key.ptr
+ } else {
+ k = unsafe.Pointer(&key.ptr)
+ }
+ e = mapaccess(v.typ, v.pointer(), k)
}
- e := mapaccess(v.typ, v.pointer(), k)
if e == nil {
return Value{}
}
@@ -2121,6 +2127,25 @@ func (v Value) SetMapIndex(key, elem Value) {
v.mustBeExported()
key.mustBeExported()
tt := (*mapType)(unsafe.Pointer(v.typ))
+
+ if key.kind() == String && tt.key.Kind() == String && tt.elem.size <= maxValSize {
+ k := *(*string)(key.ptr)
+ if elem.typ == nil {
+ mapdelete_faststr(v.typ, v.pointer(), k)
+ return
+ }
+ elem.mustBeExported()
+ elem = elem.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
+ var e unsafe.Pointer
+ if elem.flag&flagIndir != 0 {
+ e = elem.ptr
+ } else {
+ e = unsafe.Pointer(&elem.ptr)
+ }
+ mapassign_faststr(v.typ, v.pointer(), k, e)
+ return
+ }
+
key = key.assignTo("reflect.Value.SetMapIndex", tt.key, nil)
var k unsafe.Pointer
if key.flag&flagIndir != 0 {
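
From the caller's side nothing changes; the fast path simply kicks in when the key Value has string kind, the map's key type is string, and the element size is under maxValSize (the limit the runtime's faststr variants require). A usage sketch exercising all three entry points, where a zero Value as elem deletes the key via mapdelete_faststr:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{"a": 1}
	mv := reflect.ValueOf(m)

	// Lookup: takes mapaccess_faststr, avoiding the key copy
	// that the generic path needs.
	fmt.Println(mv.MapIndex(reflect.ValueOf("a")).Int()) // 1

	// Insert: takes mapassign_faststr.
	mv.SetMapIndex(reflect.ValueOf("b"), reflect.ValueOf(2))

	// Delete: a zero Value elem takes mapdelete_faststr.
	mv.SetMapIndex(reflect.ValueOf("a"), reflect.Value{})

	fmt.Println(m) // map[b:2]
}
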
@@ -2915,8 +2940,7 @@ func (v Value) CanConvert(t Type) bool {
// from slice to pointer-to-array.
if vt.Kind() == Slice && t.Kind() == Ptr && t.Elem().Kind() == Array {
n := t.Elem().Len()
- h := (*unsafeheader.Slice)(v.ptr)
- if n > h.Len {
+ if n > v.Len() {
return false
}
}
@@ -3183,10 +3207,10 @@ func cvtStringRunes(v Value, t Type) Value {
// convertOp: []T -> *[N]T
func cvtSliceArrayPtr(v Value, t Type) Value {
n := t.Elem().Len()
- h := (*unsafeheader.Slice)(v.ptr)
- if n > h.Len {
- panic("reflect: cannot convert slice with length " + itoa.Itoa(h.Len) + " to pointer to array with length " + itoa.Itoa(n))
+ if n > v.Len() {
+ panic("reflect: cannot convert slice with length " + itoa.Itoa(v.Len()) + " to pointer to array with length " + itoa.Itoa(n))
}
+ h := (*unsafeheader.Slice)(v.ptr)
return Value{t.common(), h.Data, v.flag&^(flagIndir|flagAddr|flagKindMask) | flag(Ptr)}
}
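
The rewritten length check behaves the same from the outside. For reference, the conversion it guards is Go 1.17's slice to array-pointer conversion, and CanConvert is the non-panicking probe for slices that might be too short:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	s := []int{1, 2, 3, 4}
	at := reflect.TypeOf((*[4]int)(nil)) // target type *[4]int
	v := reflect.ValueOf(s)
	if v.CanConvert(at) { // false, rather than a panic, if len(s) < 4
		p := v.Convert(at).Interface().(*[4]int)
		p[0] = 99         // the array pointer shares s's backing array
		fmt.Println(s[0]) // 99
	}
}
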
@@ -3253,12 +3277,21 @@ func makemap(t *rtype, cap int) (m unsafe.Pointer)
func mapaccess(t *rtype, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
//go:noescape
+func mapaccess_faststr(t *rtype, m unsafe.Pointer, key string) (val unsafe.Pointer)
+
+//go:noescape
func mapassign(t *rtype, m unsafe.Pointer, key, val unsafe.Pointer)
//go:noescape
+func mapassign_faststr(t *rtype, m unsafe.Pointer, key string, val unsafe.Pointer)
+
+//go:noescape
func mapdelete(t *rtype, m unsafe.Pointer, key unsafe.Pointer)
//go:noescape
+func mapdelete_faststr(t *rtype, m unsafe.Pointer, key string)
+
+//go:noescape
func mapiterinit(t *rtype, m unsafe.Pointer, it *hiter)
//go:noescape