author    Josh Bleecher Snyder <josharian@gmail.com>  2019-04-22 13:37:08 -0700
committer Josh Bleecher Snyder <josharian@gmail.com>  2019-04-30 18:18:12 +0000
commit    73cb9a1cb3d9d31a8276aaa2aebfb8b35509e05c (patch)
tree      2811c612c1eb11bc686626310ee50b4a4be6709e /src/runtime/map_fast64.go
parent    a8d0047e473510db1b1a5e35c03fdf41a13b5733 (diff)
all: refer to map elements as elements instead of values
The spec carefully and consistently uses "key" and "element" as map
terminology. The implementation, not so much.

This change attempts to make the implementation consistently hew to the
spec's terminology.

Beyond consistency, this has the advantage of avoiding some confusion and
naming collisions, since v and value are very generic and commonly used
terms.

I believe that I found everything, but there are a lot of non-obvious
places for these to hide, and grepping for them is hard. Hopefully this
change changes enough of them that we will start using elem going forward.
Any remaining hidden cases can be fixed ad hoc as they are discovered.

The only externally-facing part of this change is in package reflect,
where there is a minor doc change and a function parameter name change.

Updates #27167

Change-Id: I2f2d78f16c360dc39007b9966d5c2046a29d3701
Reviewed-on: https://go-review.googlesource.com/c/go/+/174523
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
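Context for the expressions this diff renames: a fast64 bucket stores its
tophash bytes, then all 8 keys, then all 8 elements, so element i lives at
dataOffset + bucketCnt*8 + i*elemsize from the bucket base. Below is a
minimal sketch of that arithmetic. fastBucket is a hypothetical simplified
stand-in; the real bmap layout is generated by the compiler and runtime
from the maptype, but the offsets line up the same way for a
map[uint64]uint64.

package main

import (
    "fmt"
    "unsafe"
)

// bucketCnt matches the runtime constant: 8 key/elem pairs per bucket.
const bucketCnt = 8

// fastBucket is a hypothetical stand-in for a map[uint64]uint64 bucket:
// tophash bytes, then all keys, then all elements.
type fastBucket struct {
    tophash [bucketCnt]uint8
    keys    [bucketCnt]uint64
    elems   [bucketCnt]uint64
}

func main() {
    var b fastBucket
    dataOffset := unsafe.Offsetof(b.keys) // analogue of the runtime's dataOffset
    elemSize := unsafe.Sizeof(b.elems[0]) // analogue of t.elemsize
    i := uintptr(3)
    // Same shape as the diff's expression:
    //   add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
    p := unsafe.Pointer(uintptr(unsafe.Pointer(&b)) + dataOffset + bucketCnt*8 + i*elemSize)
    fmt.Println(p == unsafe.Pointer(&b.elems[3])) // true
}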
Diffstat (limited to 'src/runtime/map_fast64.go')
-rw-r--r--  src/runtime/map_fast64.go | 38
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/src/runtime/map_fast64.go b/src/runtime/map_fast64.go
index 03115197f3..4d420e7477 100644
--- a/src/runtime/map_fast64.go
+++ b/src/runtime/map_fast64.go
@@ -42,7 +42,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
}
}
}
@@ -82,7 +82,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
}
}
}
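The two hunks above are the lookup paths the compiler of this era selects
for uint64-keyed maps: a one-result read lowers to mapaccess1_fast64 and a
comma-ok read to mapaccess2_fast64, each returning a pointer into the
element array whose offset was just renamed. A small illustration of the
Go-level forms involved (the lowering itself is a compiler detail, not
code anyone writes by hand):

package main

import "fmt"

func main() {
    m := map[uint64]string{42: "answer"}
    v := m[42]    // one-result read: lowered to mapaccess1_fast64
    w, ok := m[7] // comma-ok read: lowered to mapaccess2_fast64
    fmt.Println(v, w, ok) // answer  false
}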
@@ -171,12 +171,12 @@ bucketloop:
h.count++
done:
- val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
+ elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
- return val
+ return elem
}
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
@@ -261,12 +261,12 @@ bucketloop:
h.count++
done:
- val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
+ elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
- return val
+ return elem
}
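Both assignment hunks end the same way: mapassign_fast64 and
mapassign_fast64ptr return the address of the element slot (now named elem
rather than val), and the compiler emits the store through that pointer at
the call site. Roughly, at the Go level (the commented lowering is an
approximation, not exact generated code):

package main

import "fmt"

func main() {
    m := make(map[uint64]int)
    // The compiler lowers the assignment below to approximately:
    //   p := mapassign_fast64(t, h, 7)  // returns the elem slot
    //   *(*int)(p) = 1                  // caller stores into it
    // which is why these functions return a pointer, not a value.
    m[7] = 1
    fmt.Println(m[7]) // 1
}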
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
@@ -302,11 +302,11 @@ search:
if t.key.ptrdata != 0 {
memclrHasPointers(k, t.key.size)
}
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
if t.elem.ptrdata != 0 {
- memclrHasPointers(v, t.elem.size)
+ memclrHasPointers(e, t.elem.size)
} else {
- memclrNoHeapPointers(v, t.elem.size)
+ memclrNoHeapPointers(e, t.elem.size)
}
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
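The deletion hunk renames v to e at the point where the element slot is
cleared, and the branch it feeds matters for the GC: a pointer-bearing
element must be cleared with the write-barrier-aware memclrHasPointers so
the collector sees the references die, while a pointer-free element can
take the cheaper memclrNoHeapPointers. A standalone sketch of that
decision follows; typeInfo is a hypothetical stand-in for the runtime's
type descriptor, and plain byte clears substitute for the runtime-internal
memclr functions, which are not importable.

package main

import (
    "fmt"
    "unsafe"
)

// typeInfo is a hypothetical stand-in for the runtime's _type descriptor.
type typeInfo struct {
    size    uintptr
    ptrdata uintptr // length of the prefix that can contain pointers
}

// clearElem mirrors the branch in mapdelete_fast64; the comments name the
// runtime-internal call each arm represents.
func clearElem(e unsafe.Pointer, t *typeInfo) {
    b := unsafe.Slice((*byte)(e), t.size)
    if t.ptrdata != 0 {
        // runtime: memclrHasPointers(e, t.size) -- GC-visible clear
        for i := range b {
            b[i] = 0
        }
    } else {
        // runtime: memclrNoHeapPointers(e, t.size) -- plain memory clear
        for i := range b {
            b[i] = 0
        }
    }
}

func main() {
    p := new(int)
    t := &typeInfo{size: unsafe.Sizeof(p), ptrdata: unsafe.Sizeof(p)}
    clearElem(unsafe.Pointer(&p), t) // zeroes the pointer variable itself
    fmt.Println(p == nil)            // true
}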
@@ -373,7 +373,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.v = add(x.k, bucketCnt*8)
+ x.e = add(x.k, bucketCnt*8)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
@@ -381,13 +381,13 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.v = add(y.k, bucketCnt*8)
+ y.e = add(y.k, bucketCnt*8)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
- v := add(k, bucketCnt*8)
- for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 8), add(v, uintptr(t.valuesize)) {
+ e := add(k, bucketCnt*8)
+ for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
@@ -399,7 +399,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
var useY uint8
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
- // to send this key/value to bucket x or bucket y).
+ // to send this key/elem to bucket x or bucket y).
hash := t.key.alg.hash(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
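The reworded comment sits on the growth split: when the table doubles,
newbit is the old bucket count, and the single bit hash&newbit decides
whether a key/elem pair stays at its old index (bucket x) or moves to the
old index plus newbit (bucket y). A worked standalone example of that bit
test, assuming a grow from 4 to 8 buckets:

package main

import "fmt"

func main() {
    const oldBuckets = 4      // buckets before the grow
    const newbit = oldBuckets // the bit that decides x vs y
    for _, hash := range []uintptr{0x01, 0x05, 0x0a, 0x06} {
        oldIdx := hash & (oldBuckets - 1)
        newIdx := oldIdx
        dst := "x"
        if hash&newbit != 0 {
            newIdx = oldIdx + newbit
            dst = "y"
        }
        fmt.Printf("hash %#04x: old bucket %d -> %s (new bucket %d)\n",
            hash, oldIdx, dst, newIdx)
    }
}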
@@ -413,7 +413,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.v = add(dst.k, bucketCnt*8)
+ dst.e = add(dst.k, bucketCnt*8)
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
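The "mask dst.i" note on the line above relies on bucketCnt being a power
of two: dst.i&(bucketCnt-1) is provably in [0, bucketCnt-1], so the
compiler can elide the bounds check that indexing tophash with the
unmasked counter would require. A minimal illustration:

package main

import "fmt"

const bucketCnt = 8

func main() {
    var tophash [bucketCnt]uint8
    for i := uintptr(0); i <= bucketCnt; i++ {
        // i & (bucketCnt-1) is always in [0, bucketCnt-1], so the
        // compiler can prove the index is in range and omit the bounds
        // check that a plain tophash[i] would need.
        tophash[i&(bucketCnt-1)] = uint8(i)
    }
    fmt.Println(tophash) // [8 1 2 3 4 5 6 7]
}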
@@ -431,17 +431,17 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
*(*uint64)(dst.k) = *(*uint64)(k)
}
- typedmemmove(t.elem, dst.v, v)
+ typedmemmove(t.elem, dst.e, e)
dst.i++
// These updates might push these pointers past the end of the
- // key or value arrays. That's ok, as we have the overflow pointer
+ // key or elem arrays. That's ok, as we have the overflow pointer
// at the end of the bucket to protect against pointing past the
// end of the bucket.
dst.k = add(dst.k, 8)
- dst.v = add(dst.v, uintptr(t.valuesize))
+ dst.e = add(dst.e, uintptr(t.elemsize))
}
}
- // Unlink the overflow buckets & clear key/value to help GC.
+ // Unlink the overflow buckets & clear key/elem to help GC.
if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
// Preserve b.tophash because the evacuation