author    Martin Möhrmann <moehrmann@google.com>  2018-04-27 21:58:59 +0200
committer Martin Möhrmann <moehrmann@google.com>  2018-05-08 21:15:16 +0000
commit    aee71dd70b3779c66950ce6a952deca13d48e55e (patch)
tree      ba5e077b2478755d5a423c9898c23b9bd3819cdd /src/runtime/map.go
parent    cd1976dbef0a4a474cccb9e7fcb1e9cf2310df66 (diff)
cmd/compile: optimize map-clearing range idiom
Replace map clears of the form

    for k := range m {
        delete(m, k)
    }

(where m is a map whose key type is reflexive for ==) with a new
runtime function that clears the map's backing array with a memclr
and reinitializes the hmap struct. Map key types that contain
floats, for example, are not covered by this optimization, since
NaN keys cannot be deleted from maps using delete.

name                           old time/op  new time/op  delta
GoMapClear/Reflexive/1         92.2ns ± 1%  47.1ns ± 2%  -48.89%  (p=0.000 n=9+9)
GoMapClear/Reflexive/10         108ns ± 1%    48ns ± 2%  -55.68%  (p=0.000 n=10+10)
GoMapClear/Reflexive/100        303ns ± 2%   110ns ± 3%  -63.56%  (p=0.000 n=10+10)
GoMapClear/Reflexive/1000      3.58µs ± 3%  1.23µs ± 2%  -65.49%  (p=0.000 n=9+10)
GoMapClear/Reflexive/10000     28.2µs ± 3%  10.3µs ± 2%  -63.55%  (p=0.000 n=9+10)
GoMapClear/NonReflexive/1       121ns ± 2%   124ns ± 7%     ~     (p=0.097 n=10+10)
GoMapClear/NonReflexive/10      137ns ± 2%   139ns ± 3%  +1.53%   (p=0.033 n=10+10)
GoMapClear/NonReflexive/100     331ns ± 3%   334ns ± 2%     ~     (p=0.342 n=10+10)
GoMapClear/NonReflexive/1000   3.64µs ± 3%  3.64µs ± 2%     ~     (p=0.887 n=9+10)
GoMapClear/NonReflexive/10000  28.1µs ± 2%  28.4µs ± 3%     ~     (p=0.247 n=10+10)

Fixes #20138

Change-Id: I181332a8ef434a4f0d89659f492d8711db3f3213
Reviewed-on: https://go-review.googlesource.com/110055
Reviewed-by: Keith Randall <khr@golang.org>
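For context, here is a minimal, self-contained illustration (not part of the
commit) of the idiom the compiler now recognizes, and of why key types that
can hold NaN are excluded:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        // Reflexive key type (int): with this commit, the loop below
        // can be compiled into a single runtime map-clear call.
        m := map[int]string{1: "a", 2: "b"}
        for k := range m {
            delete(m, k)
        }
        fmt.Println(len(m)) // 0

        // Non-reflexive key type (float64): NaN != NaN, so delete can
        // never match a NaN key and the loop is not a full clear.
        f := map[float64]string{math.NaN(): "stuck"}
        for k := range f {
            delete(f, k)
        }
        fmt.Println(len(f)) // still 1; the NaN entry survives
    }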
Diffstat (limited to 'src/runtime/map.go')
-rw-r--r--  src/runtime/map.go  122
1 file changed, 94 insertions(+), 28 deletions(-)
diff --git a/src/runtime/map.go b/src/runtime/map.go
index 1926123458..cc1358a977 100644
--- a/src/runtime/map.go
+++ b/src/runtime/map.go
@@ -318,7 +318,7 @@ func makemap(t *maptype, hint int, h *hmap) *hmap {
// If hint is large zeroing this memory could take a while.
if h.B != 0 {
var nextOverflow *bmap
- h.buckets, nextOverflow = makeBucketArray(t, h.B)
+ h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
if nextOverflow != nil {
h.extra = new(mapextra)
h.extra.nextOverflow = nextOverflow
@@ -328,6 +328,57 @@ func makemap(t *maptype, hint int, h *hmap) *hmap {
return h
}
+// makeBucketArray initializes a backing array for map buckets.
+// 1<<b is the minimum number of buckets to allocate.
+// dirtyalloc should either be nil or a bucket array previously
+// allocated by makeBucketArray with the same t and b parameters.
+// If dirtyalloc is nil, a new backing array is allocated;
+// otherwise dirtyalloc is cleared and reused as the backing array.
+func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
+ base := bucketShift(b)
+ nbuckets := base
+ // For small b, overflow buckets are unlikely.
+ // Avoid the overhead of the calculation.
+ if b >= 4 {
+ // Add on the estimated number of overflow buckets
+ // required to insert the median number of elements
+ // used with this value of b.
+ nbuckets += bucketShift(b - 4)
+ sz := t.bucket.size * nbuckets
+ up := roundupsize(sz)
+ if up != sz {
+ nbuckets = up / t.bucket.size
+ }
+ }
+
+ if dirtyalloc == nil {
+ buckets = newarray(t.bucket, int(nbuckets))
+ } else {
+ // dirtyalloc was previously generated by
+ // the above newarray(t.bucket, int(nbuckets))
+ // but may not be empty.
+ buckets = dirtyalloc
+ size := t.bucket.size * nbuckets
+ if t.bucket.kind&kindNoPointers == 0 {
+ memclrHasPointers(buckets, size)
+ } else {
+ memclrNoHeapPointers(buckets, size)
+ }
+ }
+
+ if base != nbuckets {
+ // We preallocated some overflow buckets.
+ // To keep the overhead of tracking these overflow buckets to a minimum,
+ // we use the convention that if a preallocated overflow bucket's overflow
+ // pointer is nil, then there are more available by bumping the pointer.
+ // We need a safe non-nil pointer for the last overflow bucket; just use buckets.
+ nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
+ last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
+ last.setoverflow(t, (*bmap)(buckets))
+ }
+ return buckets, nextOverflow
+}
+
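To make the sizing arithmetic above concrete, here is a standalone sketch
(estimateBuckets is a hypothetical helper, not part of the runtime) that
assumes bucketShift(b) == 1<<b and ignores the roundupsize size-class
adjustment:

    // estimateBuckets mirrors the count logic at the top of makeBucketArray:
    // 1<<b base buckets, plus roughly one preallocated overflow bucket per
    // sixteen base buckets once b >= 4.
    func estimateBuckets(b uint8) (base, nbuckets uintptr) {
        base = uintptr(1) << b
        nbuckets = base
        if b >= 4 {
            nbuckets += uintptr(1) << (b - 4)
        }
        return base, nbuckets
    }

For example, estimateBuckets(5) yields (32, 34): a 32-bucket map gets two
overflow buckets preallocated in the same backing array, and base != nbuckets
then triggers the nextOverflow bookkeeping shown above.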
// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
@@ -855,34 +906,49 @@ next:
goto next
}
-func makeBucketArray(t *maptype, b uint8) (buckets unsafe.Pointer, nextOverflow *bmap) {
- base := bucketShift(b)
- nbuckets := base
- // For small b, overflow buckets are unlikely.
- // Avoid the overhead of the calculation.
- if b >= 4 {
- // Add on the estimated number of overflow buckets
- // required to insert the median number of elements
- // used with this value of b.
- nbuckets += bucketShift(b - 4)
- sz := t.bucket.size * nbuckets
- up := roundupsize(sz)
- if up != sz {
- nbuckets = up / t.bucket.size
- }
+// mapclear deletes all keys from a map.
+func mapclear(t *maptype, h *hmap) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ pc := funcPC(mapclear)
+ racewritepc(unsafe.Pointer(h), callerpc, pc)
}
- buckets = newarray(t.bucket, int(nbuckets))
- if base != nbuckets {
- // We preallocated some overflow buckets.
- // To keep the overhead of tracking these overflow buckets to a minimum,
- // we use the convention that if a preallocated overflow bucket's overflow
- // pointer is nil, then there are more available by bumping the pointer.
- // We need a safe non-nil pointer for the last overflow bucket; just use buckets.
- nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
- last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
- last.setoverflow(t, (*bmap)(buckets))
+
+ if h == nil || h.count == 0 {
+ return
}
- return buckets, nextOverflow
+
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map writes")
+ }
+
+ h.flags |= hashWriting
+
+ h.flags &^= sameSizeGrow
+ h.oldbuckets = nil
+ h.nevacuate = 0
+ h.noverflow = 0
+ h.count = 0
+
+ // Keep the mapextra allocation but clear any extra information.
+ if h.extra != nil {
+ *h.extra = mapextra{}
+ }
+
+ // makeBucketArray clears the memory pointed to by h.buckets
+ // and recovers any overflow buckets by generating them
+ // as if h.buckets were newly allocated.
+ _, nextOverflow := makeBucketArray(t, h.B, h.buckets)
+ if nextOverflow != nil {
+ // If overflow buckets are created then h.extra
+ // will have been allocated during initial bucket creation.
+ h.extra.nextOverflow = nextOverflow
+ }
+
+ if h.flags&hashWriting == 0 {
+ throw("concurrent map writes")
+ }
+ h.flags &^= hashWriting
}
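Usage note: user code never calls mapclear directly; with this commit the
compiler pattern-matches the range/delete loop and emits a call to mapclear
when the key type is reflexive for ==. A minimal benchmark sketch in the
spirit of the GoMapClear numbers above (the names and map size here are
illustrative, not the actual runtime benchmark):

    package mapclear_test

    import "testing"

    func BenchmarkMapClear(b *testing.B) {
        m := make(map[int]int, 1000)
        for i := 0; i < b.N; i++ {
            for j := 0; j < 1000; j++ {
                m[j] = j
            }
            // With this optimization, the loop below becomes a single
            // runtime map-clear call instead of per-key deletes.
            for k := range m {
                delete(m, k)
            }
        }
    }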
func hashGrow(t *maptype, h *hmap) {
@@ -895,7 +961,7 @@ func hashGrow(t *maptype, h *hmap) {
h.flags |= sameSizeGrow
}
oldbuckets := h.buckets
- newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger)
+ newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
flags := h.flags &^ (iterator | oldIterator)
if h.flags&iterator != 0 {