diff options
author | Russ Cox <rsc@golang.org> | 2015-04-28 00:28:47 -0400 |
---|---|---|
committer | Russ Cox <rsc@golang.org> | 2015-05-11 14:43:33 +0000 |
commit | 6d8a147bef8ee28eb647db21ea91ecb823fa2480 (patch) | |
tree | 1c14bd4162ef484aa775232d3e5abc7a8a16774e /src/runtime/mbarrier.go | |
parent | 7d9e16abc6bea2eb12d718b578f91328af99586a (diff) | |
download | go-6d8a147bef8ee28eb647db21ea91ecb823fa2480.tar.gz go-6d8a147bef8ee28eb647db21ea91ecb823fa2480.zip |
runtime: use 1-bit pointer bitmaps in type representation
The type information in reflect.Type and the GC programs is now
1 bit per word, down from 2 bits.
The in-memory unrolled type bitmap representation is now
1 bit per word, down from 4 bits.
The conversion from the unrolled (now 1-bit) bitmap to the
heap bitmap (still 4-bit) is not optimized. A followup CL will
work on that, after the heap bitmap has been converted to 2-bit.
The typeDead optimization, in which a special value denotes
that there are no more pointers anywhere in the object, is lost
in this CL. A followup CL will bring it back in the final form of
heapBitsSetType.
Change-Id: If61e67950c16a293b0b516a6fd9a1c755b6d5549
Reviewed-on: https://go-review.googlesource.com/9702
Reviewed-by: Austin Clements <austin@google.com>
Diffstat (limited to 'src/runtime/mbarrier.go')
-rw-r--r-- | src/runtime/mbarrier.go | 86 |
1 file changed, 48 insertions, 38 deletions
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go index eb5881707b..4162483ade 100644 --- a/src/runtime/mbarrier.go +++ b/src/runtime/mbarrier.go @@ -223,29 +223,25 @@ func typedmemmove(typ *_type, dst, src unsafe.Pointer) { } systemstack(func() { - mask := typeBitmapInHeapBitmapFormat(typ) + dst := dst // make local copies + src := src nptr := typ.size / ptrSize - for i := uintptr(0); i < nptr; i += 2 { - bits := mask[i/2] - if (bits>>2)&typeMask == typePointer { - writebarrierptr((*uintptr)(dst), *(*uintptr)(src)) - } else { - *(*uintptr)(dst) = *(*uintptr)(src) - } - // TODO(rsc): The noescape calls should be unnecessary. - dst = add(noescape(dst), ptrSize) - src = add(noescape(src), ptrSize) - if i+1 == nptr { - break - } - bits >>= 4 - if (bits>>2)&typeMask == typePointer { - writebarrierptr((*uintptr)(dst), *(*uintptr)(src)) - } else { - *(*uintptr)(dst) = *(*uintptr)(src) + i := uintptr(0) + Copy: + for _, bits := range ptrBitmapForType(typ) { + for j := 0; j < 8; j++ { + if bits&1 != 0 { + writebarrierptr((*uintptr)(dst), *(*uintptr)(src)) + } else { + *(*uintptr)(dst) = *(*uintptr)(src) + } + if i++; i >= nptr { + break Copy + } + dst = add(dst, ptrSize) + src = add(src, ptrSize) + bits >>= 1 } - dst = add(noescape(dst), ptrSize) - src = add(noescape(src), ptrSize) } }) } @@ -274,18 +270,25 @@ func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size off += frag } - mask := typeBitmapInHeapBitmapFormat(typ) + mask := ptrBitmapForType(typ) nptr := (off + size) / ptrSize - for i := uintptr(off / ptrSize); i < nptr; i++ { - bits := mask[i/2] >> ((i & 1) << 2) - if (bits>>2)&typeMask == typePointer { - writebarrierptr((*uintptr)(dst), *(*uintptr)(src)) - } else { - *(*uintptr)(dst) = *(*uintptr)(src) + i := uintptr(off / ptrSize) +Copy: + for { + bits := mask[i/8] >> (i % 8) + for j := i % 8; j < 8; j++ { + if bits&1 != 0 { + writebarrierptr((*uintptr)(dst), *(*uintptr)(src)) + } else { + *(*uintptr)(dst) = *(*uintptr)(src) +
} + if i++; i >= nptr { + break Copy + } + dst = add(dst, ptrSize) + src = add(src, ptrSize) + bits >>= 1 } - // TODO(rsc): The noescape calls should be unnecessary. - dst = add(noescape(dst), ptrSize) - src = add(noescape(src), ptrSize) } size &= ptrSize - 1 if size > 0 { @@ -307,18 +310,25 @@ func callwritebarrier(typ *_type, frame unsafe.Pointer, framesize, retoffset uin } systemstack(func() { - mask := typeBitmapInHeapBitmapFormat(typ) + mask := ptrBitmapForType(typ) // retoffset is known to be pointer-aligned (at least). // TODO(rsc): The noescape call should be unnecessary. dst := add(noescape(frame), retoffset) nptr := framesize / ptrSize - for i := uintptr(retoffset / ptrSize); i < nptr; i++ { - bits := mask[i/2] >> ((i & 1) << 2) - if (bits>>2)&typeMask == typePointer { - writebarrierptr_nostore((*uintptr)(dst), *(*uintptr)(dst)) + i := uintptr(retoffset / ptrSize) + Copy: + for { + bits := mask[i/8] >> (i % 8) + for j := i % 8; j < 8; j++ { + if bits&1 != 0 { + writebarrierptr_nostore((*uintptr)(dst), *(*uintptr)(dst)) + } + if i++; i >= nptr { + break Copy + } + dst = add(dst, ptrSize) + bits >>= 1 } - // TODO(rsc): The noescape call should be unnecessary. - dst = add(noescape(dst), ptrSize) } }) } |