Diffstat (limited to 'src/runtime/mbitmap.go')
-rw-r--r--  src/runtime/mbitmap.go | 147
1 file changed, 74 insertions(+), 73 deletions(-)
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 32b8db7a50..9363409e36 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -46,6 +46,7 @@
package runtime

import (
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -326,8 +327,8 @@ func heapBitsForAddr(addr uintptr) (h heapBits) {
// we expect to crash in the caller.
return
}
- h.bitp = &ha.bitmap[(addr/(sys.PtrSize*4))%heapArenaBitmapBytes]
- h.shift = uint32((addr / sys.PtrSize) & 3)
+ h.bitp = &ha.bitmap[(addr/(goarch.PtrSize*4))%heapArenaBitmapBytes]
+ h.shift = uint32((addr / goarch.PtrSize) & 3)
h.arena = uint32(arena)
h.last = &ha.bitmap[len(ha.bitmap)-1]
return
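The indexing above reflects the heap bitmap layout: two bits per pointer-sized heap word (pointer/scalar plus scan/dead), so one bitmap byte describes four words, i.e. goarch.PtrSize*4 bytes of heap. A self-contained sketch of the same arithmetic, assuming a 64-bit platform and 64 MiB arenas (both assumptions, chosen to make the constants concrete):

    const (
        ptrSize              = 8                              // assumed 64-bit
        heapArenaBytes       = 64 << 20                       // assumed arena size
        heapArenaBitmapBytes = heapArenaBytes / (ptrSize * 4) // one byte per 4 words
    )

    // bitmapIndex mirrors the h.bitp/h.shift computation above.
    func bitmapIndex(addr uintptr) (byteIdx uintptr, shift uint32) {
        byteIdx = (addr / (ptrSize * 4)) % heapArenaBitmapBytes // which bitmap byte
        shift = uint32((addr / ptrSize) & 3)                    // which of its 4 word slots
        return byteIdx, shift
    }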
@@ -386,10 +387,10 @@ func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex ui
// If s is nil, the virtual address has never been part of the heap.
// This pointer may be to some mmap'd region, so we allow it.
if s == nil {
- if GOARCH == "amd64" && p == clobberdeadPtr && debug.invalidptr != 0 {
- // Crash if clobberdeadPtr is seen. Only on AMD64 for now, as
- // it is the only platform where compiler's clobberdead mode is
- // implemented. On AMD64 clobberdeadPtr cannot be a valid address.
+ if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
+ // Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
+ // as they are the only platforms where the compiler's clobberdead mode is
+ // implemented. On these platforms clobberdeadPtr cannot be a valid address.
badPointer(s, p, refBase, refOff)
}
return
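For context, clobberdeadPtr is a poison value the compiler stores into dead stack slots when its -clobberdead debug mode is on; it is defined in this file along these lines (the shift repeats the pattern into the high half only on 64-bit):

    const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))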
@@ -557,7 +558,7 @@ func (h heapBits) isPointer() bool {
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
- if (dst|src|size)&(sys.PtrSize-1) != 0 {
+ if (dst|src|size)&(goarch.PtrSize-1) != 0 {
throw("bulkBarrierPreWrite: unaligned arguments")
}
if !writeBarrier.needed {
@@ -592,7 +593,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
buf := &getg().m.p.ptr().wbBuf
h := heapBitsForAddr(dst)
if src == 0 {
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if h.isPointer() {
dstx := (*uintptr)(unsafe.Pointer(dst + i))
if !buf.putFast(*dstx, 0) {
@@ -602,7 +603,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
h = h.next()
}
} else {
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if h.isPointer() {
dstx := (*uintptr)(unsafe.Pointer(dst + i))
srcx := (*uintptr)(unsafe.Pointer(src + i))
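Both loops above share one shape: step through the region a pointer-sized word at a time, consult the heap bitmap, and push old/new pointer pairs into the per-P write-barrier buffer. Stripped of the buffer plumbing, roughly:

    // sketchBulkBarrier shows the shape of the loops above; isPointerWord
    // and enqueue are hypothetical stand-ins for h.isPointer and buf.putFast.
    func sketchBulkBarrier(dst, src, size uintptr,
        isPointerWord func(uintptr) bool, enqueue func(old, newv uintptr)) {
        const ptrSize = 8 // assumed 64-bit
        for i := uintptr(0); i < size; i += ptrSize {
            if isPointerWord(i) { // bitmap marks word i as a pointer slot
                dstx := (*uintptr)(unsafe.Pointer(dst + i))
                srcx := (*uintptr)(unsafe.Pointer(src + i))
                enqueue(*dstx, *srcx) // old value, new value for the GC
            }
        }
    }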
@@ -625,7 +626,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
// created and zeroed with malloc.
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
- if (dst|src|size)&(sys.PtrSize-1) != 0 {
+ if (dst|src|size)&(goarch.PtrSize-1) != 0 {
throw("bulkBarrierPreWrite: unaligned arguments")
}
if !writeBarrier.needed {
@@ -633,7 +634,7 @@ func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
}
buf := &getg().m.p.ptr().wbBuf
h := heapBitsForAddr(dst)
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if h.isPointer() {
srcx := (*uintptr)(unsafe.Pointer(src + i))
if !buf.putFast(0, *srcx) {
@@ -653,17 +654,17 @@ func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
- word := maskOffset / sys.PtrSize
+ word := maskOffset / goarch.PtrSize
bits = addb(bits, word/8)
mask := uint8(1) << (word % 8)

buf := &getg().m.p.ptr().wbBuf
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if mask == 0 {
bits = addb(bits, 1)
if *bits == 0 {
// Skip 8 words.
- i += 7 * sys.PtrSize
+ i += 7 * goarch.PtrSize
continue
}
mask = 1
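The arithmetic above walks a one-bit-per-word pointer mask: word = maskOffset/goarch.PtrSize is the starting word, whose bit sits in byte word/8 at position word%8, and an all-zero mask byte lets the loop skip eight words at once. A worked instance, assuming 64-bit words:

    // maskPosition locates the starting word's bit: byte word/8, bit word%8.
    // E.g. maskOffset = 24 bytes => word 3 => byte 0, mask 0b00001000.
    func maskPosition(maskOffset uintptr) (byteIdx uintptr, mask uint8) {
        const ptrSize = 8 // assumed 64-bit
        word := maskOffset / ptrSize
        return word / 8, uint8(1) << (word % 8)
    }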
@@ -720,8 +721,8 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
ptrmask := typ.gcdata
buf := &getg().m.p.ptr().wbBuf
var bits uint32
- for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
- if i&(sys.PtrSize*8-1) == 0 {
+ for i := uintptr(0); i < typ.ptrdata; i += goarch.PtrSize {
+ if i&(goarch.PtrSize*8-1) == 0 {
bits = uint32(*ptrmask)
ptrmask = addb(ptrmask, 1)
} else {
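The reload test above fetches a fresh ptrmask byte once per eight words: with 64-bit words, i&(8*8-1) == 0 holds exactly at i = 0, 64, 128, ..., i.e. once per ptrmask byte, and between reloads the cached byte is shifted right one bit per word. In sketch form:

    // sketchPtrmaskWalk shows the reload cadence above; nextMaskByte is a
    // hypothetical stand-in for the addb pointer walk over typ.gcdata.
    func sketchPtrmaskWalk(ptrdata uintptr, nextMaskByte func() byte, visit func(word uintptr)) {
        const ptrSize = 8 // assumed 64-bit
        var bits uint32
        for i := uintptr(0); i < ptrdata; i += ptrSize {
            if i&(ptrSize*8-1) == 0 {
                bits = uint32(nextMaskByte()) // fresh byte covers the next 8 words
            } else {
                bits >>= 1 // consume one bit per word
            }
            if bits&1 != 0 {
                visit(i / ptrSize) // this word can hold a pointer
            }
        }
    }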
@@ -751,14 +752,14 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
// Otherwise, it initializes all words to scalar/dead.
func (h heapBits) initSpan(s *mspan) {
// Clear bits corresponding to objects.
- nw := (s.npages << _PageShift) / sys.PtrSize
+ nw := (s.npages << _PageShift) / goarch.PtrSize
if nw%wordsPerBitmapByte != 0 {
throw("initSpan: unaligned length")
}
if h.shift != 0 {
throw("initSpan: unaligned base")
}
- isPtrs := sys.PtrSize == 8 && s.elemsize == sys.PtrSize
+ isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
for nw > 0 {
hNext, anw := h.forwardOrBoundary(nw)
nbyte := anw / wordsPerBitmapByte
@@ -836,7 +837,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore
// assume that dataSize == size without checking it explicitly.

- if sys.PtrSize == 8 && size == sys.PtrSize {
+ if goarch.PtrSize == 8 && size == goarch.PtrSize {
// It's one word and it has pointers, it must be a pointer.
// Since all allocated one-word objects are pointers
// (non-pointers are aggregated into tinySize allocations),
@@ -862,8 +863,8 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// objects are at least 4 words long and that their bitmaps start either at the beginning
// of a bitmap byte, or half-way in (h.shift of 0 and 2 respectively).

- if size == 2*sys.PtrSize {
- if typ.size == sys.PtrSize {
+ if size == 2*goarch.PtrSize {
+ if typ.size == goarch.PtrSize {
// We're allocating a block big enough to hold two pointers.
// On 64-bit, that means the actual object must be two pointers,
// or else we'd have used the one-pointer-sized block.
@@ -872,7 +873,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// just the smallest block available. Distinguish by checking dataSize.
// (In general the number of instances of typ being allocated is
// dataSize/typ.size.)
- if sys.PtrSize == 4 && dataSize == sys.PtrSize {
+ if goarch.PtrSize == 4 && dataSize == goarch.PtrSize {
// 1 pointer object. On 32-bit machines clear the bit for the
// unused second word.
*h.bitp &^= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift
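A worked reading of the clear mask above, assuming the bitmap layout this file uses (pointer bits in a byte's low nibble, scan bits in its high nibble, one bit pair per word, heapBitsShift = 1): bitPointer|bitScan is 0x11, so the expression is (0x11 | 0x11<<1) << h.shift = 0x33 << h.shift, which zeroes the pointer and scan bits of exactly the two words in question.

    const (
        bitPointer    = 1 << 0 // low nibble: one pointer bit per word
        bitScan       = 1 << 4 // high nibble: one scan bit per word
        heapBitsShift = 1
    )
    // (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) == 0x11|0x22 == 0x33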
@@ -886,38 +887,38 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// Otherwise typ.size must be 2*sys.PtrSize,
// and typ.kind&kindGCProg == 0.
if doubleCheck {
- if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
+ if typ.size != 2*goarch.PtrSize || typ.kind&kindGCProg != 0 {
print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
throw("heapBitsSetType")
}
}
b := uint32(*ptrmask)
hb := b & 3
- hb |= bitScanAll & ((bitScan << (typ.ptrdata / sys.PtrSize)) - 1)
+ hb |= bitScanAll & ((bitScan << (typ.ptrdata / goarch.PtrSize)) - 1)
// Clear the bits for this object so we can set the
// appropriate ones.
*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
*h.bitp |= uint8(hb << h.shift)
return
- } else if size == 3*sys.PtrSize {
+ } else if size == 3*goarch.PtrSize {
b := uint8(*ptrmask)
if doubleCheck {
if b == 0 {
println("runtime: invalid type ", typ.string())
throw("heapBitsSetType: called with non-pointer type")
}
- if sys.PtrSize != 8 {
+ if goarch.PtrSize != 8 {
throw("heapBitsSetType: unexpected 3 pointer wide size class on 32 bit")
}
if typ.kind&kindGCProg != 0 {
throw("heapBitsSetType: unexpected GC prog for 3 pointer wide size class")
}
- if typ.size == 2*sys.PtrSize {
+ if typ.size == 2*goarch.PtrSize {
print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, "\n")
throw("heapBitsSetType: inconsistent object sizes")
}
}
- if typ.size == sys.PtrSize {
+ if typ.size == goarch.PtrSize {
// The type contains a pointer otherwise heapBitsSetType wouldn't have been called.
// Since the type is only 1 pointer wide and contains a pointer, its gcdata must be exactly 1.
if doubleCheck && *typ.gcdata != 1 {
@@ -1063,8 +1064,8 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// Filling in bits for an array of typ.
// Set up for repetition of ptrmask during main loop.
// Note that ptrmask describes only a prefix of
- const maxBits = sys.PtrSize*8 - 7
- if typ.ptrdata/sys.PtrSize <= maxBits {
+ const maxBits = goarch.PtrSize*8 - 7
+ if typ.ptrdata/goarch.PtrSize <= maxBits {
// Entire ptrmask fits in uintptr with room for a byte fragment.
// Load into pbits and never read from ptrmask again.
// This is especially important when the ptrmask has
@@ -1075,12 +1076,12 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// Accumulate ptrmask into b.
// ptrmask is sized to describe only typ.ptrdata, but we record
// it as describing typ.size bytes, since all the high bits are zero.
- nb = typ.ptrdata / sys.PtrSize
+ nb = typ.ptrdata / goarch.PtrSize
for i := uintptr(0); i < nb; i += 8 {
b |= uintptr(*p) << i
p = add1(p)
}
- nb = typ.size / sys.PtrSize
+ nb = typ.size / goarch.PtrSize

// Replicate ptrmask to fill entire pbits uintptr.
// Doubling and truncating is fewer steps than
@@ -1091,7 +1092,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
pbits = b
endnb = nb
if nb+nb <= maxBits {
- for endnb <= sys.PtrSize*8 {
+ for endnb <= goarch.PtrSize*8 {
pbits |= pbits << endnb
endnb += endnb
}
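The loop above is replicate-by-doubling: each pass doubles the length of the filled prefix, so an nb-bit pattern fills a whole word in O(log bits) steps instead of one append per copy. A self-contained version, assuming a 64-bit uintptr:

    // replicate fills a uintptr with copies of the low nb bits of b.
    func replicate(b, nb uintptr) uintptr {
        const wordBits = 64 // assumed width
        for nb <= wordBits {
            b |= b << nb // double the filled prefix
            nb += nb
        }
        return b
    }

For b = 0b101, nb = 3, five doublings produce the full 64-bit 101101... pattern where naive appending would take about twenty-one steps.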
@@ -1110,9 +1111,9 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
endp = nil
} else {
// Ptrmask is larger. Read it multiple times.
- n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
+ n := (typ.ptrdata/goarch.PtrSize+7)/8 - 1
endp = addb(ptrmask, n)
- endnb = typ.size/sys.PtrSize - n*8
+ endnb = typ.size/goarch.PtrSize - n*8
}
}
if p != nil {
@@ -1123,12 +1124,12 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {

if typ.size == dataSize {
// Single entry: can stop once we reach the non-pointer data.
- nw = typ.ptrdata / sys.PtrSize
+ nw = typ.ptrdata / goarch.PtrSize
} else {
// Repeated instances of typ in an array.
// Have to process first N-1 entries in full, but can stop
// once we reach the non-pointer data in the final entry.
- nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
+ nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / goarch.PtrSize
}
if nw == 0 {
// No pointers! Caller was supposed to check.
@@ -1291,7 +1292,7 @@ Phase3:
}

// Change nw from counting possibly-pointer words to total words in allocation.
- nw = size / sys.PtrSize
+ nw = size / goarch.PtrSize

// Write whole bitmap bytes.
// The first is hb, the rest are zero.
@@ -1325,7 +1326,7 @@ Phase4:
h := heapBitsForAddr(x)
// cnw is the number of heap words, or bit pairs
// remaining (like nw above).
- cnw := size / sys.PtrSize
+ cnw := size / goarch.PtrSize
src := (*uint8)(unsafe.Pointer(x))
// We know the first and last byte of the bitmap are
// not the same, but it's still possible for small
@@ -1390,7 +1391,7 @@ Phase4:
if doubleCheck {
// x+size may not point to the heap, so back up one
// word and then advance it the way we do above.
- end := heapBitsForAddr(x + size - sys.PtrSize)
+ end := heapBitsForAddr(x + size - goarch.PtrSize)
if outOfPlace {
// In out-of-place copying, we just advance
// using next.
@@ -1417,11 +1418,11 @@ Phase4:
// Double-check that bits to be written were written correctly.
// Does not check that other bits were not written, unfortunately.
h := heapBitsForAddr(x)
- nptr := typ.ptrdata / sys.PtrSize
- ndata := typ.size / sys.PtrSize
+ nptr := typ.ptrdata / goarch.PtrSize
+ ndata := typ.size / goarch.PtrSize
count := dataSize / typ.size
- totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
- for i := uintptr(0); i < size/sys.PtrSize; i++ {
+ totalptr := ((count-1)*typ.size + typ.ptrdata) / goarch.PtrSize
+ for i := uintptr(0); i < size/goarch.PtrSize; i++ {
j := i % ndata
var have, want uint8
have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
@@ -1446,7 +1447,7 @@ Phase4:
print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
- println("at word", i, "offset", i*sys.PtrSize, "have", hex(have), "want", hex(want))
+ println("at word", i, "offset", i*goarch.PtrSize, "have", hex(have), "want", hex(want))
if typ.kind&kindGCProg != 0 {
println("GC program:")
dumpGCProg(addb(typ.gcdata, 4))
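A worked instance of the double-check arithmetic above, on 64-bit, for a hypothetical element type with typ.size = 24 and typ.ptrdata = 16 allocated as a three-element array (dataSize = 72):

    ndata    = 24/8              = 3 words per element
    nptr     = 16/8              = 2 of them may hold pointers
    count    = 72/24             = 3 elements
    totalptr = ((3-1)*24 + 16)/8 = 8 possibly-pointer words in total

so the loop expects pointer bits exactly at words i < 8 with i%3 < 2: the last element's trailing non-pointer words don't count.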
@@ -1477,14 +1478,14 @@ var debugPtrmask struct {
// so that the relevant bitmap bytes are not shared with surrounding
// objects.
func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
- if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
+ if goarch.PtrSize == 8 && allocSize%(4*goarch.PtrSize) != 0 {
// Alignment will be wrong.
throw("heapBitsSetTypeGCProg: small allocation")
}
var totalBits uintptr
if elemSize == dataSize {
totalBits = runGCProg(prog, nil, h.bitp, 2)
- if totalBits*sys.PtrSize != progSize {
+ if totalBits*goarch.PtrSize != progSize {
println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
throw("heapBitsSetTypeGCProg: unexpected bit count")
}
@@ -1499,7 +1500,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
// repeats that first element to fill the array.
var trailer [40]byte // 3 varints (max 10 each) + some bytes
i := 0
- if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
+ if n := elemSize/goarch.PtrSize - progSize/goarch.PtrSize; n > 0 {
// literal(0)
trailer[i] = 0x01
i++
@@ -1521,7 +1522,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
// repeat(elemSize/ptrSize, count-1)
trailer[i] = 0x80
i++
- n := elemSize / sys.PtrSize
+ n := elemSize / goarch.PtrSize
for ; n >= 0x80; n >>= 7 {
trailer[i] = byte(n | 0x80)
i++
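The emit loop above is the base-128 varint encoding GC programs use for operands: seven payload bits per byte, least-significant group first, with bit 0x80 flagging continuation. As a standalone helper:

    // putVarint encodes n in GC-program varint form; returns bytes written.
    func putVarint(buf []byte, n uintptr) int {
        i := 0
        for ; n >= 0x80; n >>= 7 {
            buf[i] = byte(n | 0x80) // low 7 bits, continuation bit set
            i++
        }
        buf[i] = byte(n) // final byte, continuation bit clear
        return i + 1
    }

For n = 300 this writes 0xAC 0x02.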
@@ -1545,10 +1546,10 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
// last element. This will cause the code below to
// memclr the dead section of the final array element,
// so that scanobject can stop early in the final element.
- totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
+ totalBits = (elemSize*(count-1) + progSize) / goarch.PtrSize
}

endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
- endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/sys.PtrSize/wordsPerBitmapByte))
+ endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/goarch.PtrSize/wordsPerBitmapByte))
memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
}
@@ -1556,7 +1557,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
// size the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/sys.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
- n := (size/sys.PtrSize + 7) / 8
+ n := (size/goarch.PtrSize + 7) / 8
x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
x[len(x)-1] = 0xa1 // overflow check sentinel
n = runGCProg(prog, nil, &x[0], 1)
@@ -1691,7 +1692,7 @@ Run:
// the pattern to a bit buffer holding at most 7 bits (a partial byte)
// it will not overflow.
src := dst
- const maxBits = sys.PtrSize*8 - 7
+ const maxBits = goarch.PtrSize*8 - 7
if n <= maxBits {
// Start with bits in output buffer.
pattern := bits
@@ -1744,7 +1745,7 @@ Run:
nb := npattern
if nb+nb <= maxBits {
// Double pattern until the whole uintptr is filled.
- for nb <= sys.PtrSize*8 {
+ for nb <= goarch.PtrSize*8 {
b |= b << nb
nb += nb
}
@@ -1872,7 +1873,7 @@ Run:
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
// Each word of ptrdata needs one bit in the bitmap.
- bitmapBytes := divRoundUp(ptrdata, 8*sys.PtrSize)
+ bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
// Compute the number of pages needed for bitmapBytes.
pages := divRoundUp(bitmapBytes, pageSize)
s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
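divRoundUp is the runtime's ceiling-division helper; it is needed above because a partial trailing bitmap byte or page must still be allocated whole. Its definition is the usual one:

    // divRoundUp returns ceil(n / a), assuming n+a does not overflow.
    func divRoundUp(n, a uintptr) uintptr {
        return (n + a - 1) / a
    }

For example, ptrdata = 1<<20 on 64-bit needs divRoundUp(1<<20, 64) = 16384 bitmap bytes, which at an assumed 8 KiB page size is divRoundUp(16384, 8192) = 2 pages.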
@@ -1945,7 +1946,7 @@ func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
func reflect_gcbits(x interface{}) []byte {
ret := getgcmask(x)
typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
- nptr := typ.ptrdata / sys.PtrSize
+ nptr := typ.ptrdata / goarch.PtrSize
for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
ret = ret[:len(ret)-1]
}
@@ -1965,10 +1966,10 @@ func getgcmask(ep interface{}) (mask []byte) {
if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
bitmap := datap.gcdatamask.bytedata
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
- off := (uintptr(p) + i - datap.data) / sys.PtrSize
- mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - datap.data) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
}
return
}
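The extraction above is a standard packed-bitmap read: word offset off has its bit in byte off/8 at bit position off%8. As a self-contained helper over a plain byte slice:

    // ptrBit reports whether word off is marked in a 1-bit-per-word,
    // least-significant-bit-first mask.
    func ptrBit(bitmap []byte, off uintptr) byte {
        return (bitmap[off/8] >> (off % 8)) & 1
    }

The same pattern repeats below for the bss and stack-frame cases.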
@@ -1977,10 +1978,10 @@ func getgcmask(ep interface{}) (mask []byte) {
if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
bitmap := datap.gcbssmask.bytedata
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
- off := (uintptr(p) + i - datap.bss) / sys.PtrSize
- mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
}
return
}
@@ -1990,13 +1991,13 @@ func getgcmask(ep interface{}) (mask []byte) {
if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
hbits := heapBitsForAddr(base)
n := s.elemsize
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
if hbits.isPointer() {
- mask[i/sys.PtrSize] = 1
+ mask[i/goarch.PtrSize] = 1
}
if !hbits.morePointers() {
- mask = mask[:i/sys.PtrSize]
+ mask = mask[:i/goarch.PtrSize]
break
}
hbits = hbits.next()
@@ -2015,12 +2016,12 @@ func getgcmask(ep interface{}) (mask []byte) {
if locals.n == 0 {
return
}
- size := uintptr(locals.n) * sys.PtrSize
+ size := uintptr(locals.n) * goarch.PtrSize
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
- off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
- mask[i/sys.PtrSize] = locals.ptrbit(off)
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - frame.varp + size) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = locals.ptrbit(off)
}
}
return