aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/mpagecache.go
diff options
context:
space:
mode:
authorMichael Pratt <mpratt@google.com>2020-08-25 12:34:02 -0400
committerMichael Pratt <mpratt@google.com>2020-10-23 16:54:35 +0000
commitad642727247383079c8546ca365172859641a800 (patch)
treedba4e02179ab3b0d6bc6630405ddaacf919ba4ea /src/runtime/mpagecache.go
parente5ad73508e5ab5cadfba25e25d6cc3b025865e29 (diff)
downloadgo-ad642727247383079c8546ca365172859641a800.tar.gz
go-ad642727247383079c8546ca365172859641a800.zip
runtime: rename pageAlloc receiver
The history of pageAlloc using 's' as a receiver are lost to the depths of time (perhaps it used to be called summary?), but it doesn't make much sense anymore. Rename it to 'p'. Generated with: $ cd src/runtime $ grep -R -b "func (s \*pageAlloc" . | awk -F : '{ print $1 ":#" $2+6 }' | xargs -n 1 -I {} env GOROOT=$(pwd)/../../ gorename -offset {} -to p -v $ grep -R -b "func (s \*pageAlloc" . | awk -F : '{ print $1 ":#" $2+6 }' | xargs -n 1 -I {} env GOROOT=$(pwd)/../../ GOARCH=386 gorename -offset {} -to p -v $ GOROOT=$(pwd)/../../ gorename -offset mpagecache.go:#2397 -to p -v ($2+6 to advance past "func (".) Plus manual comment fixups. Change-Id: I2d521a1cbf6ebe2ef6aae92e654bfc33c63d1aa9 Reviewed-on: https://go-review.googlesource.com/c/go/+/250517 Trust: Michael Pratt <mpratt@google.com> Run-TryBot: Michael Pratt <mpratt@google.com> TryBot-Result: Go Bot <gobot@golang.org> Reviewed-by: Michael Knyszek <mknyszek@google.com>
Diffstat (limited to 'src/runtime/mpagecache.go')
-rw-r--r--src/runtime/mpagecache.go42
1 file changed, 21 insertions, 21 deletions
diff --git a/src/runtime/mpagecache.go b/src/runtime/mpagecache.go
index 683a997136..5f76501a1c 100644
--- a/src/runtime/mpagecache.go
+++ b/src/runtime/mpagecache.go
@@ -71,8 +71,8 @@ func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
// into s. Then, it clears the cache, such that empty returns
// true.
//
-// s.mheapLock must be held or the world must be stopped.
-func (c *pageCache) flush(s *pageAlloc) {
+// p.mheapLock must be held or the world must be stopped.
+func (c *pageCache) flush(p *pageAlloc) {
if c.empty() {
return
}
@@ -83,18 +83,18 @@ func (c *pageCache) flush(s *pageAlloc) {
// slower, safer thing by iterating over each bit individually.
for i := uint(0); i < 64; i++ {
if c.cache&(1<<i) != 0 {
- s.chunkOf(ci).free1(pi + i)
+ p.chunkOf(ci).free1(pi + i)
}
if c.scav&(1<<i) != 0 {
- s.chunkOf(ci).scavenged.setRange(pi+i, 1)
+ p.chunkOf(ci).scavenged.setRange(pi+i, 1)
}
}
// Since this is a lot like a free, we need to make sure
// we update the searchAddr just like free does.
- if b := (offAddr{c.base}); b.lessThan(s.searchAddr) {
- s.searchAddr = b
+ if b := (offAddr{c.base}); b.lessThan(p.searchAddr) {
+ p.searchAddr = b
}
- s.update(c.base, pageCachePages, false, false)
+ p.update(c.base, pageCachePages, false, false)
*c = pageCache{}
}
@@ -102,19 +102,19 @@ func (c *pageCache) flush(s *pageAlloc) {
// may not be contiguous, and returns a pageCache structure which owns the
// chunk.
//
-// s.mheapLock must be held.
-func (s *pageAlloc) allocToCache() pageCache {
+// p.mheapLock must be held.
+func (p *pageAlloc) allocToCache() pageCache {
// If the searchAddr refers to a region which has a higher address than
// any known chunk, then we know we're out of memory.
- if chunkIndex(s.searchAddr.addr()) >= s.end {
+ if chunkIndex(p.searchAddr.addr()) >= p.end {
return pageCache{}
}
c := pageCache{}
- ci := chunkIndex(s.searchAddr.addr()) // chunk index
- if s.summary[len(s.summary)-1][ci] != 0 {
+ ci := chunkIndex(p.searchAddr.addr()) // chunk index
+ if p.summary[len(p.summary)-1][ci] != 0 {
// Fast path: there's free pages at or near the searchAddr address.
- chunk := s.chunkOf(ci)
- j, _ := chunk.find(1, chunkPageIndex(s.searchAddr.addr()))
+ chunk := p.chunkOf(ci)
+ j, _ := chunk.find(1, chunkPageIndex(p.searchAddr.addr()))
if j == ^uint(0) {
throw("bad summary data")
}
@@ -126,15 +126,15 @@ func (s *pageAlloc) allocToCache() pageCache {
} else {
// Slow path: the searchAddr address had nothing there, so go find
// the first free page the slow way.
- addr, _ := s.find(1)
+ addr, _ := p.find(1)
if addr == 0 {
// We failed to find adequate free space, so mark the searchAddr as OoM
// and return an empty pageCache.
- s.searchAddr = maxSearchAddr
+ p.searchAddr = maxSearchAddr
return pageCache{}
}
ci := chunkIndex(addr)
- chunk := s.chunkOf(ci)
+ chunk := p.chunkOf(ci)
c = pageCache{
base: alignDown(addr, 64*pageSize),
cache: ^chunk.pages64(chunkPageIndex(addr)),
@@ -143,19 +143,19 @@ func (s *pageAlloc) allocToCache() pageCache {
}
// Set the bits as allocated and clear the scavenged bits.
- s.allocRange(c.base, pageCachePages)
+ p.allocRange(c.base, pageCachePages)
// Update as an allocation, but note that it's not contiguous.
- s.update(c.base, pageCachePages, false, true)
+ p.update(c.base, pageCachePages, false, true)
// Set the search address to the last page represented by the cache.
// Since all of the pages in this block are going to the cache, and we
// searched for the first free page, we can confidently start at the
// next page.
//
- // However, s.searchAddr is not allowed to point into unmapped heap memory
+ // However, p.searchAddr is not allowed to point into unmapped heap memory
// unless it is maxSearchAddr, so make it the last page as opposed to
// the page after.
- s.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
+ p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
return c
}