author     Russ Cox <rsc@golang.org>   2010-02-10 21:23:08 -0800
committer  Russ Cox <rsc@golang.org>   2010-02-10 21:23:08 -0800
commit     22a7f2a14d8fd870913942707dd3f30a30bb1eeb (patch)
tree       3ff6573c09dd6d13814515ecae0ac692fc7cc758
parent     fc8e3d4004a1d583d329aa5055407c83e52bd581 (diff)
runtime: delete MHeapMapCache, which is useless
because free needs to mark the block as freed to coordinate with the
garbage collector.  (In C++, free can blindly put the block on the free
list, no questions asked, so the cache saves some work.)

R=iant
CC=golang-dev
https://golang.org/cl/206069
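The point is easiest to see in code: Go's free has to reach the block's GC
ref word, and the only way to reach it is the span lookup, so the span's
size class is already in hand and a separate page-to-size-class cache saves
nothing.  The sketch below illustrates that free path under simplifying
assumptions; Span, lookup, RefFree, and go_free are toy stand-ins for the
runtime's MSpan, mlookup, and ref flags, not the runtime's code (which is in
the malloc.cgo hunks below).

/*
 * Minimal standalone sketch (not the runtime's code) of why the
 * size-class cache stopped paying for itself.
 */
#include <stdint.h>
#include <stdio.h>

enum { RefNone = 0, RefFree = 1 };

typedef struct Span {
	int32_t  sizeclass;   /* 0 means large object */
	uint32_t ref;         /* GC mark word for the block (simplified) */
} Span;

/* Stand-in for mlookup/MHeap_Lookup: map a pointer to its span. */
static Span *lookup(void *v, Span *heap_span) {
	(void)v;
	return heap_span;
}

/* Go-style free: it must fetch the span anyway to mark the block freed
 * for the collector, so the size class comes along for free and a
 * page->sizeclass cache buys nothing. */
static void go_free(void *v, Span *heap_span) {
	Span *s = lookup(v, heap_span);   /* unavoidable: need the ref word */
	s->ref = RefFree;                 /* coordinate with the GC */
	if (s->sizeclass == 0)
		printf("large object: return pages to heap\n");
	else
		printf("small object: push onto size class %d free list\n",
		       (int)s->sizeclass);
}

int main(void) {
	Span s = { .sizeclass = 3, .ref = RefNone };
	int dummy;
	go_free(&dummy, &s);
	return 0;
}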
-rw-r--r--  src/pkg/runtime/malloc.cgo     67
-rw-r--r--  src/pkg/runtime/malloc.h        6
-rw-r--r--  src/pkg/runtime/mfinal.c        2
-rw-r--r--  src/pkg/runtime/mgc0.c          2
-rw-r--r--  src/pkg/runtime/mheap.c        18
-rw-r--r--  src/pkg/runtime/mheapmap32.h   35
-rw-r--r--  src/pkg/runtime/mheapmap64.h   36
7 files changed, 33 insertions, 133 deletions
diff --git a/src/pkg/runtime/malloc.cgo b/src/pkg/runtime/malloc.cgo
index 53411da1b1..5b43b3c9e7 100644
--- a/src/pkg/runtime/malloc.cgo
+++ b/src/pkg/runtime/malloc.cgo
@@ -48,6 +48,12 @@ mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
mstats.alloc += size;
mstats.total_alloc += size;
mstats.by_size[sizeclass].nmalloc++;
+
+ if(!mlookup(v, nil, nil, nil, &ref)) {
+ printf("malloc %D; mlookup failed\n", (uint64)size);
+ throw("malloc mlookup");
+ }
+ *ref = RefNone | refflag;
} else {
// TODO(rsc): Report tracebacks for very large allocations.
@@ -61,14 +67,10 @@ mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
mstats.alloc += npages<<PageShift;
mstats.total_alloc += npages<<PageShift;
v = (void*)(s->start << PageShift);
- }
- // setup for mark sweep
- if(!mlookup(v, nil, nil, &ref)) {
- printf("malloc %D; mlookup failed\n", (uint64)size);
- throw("malloc mlookup");
+ // setup for mark sweep
+ s->gcref0 = RefNone | refflag;
}
- *ref = RefNone | refflag;
m->mallocing = 0;
@@ -88,7 +90,6 @@ void
free(void *v)
{
int32 sizeclass, size;
- uintptr page, tmp;
MSpan *s;
MCache *c;
uint32 *ref;
@@ -100,46 +101,34 @@ free(void *v)
throw("malloc/free - deadlock");
m->mallocing = 1;
- if(!mlookup(v, nil, nil, &ref)) {
+ if(!mlookup(v, nil, nil, &s, &ref)) {
printf("free %p: not an allocated block\n", v);
throw("free mlookup");
}
*ref = RefFree;
// Find size class for v.
- page = (uintptr)v >> PageShift;
- sizeclass = MHeapMapCache_GET(&mheap.mapcache, page, tmp);
+ sizeclass = s->sizeclass;
if(sizeclass == 0) {
- // Missed in cache.
- s = MHeap_Lookup(&mheap, page);
- if(s == nil)
- throw("free - invalid pointer");
- sizeclass = s->sizeclass;
- if(sizeclass == 0) {
- // Large object.
- mstats.alloc -= s->npages<<PageShift;
- runtime_memclr(v, s->npages<<PageShift);
- MHeap_Free(&mheap, s);
- goto out;
- }
- MHeapMapCache_SET(&mheap.mapcache, page, sizeclass);
+ // Large object.
+ mstats.alloc -= s->npages<<PageShift;
+ runtime_memclr(v, s->npages<<PageShift);
+ MHeap_Free(&mheap, s);
+ } else {
+ // Small object.
+ c = m->mcache;
+ size = class_to_size[sizeclass];
+ if(size > sizeof(uintptr))
+ ((uintptr*)v)[1] = 1; // mark as "needs to be zeroed"
+ mstats.alloc -= size;
+ mstats.by_size[sizeclass].nfree++;
+ MCache_Free(c, v, sizeclass, size);
}
-
- // Small object.
- c = m->mcache;
- size = class_to_size[sizeclass];
- if(size > sizeof(uintptr))
- ((uintptr*)v)[1] = 1; // mark as "needs to be zeroed"
- mstats.alloc -= size;
- mstats.by_size[sizeclass].nfree++;
- MCache_Free(c, v, sizeclass, size);
-
-out:
m->mallocing = 0;
}
int32
-mlookup(void *v, byte **base, uintptr *size, uint32 **ref)
+mlookup(void *v, byte **base, uintptr *size, MSpan **sp, uint32 **ref)
{
uintptr n, nobj, i;
byte *p;
@@ -147,6 +136,8 @@ mlookup(void *v, byte **base, uintptr *size, uint32 **ref)
mstats.nlookup++;
s = MHeap_LookupMaybe(&mheap, (uintptr)v>>PageShift);
+ if(sp)
+ *sp = s;
if(s == nil) {
if(base)
*base = nil;
@@ -256,7 +247,7 @@ stackalloc(uint32 n)
return v;
}
v = mallocgc(n, 0, 0, 0);
- if(!mlookup(v, nil, nil, &ref))
+ if(!mlookup(v, nil, nil, nil, &ref))
throw("stackalloc mlookup");
*ref = RefStack;
return v;
@@ -283,7 +274,7 @@ func Free(p *byte) {
}
func Lookup(p *byte) (base *byte, size uintptr) {
- mlookup(p, &base, &size, nil);
+ mlookup(p, &base, &size, nil, nil);
}
func GC() {
@@ -306,7 +297,7 @@ func SetFinalizer(obj Eface, finalizer Eface) {
printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
goto throw;
}
- if(!mlookup(obj.data, &base, &size, nil) || obj.data != base) {
+ if(!mlookup(obj.data, &base, &size, nil, nil) || obj.data != base) {
printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
goto throw;
}
diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h
index f018b6e216..b9dea2f5e9 100644
--- a/src/pkg/runtime/malloc.h
+++ b/src/pkg/runtime/malloc.h
@@ -20,8 +20,6 @@
// MHeap: the malloc heap, managed at page (4096-byte) granularity.
// MSpan: a run of pages managed by the MHeap.
// MHeapMap: a mapping from page IDs to MSpans.
-// MHeapMapCache: a small cache of MHeapMap mapping page IDs
-// to size classes for pages used for small objects.
// MCentral: a shared free list for a given size class.
// MCache: a per-thread (in Go, per-M) cache for small objects.
// MStats: allocation statistics.
@@ -87,7 +85,6 @@ typedef struct FixAlloc FixAlloc;
typedef struct MCentral MCentral;
typedef struct MHeap MHeap;
typedef struct MHeapMap MHeapMap;
-typedef struct MHeapMapCache MHeapMapCache;
typedef struct MSpan MSpan;
typedef struct MStats MStats;
typedef struct MLink MLink;
@@ -296,7 +293,6 @@ struct MHeap
// span lookup
MHeapMap map;
- MHeapMapCache mapcache;
// range of addresses we might see in the heap
byte *min;
@@ -324,7 +320,7 @@ MSpan* MHeap_LookupMaybe(MHeap *h, PageID p);
void MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);
void* mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
-int32 mlookup(void *v, byte **base, uintptr *size, uint32 **ref);
+int32 mlookup(void *v, byte **base, uintptr *size, MSpan **s, uint32 **ref);
void gc(int32 force);
void* SysAlloc(uintptr);
diff --git a/src/pkg/runtime/mfinal.c b/src/pkg/runtime/mfinal.c
index 4fad6aa951..9591747231 100644
--- a/src/pkg/runtime/mfinal.c
+++ b/src/pkg/runtime/mfinal.c
@@ -97,7 +97,7 @@ addfinalizer(void *p, void (*f)(void*), int32 nret)
uint32 *ref;
byte *base;
- if(!mlookup(p, &base, nil, &ref) || p != base)
+ if(!mlookup(p, &base, nil, nil, &ref) || p != base)
throw("addfinalizer on invalid pointer");
if(f == nil) {
if(*ref & RefHasFinalizer) {
diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c
index 0870b3a6b0..82a8ad7e5b 100644
--- a/src/pkg/runtime/mgc0.c
+++ b/src/pkg/runtime/mgc0.c
@@ -65,7 +65,7 @@ scanblock(int32 depth, byte *b, int64 n)
obj = vp[i];
if(obj == nil || (byte*)obj < mheap.min || (byte*)obj >= mheap.max)
continue;
- if(mlookup(obj, &obj, &size, &refp)) {
+ if(mlookup(obj, &obj, &size, nil, &refp)) {
ref = *refp;
switch(ref & ~(RefNoPointers|RefHasFinalizer)) {
case RefFinalize:
diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c
index e78c860c31..49ff3622ff 100644
--- a/src/pkg/runtime/mheap.c
+++ b/src/pkg/runtime/mheap.c
@@ -108,27 +108,11 @@ HaveSpan:
MHeap_FreeLocked(h, t);
}
- // If span is being used for small objects, cache size class.
- // No matter what, cache span info, because gc needs to be
+ // Record span info, because gc needs to be
// able to map interior pointer to containing span.
s->sizeclass = sizeclass;
for(n=0; n<npage; n++)
MHeapMap_Set(&h->map, s->start+n, s);
- if(sizeclass == 0) {
- uintptr tmp;
-
- // If there are entries for this span, invalidate them,
- // but don't blow out cache entries about other spans.
- for(n=0; n<npage; n++)
- if(MHeapMapCache_GET(&h->mapcache, s->start+n, tmp) != 0)
- MHeapMapCache_SET(&h->mapcache, s->start+n, 0);
- } else {
- // Save cache entries for this span.
- // If there's a size class, there aren't that many pages.
- for(n=0; n<npage; n++)
- MHeapMapCache_SET(&h->mapcache, s->start+n, sizeclass);
- }
-
return s;
}
diff --git a/src/pkg/runtime/mheapmap32.h b/src/pkg/runtime/mheapmap32.h
index 0a16ccd100..cb8a830d07 100644
--- a/src/pkg/runtime/mheapmap32.h
+++ b/src/pkg/runtime/mheapmap32.h
@@ -39,38 +39,3 @@ MSpan* MHeapMap_GetMaybe(MHeapMap *m, PageID k);
void MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
-// Much of the time, free(v) needs to know only the size class for v,
-// not which span it came from. The MHeapMap finds the size class
-// by looking up the span.
-//
-// An MHeapMapCache is a simple direct-mapped cache translating
-// page numbers to size classes. It avoids the expensive MHeapMap
-// lookup for hot pages.
-//
-// The cache entries are 32 bits, with the page number in the low part
-// and the value at the top.
-//
-// NOTE(rsc): On a machine with 32-bit addresses (= 20-bit page numbers),
-// we can use a 16-bit cache entry by not storing the redundant 12 bits
-// of the key that are used as the entry index. For now, keep it simple.
-enum
-{
- MHeapMapCache_HashBits = 12
-};
-
-struct MHeapMapCache
-{
- uint32 array[1<<MHeapMapCache_HashBits];
-};
-
-// All macros for speed (sorry).
-#define HMASK ((1<<MHeapMapCache_HashBits)-1)
-#define KBITS MHeapMap_TotalBits
-#define KMASK ((1LL<<KBITS)-1)
-
-#define MHeapMapCache_SET(cache, key, value) \
- ((cache)->array[(key) & HMASK] = (key) | ((uintptr)(value) << KBITS))
-
-#define MHeapMapCache_GET(cache, key, tmp) \
- (tmp = (cache)->array[(key) & HMASK], \
- (tmp & KMASK) == (key) ? (tmp >> KBITS) : 0)
diff --git a/src/pkg/runtime/mheapmap64.h b/src/pkg/runtime/mheapmap64.h
index 127b773f74..fefeae65d6 100644
--- a/src/pkg/runtime/mheapmap64.h
+++ b/src/pkg/runtime/mheapmap64.h
@@ -58,39 +58,3 @@ MSpan* MHeapMap_GetMaybe(MHeapMap *m, PageID k);
void MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
-// Much of the time, free(v) needs to know only the size class for v,
-// not which span it came from. The MHeapMap finds the size class
-// by looking up the span.
-//
-// An MHeapMapCache is a simple direct-mapped cache translating
-// page numbers to size classes. It avoids the expensive MHeapMap
-// lookup for hot pages.
-//
-// The cache entries are 64 bits, with the page number in the low part
-// and the value at the top.
-//
-// NOTE(rsc): On a machine with 32-bit addresses (= 20-bit page numbers),
-// we can use a 16-bit cache entry by not storing the redundant 12 bits
-// of the key that are used as the entry index. Here in 64-bit land,
-// that trick won't work unless the hash table has 2^28 entries.
-enum
-{
- MHeapMapCache_HashBits = 12
-};
-
-struct MHeapMapCache
-{
- uintptr array[1<<MHeapMapCache_HashBits];
-};
-
-// All macros for speed (sorry).
-#define HMASK ((1<<MHeapMapCache_HashBits)-1)
-#define KBITS MHeapMap_TotalBits
-#define KMASK ((1LL<<KBITS)-1)
-
-#define MHeapMapCache_SET(cache, key, value) \
- ((cache)->array[(key) & HMASK] = (key) | ((uintptr)(value) << KBITS))
-
-#define MHeapMapCache_GET(cache, key, tmp) \
- (tmp = (cache)->array[(key) & HMASK], \
- (tmp & KMASK) == (key) ? (tmp >> KBITS) : 0)
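For reference, the deleted comments above describe a direct-mapped cache:
each word-sized entry packs the page number in its low bits and the size
class above them, indexed by the low 12 bits of the page number.  The
following standalone sketch restates that scheme, assuming 20-bit page
numbers as in the deleted 32-bit note; the names (Cache, cache_set,
cache_get, KeyBits) are illustrative stand-ins, not the runtime's.  The
catch, per the commit message, is that Go's free must do the span lookup
anyway to set the GC ref word, so even a cache hit no longer avoids work.

/*
 * Standalone restatement of the deleted MHeapMapCache macros
 * (illustrative only).
 */
#include <stdint.h>
#include <assert.h>

enum {
	HashBits = 12,   /* 4096 direct-mapped entries, as in the deleted code */
	KeyBits  = 20,   /* page-number bits kept in each entry (32-bit case) */
};

typedef struct Cache {
	uintptr_t array[1 << HashBits];  /* key in low bits, value above */
} Cache;

static void cache_set(Cache *c, uintptr_t page, uintptr_t sizeclass) {
	c->array[page & ((1 << HashBits) - 1)] = page | (sizeclass << KeyBits);
}

/* Returns the cached size class for page, or 0 on a miss. */
static uintptr_t cache_get(Cache *c, uintptr_t page) {
	uintptr_t e = c->array[page & ((1 << HashBits) - 1)];
	if ((e & (((uintptr_t)1 << KeyBits) - 1)) == page)
		return e >> KeyBits;
	return 0;
}

int main(void) {
	static Cache c;                       /* zero-initialized: all slots empty */
	cache_set(&c, 0x1234, 5);
	assert(cache_get(&c, 0x1234) == 5);   /* hit */
	assert(cache_get(&c, 0x2234) == 0);   /* same slot, key mismatch: miss */
	return 0;
}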