author     Russ Cox <rsc@golang.org>   2010-02-10 14:59:39 -0800
committer  Russ Cox <rsc@golang.org>   2010-02-10 14:59:39 -0800
commit     fb94be55dc2d93544189fccef0decee39121e57b (patch)
tree       0dbbf480c327623e3dfa663515f6e08cad92f8e8
parent     991a968f44010667faf58ce96626ac5b03c12fa2 (diff)
runtime: tighten garbage collector
* specialize sweepspan as sweepspan0 and sweepspan1.
* in sweepspan1, inline "free" to avoid expensive mlookup.

R=iant
CC=golang-dev
https://golang.org/cl/206060
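The mlookup cost being avoided: the generic free path must map an address back to its span (and therefore its size class) before the object can go back on a free list, while the sweep already visits the heap one span at a time and so knows both. A rough sketch of the contrast in the runtime's style (free_generic and its body are illustrative, not code from the tree):

	void
	free_generic(void *v)
	{
		MSpan *s;
		int32 sizeclass;

		// the expensive part: walk heap metadata to find v's span
		s = MHeap_LookupMaybe(&mheap, (uintptr)v >> PageShift);
		sizeclass = s->sizeclass;
		MCache_Free(m->mcache, v, sizeclass, class_to_size[sizeclass]);
	}

	// sweepspan1 below already holds the MSpan, so size and sizeclass
	// are computed once per span and each object is freed with no
	// per-object lookup at all.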
-rw-r--r--   src/pkg/runtime/malloc.h     |   1
-rw-r--r--   src/pkg/runtime/mcentral.c   |  17
-rw-r--r--   src/pkg/runtime/mgc0.c       | 156
3 files changed, 112 insertions, 62 deletions
diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h
index 2d94872f77..f018b6e216 100644
--- a/src/pkg/runtime/malloc.h
+++ b/src/pkg/runtime/malloc.h
@@ -321,6 +321,7 @@ MSpan* MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass);
void MHeap_Free(MHeap *h, MSpan *s);
MSpan* MHeap_Lookup(MHeap *h, PageID p);
MSpan* MHeap_LookupMaybe(MHeap *h, PageID p);
+void MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);
void* mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
int32 mlookup(void *v, byte **base, uintptr *size, uint32 **ref);
diff --git a/src/pkg/runtime/mcentral.c b/src/pkg/runtime/mcentral.c
index 7e33e01af2..ff366b1c53 100644
--- a/src/pkg/runtime/mcentral.c
+++ b/src/pkg/runtime/mcentral.c
@@ -157,6 +157,19 @@ MCentral_Free(MCentral *c, void *v)
	}
}

+void
+MGetSizeClassInfo(int32 sizeclass, int32 *sizep, int32 *npagesp, int32 *nobj)
+{
+	int32 size;
+	int32 npages;
+
+	npages = class_to_allocnpages[sizeclass];
+	size = class_to_size[sizeclass];
+	*npagesp = npages;
+	*sizep = size;
+	*nobj = (npages << PageShift) / (size + RefcountOverhead);
+}
+
// Fetch a new span from the heap and
// carve into objects for the free list.
static bool
@@ -168,7 +181,7 @@ MCentral_Grow(MCentral *c)
	MSpan *s;

	unlock(c);
-	npages = class_to_allocnpages[c->sizeclass];
+	MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
	s = MHeap_Alloc(&mheap, npages, c->sizeclass);
	if(s == nil) {
		// TODO(rsc): Log out of memory
@@ -179,8 +192,6 @@ MCentral_Grow(MCentral *c)
	// Carve span into sequence of blocks.
	tailp = &s->freelist;
	p = (byte*)(s->start << PageShift);
-	size = class_to_size[c->sizeclass];
-	n = (npages << PageShift) / (size + RefcountOverhead);
	s->gcref = (uint32*)(p + size*n);
	for(i=0; i<n; i++) {
		v = (MLink*)p;
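MGetSizeClassInfo gives MCentral_Grow and the sweep one shared source for span geometry; the duplicate formula in mgc0.c, flagged by a "Must match computation in MCentral_Grow" comment, is deleted below. A usage sketch (the concrete numbers in the comment assume a 4096-byte page and are illustrative only):

	int32 size, npages, n;

	MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
	// e.g. size=48, npages=1: n = 4096/(48+RefcountOverhead) objects,
	// since each object also pays for one uint32 gc reference word;
	// those n words form the s->gcref array at the tail of the span.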
diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c
index 7cc965400e..0870b3a6b0 100644
--- a/src/pkg/runtime/mgc0.c
+++ b/src/pkg/runtime/mgc0.c
@@ -139,79 +139,115 @@ mark(void)
	}
}

+// pass 0: mark RefNone with finalizer as RefFinalize and trace
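+// (the scanblock call below is what keeps memory reachable only from a
+// finalizable block alive: it marks those blocks RefSome, so pass 1
+// resets them instead of freeing them)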
static void
-sweepspan(MSpan *s, int32 pass)
+sweepspan0(MSpan *s)
{
-	int32 i, n, npages, size;
	byte *p;
-
-	if(s->state != MSpanInUse)
-		return;
+	uint32 ref, *gcrefp, *gcrefep;
+	int32 n, size, npages;

	p = (byte*)(s->start << PageShift);
	if(s->sizeclass == 0) {
		// Large block.
-		sweepblock(p, (uint64)s->npages<<PageShift, &s->gcref0, pass);
+		ref = s->gcref0;
+		if((ref&~RefNoPointers) == (RefNone|RefHasFinalizer)) {
+			// Mark as finalizable.
+			s->gcref0 = RefFinalize | RefHasFinalizer | (ref&RefNoPointers);
+			if(!(ref & RefNoPointers))
+				scanblock(100, p, s->npages<<PageShift);
+		}
		return;
	}

	// Chunk full of small blocks.
-	// Must match computation in MCentral_Grow.
-	size = class_to_size[s->sizeclass];
-	npages = class_to_allocnpages[s->sizeclass];
-	n = (npages << PageShift) / (size + RefcountOverhead);
-	for(i=0; i<n; i++)
-		sweepblock(p+i*size, size, &s->gcref[i], pass);
-}
+	MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
+	gcrefp = s->gcref;
+	gcrefep = s->gcref + n;
+	for(; gcrefp < gcrefep; gcrefp++) {
+		ref = *gcrefp;
+		if((ref&~RefNoPointers) == (RefNone|RefHasFinalizer)) {
+			// Mark as finalizable.
+			*gcrefp = RefFinalize | RefHasFinalizer | (ref&RefNoPointers);
+			if(!(ref & RefNoPointers))
+				scanblock(100, p+(gcrefp-s->gcref)*size, size);
+		}
+	}
+}
+
+// pass 1: free RefNone, queue RefFinalize, reset RefSome
static void
-sweepblock(byte *p, int64 n, uint32 *gcrefp, int32 pass)
+sweepspan1(MSpan *s)
{
-	uint32 gcref;
-
-	gcref = *gcrefp;
-	switch(gcref & ~(RefNoPointers|RefHasFinalizer)) {
-	default:
-		throw("bad 'ref count'");
-	case RefFree:
-	case RefStack:
-		break;
-	case RefNone:
-		if(pass == 0 && (gcref & RefHasFinalizer)) {
-			// Tentatively mark as finalizable.
-			// Make sure anything it points at will not be collected.
-			if(Debug > 0)
-				printf("maybe finalize %p+%D\n", p, n);
-			*gcrefp = RefFinalize | RefHasFinalizer | (gcref&RefNoPointers);
-			scanblock(100, p, n);
-		} else if(pass == 1) {
-			if(Debug > 0)
-				printf("free %p+%D\n", p, n);
-			free(p);
+	int32 n, npages, size;
+	byte *p;
+	uint32 ref, *gcrefp, *gcrefep;
+	MCache *c;
+
+	p = (byte*)(s->start << PageShift);
+	if(s->sizeclass == 0) {
+		// Large block.
+		ref = s->gcref0;
+		switch(ref & ~(RefNoPointers|RefHasFinalizer)) {
+		case RefNone:
+			// Free large object.
+			mstats.alloc -= s->npages<<PageShift;
+			runtime_memclr(p, s->npages<<PageShift);
+			s->gcref0 = RefFree;
+			MHeap_Free(&mheap, s);
+			break;
+		case RefFinalize:
+			if(pfinq < efinq) {
+				pfinq->p = p;
+				pfinq->nret = 0;
+				pfinq->fn = getfinalizer(p, 1, &pfinq->nret);
+				ref &= ~RefHasFinalizer;
+				if(pfinq->fn == nil)
+					throw("finalizer inconsistency");
+				pfinq++;
+			}
+			// fall through
+		case RefSome:
+			s->gcref0 = RefNone | (ref&(RefNoPointers|RefHasFinalizer));
+			break;
		}
-		break;
-	case RefFinalize:
-		if(pass != 1)
-			throw("sweepspan pass 0 RefFinalize");
-		if(pfinq < efinq) {
-			if(Debug > 0)
-				printf("finalize %p+%D\n", p, n);
-			pfinq->p = p;
-			pfinq->nret = 0;
-			pfinq->fn = getfinalizer(p, 1, &pfinq->nret);
-			gcref &= ~RefHasFinalizer;
-			if(pfinq->fn == nil)
-				throw("getfinalizer inconsistency");
-			pfinq++;
+		return;
+	}
+
+	// Chunk full of small blocks.
+	MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
+	gcrefp = s->gcref;
+	gcrefep = s->gcref + n;
+	for(; gcrefp < gcrefep; gcrefp++, p += size) {
+		ref = *gcrefp;
+		if(ref < RefNone)	// RefFree or RefStack
+			continue;
+		switch(ref & ~(RefNoPointers|RefHasFinalizer)) {
+		case RefNone:
+			// Free small object.
+			*gcrefp = RefFree;
+			c = m->mcache;
+			if(size > sizeof(uintptr))
+				((uintptr*)p)[1] = 1;	// mark as "needs to be zeroed"
+			mstats.alloc -= size;
+			mstats.by_size[s->sizeclass].nfree++;
+			MCache_Free(c, p, s->sizeclass, size);
+			break;
+		case RefFinalize:
+			if(pfinq < efinq) {
+				pfinq->p = p;
+				pfinq->nret = 0;
+				pfinq->fn = getfinalizer(p, 1, &pfinq->nret);
+				ref &= ~RefHasFinalizer;
+				if(pfinq->fn == nil)
+					throw("finalizer inconsistency");
+				pfinq++;
+			}
+			// fall through
+		case RefSome:
+			*gcrefp = RefNone | (ref&(RefNoPointers|RefHasFinalizer));
+			break;
		}
-		// Reset for next mark+sweep.
-		*gcrefp = RefNone | (gcref&(RefNoPointers|RefHasFinalizer));
-		break;
-	case RefSome:
-		// Reset for next mark+sweep.
-		if(pass == 1)
-			*gcrefp = RefNone | (gcref&(RefNoPointers|RefHasFinalizer));
-		break;
	}
}
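Both passes decode the gc reference words the same way: two high flag bits are masked off, and the remainder is a small state. A sketch of the encoding they assume (the real enum lives in mgc0.c; the flag bit values here are illustrative):

	enum {
		RefFree = 0,	// unused object; skipped by both passes
		RefStack,	// stack segment; skipped by both passes
		RefNone,	// unmarked: pass 0 may promote it, pass 1 frees it
		RefSome,	// marked reachable: pass 1 resets it to RefNone
		RefFinalize,	// tentatively finalizable: pass 1 queues it on pfinq
		RefNoPointers = 0x80000000U,	// flag: block contains no pointers
		RefHasFinalizer = 0x40000000U,	// flag: block has a finalizer
	};

The `if(ref < RefNone) continue;` fast path in the small-object loop relies only on RefFree and RefStack sorting below RefNone, which this ordering provides.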
@@ -222,11 +258,13 @@ sweep(void)

	// Sweep all the spans marking blocks to be finalized.
	for(s = mheap.allspans; s != nil; s = s->allnext)
-		sweepspan(s, 0);
+		if(s->state == MSpanInUse)
+			sweepspan0(s);

	// Sweep again queueing finalizers and freeing the others.
	for(s = mheap.allspans; s != nil; s = s->allnext)
-		sweepspan(s, 1);
+		if(s->state == MSpanInUse)
+			sweepspan1(s);
}
// Semaphore, not Lock, so that the goroutine
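Why the sweep makes two full passes over all spans instead of one: nothing may be freed until every finalizable block has been traced, because pass 0's scanblock is what promotes blocks reachable only from finalizable blocks to RefSome. A worked example with hypothetical objects F (has a finalizer and points at X) and X, both left RefNone by the mark phase:

	// after mark():  F = RefNone|RefHasFinalizer        X = RefNone
	// after pass 0:  F = RefFinalize|RefHasFinalizer;   scanning F
	//                flips X to RefSome
	// after pass 1:  F's finalizer is queued on pfinq, F -> RefNone;
	//                X -> RefNone (kept for the next cycle, not freed)

Interleaving the passes span by span would break this: X could be freed before the span containing F had been scanned.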