Diffstat (limited to 'libgo/runtime/mgc0.c')
 libgo/runtime/mgc0.c | 175
 1 file changed, 69 insertions(+), 106 deletions(-)
diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c
index 00448215c917..487767878282 100644
--- a/libgo/runtime/mgc0.c
+++ b/libgo/runtime/mgc0.c
@@ -145,21 +145,6 @@ clearpools(void)
}
}
-// Holding worldsema grants an M the right to try to stop the world.
-// The procedure is:
-//
-// runtime_semacquire(&runtime_worldsema);
-// m->gcing = 1;
-// runtime_stoptheworld();
-//
-// ... do stuff ...
-//
-// m->gcing = 0;
-// runtime_semrelease(&runtime_worldsema);
-// runtime_starttheworld();
-//
-uint32 runtime_worldsema = 1;
-
typedef struct Workbuf Workbuf;
struct Workbuf
{
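
The comment deleted above documented the old stop-the-world protocol around runtime_worldsema. The replacement calls introduced later in this patch follow the same shape; a minimal sketch, inferred from the runtime_gc() changes below rather than from any surviving comment:

    // Sketch only: the protocol as implied by this patch's new calls.
    runtime_acquireWorldsema();       // was runtime_semacquire(&runtime_worldsema, false)
    m->gcing = 1;
    runtime_stopTheWorldWithSema();   // was runtime_stoptheworld()

    // ... do stuff ...

    m->gcing = 0;
    runtime_releaseWorldsema();       // was runtime_semrelease(&runtime_worldsema)
    runtime_startTheWorldWithSema();  // was runtime_starttheworld()
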
@@ -1377,7 +1362,7 @@ getempty(Workbuf *b)
runtime_lock(&work);
if(work.nchunk < sizeof *b) {
work.nchunk = 1<<20;
- work.chunk = runtime_SysAlloc(work.nchunk, &mstats.gc_sys);
+ work.chunk = runtime_SysAlloc(work.nchunk, &mstats()->gc_sys);
if(work.chunk == nil)
runtime_throw("runtime: cannot allocate memory");
}
@@ -1558,7 +1543,7 @@ runtime_queuefinalizer(void *p, FuncVal *fn, const FuncType *ft, const PtrType *
runtime_lock(&finlock);
if(finq == nil || finq->cnt == finq->cap) {
if(finc == nil) {
- finc = runtime_persistentalloc(FinBlockSize, 0, &mstats.gc_sys);
+ finc = runtime_persistentalloc(FinBlockSize, 0, &mstats()->gc_sys);
finc->cap = (FinBlockSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
finc->alllink = allfin;
allfin = finc;
@@ -1755,7 +1740,7 @@ runtime_MSpan_Sweep(MSpan *s)
runtime_MHeap_Free(&runtime_mheap, s, 1);
c->local_nlargefree++;
c->local_largefree += size;
- runtime_xadd64(&mstats.next_gc, -(uint64)(size * (gcpercent + 100)/100));
+ runtime_xadd64(&mstats()->next_gc, -(uint64)(size * (gcpercent + 100)/100));
res = true;
} else {
// Free small object.
@@ -1797,7 +1782,7 @@ runtime_MSpan_Sweep(MSpan *s)
if(nfree > 0) {
c->local_nsmallfree[cl] += nfree;
c->local_cachealloc -= nfree * size;
- runtime_xadd64(&mstats.next_gc, -(uint64)(nfree * size * (gcpercent + 100)/100));
+ runtime_xadd64(&mstats()->next_gc, -(uint64)(nfree * size * (gcpercent + 100)/100));
res = runtime_MCentral_FreeSpan(&runtime_mheap.central[cl], s, nfree, head.next, end);
// MCentral_FreeSpan updates sweepgen
}
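
Both hunks above credit swept memory back against the collection target: each freed byte lowers next_gc by itself plus its share of the growth headroom, hence the (gcpercent + 100)/100 factor. A worked example with assumed values (gcpercent defaults to 100 via GOGC):

    // Assumed values, for illustration only.
    // factor = (100 + 100)/100 = 2, so freeing a 1 MiB object
    // subtracts 2 MiB from next_gc:
    uint64 size = 1<<20;                       // 1 MiB freed
    uint64 credit = size * (100 + 100) / 100;  // 2 MiB
    // runtime_xadd64(&mstats()->next_gc, -(uint64)credit);
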
@@ -2010,6 +1995,7 @@ runtime_updatememstats(GCStats *stats)
uint32 i;
uint64 stacks_inuse, smallfree;
uint64 *src, *dst;
+ MStats *pmstats;
if(stats)
runtime_memclr((byte*)stats, sizeof(*stats));
@@ -2024,11 +2010,12 @@ runtime_updatememstats(GCStats *stats)
runtime_memclr((byte*)&mp->gcstats, sizeof(mp->gcstats));
}
}
- mstats.stacks_inuse = stacks_inuse;
- mstats.mcache_inuse = runtime_mheap.cachealloc.inuse;
- mstats.mspan_inuse = runtime_mheap.spanalloc.inuse;
- mstats.sys = mstats.heap_sys + mstats.stacks_sys + mstats.mspan_sys +
- mstats.mcache_sys + mstats.buckhash_sys + mstats.gc_sys + mstats.other_sys;
+ pmstats = mstats();
+ pmstats->stacks_inuse = stacks_inuse;
+ pmstats->mcache_inuse = runtime_mheap.cachealloc.inuse;
+ pmstats->mspan_inuse = runtime_mheap.spanalloc.inuse;
+ pmstats->sys = pmstats->heap_sys + pmstats->stacks_sys + pmstats->mspan_sys +
+ pmstats->mcache_sys + pmstats->buckhash_sys + pmstats->gc_sys + pmstats->other_sys;
// Calculate memory allocator stats.
// During program execution we only count number of frees and amount of freed memory.
@@ -2037,13 +2024,13 @@ runtime_updatememstats(GCStats *stats)
// Total number of mallocs is calculated as number of frees plus number of alive objects.
// Similarly, total amount of allocated memory is calculated as amount of freed memory
// plus amount of alive heap memory.
- mstats.alloc = 0;
- mstats.total_alloc = 0;
- mstats.nmalloc = 0;
- mstats.nfree = 0;
- for(i = 0; i < nelem(mstats.by_size); i++) {
- mstats.by_size[i].nmalloc = 0;
- mstats.by_size[i].nfree = 0;
+ pmstats->alloc = 0;
+ pmstats->total_alloc = 0;
+ pmstats->nmalloc = 0;
+ pmstats->nfree = 0;
+ for(i = 0; i < nelem(pmstats->by_size); i++) {
+ pmstats->by_size[i].nmalloc = 0;
+ pmstats->by_size[i].nfree = 0;
}
// Flush MCache's to MCentral.
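
The comment above states the accounting identity this function relies on: only frees are tracked during execution, and mallocs are derived afterwards as frees plus live objects. With assumed numbers: if the heap's counters record 1000 frees and the span walk below finds 250 live objects, then nmalloc = 1000 + 250 = 1250; likewise total_alloc is freed bytes plus live heap bytes (pmstats->alloc).
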
@@ -2058,30 +2045,30 @@ runtime_updatememstats(GCStats *stats)
if(s->state != MSpanInUse)
continue;
if(s->sizeclass == 0) {
- mstats.nmalloc++;
- mstats.alloc += s->elemsize;
+ pmstats->nmalloc++;
+ pmstats->alloc += s->elemsize;
} else {
- mstats.nmalloc += s->ref;
- mstats.by_size[s->sizeclass].nmalloc += s->ref;
- mstats.alloc += s->ref*s->elemsize;
+ pmstats->nmalloc += s->ref;
+ pmstats->by_size[s->sizeclass].nmalloc += s->ref;
+ pmstats->alloc += s->ref*s->elemsize;
}
}
// Aggregate by size class.
smallfree = 0;
- mstats.nfree = runtime_mheap.nlargefree;
- for(i = 0; i < nelem(mstats.by_size); i++) {
- mstats.nfree += runtime_mheap.nsmallfree[i];
- mstats.by_size[i].nfree = runtime_mheap.nsmallfree[i];
- mstats.by_size[i].nmalloc += runtime_mheap.nsmallfree[i];
+ pmstats->nfree = runtime_mheap.nlargefree;
+ for(i = 0; i < nelem(pmstats->by_size); i++) {
+ pmstats->nfree += runtime_mheap.nsmallfree[i];
+ pmstats->by_size[i].nfree = runtime_mheap.nsmallfree[i];
+ pmstats->by_size[i].nmalloc += runtime_mheap.nsmallfree[i];
smallfree += runtime_mheap.nsmallfree[i] * runtime_class_to_size[i];
}
- mstats.nmalloc += mstats.nfree;
+ pmstats->nmalloc += pmstats->nfree;
// Calculate derived stats.
- mstats.total_alloc = mstats.alloc + runtime_mheap.largefree + smallfree;
- mstats.heap_alloc = mstats.alloc;
- mstats.heap_objects = mstats.nmalloc - mstats.nfree;
+ pmstats->total_alloc = pmstats->alloc + runtime_mheap.largefree + smallfree;
+ pmstats->heap_alloc = pmstats->alloc;
+ pmstats->heap_objects = pmstats->nmalloc - pmstats->nfree;
}
// Structure of arguments passed to function gc().
@@ -2119,6 +2106,7 @@ runtime_gc(int32 force)
G *g;
struct gc_args a;
int32 i;
+ MStats *pmstats;
// The atomic operations are not atomic if the uint64s
// are not aligned on uint64 boundaries. This has been
@@ -2141,7 +2129,8 @@ runtime_gc(int32 force)
// while holding a lock. The next mallocgc
// without a lock will do the gc instead.
m = runtime_m();
- if(!mstats.enablegc || runtime_g() == m->g0 || m->locks > 0 || runtime_panicking)
+ pmstats = mstats();
+ if(!pmstats->enablegc || runtime_g() == m->g0 || m->locks > 0 || runtime_panicking || m->preemptoff.len > 0)
return;
if(gcpercent == GcpercentUnknown) { // first time through
@@ -2153,11 +2142,11 @@ runtime_gc(int32 force)
if(gcpercent < 0)
return;
- runtime_semacquire(&runtime_worldsema, false);
- if(force==0 && mstats.heap_alloc < mstats.next_gc) {
+ runtime_acquireWorldsema();
+ if(force==0 && pmstats->heap_alloc < pmstats->next_gc) {
// typically threads which lost the race to grab
// worldsema exit here when gc is done.
- runtime_semrelease(&runtime_worldsema);
+ runtime_releaseWorldsema();
return;
}
@@ -2165,7 +2154,7 @@ runtime_gc(int32 force)
a.start_time = runtime_nanotime();
a.eagersweep = force >= 2;
m->gcing = 1;
- runtime_stoptheworld();
+ runtime_stopTheWorldWithSema();
clearpools();
@@ -2189,8 +2178,8 @@ runtime_gc(int32 force)
// all done
m->gcing = 0;
m->locks++;
- runtime_semrelease(&runtime_worldsema);
- runtime_starttheworld();
+ runtime_releaseWorldsema();
+ runtime_startTheWorldWithSema();
m->locks--;
// now that gc is done, kick off finalizer thread if needed
@@ -2220,6 +2209,7 @@ gc(struct gc_args *args)
uint64 heap0, heap1, obj, ninstr;
GCStats stats;
uint32 i;
+ MStats *pmstats;
// Eface eface;
m = runtime_m();
@@ -2275,28 +2265,29 @@ gc(struct gc_args *args)
cachestats();
// next_gc calculation is tricky with concurrent sweep since we don't know size of live heap
// estimate what was live heap size after previous GC (for tracing only)
- heap0 = mstats.next_gc*100/(gcpercent+100);
+ pmstats = mstats();
+ heap0 = pmstats->next_gc*100/(gcpercent+100);
// conservatively set next_gc to high value assuming that everything is live
// concurrent/lazy sweep will reduce this number while discovering new garbage
- mstats.next_gc = mstats.heap_alloc+(mstats.heap_alloc-runtime_stacks_sys)*gcpercent/100;
+ pmstats->next_gc = pmstats->heap_alloc+(pmstats->heap_alloc-runtime_stacks_sys)*gcpercent/100;
tm4 = runtime_nanotime();
- mstats.last_gc = runtime_unixnanotime(); // must be Unix time to make sense to user
- mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = tm4 - tm0;
- mstats.pause_end[mstats.numgc%nelem(mstats.pause_end)] = mstats.last_gc;
- mstats.pause_total_ns += tm4 - tm0;
- mstats.numgc++;
- if(mstats.debuggc)
+ pmstats->last_gc = runtime_unixnanotime(); // must be Unix time to make sense to user
+ pmstats->pause_ns[pmstats->numgc%nelem(pmstats->pause_ns)] = tm4 - tm0;
+ pmstats->pause_end[pmstats->numgc%nelem(pmstats->pause_end)] = pmstats->last_gc;
+ pmstats->pause_total_ns += tm4 - tm0;
+ pmstats->numgc++;
+ if(pmstats->debuggc)
runtime_printf("pause %D\n", tm4-tm0);
if(runtime_debug.gctrace) {
- heap1 = mstats.heap_alloc;
+ heap1 = pmstats->heap_alloc;
runtime_updatememstats(&stats);
- if(heap1 != mstats.heap_alloc) {
- runtime_printf("runtime: mstats skew: heap=%D/%D\n", heap1, mstats.heap_alloc);
+ if(heap1 != pmstats->heap_alloc) {
+ runtime_printf("runtime: mstats skew: heap=%D/%D\n", heap1, pmstats->heap_alloc);
runtime_throw("mstats skew");
}
- obj = mstats.nmalloc - mstats.nfree;
+ obj = pmstats->nmalloc - pmstats->nfree;
stats.nprocyield += work.markfor->nprocyield;
stats.nosyield += work.markfor->nosyield;
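
The heap0 estimate earlier in this hunk simply inverts the next_gc formula: the previous cycle set next_gc to roughly live*(gcpercent+100)/100, so dividing by the same factor recovers the live-heap estimate. With assumed values gcpercent = 100 and stacks_sys = 0, a live heap of 4 MiB yields next_gc = 4 MiB + 4 MiB*100/100 = 8 MiB, and heap0 = 8 MiB*100/200 = 4 MiB recovers the estimate.
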
@@ -2305,9 +2296,9 @@ gc(struct gc_args *args)
runtime_printf("gc%d(%d): %D+%D+%D+%D us, %D -> %D MB, %D (%D-%D) objects,"
" %d/%d/%d sweeps,"
" %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
- mstats.numgc, work.nproc, (tm1-tm0)/1000, (tm2-tm1)/1000, (tm3-tm2)/1000, (tm4-tm3)/1000,
+ pmstats->numgc, work.nproc, (tm1-tm0)/1000, (tm2-tm1)/1000, (tm3-tm2)/1000, (tm4-tm3)/1000,
heap0>>20, heap1>>20, obj,
- mstats.nmalloc, mstats.nfree,
+ pmstats->nmalloc, pmstats->nfree,
sweep.nspan, gcstats.nbgsweep, gcstats.npausesweep,
stats.nhandoff, stats.nhandoffcnt,
work.markfor->nsteal, work.markfor->nstealcnt,
@@ -2346,7 +2337,7 @@ gc(struct gc_args *args)
// Free the old cached array if necessary.
if(sweep.spans && sweep.spans != runtime_mheap.allspans)
- runtime_SysFree(sweep.spans, sweep.nspan*sizeof(sweep.spans[0]), &mstats.other_sys);
+ runtime_SysFree(sweep.spans, sweep.nspan*sizeof(sweep.spans[0]), &pmstats->other_sys);
// Cache the current array.
runtime_mheap.sweepspans = runtime_mheap.allspans;
runtime_mheap.sweepgen += 2;
@@ -2377,36 +2368,6 @@ gc(struct gc_args *args)
m->traceback = 0;
}
-extern uintptr runtime_sizeof_C_MStats
- __asm__ (GOSYM_PREFIX "runtime.Sizeof_C_MStats");
-
-void runtime_ReadMemStats(MStats *)
- __asm__ (GOSYM_PREFIX "runtime.ReadMemStats");
-
-void
-runtime_ReadMemStats(MStats *stats)
-{
- M *m;
-
- // Have to acquire worldsema to stop the world,
- // because stoptheworld can only be used by
- // one goroutine at a time, and there might be
- // a pending garbage collection already calling it.
- runtime_semacquire(&runtime_worldsema, false);
- m = runtime_m();
- m->gcing = 1;
- runtime_stoptheworld();
- runtime_updatememstats(nil);
- // Size of the trailing by_size array differs between Go and C,
- // _NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
- runtime_memmove(stats, &mstats, runtime_sizeof_C_MStats);
- m->gcing = 0;
- m->locks++;
- runtime_semrelease(&runtime_worldsema);
- runtime_starttheworld();
- m->locks--;
-}
-
void runtime_debug_readGCStats(Slice*)
__asm__("runtime_debug.readGCStats");
@@ -2415,28 +2376,30 @@ runtime_debug_readGCStats(Slice *pauses)
{
uint64 *p;
uint32 i, n;
+ MStats *pmstats;
// Calling code in runtime/debug should make the slice large enough.
- if((size_t)pauses->cap < nelem(mstats.pause_ns)+3)
+ pmstats = mstats();
+ if((size_t)pauses->cap < nelem(pmstats->pause_ns)+3)
runtime_throw("runtime: short slice passed to readGCStats");
// Pass back: pauses, last gc (absolute time), number of gc, total pause ns.
p = (uint64*)pauses->array;
runtime_lock(&runtime_mheap);
- n = mstats.numgc;
- if(n > nelem(mstats.pause_ns))
- n = nelem(mstats.pause_ns);
+ n = pmstats->numgc;
+ if(n > nelem(pmstats->pause_ns))
+ n = nelem(pmstats->pause_ns);
// The pause buffer is circular. The most recent pause is at
// pause_ns[(numgc-1)%nelem(pause_ns)], and then backward
// from there to go back farther in time. We deliver the times
// most recent first (in p[0]).
for(i=0; i<n; i++)
- p[i] = mstats.pause_ns[(mstats.numgc-1-i)%nelem(mstats.pause_ns)];
+ p[i] = pmstats->pause_ns[(pmstats->numgc-1-i)%nelem(pmstats->pause_ns)];
- p[n] = mstats.last_gc;
- p[n+1] = mstats.numgc;
- p[n+2] = mstats.pause_total_ns;
+ p[n] = pmstats->last_gc;
+ p[n+1] = pmstats->numgc;
+ p[n+2] = pmstats->pause_total_ns;
runtime_unlock(&runtime_mheap);
pauses->__count = n+3;
}
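
The ring-buffer read above delivers pauses most-recent-first. With assumed values numgc = 5 and nelem(pause_ns) = 256: n = 5, so p[0] = pause_ns[(5-1-0)%256] = pause_ns[4] (the most recent pause) down to p[4] = pause_ns[0] (the oldest retained), then p[5] = last_gc, p[6] = numgc, p[7] = pause_total_ns, and __count = 8.
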
@@ -2745,7 +2708,7 @@ runtime_MHeap_MapBits(MHeap *h)
if(h->bitmap_mapped >= n)
return;
- runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats.gc_sys);
+ runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats()->gc_sys);
h->bitmap_mapped = n;
}