		npages++
    	}
    
    
    	// Deduct credit for this span allocation and sweep if
    	// necessary. mHeap_Alloc will also sweep npages, so this only
    	// pays the debt down to npage pages.
    	deductSweepCredit(npages*_PageSize, npages)
    
    
    	s := mheap_.alloc(npages, makeSpanClass(0, noscan), true, needzero)
    
    	if s == nil {
    		throw("out of memory")
    	}
    
    	s.limit = s.base() + size
    
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}
    
    // implementation of new builtin
    
    // compiler (both frontend and SSA backend) knows the signature
    // of this function
    
    func newobject(typ *_type) unsafe.Pointer {
    
	return mallocgc(typ.size, typ, true)
}
    
    //go:linkname reflect_unsafe_New reflect.unsafe_New
    func reflect_unsafe_New(typ *_type) unsafe.Pointer {
    
	return mallocgc(typ.size, typ, true)
}
    
    // newarray allocates an array of n elements of type typ.
    func newarray(typ *_type, n int) unsafe.Pointer {
    
    	if n == 1 {
    		return mallocgc(typ.size, typ, true)
    	}
    
	if n < 0 || uintptr(n) > maxSliceCap(typ.size) {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(typ.size*uintptr(n), typ, true)
}
    
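// Worked example (illustrative, not from the runtime source): on a 64-bit
// system, typ.size = 1<<32 with n = 1<<32 would make typ.size*uintptr(n)
// wrap around to 0 if it were computed unchecked; the n < 0 and
// maxSliceCap(typ.size) guards above reject any n for which the product
// could overflow or exceed the maximum allocation size.
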
    //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
    
    func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
    
    	return newarray(typ, n)
    }
    
    
    func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
    
    	mp.mcache.next_sample = nextSample()
    
	mProf_Malloc(x, size)
}
    
    // nextSample returns the next sampling point for heap profiling. The goal is
    // to sample allocations on average every MemProfileRate bytes, but with a
    // completely random distribution over the allocation timeline; this
    // corresponds to a Poisson process with parameter MemProfileRate. In Poisson
    // processes, the distance between two samples follows the exponential
    // distribution (exp(MemProfileRate)), so the best return value is a random
    // number taken from an exponential distribution whose mean is MemProfileRate.
    
    func nextSample() int32 {
    
    	if GOOS == "plan9" {
    		// Plan 9 doesn't support floating point in note handler.
    		if g := getg(); g == g.m.gsignal {
    			return nextSampleNoFP()
    		}
    	}
    
    
    	return fastexprand(MemProfileRate)
    }
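
// consumeSampleBudgetSketch is an illustrative sketch (not part of the
// runtime) of how a threshold returned by nextSample is typically consumed:
// each allocation's size is deducted from the remaining budget, and once the
// budget is exhausted the allocation is sampled and a fresh exponentially
// distributed budget is drawn. The function and parameter names here are
// hypothetical.
func consumeSampleBudgetSketch(remaining, allocSize int32) (sampled bool, newRemaining int32) {
	if allocSize < remaining {
		// Not yet at the sampling point; just pay down the budget.
		return false, remaining - allocSize
	}
	// Crossed the sampling point: this allocation should be profiled and a
	// new random interval drawn from the exponential distribution.
	return true, nextSample()
}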
    
    // fastexprand returns a random number from an exponential distribution with
    // the specified mean.
    func fastexprand(mean int) int32 {
    	// Avoid overflow. Maximum possible step is
    	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
    
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

    	// Take a random sample of the exponential distribution exp(-mean*x).
    	// The probability distribution function is mean*exp(-mean*x), so the CDF is
    	// p = 1 - exp(-mean*x), so
    	// q = 1 - p == exp(-mean*x)
    	// log_e(q) = -mean*x
    	// -log_e(q)/mean = x
    	// x = -log_e(q) * mean
    	// x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
    
    	const randomBitCount = 26
    
    	q := fastrand()%(1<<randomBitCount) + 1
    
    	qlog := fastlog2(float64(q)) - randomBitCount
    	if qlog > 0 {
    		qlog = 0
    	}
    	const minusLog2 = -0.6931471805599453 // -ln(2)
    
	return int32(qlog*(minusLog2*float64(mean))) + 1
}
    
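// Sketch (illustrative, not runtime code): the same exponential draw written
// directly with floating point. fastexprand above is an optimization of this
// form, drawing q from a 26-bit uniform integer and using fastlog2 instead of
// a full math.Log call:
//
//	func exprandSketch(mean float64) float64 {
//		q := 1 - rand.Float64()    // uniform in (0, 1]
//		return -math.Log(q) * mean // exponential with the given mean
//	}
//
// math and math/rand are not importable inside the runtime, so this is only a
// reference formulation of x = -ln(q) * mean from the derivation above.
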
    // nextSampleNoFP is similar to nextSample, but uses older,
    // simpler code to avoid floating point.
    func nextSampleNoFP() int32 {
    	// Set first allocation sample size.
    	rate := MemProfileRate
    	if rate > 0x3fffffff { // make 2*rate not overflow
    		rate = 0x3fffffff
    	}
    	if rate != 0 {
    
		return int32(fastrand() % uint32(2*rate))
	}
	return 0
}
    
type persistentAlloc struct {
	base *notInHeap
	off  uintptr
}

    var globalAlloc struct {
    	mutex
    	persistentAlloc
    }
    
    
    // Wrapper around sysAlloc that can allocate small chunks.
    // There is no associated free operation.
    // Intended for things like function/type/debug-related persistent data.
    // If align is 0, uses default align (currently 8).
    
    // The returned memory will be zeroed.
    
    //
    // Consider marking persistentalloc'd types go:notinheap.
    
    func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
    
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
    }
    
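// Example call (hypothetical; real callers and stat counters vary): allocate a
// small, never-freed structure with the default 8-byte alignment, charged to
// memstats.other_sys:
//
//	p := (*someStruct)(persistentalloc(unsafe.Sizeof(someStruct{}), 0, &memstats.other_sys))
//
// someStruct is a placeholder type; per the note above, such types should be
// considered for go:notinheap.
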
    // Must run on system stack because stack growth can (re)invoke it.
    // See issue 9174.
    //go:systemstack
    
    func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
    
    	const (
    		chunk    = 256 << 10
    		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
    	)
    
    
    	if size == 0 {
    		throw("persistentalloc: size == 0")
    	}
    
    	if align != 0 {
    		if align&(align-1) != 0 {
    
    			throw("persistentalloc: align is not a power of 2")
    
    		}
    		if align > _PageSize {
    
    			throw("persistentalloc: align is too large")
    
    		}
    	} else {
    		align = 8
    	}
    
    	if size >= maxBlock {
    
		return (*notInHeap)(sysAlloc(size, sysStat))
	}

    	mp := acquirem()
    	var persistent *persistentAlloc
    
    	if mp != nil && mp.p != 0 {
    		persistent = &mp.p.ptr().palloc
    
    	} else {
    		lock(&globalAlloc.mutex)
    		persistent = &globalAlloc.persistentAlloc
    	}
    
    	persistent.off = round(persistent.off, align)
    
	if persistent.off+size > chunk || persistent.base == nil {
		persistent.base = (*notInHeap)(sysAlloc(chunk, &memstats.other_sys))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}
		persistent.off = 0
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
    
    	releasem(mp)
    	if persistent == &globalAlloc.persistentAlloc {
    		unlock(&globalAlloc.mutex)
    	}
    
    	if sysStat != &memstats.other_sys {
    		mSysStatInc(sysStat, size)
		mSysStatDec(&memstats.other_sys, size)
	}
	return p
}

    // linearAlloc is a simple linear allocator that pre-reserves a region
    // of memory and then maps that region as needed. The caller is
    // responsible for locking.
    type linearAlloc struct {
    	next   uintptr // next free byte
    	mapped uintptr // one byte past end of mapped space
    	end    uintptr // end of reserved space
    }
    
    func (l *linearAlloc) init(base, size uintptr) {
    	l.next, l.mapped = base, base
    	l.end = base + size
    }
    
    func (l *linearAlloc) alloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
    	p := round(l.next, align)
    	if p+size > l.end {
    		return nil
    	}
    	l.next = p + size
    	if pEnd := round(l.next-1, physPageSize); pEnd > l.mapped {
    		// We need to map more of the reserved space.
    
    		sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
    
    		l.mapped = pEnd
    	}
    	return unsafe.Pointer(p)
    }
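
// Usage sketch (illustrative; names and sizes are made up): reserve an address
// range out of band, hand it to a linearAlloc, then bump-allocate from it.
// The caller supplies its own locking and the sysStat counter to charge:
//
//	var la linearAlloc
//	la.init(reservedBase, reservedSize)         // base/size of a prior reservation
//	p := la.alloc(1024, 8, &memstats.other_sys) // 1 KiB, 8-byte aligned
//	if p == nil {
//		// the reserved region is exhausted
//	}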
    
    
    // notInHeap is off-heap memory allocated by a lower-level allocator
    // like sysAlloc or persistentAlloc.
    //
    // In general, it's better to use real types marked as go:notinheap,
    // but this serves as a generic type for situations where that isn't
    // possible (like in the allocators).
    //
    // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
    //
    //go:notinheap
    type notInHeap struct{}
    
    func (p *notInHeap) add(bytes uintptr) *notInHeap {
    	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
    }