			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting finalizers
			// for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces heap size by ~20%.
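			//
			// As a rough illustration (hypothetical sizes, not taken from the
			// benchmark above): three consecutive noscan allocations of 5, 3,
			// and 8 bytes can share a single 16-byte tiny block:
			//
			//	5 bytes -> offset 0  (tinyoffset becomes 5)
			//	3 bytes -> offset 5  (no alignment needed; tinyoffset becomes 8)
			//	8 bytes -> offset 8  (aligned up to 8; tinyoffset becomes 16)
			//
			// instead of occupying three separate 8-byte size-class objects.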
    
    			off := c.tinyoffset
    			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = alignUp(off, 8)
    			} else if sys.PtrSize == 4 && size == 12 {
				// Conservatively align 12-byte objects to 8 bytes on 32-bit
				// systems so that objects whose first field is a 64-bit
				// value are aligned to 8 bytes and do not cause a fault on
				// atomic access. See issue 37262.
    				// TODO(mknyszek): Remove this workaround if/when issue 36606
    				// is resolved.
				off = alignUp(off, 8)
			} else if size&3 == 0 {
				off = alignUp(off, 4)
			} else if size&1 == 0 {
				off = alignUp(off, 2)
			}
			if off+size <= maxTinySize && c.tiny != 0 {
    
    				// The object fits into existing tiny block.
    
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				mp.mallocing = 0
				releasem(mp)
				return x
			}
    			// Allocate a new maxTinySize block.
    
			span = c.alloc[tinySpanClass]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(tinySpanClass)
			}
    
    			x = unsafe.Pointer(v)
    			(*[2]uint64)(x)[0] = 0
    			(*[2]uint64)(x)[1] = 0
    			// See if we need to replace the existing tiny block with the new one
    			// based on amount of remaining free space.
    
    			if size < c.tinyoffset || c.tiny == 0 {
				c.tiny = uintptr(x)
				c.tinyoffset = size
			}
    			size = maxTinySize
    		} else {
    
    			var sizeclass uint8
    			if size <= smallSizeMax-8 {
    
    				sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
    
    			} else {
    
    				sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
    
    			}
    			size = uintptr(class_to_size[sizeclass])
    
			spc := makeSpanClass(sizeclass, noscan)
			span = c.alloc[spc]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(spc)
			}
    
    			x = unsafe.Pointer(v)
    
			if needzero && span.needzero != 0 {
				memclrNoHeapPointers(unsafe.Pointer(v), size)
			}
		}
	} else {
		shouldhelpgc = true
    
    		span = c.allocLarge(size, needzero, noscan)
    
    		span.freeindex = 1
    		span.allocCount = 1
    		x = unsafe.Pointer(span.base())
		size = span.elemsize
	}

	var scanSize uintptr
	if !noscan {
		// If allocating a defer+arg block, now that we've picked a malloc size
    		// large enough to hold everything, cut the "asked for" size down to
    		// just the defer header, so that the GC bitmap will record the arg block
    		// as containing nothing at all (as if it were unused space at the end of
    		// a malloc block caused by size rounding).
    		// The defer arg areas are scanned as part of scanstack.
    		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		}
		heapBitsSetType(uintptr(x), size, dataSize, typ)
    
    		if dataSize > typ.size {
    			// Array allocation. If there are any
    			// pointers, GC has to scan to the last
    			// element.
    			if typ.ptrdata != 0 {
    
				scanSize = dataSize - typ.size + typ.ptrdata
			}
		} else {
			scanSize = typ.ptrdata
		}
	}

	// Ensure that the stores above that initialize x to
    	// type-safe memory and set the heap bits occur before
    	// the caller can make x observable to the garbage
    	// collector. Otherwise, on weakly ordered machines,
    	// the garbage collector could follow a pointer to x,
    	// but see uninitialized memory or stale heap bits.
    	publicationBarrier()
    
    
	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(span, uintptr(x), size, scanSize)
	}
    
    	if raceenabled {
    		racemalloc(x, size)
    	}
    
    	if msanenabled {
    		msanmalloc(x, size)
    	}
    
    	mp.mallocing = 0
    	releasem(mp)
    
    	if debug.malloc {
    		if debug.allocfreetrace != 0 {
    			tracealloc(x, size, typ)
    		}
    
    		if inittrace.active && inittrace.id == getg().goid {
			// Init functions are executed sequentially in a single goroutine.
    			inittrace.bytes += uint64(size)
		}
	}

	if rate := MemProfileRate; rate > 0 {
    
    		if rate != 1 && size < c.nextSample {
    			c.nextSample -= size
    
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if assistG != nil {
    		// Account for internal fragmentation in the assist
    		// debt now that we know it.
    		assistG.gcAssistBytes -= int64(size - dataSize)
    	}
    
    
    	if shouldhelpgc {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
    
    	return x
    }
    
// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

    //go:linkname reflect_unsafe_New reflect.unsafe_New
    func reflect_unsafe_New(typ *_type) unsafe.Pointer {
    
	return mallocgc(typ.size, typ, true)
}

    //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
    func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
    	return mallocgc(typ.size, typ, true)
    }
    
    
    // newarray allocates an array of n elements of type typ.
    func newarray(typ *_type, n int) unsafe.Pointer {
    
    	if n == 1 {
    		return mallocgc(typ.size, typ, true)
    	}
    
    	mem, overflow := math.MulUintptr(typ.size, uintptr(n))
	if overflow || mem > maxAlloc || n < 0 {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(mem, typ, true)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
    	return newarray(typ, n)
    }
    
    
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := getMCache()
	if c == nil {
		throw("profilealloc called without a P or outside bootstrapping")
	}
	c.nextSample = nextSample()
	mProf_Malloc(x, size)
}

    // nextSample returns the next sampling point for heap profiling. The goal is
    // to sample allocations on average every MemProfileRate bytes, but with a
    // completely random distribution over the allocation timeline; this
    // corresponds to a Poisson process with parameter MemProfileRate. In Poisson
    // processes, the distance between two samples follows the exponential
    // distribution (exp(MemProfileRate)), so the best return value is a random
    // number taken from an exponential distribution whose mean is MemProfileRate.
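//
// For example (illustrative numbers, and assuming the default MemProfileRate
// of 512 KiB): consecutive sampling distances might come out as 90 KiB,
// 1.2 MiB, then 300 KiB; each gap is individually random, but the gaps
// average MemProfileRate bytes over many samples.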
    
func nextSample() uintptr {
	if GOOS == "plan9" {
    		// Plan 9 doesn't support floating point in note handler.
    		if g := getg(); g == g.m.gsignal {
    			return nextSampleNoFP()
    		}
    	}
    
    
	return uintptr(fastexprand(MemProfileRate))
}

    // fastexprand returns a random number from an exponential distribution with
    // the specified mean.
    func fastexprand(mean int) int32 {
    	// Avoid overflow. Maximum possible step is
	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

	// Take a random sample of the exponential distribution exp(-x/mean).
	// The probability distribution function is (1/mean)*exp(-x/mean), so the CDF is
	// p = 1 - exp(-x/mean), so
	// q = 1 - p == exp(-x/mean)
	// log_e(q) = -x/mean
	// x = -log_e(q) * mean
	// x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
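	//
	// As a worked example (illustrative values): a draw of q = 1<<25 with
	// randomBitCount = 26 gives qlog ≈ 25 - 26 = -1, so the returned step is
	// (-1) * (-log_e(2)) * mean + 1 ≈ 0.69*mean, i.e. this particular sample
	// fires somewhat earlier than the mean of the distribution.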
    
    	const randomBitCount = 26
    
    	q := fastrand()%(1<<randomBitCount) + 1
    
    	qlog := fastlog2(float64(q)) - randomBitCount
    	if qlog > 0 {
    		qlog = 0
    	}
    	const minusLog2 = -0.6931471805599453 // -ln(2)
    
	return int32(qlog*(minusLog2*float64(mean))) + 1
}

    // nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() uintptr {
	// Set first allocation sample size.
    	rate := MemProfileRate
    	if rate > 0x3fffffff { // make 2*rate not overflow
    		rate = 0x3fffffff
    	}
    	if rate != 0 {
    
		return uintptr(fastrand() % uint32(2*rate))
	}
	return 0
}

// persistentAlloc tracks the current off-heap chunk and the bump offset
// within it used by persistentalloc.
type persistentAlloc struct {
	base *notInHeap
	off  uintptr
}

    var globalAlloc struct {
    	mutex
    	persistentAlloc
    }
    
    
    // persistentChunkSize is the number of bytes we allocate when we grow
    // a persistentAlloc.
    const persistentChunkSize = 256 << 10
    
    // persistentChunks is a list of all the persistent chunks we have
    // allocated. The list is maintained through the first word in the
    // persistent chunk. This is updated atomically.
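//
// For illustration: after three chunks have been allocated the list looks like
//
//	persistentChunks -> chunk3 -> chunk2 -> chunk1 -> nil
//
// where each link is stored in the first word of the chunk it points from,
// and new chunks are pushed onto the front with a CAS (see persistentalloc1).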
    var persistentChunks *notInHeap
    
    
// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
//
// Consider marking persistentalloc'd types go:notinheap.
func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
}
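
// A sketch of typical use (hypothetical caller and type, for illustration
// only):
//
//	// Allocate a permanent, zeroed table that is never freed.
//	tab := (*myTable)(persistentalloc(unsafe.Sizeof(myTable{}), 0, &memstats.other_sys))
//
// Passing align == 0 requests the default 8-byte alignment, and the sysStat
// argument attributes the memory to a memstats bucket.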
    
    // Must run on system stack because stack growth can (re)invoke it.
    // See issue 9174.
    //go:systemstack
    
    func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
    
    	const (
    		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
    	)
    
    
    	if size == 0 {
    		throw("persistentalloc: size == 0")
    	}
    
    	if align != 0 {
    		if align&(align-1) != 0 {
    
    			throw("persistentalloc: align is not a power of 2")
    
    		}
    		if align > _PageSize {
    
    			throw("persistentalloc: align is too large")
    
    		}
    	} else {
    		align = 8
    	}
    
    	if size >= maxBlock {
    
		return (*notInHeap)(sysAlloc(size, sysStat))
	}

    	mp := acquirem()
    	var persistent *persistentAlloc
    
    	if mp != nil && mp.p != 0 {
    		persistent = &mp.p.ptr().palloc
    
    	} else {
    		lock(&globalAlloc.mutex)
    		persistent = &globalAlloc.persistentAlloc
    	}
    
    	persistent.off = alignUp(persistent.off, align)
    
    	if persistent.off+size > persistentChunkSize || persistent.base == nil {
		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}
    
    		// Add the new chunk to the persistentChunks list.
    		for {
    			chunks := uintptr(unsafe.Pointer(persistentChunks))
    			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
    			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
    				break
    			}
    		}
    
		persistent.off = alignUp(sys.PtrSize, align)
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
	releasem(mp)
    	if persistent == &globalAlloc.persistentAlloc {
    		unlock(&globalAlloc.mutex)
    	}
    
    	if sysStat != &memstats.other_sys {
    
    		sysStat.add(int64(size))
		memstats.other_sys.add(-int64(size))
	}
	return p
}

    // inPersistentAlloc reports whether p points to memory allocated by
    // persistentalloc. This must be nosplit because it is called by the
    // cgo checker code, which is called by the write barrier code.
    //go:nosplit
    func inPersistentAlloc(p uintptr) bool {
    	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
    	for chunk != 0 {
    		if p >= chunk && p < chunk+persistentChunkSize {
    			return true
    		}
    		chunk = *(*uintptr)(unsafe.Pointer(chunk))
    	}
    	return false
    }
    
    
// linearAlloc is a simple linear allocator that pre-reserves a region
// of memory and then maps that region into the Ready state as needed. The
// caller is responsible for locking.
    type linearAlloc struct {
    	next   uintptr // next free byte
    	mapped uintptr // one byte past end of mapped space
    	end    uintptr // end of reserved space
    }
    
    func (l *linearAlloc) init(base, size uintptr) {
    
    	if base+size < base {
    		// Chop off the last byte. The runtime isn't prepared
    		// to deal with situations where the bounds could overflow.
    		// Leave that memory reserved, though, so we don't map it
    		// later.
    		size -= 1
    	}
    
    	l.next, l.mapped = base, base
    	l.end = base + size
    }
    
    
func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	p := alignUp(l.next, align)
	if p+size > l.end {
    		return nil
    	}
    	l.next = p + size
    
    	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
    
    		// Transition from Reserved to Prepared to Ready.
    
    		sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
    
    		sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
    
    		l.mapped = pEnd
    	}
    	return unsafe.Pointer(p)
    }
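
// A sketch of intended use (hypothetical names and sizes, for illustration
// only; the caller must hold the appropriate lock):
//
//	var la linearAlloc
//	la.init(base, 64<<20)                                 // reserve a 64 MiB region at base
//	p := la.alloc(unsafe.Sizeof(someType{}), 8, sysStat)  // bump-allocate; maps pages lazily
//
// alloc returns nil once the reserved region is exhausted.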
    
    
    // notInHeap is off-heap memory allocated by a lower-level allocator
    // like sysAlloc or persistentAlloc.
    //
    // In general, it's better to use real types marked as go:notinheap,
    // but this serves as a generic type for situations where that isn't
    // possible (like in the allocators).
    //
    // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
    //
    //go:notinheap
    type notInHeap struct{}
    
    func (p *notInHeap) add(bytes uintptr) *notInHeap {
    	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
    }