			// the amount of potentially wasted memory is bounded.
    			//
    			// Size of the memory block used for combining (maxTinySize) is tunable.
			// Current setting is 16 bytes, which relates to 2x worst case memory
			// wastage (when all but one subobject are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best-case saving is 8x regardless of block size.
    			//
			// Objects obtained from the tiny allocator must not be freed explicitly.
    			// So when an object will be freed explicitly, we ensure that
    			// its size >= maxTinySize.
    			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting finalizers
			// for an inner byte of a memory block.
    			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces the heap size by ~20%.
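			//
			// For example (illustrative sizes, not from a benchmark): with
			// maxTinySize = 16, three noscan allocations of 5, 3 and 7 bytes
			// can share a single 16-byte block instead of each consuming an
			// 8-byte size class slot. The 2x worst case corresponds to two
			// 8-byte subobjects sharing a block where only one stays
			// reachable: 16 bytes remain live for 8 bytes of useful data.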
    
    			off := c.tinyoffset
    			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = alignUp(off, 8)
			} else if goarch.PtrSize == 4 && size == 12 {
				// Conservatively align 12-byte objects to 8 bytes on 32-bit
				// systems so that objects whose first field is a 64-bit
				// value is aligned to 8 bytes and does not cause a fault on
				// atomic access. See issue 37262.
				// TODO(mknyszek): Remove this workaround if/when issue 36606
				// is resolved.
				off = alignUp(off, 8)
			} else if size&3 == 0 {
				off = alignUp(off, 4)
			} else if size&1 == 0 {
				off = alignUp(off, 2)
			}
    			if off+size <= maxTinySize && c.tiny != 0 {
    
    				// The object fits into existing tiny block.
    
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				c.tinyAllocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
    			// Allocate a new maxTinySize block.
    
			span = c.alloc[tinySpanClass]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(tinySpanClass)
			}
			x = unsafe.Pointer(v)
    			(*[2]uint64)(x)[0] = 0
    			(*[2]uint64)(x)[1] = 0
    			// See if we need to replace the existing tiny block with the new one
    			// based on amount of remaining free space.
    
    			if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
    				// Note: disabled when race detector is on, see comment near end of this function.
    
				c.tiny = uintptr(x)
				c.tinyoffset = size
    			}
    			size = maxTinySize
    		} else {
    
    			var sizeclass uint8
    			if size <= smallSizeMax-8 {
    
    				sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
    
    			} else {
    
    				sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
    
    			}
    			size = uintptr(class_to_size[sizeclass])
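			// As a rough illustration (the exact class sizes come from the
			// generated tables in sizeclasses.go): a 100-byte request has
			// divRoundUp(100, smallSizeDiv) = 13, and the class selected for
			// it rounds the allocation up to the next class size (112 bytes
			// in the current tables), so size now holds the rounded-up value.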
    
			spc := makeSpanClass(sizeclass, noscan)
			span = c.alloc[spc]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(spc)
			}
    
    			x = unsafe.Pointer(v)
    
    			if needzero && span.needzero != 0 {
    
				memclrNoHeapPointers(unsafe.Pointer(v), size)
			}
		}
	} else {
		shouldhelpgc = true
    
		// For large allocations, keep track of zeroed state so that
		// bulk zeroing can happen later in a preemptible context.
    
    		span = c.allocLarge(size, noscan)
    
    		span.freeindex = 1
    		span.allocCount = 1
    		size = span.elemsize
    
    		x = unsafe.Pointer(span.base())
    		if needzero && span.needzero != 0 {
    			if noscan {
    				delayedZeroing = true
    			} else {
    				memclrNoHeapPointers(x, size)
    				// We've in theory cleared almost the whole span here,
    				// and could take the extra step of actually clearing
    				// the whole thing. However, don't. Any GC bits for the
    				// uncleared parts will be zero, and it's just going to
    				// be needzero = 1 once freed anyway.
    			}
		}
	}

	var scanSize uintptr
	if !noscan {
		heapBitsSetType(uintptr(x), size, dataSize, typ)
    
    		if dataSize > typ.size {
    			// Array allocation. If there are any
    			// pointers, GC has to scan to the last
    			// element.
    			if typ.ptrdata != 0 {
    
				scanSize = dataSize - typ.size + typ.ptrdata
			}
		} else {
			scanSize = typ.ptrdata
		}
		c.scanAlloc += scanSize
	}
    
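	// Worked example (hypothetical type): for an array of ten 16-byte
	// elements whose pointer data occupies only the first 8 bytes of each
	// element, dataSize = 160, typ.size = 16 and typ.ptrdata = 8, so
	// scanSize = 160 - 16 + 8 = 152: the GC scans through the last
	// element's pointer word but skips its trailing non-pointer bytes.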
    	// Ensure that the stores above that initialize x to
    	// type-safe memory and set the heap bits occur before
    	// the caller can make x observable to the garbage
    	// collector. Otherwise, on weakly ordered machines,
    	// the garbage collector could follow a pointer to x,
    	// but see uninitialized memory or stale heap bits.
    	publicationBarrier()
    
    
    	// Allocate black during GC.
    
    	// All slots hold nil so no scanning is needed.
    	// This may be racing with GC so do it atomically if there can be
    	// a race marking the bit.
    
    	if gcphase != _GCoff {
    
		gcmarknewobject(span, uintptr(x), size, scanSize)
	}

    	if raceenabled {
    		racemalloc(x, size)
    	}
    
    	if msanenabled {
    		msanmalloc(x, size)
    	}
    
    	if asanenabled {
    		// We should only read/write the memory with the size asked by the user.
    		// The rest of the allocated memory should be poisoned, so that we can report
    		// errors when accessing poisoned memory.
		// The allocated memory is larger than the requested userSize; it also
		// includes the redzone and some other padding bytes.
    		rzBeg := unsafe.Add(x, userSize)
    		asanpoison(rzBeg, size-userSize)
    		asanunpoison(x, userSize)
    	}
    
    
    	if rate := MemProfileRate; rate > 0 {
    		// Note cache c only valid while m acquired; see #47302
    		if rate != 1 && size < c.nextSample {
    			c.nextSample -= size
    		} else {
    			profilealloc(mp, x, size)
    		}
    	}
    
    	mp.mallocing = 0
    	releasem(mp)
    
    	// Pointerfree data can be zeroed late in a context where preemption can occur.
    	// x will keep the memory alive.
    
    	if delayedZeroing {
    		if !noscan {
    			throw("delayed zeroing on data that may contain pointers")
    		}
    
		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
	}

    	if debug.malloc {
    		if debug.allocfreetrace != 0 {
    			tracealloc(x, size, typ)
    		}
    
    		if inittrace.active && inittrace.id == getg().goid {
    
    			// Init functions are executed sequentially in a single goroutine.
    
    			inittrace.bytes += uint64(size)
		}
	}

    	if assistG != nil {
    		// Account for internal fragmentation in the assist
    		// debt now that we know it.
    		assistG.gcAssistBytes -= int64(size - dataSize)
    	}
    
    
    	if shouldhelpgc {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

    	if raceenabled && noscan && dataSize < maxTinySize {
    		// Pad tinysize allocations so they are aligned with the end
    		// of the tinyalloc region. This ensures that any arithmetic
    		// that goes off the top end of the object will be detectable
    		// by checkptr (issue 38872).
    		// Note that we disable tinyalloc when raceenabled for this to work.
    		// TODO: This padding is only performed when the race detector
    		// is enabled. It would be nice to enable it if any package
    		// was compiled with checkptr, but there's no easy way to
    		// detect that (especially at compile time).
    		// TODO: enable this padding for all allocations, not just
    		// tinyalloc ones. It's tricky because of pointer maps.
    		// Maybe just all noscan objects?
    		x = add(x, size-dataSize)
	}

	return x
}

    // memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
    // on chunks of the buffer to be zeroed, with opportunities for preemption
    // along the way.  memclrNoHeapPointers contains no safepoints and also
    // cannot be preemptively scheduled, so this provides a still-efficient
// bulk clear that can also be preempted at a reasonable granularity.
    //
    // Use this with care; if the data being cleared is tagged to contain
    // pointers, this allows the GC to run before it is all cleared.
    func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
    	v := uintptr(x)
    	// got this from benchmarking. 128k is too small, 512k is too large.
    	const chunkBytes = 256 * 1024
    	vsize := v + size
    	for voff := v; voff < vsize; voff = voff + chunkBytes {
    		if getg().preempt {
    			// may hold locks, e.g., profiling
    			goschedguarded()
    		}
    		// clear min(avail, lump) bytes
    		n := vsize - voff
    		if n > chunkBytes {
    			n = chunkBytes
    		}
    		memclrNoHeapPointers(unsafe.Pointer(voff), n)
    	}
    }
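
// To make the chunking above concrete (sizes are just an example):
// clearing a 600 KiB buffer issues three memclrNoHeapPointers calls of
// 256 KiB, 256 KiB and 88 KiB, with a preemption check before each one,
// so the longest non-preemptible stretch is a single 256 KiB clear.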
    
    
// newobject implements the new builtin.
// The compiler (both frontend and SSA backend) knows the signature
// of this function.
    
    func newobject(typ *_type) unsafe.Pointer {
    
	return mallocgc(typ.size, typ, true)
}

    //go:linkname reflect_unsafe_New reflect.unsafe_New
    func reflect_unsafe_New(typ *_type) unsafe.Pointer {
    
	return mallocgc(typ.size, typ, true)
}

    //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
    func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
    	return mallocgc(typ.size, typ, true)
    }
    
    
    // newarray allocates an array of n elements of type typ.
    func newarray(typ *_type, n int) unsafe.Pointer {
    
    	if n == 1 {
    		return mallocgc(typ.size, typ, true)
    	}
    
    	mem, overflow := math.MulUintptr(typ.size, uintptr(n))
    	if overflow || mem > maxAlloc || n < 0 {
    
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(mem, typ, true)
}

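// As an illustration of the bounds check in newarray (sizes are examples
// only): on a 64-bit system, typ.size = 1<<32 with n = 1<<32 makes
// math.MulUintptr report overflow, and a product that fits in a uintptr
// but exceeds maxAlloc is rejected the same way, so both cases panic
// rather than returning an undersized allocation.
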
    //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
    
    func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
    
    	return newarray(typ, n)
    }
    
    
    func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
    
    	c := getMCache(mp)
    
    	if c == nil {
    		throw("profilealloc called without a P or outside bootstrapping")
    	}
    	c.nextSample = nextSample()
    
	mProf_Malloc(x, size)
}

    // nextSample returns the next sampling point for heap profiling. The goal is
    // to sample allocations on average every MemProfileRate bytes, but with a
    // completely random distribution over the allocation timeline; this
    // corresponds to a Poisson process with parameter MemProfileRate. In Poisson
// processes, the distance between two samples follows an exponential
// distribution with mean MemProfileRate, so the best return value is a
// random number drawn from that distribution.
func nextSample() uintptr {
    	if MemProfileRate == 1 {
    		// Callers assign our return value to
    		// mcache.next_sample, but next_sample is not used
    		// when the rate is 1. So avoid the math below and
    		// just return something.
    		return 0
    	}
    
    	if GOOS == "plan9" {
    		// Plan 9 doesn't support floating point in note handler.
    		if g := getg(); g == g.m.gsignal {
    			return nextSampleNoFP()
    		}
    	}
    
    
	return uintptr(fastexprand(MemProfileRate))
}

    // fastexprand returns a random number from an exponential distribution with
    // the specified mean.
    func fastexprand(mean int) int32 {
    	// Avoid overflow. Maximum possible step is
    	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
    
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

	// Take a random sample of the exponential distribution exp(-x/mean).
	// The probability density function is exp(-x/mean)/mean, so the CDF is
	// p = 1 - exp(-x/mean), so
	// q = 1 - p == exp(-x/mean)
	// log_e(q) = -x/mean
	// x = -log_e(q) * mean
	// x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
    
    	const randomBitCount = 26
    
    	q := fastrandn(1<<randomBitCount) + 1
    
    	qlog := fastlog2(float64(q)) - randomBitCount
    	if qlog > 0 {
    		qlog = 0
    	}
    	const minusLog2 = -0.6931471805599453 // -ln(2)
    
	return int32(qlog*(minusLog2*float64(mean))) + 1
}

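// The conversion in fastexprand is standard inverse-CDF sampling of an
// exponential distribution. Outside the runtime, the same idea written
// against the standard library would look roughly like the sketch below
// (illustrative only; it assumes "math" and "math/rand" are imported and
// uses rand.Int63n in place of fastrandn):
//
//	func expSampleSketch(mean float64) float64 {
//		// q is uniform on (0, 1]; -ln(q)*mean is exponential with the given mean.
//		q := (float64(rand.Int63n(1<<26)) + 1) / float64(1<<26)
//		return -math.Log(q) * mean
//	}
//
// The largest possible sample is -ln(1/(1<<26)) * mean, about 18 * mean,
// which is what the clamp on mean above protects from overflowing int32.
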
    // nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() uintptr {
    	// Set first allocation sample size.
    	rate := MemProfileRate
    	if rate > 0x3fffffff { // make 2*rate not overflow
    		rate = 0x3fffffff
    	}
    	if rate != 0 {
    
		return uintptr(fastrandn(uint32(2 * rate)))
	}
	return 0
}

type persistentAlloc struct {
	base *notInHeap
	off  uintptr
}

    var globalAlloc struct {
    	mutex
    	persistentAlloc
    }
    
    
    // persistentChunkSize is the number of bytes we allocate when we grow
    // a persistentAlloc.
    const persistentChunkSize = 256 << 10
    
    // persistentChunks is a list of all the persistent chunks we have
    // allocated. The list is maintained through the first word in the
    // persistent chunk. This is updated atomically.
    var persistentChunks *notInHeap
    
    
    // Wrapper around sysAlloc that can allocate small chunks.
    // There is no associated free operation.
    // Intended for things like function/type/debug-related persistent data.
    // If align is 0, uses default align (currently 8).
    
    // The returned memory will be zeroed.
    
    //
    // Consider marking persistentalloc'd types go:notinheap.
    
    func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
    
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
    }
    
    // Must run on system stack because stack growth can (re)invoke it.
    // See issue 9174.
    //go:systemstack
    
    func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
    
    	const (
    		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
    	)
    
    
    	if size == 0 {
    		throw("persistentalloc: size == 0")
    	}
    
    	if align != 0 {
    		if align&(align-1) != 0 {
    
    			throw("persistentalloc: align is not a power of 2")
    
    		}
    		if align > _PageSize {
    
    			throw("persistentalloc: align is too large")
    
    		}
    	} else {
    		align = 8
    	}
    
    	if size >= maxBlock {
    
		return (*notInHeap)(sysAlloc(size, sysStat))
	}

    	mp := acquirem()
    	var persistent *persistentAlloc
    
    	if mp != nil && mp.p != 0 {
    		persistent = &mp.p.ptr().palloc
    
    	} else {
    		lock(&globalAlloc.mutex)
    		persistent = &globalAlloc.persistentAlloc
    	}
    
    	persistent.off = alignUp(persistent.off, align)
    
    	if persistent.off+size > persistentChunkSize || persistent.base == nil {
		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}
    
    
    		// Add the new chunk to the persistentChunks list.
    		for {
    			chunks := uintptr(unsafe.Pointer(persistentChunks))
    			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
    			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
    				break
    			}
    		}
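
		// The loop above is the standard lock-free list push: load the
		// current head, store it into the new chunk's first word, then
		// publish the chunk with a compare-and-swap, retrying if another
		// thread raced ahead. With sync/atomic in ordinary Go code the same
		// pattern looks roughly like this (illustrative sketch; head and
		// node are hypothetical unsafe.Pointer-based variables):
		//
		//	for {
		//		old := atomic.LoadPointer(&head)
		//		node.next = old
		//		if atomic.CompareAndSwapPointer(&head, old, unsafe.Pointer(node)) {
		//			break
		//		}
		//	}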
    
		persistent.off = alignUp(goarch.PtrSize, align)
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
    
    	releasem(mp)
    	if persistent == &globalAlloc.persistentAlloc {
    		unlock(&globalAlloc.mutex)
    	}
    
    	if sysStat != &memstats.other_sys {
    
    		sysStat.add(int64(size))
		memstats.other_sys.add(-int64(size))
	}
	return p
}

    // inPersistentAlloc reports whether p points to memory allocated by
    // persistentalloc. This must be nosplit because it is called by the
    // cgo checker code, which is called by the write barrier code.
    //go:nosplit
    func inPersistentAlloc(p uintptr) bool {
    	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
    	for chunk != 0 {
    		if p >= chunk && p < chunk+persistentChunkSize {
    			return true
    		}
    		chunk = *(*uintptr)(unsafe.Pointer(chunk))
    	}
    	return false
    }
    
    
    // linearAlloc is a simple linear allocator that pre-reserves a region
    
    // of memory and then optionally maps that region into the Ready state
    // as needed.
    //
    // The caller is responsible for locking.
    
    type linearAlloc struct {
    	next   uintptr // next free byte
    	mapped uintptr // one byte past end of mapped space
    	end    uintptr // end of reserved space
    
    
	mapMemory bool // transition memory from Reserved to Ready if true
}

    func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
    
    	if base+size < base {
    		// Chop off the last byte. The runtime isn't prepared
    		// to deal with situations where the bounds could overflow.
    		// Leave that memory reserved, though, so we don't map it
    		// later.
    		size -= 1
    	}
    
    	l.next, l.mapped = base, base
	l.end = base + size
	l.mapMemory = mapMemory
}

func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	p := alignUp(l.next, align)
    	if p+size > l.end {
    		return nil
    	}
    	l.next = p + size
    
    	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
    
    		if l.mapMemory {
    			// Transition from Reserved to Prepared to Ready.
    			sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
    			sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
    		}
    
    		l.mapped = pEnd
    	}
    	return unsafe.Pointer(p)
    }
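
// Worked example (addresses are illustrative): after init(0x100000, 1<<20, true),
// next == mapped == 0x100000 and end == 0x200000. A first alloc(24, 8, stat)
// returns 0x100000, advances next to 0x100018, and maps pages from the old
// mapped mark up to the physical page boundary above the new next; later
// allocations that stay below that boundary only bump next and do not call
// sysMap again.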
    
    
    // notInHeap is off-heap memory allocated by a lower-level allocator
    // like sysAlloc or persistentAlloc.
    //
    // In general, it's better to use real types marked as go:notinheap,
    // but this serves as a generic type for situations where that isn't
    // possible (like in the allocators).
    //
    // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
    //
    //go:notinheap
    type notInHeap struct{}
    
    func (p *notInHeap) add(bytes uintptr) *notInHeap {
    	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
    }
    
    
// computeRZlog computes the size of the ASan redzone for a given user
// allocation size. The thresholds mirror the implementation in compiler-rt.
    func computeRZlog(userSize uintptr) uintptr {
    	switch {
    	case userSize <= (64 - 16):
    		return 16 << 0
    	case userSize <= (128 - 32):
    		return 16 << 1
    	case userSize <= (512 - 64):
    		return 16 << 2
    	case userSize <= (4096 - 128):
    		return 16 << 3
    	case userSize <= (1<<14)-256:
    		return 16 << 4
    	case userSize <= (1<<15)-512:
    		return 16 << 5
    	case userSize <= (1<<16)-1024:
    		return 16 << 6
    	default:
    		return 16 << 7
    	}
    }
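
// For example, with the table above a 40-byte user allocation gets a
// 16-byte redzone (40 <= 64-16), while a 100-byte allocation gets a
// 64-byte redzone (100 > 128-32 but 100 <= 512-64); the redzone grows
// with the request so the relative ASan overhead stays bounded.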