malloc.go
//
    // Do not remove or change the type signature.
    // See go.dev/issue/67401.
    //
    //go:linkname mallocgc
    
    func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
    
    	if doubleCheckMalloc {
    		if gcphase == _GCmarktermination {
    			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}
    
    	// Short-circuit zero-sized allocation requests.
    
    	if size == 0 {
    
		return unsafe.Pointer(&zerobase)
	}

    
    	// It's possible for any malloc to trigger sweeping, which may in
    	// turn queue finalizers. Record this dynamic lock edge.
    
    	// N.B. Compiled away if lockrank experiment is not enabled.
    
    	lockRankMayQueueFinalizer()
    
    
	// Pre-malloc debug hooks.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}
    
    	// For ASAN, we allocate extra memory around each allocation called the "redzone."
    	// These "redzones" are marked as unaddressable.
    	var asanRZ uintptr
    	if asanenabled {
    
		asanRZ = redZoneSize(size)
		size += asanRZ
	}

    	// Assist the GC if needed.
    	if gcBlackenEnabled != 0 {
    		deductAssistCredit(size)
    	}
    
    	// Actually do the allocation.
    	var x unsafe.Pointer
    	var elemsize uintptr
    	if size <= maxSmallSize-mallocHeaderSize {
    		if typ == nil || !typ.Pointers() {
    			if size < maxTinySize {
    
    				x, elemsize = mallocgcTiny(size, typ)
    
    			} else {
    				x, elemsize = mallocgcSmallNoscan(size, typ, needzero)
    			}
    		} else {
    
    			if !needzero {
    				throw("objects with pointers must be zeroed")
    			}
    			if heapBitsInSpan(size) {
    				x, elemsize = mallocgcSmallScanNoHeader(size, typ)
    			} else {
    				x, elemsize = mallocgcSmallScanHeader(size, typ)
    			}
    
    		}
    	} else {
    		x, elemsize = mallocgcLarge(size, typ, needzero)
    	}
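	// Illustrative note (not exhaustive): a 12-byte noscan request lands in
	// mallocgcTiny (12 < maxTinySize), a 64-byte request for a type with
	// pointers takes one of the small scan paths, and anything above
	// maxSmallSize-mallocHeaderSize is handled by mallocgcLarge.
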
    
    	// Notify sanitizers, if enabled.
    	if raceenabled {
    		racemalloc(x, size-asanRZ)
    	}
    	if msanenabled {
    		msanmalloc(x, size-asanRZ)
    	}
    	if asanenabled {
    		// Poison the space between the end of the requested size of x
    		// and the end of the slot. Unpoison the requested allocation.
    
		frag := elemsize - size
		if typ != nil && typ.Pointers() && !heapBitsInSpan(elemsize) && size <= maxSmallSize-mallocHeaderSize {
			frag -= mallocHeaderSize
		}
		asanpoison(unsafe.Add(x, size-asanRZ), asanRZ+frag)
		asanunpoison(x, size-asanRZ)
    	}
    
    	// Adjust our GC assist debt to account for internal fragmentation.
    	if gcBlackenEnabled != 0 && elemsize != 0 {
    		if assistG := getg().m.curg; assistG != nil {
    			assistG.gcAssistBytes -= int64(elemsize - size)
    		}
    	}
    
    	// Post-malloc debug hooks.
    	if debug.malloc {
    		postMallocgcDebug(x, elemsize, typ)
    	}
    	return x
    }
    
    
    func mallocgcTiny(size uintptr, typ *_type) (unsafe.Pointer, uintptr) {
    
    	// Set mp.mallocing to keep from being preempted by GC.
    	mp := acquirem()
    
    	if doubleCheckMalloc {
    		if mp.mallocing != 0 {
    			throw("malloc deadlock")
    		}
    		if mp.gsignal == getg() {
    			throw("malloc during signal")
    		}
    
    		if typ != nil && typ.Pointers() {
    			throw("expected noscan for tiny alloc")
		}
	}
	mp.mallocing = 1

    	// Tiny allocator.
    	//
    	// Tiny allocator combines several tiny allocation requests
    	// into a single memory block. The resulting memory block
    	// is freed when all subobjects are unreachable. The subobjects
    	// must be noscan (don't have pointers), this ensures that
    	// the amount of potentially wasted memory is bounded.
    	//
    	// Size of the memory block used for combining (maxTinySize) is tunable.
    	// Current setting is 16 bytes, which relates to 2x worst case memory
    	// wastage (when all but one subobjects are unreachable).
	// 8 bytes would result in no wastage at all, but provides fewer
    	// opportunities for combining.
    	// 32 bytes provides more opportunities for combining,
    	// but can lead to 4x worst case wastage.
    	// The best case winning is 8x regardless of block size.
    	//
    	// Objects obtained from tiny allocator must not be freed explicitly.
    	// So when an object will be freed explicitly, we ensure that
    	// its size >= maxTinySize.
    	//
    	// SetFinalizer has a special case for objects potentially coming
	// from the tiny allocator; in such a case it allows setting finalizers
    	// for an inner byte of a memory block.
    	//
    	// The main targets of tiny allocator are small strings and
    	// standalone escaping variables. On a json benchmark
    	// the allocator reduces number of allocations by ~12% and
    	// reduces heap size by ~20%.
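	//
	// Illustrative sketch (not a real call sequence from this file): with
	// maxTinySize = 16, three consecutive noscan requests of 4, 8, and 8
	// bytes could be packed as follows:
	//
	//	mallocgc(4, nil, true) // starts a fresh 16-byte block, tinyoffset = 4
	//	mallocgc(8, nil, true) // offset aligned up to 8, tinyoffset = 16
	//	mallocgc(8, nil, true) // block is full, so a new block is started
	//
	// The first block is reclaimed only after the first two objects are
	// both unreachable.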
    
    	c := getMCache(mp)
    
    	off := c.tinyoffset
    	// Align tiny pointer for required (conservative) alignment.
    	if size&7 == 0 {
    		off = alignUp(off, 8)
    	} else if goarch.PtrSize == 4 && size == 12 {
    		// Conservatively align 12-byte objects to 8 bytes on 32-bit
    		// systems so that objects whose first field is a 64-bit
    		// value is aligned to 8 bytes and does not cause a fault on
    		// atomic access. See issue 37262.
    		// TODO(mknyszek): Remove this workaround if/when issue 36606
    		// is resolved.
    		off = alignUp(off, 8)
    	} else if size&3 == 0 {
    		off = alignUp(off, 4)
    	} else if size&1 == 0 {
		off = alignUp(off, 2)
	}

    	if off+size <= maxTinySize && c.tiny != 0 {
    		// The object fits into existing tiny block.
    		x := unsafe.Pointer(c.tiny + off)
    		c.tinyoffset = off + size
    		c.tinyAllocs++
    		mp.mallocing = 0
    		releasem(mp)
    		return x, 0
    	}
    	// Allocate a new maxTinySize block.
    	checkGCTrigger := false
    	span := c.alloc[tinySpanClass]
    	v := nextFreeFast(span)
    	if v == 0 {
    		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
    	}
    	x := unsafe.Pointer(v)
    
    	(*[2]uint64)(x)[0] = 0 // Always zero
    
    	(*[2]uint64)(x)[1] = 0
    	// See if we need to replace the existing tiny block with the new one
    	// based on amount of remaining free space.
    	if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
    		// Note: disabled when race detector is on, see comment near end of this function.
    		c.tiny = uintptr(x)
    		c.tinyoffset = size
    	}
    
    	// Ensure that the stores above that initialize x to
    	// type-safe memory and set the heap bits occur before
    	// the caller can make x observable to the garbage
    	// collector. Otherwise, on weakly ordered machines,
    	// the garbage collector could follow a pointer to x,
    	// but see uninitialized memory or stale heap bits.
    	publicationBarrier()
    	// As x and the heap bits are initialized, update
    	// freeIndexForScan now so x is seen by the GC
    	// (including conservative scan) as an allocated object.
    	// While this pointer can't escape into user code as a
    	// _live_ pointer until we return, conservative scanning
    	// may find a dead pointer that happens to point into this
    	// object. Delaying this update until now ensures that
    	// conservative scanning considers this pointer dead until
    	// this point.
    	span.freeIndexForScan = span.freeindex
    
    	// Allocate black during GC.
    	// All slots hold nil so no scanning is needed.
    	// This may be racing with GC so do it atomically if there can be
    	// a race marking the bit.
    	if writeBarrier.enabled {
    		gcmarknewobject(span, uintptr(x))
    	}
    
	// Note cache c only valid while m acquired; see #47302
	//
    	// N.B. Use the full size because that matches how the GC
    	// will update the mem profile on the "free" side.
    	//
    	// TODO(mknyszek): We should really count the header as part
    	// of gc_sys or something. The code below just pretends it is
    	// internal fragmentation and matches the GC's accounting by
    	// using the whole allocation slot.
    	c.nextSample -= int64(span.elemsize)
    	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
    		profilealloc(mp, x, span.elemsize)
    	}
    	mp.mallocing = 0
    	releasem(mp)
    
    	if checkGCTrigger {
    		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
    			gcStart(t)
    		}
    	}
    
    	if raceenabled {
    		// Pad tinysize allocations so they are aligned with the end
    		// of the tinyalloc region. This ensures that any arithmetic
    		// that goes off the top end of the object will be detectable
    		// by checkptr (issue 38872).
    		// Note that we disable tinyalloc when raceenabled for this to work.
    		// TODO: This padding is only performed when the race detector
    		// is enabled. It would be nice to enable it if any package
    		// was compiled with checkptr, but there's no easy way to
    		// detect that (especially at compile time).
    		// TODO: enable this padding for all allocations, not just
    		// tinyalloc ones. It's tricky because of pointer maps.
    		// Maybe just all noscan objects?
    		x = add(x, span.elemsize-size)
    	}
    	return x, span.elemsize
    }
    
    func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
    	// Set mp.mallocing to keep from being preempted by GC.
    	mp := acquirem()
    	if doubleCheckMalloc {
    		if mp.mallocing != 0 {
    			throw("malloc deadlock")
    		}
    		if mp.gsignal == getg() {
    			throw("malloc during signal")
    		}
    		if typ != nil && typ.Pointers() {
    			throw("expected noscan type for noscan alloc")
    
    	}
    	mp.mallocing = 1
    
    	checkGCTrigger := false
    	c := getMCache(mp)
    	var sizeclass uint8
    	if size <= smallSizeMax-8 {
    		sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
    
    	} else {
    
    		sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
    	}
    	size = uintptr(class_to_size[sizeclass])
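	// For example (illustrative, using the generated size class tables): a
	// 24-byte request already matches the 24-byte class, while a 33-byte
	// request is rounded up to the 48-byte class, so the slot handed back
	// may be larger than the caller asked for.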
    	spc := makeSpanClass(sizeclass, true)
    	span := c.alloc[spc]
    	v := nextFreeFast(span)
    	if v == 0 {
    		v, span, checkGCTrigger = c.nextFree(spc)
    	}
    	x := unsafe.Pointer(v)
    	if needzero && span.needzero != 0 {
    		memclrNoHeapPointers(x, size)
    	}
    
    	// Ensure that the stores above that initialize x to
    	// type-safe memory and set the heap bits occur before
    	// the caller can make x observable to the garbage
    	// collector. Otherwise, on weakly ordered machines,
    	// the garbage collector could follow a pointer to x,
    	// but see uninitialized memory or stale heap bits.
    	publicationBarrier()
    	// As x and the heap bits are initialized, update
    	// freeIndexForScan now so x is seen by the GC
    	// (including conservative scan) as an allocated object.
    	// While this pointer can't escape into user code as a
    	// _live_ pointer until we return, conservative scanning
    	// may find a dead pointer that happens to point into this
    	// object. Delaying this update until now ensures that
    	// conservative scanning considers this pointer dead until
    	// this point.
    	span.freeIndexForScan = span.freeindex
    
    	// Allocate black during GC.
    	// All slots hold nil so no scanning is needed.
    	// This may be racing with GC so do it atomically if there can be
    	// a race marking the bit.
    	if writeBarrier.enabled {
    		gcmarknewobject(span, uintptr(x))
    	}
    
    	// Note cache c only valid while m acquired; see #47302
    	//
    	// N.B. Use the full size because that matches how the GC
    	// will update the mem profile on the "free" side.
    	//
    	// TODO(mknyszek): We should really count the header as part
    	// of gc_sys or something. The code below just pretends it is
    	// internal fragmentation and matches the GC's accounting by
    	// using the whole allocation slot.
    	c.nextSample -= int64(size)
    	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
    		profilealloc(mp, x, size)
    	}
    	mp.mallocing = 0
    	releasem(mp)
    
    	if checkGCTrigger {
    		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	return x, size
}

    func mallocgcSmallScanNoHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) {
    
    	// Set mp.mallocing to keep from being preempted by GC.
    	mp := acquirem()
    	if doubleCheckMalloc {
    		if mp.mallocing != 0 {
    			throw("malloc deadlock")
    		}
    		if mp.gsignal == getg() {
    			throw("malloc during signal")
    		}
    		if typ == nil || !typ.Pointers() {
    			throw("noscan allocated in scan-only path")
    		}
    		if !heapBitsInSpan(size) {
    			throw("heap bits in not in span for non-header-only path")
    
    	mp.mallocing = 1
    
    	checkGCTrigger := false
    	c := getMCache(mp)
    	sizeclass := size_to_class8[divRoundUp(size, smallSizeDiv)]
    	spc := makeSpanClass(sizeclass, false)
    	span := c.alloc[spc]
    	v := nextFreeFast(span)
    	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, size)
	}
    	if goarch.PtrSize == 8 && sizeclass == 1 {
    		// initHeapBits already set the pointer bits for the 8-byte sizeclass
    		// on 64-bit platforms.
    		c.scanAlloc += 8
    	} else {
    		c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span)
    	}
    
    	size = uintptr(class_to_size[sizeclass])
    
    	// Ensure that the stores above that initialize x to
    	// type-safe memory and set the heap bits occur before
    	// the caller can make x observable to the garbage
    	// collector. Otherwise, on weakly ordered machines,
    	// the garbage collector could follow a pointer to x,
    	// but see uninitialized memory or stale heap bits.
    	publicationBarrier()
    
    	// As x and the heap bits are initialized, update
    	// freeIndexForScan now so x is seen by the GC
    
    	// (including conservative scan) as an allocated object.
    
    	// While this pointer can't escape into user code as a
    	// _live_ pointer until we return, conservative scanning
    	// may find a dead pointer that happens to point into this
    	// object. Delaying this update until now ensures that
    	// conservative scanning considers this pointer dead until
    	// this point.
    	span.freeIndexForScan = span.freeindex
    
    	// Allocate black during GC.
    
    	// All slots hold nil so no scanning is needed.
    	// This may be racing with GC so do it atomically if there can be
    	// a race marking the bit.
    
	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	}
    
    	// Note cache c only valid while m acquired; see #47302
    	//
    	// N.B. Use the full size because that matches how the GC
    	// will update the mem profile on the "free" side.
    	//
    	// TODO(mknyszek): We should really count the header as part
    	// of gc_sys or something. The code below just pretends it is
    	// internal fragmentation and matches the GC's accounting by
    	// using the whole allocation slot.
    	c.nextSample -= int64(size)
    	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, size)
	}
	mp.mallocing = 0
	releasem(mp)

    	if checkGCTrigger {
    		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
    			gcStart(t)
		}
	}
	return x, size
}

    func mallocgcSmallScanHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) {
    
    	// Set mp.mallocing to keep from being preempted by GC.
    	mp := acquirem()
    	if doubleCheckMalloc {
    		if mp.mallocing != 0 {
    			throw("malloc deadlock")
    		}
    		if mp.gsignal == getg() {
    			throw("malloc during signal")
    		}
    		if typ == nil || !typ.Pointers() {
    			throw("noscan allocated in scan-only path")
    		}
    		if heapBitsInSpan(size) {
    			throw("heap bits in span for header-only path")
    		}
    	}
    	mp.mallocing = 1
    
    	checkGCTrigger := false
    	c := getMCache(mp)
    	size += mallocHeaderSize
    	var sizeclass uint8
    	if size <= smallSizeMax-8 {
    		sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
    	} else {
    		sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
    	}
    	size = uintptr(class_to_size[sizeclass])
    	spc := makeSpanClass(sizeclass, false)
    	span := c.alloc[spc]
    	v := nextFreeFast(span)
    	if v == 0 {
    		v, span, checkGCTrigger = c.nextFree(spc)
    	}
    	x := unsafe.Pointer(v)
    
    	if span.needzero != 0 {
    
    		memclrNoHeapPointers(x, size)
    	}
    	header := (**_type)(x)
    	x = add(x, mallocHeaderSize)
    
    	c.scanAlloc += heapSetTypeSmallHeader(uintptr(x), size-mallocHeaderSize, typ, header, span)
    
    
    	// Ensure that the stores above that initialize x to
    	// type-safe memory and set the heap bits occur before
    	// the caller can make x observable to the garbage
    	// collector. Otherwise, on weakly ordered machines,
    	// the garbage collector could follow a pointer to x,
    	// but see uninitialized memory or stale heap bits.
    	publicationBarrier()
    	// As x and the heap bits are initialized, update
    	// freeIndexForScan now so x is seen by the GC
    	// (including conservative scan) as an allocated object.
    	// While this pointer can't escape into user code as a
    	// _live_ pointer until we return, conservative scanning
    	// may find a dead pointer that happens to point into this
    	// object. Delaying this update until now ensures that
    	// conservative scanning considers this pointer dead until
    	// this point.
    	span.freeIndexForScan = span.freeindex
    
    	// Allocate black during GC.
    	// All slots hold nil so no scanning is needed.
    	// This may be racing with GC so do it atomically if there can be
    	// a race marking the bit.
    	if writeBarrier.enabled {
    		gcmarknewobject(span, uintptr(x))
    	}
    
    	// Note cache c only valid while m acquired; see #47302
    	//
    	// N.B. Use the full size because that matches how the GC
    	// will update the mem profile on the "free" side.
    	//
    	// TODO(mknyszek): We should really count the header as part
    	// of gc_sys or something. The code below just pretends it is
    	// internal fragmentation and matches the GC's accounting by
    	// using the whole allocation slot.
    	c.nextSample -= int64(size)
    	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
    		profilealloc(mp, x, size)
    	}
    	mp.mallocing = 0
    	releasem(mp)
    
    	if checkGCTrigger {
    		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
    			gcStart(t)
    		}
    	}
    	return x, size
    }
    
    func mallocgcLarge(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
    	// Set mp.mallocing to keep from being preempted by GC.
    	mp := acquirem()
    	if doubleCheckMalloc {
    		if mp.mallocing != 0 {
    			throw("malloc deadlock")
    		}
    		if mp.gsignal == getg() {
    			throw("malloc during signal")
    		}
    	}
    	mp.mallocing = 1
    
    	c := getMCache(mp)
    	// For large allocations, keep track of zeroed state so that
	// bulk zeroing can happen later in a preemptible context.
    	span := c.allocLarge(size, typ == nil || !typ.Pointers())
    	span.freeindex = 1
    	span.allocCount = 1
    	span.largeType = nil // Tell the GC not to look at this yet.
    	size = span.elemsize
    	x := unsafe.Pointer(span.base())
    
    	// Ensure that the stores above that initialize x to
    	// type-safe memory and set the heap bits occur before
    	// the caller can make x observable to the garbage
    	// collector. Otherwise, on weakly ordered machines,
    	// the garbage collector could follow a pointer to x,
    	// but see uninitialized memory or stale heap bits.
    	publicationBarrier()
    	// As x and the heap bits are initialized, update
    	// freeIndexForScan now so x is seen by the GC
    	// (including conservative scan) as an allocated object.
    	// While this pointer can't escape into user code as a
    	// _live_ pointer until we return, conservative scanning
    	// may find a dead pointer that happens to point into this
    	// object. Delaying this update until now ensures that
    	// conservative scanning considers this pointer dead until
    	// this point.
    	span.freeIndexForScan = span.freeindex
    
    	// Allocate black during GC.
    	// All slots hold nil so no scanning is needed.
    	// This may be racing with GC so do it atomically if there can be
    	// a race marking the bit.
    	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	}

    	// Note cache c only valid while m acquired; see #47302
    	//
    	// N.B. Use the full size because that matches how the GC
    	// will update the mem profile on the "free" side.
    	//
    
    	// TODO(mknyszek): We should really count the header as part
    
    	// of gc_sys or something. The code below just pretends it is
    	// internal fragmentation and matches the GC's accounting by
    	// using the whole allocation slot.
    
	c.nextSample -= int64(size)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, size)
	}
    	mp.mallocing = 0
    	releasem(mp)
    
    	// Check to see if we need to trigger the GC.
    	if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
    		gcStart(t)
    	}
    
    
    	// Objects can be zeroed late in a context where preemption can occur.
    	// If the object contains pointers, its pointer data must be cleared
    	// or otherwise indicate that the GC shouldn't scan it.
    
    	if noscan := typ == nil || !typ.Pointers(); !noscan || (needzero && span.needzero != 0) {
    
    		// N.B. size == fullSize always in this case.
    
    		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
    
    
    		// Finish storing the type information for this case.
    
		mp := acquirem()
		if !noscan {
			getMCache(mp).scanAlloc += heapSetTypeLarge(uintptr(x), size, typ, span)
		}
    
    		// Publish the object with the now-zeroed memory.
    		publicationBarrier()
		releasem(mp)
	}
	return x, size
}

    func preMallocgcDebug(size uintptr, typ *_type) unsafe.Pointer {
    	if debug.sbrk != 0 {
    		align := uintptr(16)
    		if typ != nil {
    			// TODO(austin): This should be just
    			//   align = uintptr(typ.align)
    			// but that's only 4 on 32-bit platforms,
    			// even if there's a uint64 field in typ (see #599).
    			// This causes 64-bit atomic accesses to panic.
    			// Hence, we use stricter alignment that matches
    			// the normal allocator better.
    			if size&7 == 0 {
    				align = 8
    			} else if size&3 == 0 {
    				align = 4
    			} else if size&1 == 0 {
    				align = 2
    			} else {
    				align = 1
    			}
    		}
    		return persistentalloc(size, align, &memstats.other_sys)
    	}
    	if inittrace.active && inittrace.id == getg().goid {
    		// Init functions are executed sequentially in a single goroutine.
    		inittrace.allocs += 1
    	}
    	return nil
    }
    
    func postMallocgcDebug(x unsafe.Pointer, elemsize uintptr, typ *_type) {
    	if inittrace.active && inittrace.id == getg().goid {
    		// Init functions are executed sequentially in a single goroutine.
    		inittrace.bytes += uint64(elemsize)
    	}
    
    	if traceAllocFreeEnabled() {
    		trace := traceAcquire()
    		if trace.ok() {
    			trace.HeapObjectAlloc(uintptr(x), typ)
    			traceRelease(trace)
    		}
    	}
    }
    
    
    // deductAssistCredit reduces the current G's assist credit
    // by size bytes, and assists the GC if necessary.
    //
// Caller must be preemptible.
    func deductAssistCredit(size uintptr) {
    	// Charge the current user G for this allocation.
    	assistG := getg()
    	if assistG.m.curg != nil {
    		assistG = assistG.m.curg
    	}
    	// Charge the allocation against the G. We'll account
    	// for internal fragmentation at the end of mallocgc.
    	assistG.gcAssistBytes -= int64(size)
    
    	if assistG.gcAssistBytes < 0 {
    		// This G is in debt. Assist the GC to correct
    		// this before allocating. This must happen
    		// before disabling preemption.
		gcAssistAlloc(assistG)
	}
}

    // memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
    // on chunks of the buffer to be zeroed, with opportunities for preemption
    // along the way.  memclrNoHeapPointers contains no safepoints and also
    // cannot be preemptively scheduled, so this provides a still-efficient
    // block copy that can also be preempted on a reasonable granularity.
    //
    // Use this with care; if the data being cleared is tagged to contain
    // pointers, this allows the GC to run before it is all cleared.
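//
// For example (illustrative): clearing a 1 MiB object proceeds as four
// 256 KiB memclrNoHeapPointers calls, with a preemption check before each
// chunk, so a pending stop-the-world or profiler interrupt waits for at most
// one chunk's worth of clearing.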
    func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
    	v := uintptr(x)
    	// got this from benchmarking. 128k is too small, 512k is too large.
    	const chunkBytes = 256 * 1024
    	vsize := v + size
    	for voff := v; voff < vsize; voff = voff + chunkBytes {
    		if getg().preempt {
    			// may hold locks, e.g., profiling
    			goschedguarded()
    		}
    		// clear min(avail, lump) bytes
    		n := vsize - voff
    		if n > chunkBytes {
    			n = chunkBytes
    		}
    		memclrNoHeapPointers(unsafe.Pointer(voff), n)
    	}
    }
    
    
// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function.
    func newobject(typ *_type) unsafe.Pointer {
    
	return mallocgc(typ.Size_, typ, true)
}

    //go:linkname maps_newobject internal/runtime/maps.newobject
    func maps_newobject(typ *_type) unsafe.Pointer {
    	return newobject(typ)
    }
    
    
    // reflect_unsafe_New is meant for package reflect,
    // but widely used packages access it using linkname.
    // Notable members of the hall of shame include:
    
    //
    // Do not remove or change the type signature.
    // See go.dev/issue/67401.
    //
    
    //go:linkname reflect_unsafe_New reflect.unsafe_New
    func reflect_unsafe_New(typ *_type) unsafe.Pointer {
    
	return mallocgc(typ.Size_, typ, true)
}

    //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
    func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
    
	return mallocgc(typ.Size_, typ, true)
}

    // newarray allocates an array of n elements of type typ.
    
    //
    // newarray should be an internal detail,
    // but widely used packages access it using linkname.
    // Notable members of the hall of shame include:
    
    //   - github.com/RomiChan/protobuf
    
    //   - github.com/segmentio/encoding
    
    //   - github.com/ugorji/go/codec
    //
    // Do not remove or change the type signature.
    // See go.dev/issue/67401.
    //
    //go:linkname newarray
    
    func newarray(typ *_type, n int) unsafe.Pointer {
    
	if n == 1 {
		return mallocgc(typ.Size_, typ, true)
	}
    	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
    
    	if overflow || mem > maxAlloc || n < 0 {
    
    		panic(plainError("runtime: allocation size out of range"))
    
	return mallocgc(mem, typ, true)
}

    // reflect_unsafe_NewArray is meant for package reflect,
    // but widely used packages access it using linkname.
    // Notable members of the hall of shame include:
    
    //   - github.com/segmentio/encoding
    
    //   - github.com/segmentio/kafka-go
    
    //
    // Do not remove or change the type signature.
    // See go.dev/issue/67401.
    //
    
    //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
    
    func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
    
    	return newarray(typ, n)
    }
    
    
    //go:linkname maps_newarray internal/runtime/maps.newarray
    func maps_newarray(typ *_type, n int) unsafe.Pointer {
    	return newarray(typ, n)
    }
    
    
    // profilealloc resets the current mcache's nextSample counter and
    // records a memory profile sample.
    //
    // The caller must be non-preemptible and have a P.
    
    func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
    
    	c := getMCache(mp)
    
    	if c == nil {
    		throw("profilealloc called without a P or outside bootstrapping")
    	}
    
    	c.memProfRate = MemProfileRate
    
    	c.nextSample = nextSample()
    
	mProf_Malloc(mp, x, size)
}

    // nextSample returns the next sampling point for heap profiling. The goal is
    // to sample allocations on average every MemProfileRate bytes, but with a
    // completely random distribution over the allocation timeline; this
    // corresponds to a Poisson process with parameter MemProfileRate. In Poisson
    // processes, the distance between two samples follows the exponential
    // distribution (exp(MemProfileRate)), so the best return value is a random
    // number taken from an exponential distribution whose mean is MemProfileRate.
    
    func nextSample() int64 {
    	if MemProfileRate == 0 {
    		// Basically never sample.
    		return maxInt64
    	}
    
	return int64(fastexprand(MemProfileRate))
}

    // fastexprand returns a random number from an exponential distribution with
    // the specified mean.
    func fastexprand(mean int) int32 {
    	// Avoid overflow. Maximum possible step is
    	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
    
	switch {
	case mean > 0x7000000:
    		mean = 0x7000000
	case mean == 0:
		return 0
	}

    	// Take a random sample of the exponential distribution exp(-mean*x).
    	// The probability distribution function is mean*exp(-mean*x), so the CDF is
    	// p = 1 - exp(-mean*x), so
    	// q = 1 - p == exp(-mean*x)
    	// log_e(q) = -mean*x
    	// -log_e(q)/mean = x
    	// x = -log_e(q) * mean
    	// x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
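	//
	// Worked instance of the formula above (illustrative): if the uniform
	// draw gives q = 1<<25, then log_2(q) - randomBitCount = -1 and
	// x = -1 * (-log_e(2)) * mean, i.e. about 0.69*mean, which is the
	// median of the exponential distribution, as expected.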
    
    	const randomBitCount = 26
    
    	q := cheaprandn(1<<randomBitCount) + 1
    
    	qlog := fastlog2(float64(q)) - randomBitCount
    	if qlog > 0 {
    		qlog = 0
    	}
    	const minusLog2 = -0.6931471805599453 // -ln(2)
    
	return int32(qlog*(minusLog2*float64(mean))) + 1
}

type persistentAlloc struct {
	base *notInHeap
	off  uintptr
}

    var globalAlloc struct {
    	mutex
    	persistentAlloc
    }
    
    
    // persistentChunkSize is the number of bytes we allocate when we grow
    // a persistentAlloc.
    const persistentChunkSize = 256 << 10
    
    // persistentChunks is a list of all the persistent chunks we have
    // allocated. The list is maintained through the first word in the
    // persistent chunk. This is updated atomically.
    var persistentChunks *notInHeap
    
    
    // Wrapper around sysAlloc that can allocate small chunks.
    // There is no associated free operation.
    // Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
//
// Consider marking persistentalloc'd types not in heap by embedding
// internal/runtime/sys.NotInHeap.
    //
    // nosplit because it is used during write barriers and must not be preempted.
    //
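// Illustrative usage sketch (hypothetical caller, not taken from this file):
//
//	p := persistentalloc(1024, 8, &memstats.other_sys)
//	buf := (*[1024]byte)(p)
//
// There is no matching free; the memory remains allocated for the lifetime
// of the process, so this is only appropriate for truly immortal data.
//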
    //go:nosplit
    
    func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
    
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
}
    
    // Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//
//go:systemstack
    func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
    
    	const (
    		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
    	)
    
    
    	if size == 0 {
    		throw("persistentalloc: size == 0")
    	}
    
    	if align != 0 {
    		if align&(align-1) != 0 {
    
    			throw("persistentalloc: align is not a power of 2")
    
    		}
    		if align > _PageSize {
    
    			throw("persistentalloc: align is too large")
    
    		}
    	} else {
    		align = 8
    	}
    
    	if size >= maxBlock {
    
    		return (*notInHeap)(sysAlloc(size, sysStat, "immortal metadata"))
    
    	mp := acquirem()
    	var persistent *persistentAlloc
    
    	if mp != nil && mp.p != 0 {
    		persistent = &mp.p.ptr().palloc
    
    	} else {
    		lock(&globalAlloc.mutex)
    		persistent = &globalAlloc.persistentAlloc
    	}
    
    	persistent.off = alignUp(persistent.off, align)
    
    	if persistent.off+size > persistentChunkSize || persistent.base == nil {
    
    		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys, "immortal metadata"))
    
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
    				unlock(&globalAlloc.mutex)
    			}
    
    			throw("runtime: cannot allocate memory")
    
    
    		// Add the new chunk to the persistentChunks list.
    		for {
    			chunks := uintptr(unsafe.Pointer(persistentChunks))
    			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
    			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
    				break
    			}
    		}
    
		persistent.off = alignUp(goarch.PtrSize, align)
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
    
    	releasem(mp)
    	if persistent == &globalAlloc.persistentAlloc {
    		unlock(&globalAlloc.mutex)
    	}
    
    	if sysStat != &memstats.other_sys {
    
    		sysStat.add(int64(size))
		memstats.other_sys.add(-int64(size))
	}
	return p
}

    // inPersistentAlloc reports whether p points to memory allocated by
    // persistentalloc. This must be nosplit because it is called by the
    // cgo checker code, which is called by the write barrier code.
    
    //go:nosplit
    func inPersistentAlloc(p uintptr) bool {
    	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
    	for chunk != 0 {
    		if p >= chunk && p < chunk+persistentChunkSize {
    			return true
    		}
    		chunk = *(*uintptr)(unsafe.Pointer(chunk))
    	}
    	return false
    }
    
    
// linearAlloc is a simple linear allocator that pre-reserves a region
// of memory and then optionally maps that region into the Ready state
    // as needed.
    //
    // The caller is responsible for locking.
    
    type linearAlloc struct {
    	next   uintptr // next free byte
    	mapped uintptr // one byte past end of mapped space
    	end    uintptr // end of reserved space
    
    
	mapMemory bool // transition memory from Reserved to Ready if true
}

    func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
    
    	if base+size < base {
    		// Chop off the last byte. The runtime isn't prepared
    		// to deal with situations where the bounds could overflow.
    		// Leave that memory reserved, though, so we don't map it
    		// later.
    		size -= 1