diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index 010f20bf9475f792fadcf51f5acf0c56a1131067..ffaf92debc374a23730c3b5c1c1a00904fbce1fc 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -455,8 +455,8 @@ func mallocinit() { throw("max pointer/scan bitmap size for headerless objects is too large") } - if minTagBits > taggedPointerBits { - throw("taggedPointerBits too small") + if minTagBits > tagBits { + throw("tagBits too small") } // Initialize the heap. diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go index b2219b92ce642fce8ab46d2a8dd09f5199004934..fab921e2d385bf3f6c99b3e1fe12caf5c4d51cab 100644 --- a/src/runtime/netpoll.go +++ b/src/runtime/netpoll.go @@ -302,7 +302,7 @@ func (c *pollCache) free(pd *pollDesc) { // Increment the fdseq field, so that any currently // running netpoll calls will not mark pd as ready. fdseq := pd.fdseq.Load() - fdseq = (fdseq + 1) & (1<<taggedPointerBits - 1) + fdseq = (fdseq + 1) & (1<<tagBits - 1) pd.fdseq.Store(fdseq) pd.publishInfo() diff --git a/src/runtime/tagptr_64bit.go b/src/runtime/tagptr_64bit.go index b0e3d97223b06303e0873b67a13ea6ee1ff81b67..31cbce03b742016e8e928d0235a276516026e333 100644 --- a/src/runtime/tagptr_64bit.go +++ b/src/runtime/tagptr_64bit.go @@ -17,21 +17,15 @@ const ( // // See heapAddrBits for a table of address space sizes on // various architectures. 48 bits is enough for all - // architectures except s390x. + // arch/os combos except s390x, aix, and riscv64. // - // On AMD64, virtual addresses are 48-bit (or 57-bit) numbers sign extended to 64. - // We shift the address left 16 to eliminate the sign extended part and make - // room in the bottom for the count. + // On AMD64, virtual addresses are 48-bit (or 57-bit) sign-extended. + // Other archs are 48-bit zero-extended. // // On s390x, virtual addresses are 64-bit. There's not much we // can do about this, so we just hope that the kernel doesn't // get to really high addresses and panic if it does. 
-	addrBits = 48
-
-	// In addition to the 16 bits taken from the top, we can take 9 from the
-	// bottom, because we require pointers to be well-aligned (see tagptr.go:tagAlignBits).
-	// That gives us a total of 25 bits for the tag.
-	tagBits = 64 - addrBits + tagAlignBits
+	defaultAddrBits = 48
 
 	// On AIX, 64-bit addresses are split into 36-bit segment number and 28-bit
 	// offset in segment. Segment numbers in the range 0x0A0000000-0x0AFFFFFFF(LSA)
@@ -39,33 +33,25 @@ const (
 	// We assume all tagged addresses are from memory allocated with mmap.
 	// We use one bit to distinguish between the two ranges.
 	aixAddrBits = 57
-	aixTagBits = 64 - aixAddrBits + tagAlignBits
 
 	// riscv64 SV57 mode gives 56 bits of userspace VA.
 	// tagged pointer code supports it,
 	// but broader support for SV57 mode is incomplete,
 	// and there may be other issues (see #54104).
 	riscv64AddrBits = 56
-	riscv64TagBits = 64 - riscv64AddrBits + tagAlignBits
-)
 
-// The number of bits stored in the numeric tag of a taggedPointer
-const taggedPointerBits = (goos.IsAix * aixTagBits) + (goarch.IsRiscv64 * riscv64TagBits) + ((1 - goos.IsAix) * (1 - goarch.IsRiscv64) * tagBits)
+	addrBits = goos.IsAix*aixAddrBits + goarch.IsRiscv64*riscv64AddrBits + (1-goos.IsAix)*(1-goarch.IsRiscv64)*defaultAddrBits
+
+	// In addition to the 16 bits (or other, depending on arch/os) taken from the top,
+	// we can take 9 from the bottom, because we require pointers to be well-aligned
+	// (see tagptr.go:tagAlignBits). That gives us 25 tag bits in the default 48-bit
+	// address case (fewer on aix and riscv64, whose address spaces are larger).
+	tagBits = 64 - addrBits + tagAlignBits
+)
func taggedPointerPack(ptr unsafe.Pointer, tag uintptr) taggedPointer { - var t taggedPointer - if GOOS == "aix" { - if GOARCH != "ppc64" { - throw("check this code for aix on non-ppc64") - } - t = taggedPointer(uint64(uintptr(ptr))<<(64-aixAddrBits) | uint64(tag&(1<<aixTagBits-1))) - } else if GOARCH == "riscv64" { - t = taggedPointer(uint64(uintptr(ptr))<<(64-riscv64AddrBits) | uint64(tag&(1<<riscv64TagBits-1))) - } else { - t = taggedPointer(uint64(uintptr(ptr))<<(64-addrBits) | uint64(tag&(1<<tagBits-1))) - } + t := taggedPointer(uint64(uintptr(ptr))<<(tagBits-tagAlignBits) | uint64(tag&(1<<tagBits-1))) if t.pointer() != ptr || t.tag() != tag { print("runtime: taggedPointerPack invalid packing: ptr=", ptr, " tag=", hex(tag), " packed=", hex(t), " -> ptr=", t.pointer(), " tag=", hex(t.tag()), "\n") throw("taggedPointerPack") @@ -80,16 +66,10 @@ func (tp taggedPointer) pointer() unsafe.Pointer { // val before unpacking. return unsafe.Pointer(uintptr(int64(tp) >> tagBits << tagAlignBits)) } - if GOOS == "aix" { - return unsafe.Pointer(uintptr((tp >> aixTagBits << tagAlignBits) | 0xa<<56)) - } - if GOARCH == "riscv64" { - return unsafe.Pointer(uintptr(tp >> riscv64TagBits << tagAlignBits)) - } return unsafe.Pointer(uintptr(tp >> tagBits << tagAlignBits)) } // Tag returns the tag from a taggedPointer. func (tp taggedPointer) tag() uintptr { - return uintptr(tp & (1<<taggedPointerBits - 1)) + return uintptr(tp & (1<<tagBits - 1)) }