Skip to content
Snippets Groups Projects
range.go 18.5 KiB
Newer Older
  • Learn to ignore specific revisions
  • // Copyright 2009 The Go Authors. All rights reserved.
    // Use of this source code is governed by a BSD-style
    // license that can be found in the LICENSE file.
    
    
    	"internal/buildcfg"
    
    	"cmd/compile/internal/reflectdata"
    
    	"cmd/compile/internal/ssagen"
    
    	"cmd/compile/internal/typecheck"
    
    	"cmd/internal/src"
    
    // cheapComputableIndex reports whether an indexing expression with an
    // element of the given width (in bytes) is cheap to compute on the
    // target architecture, i.e. whether the addressing mode can fold the
    // scaled index directly instead of maintaining an incremented pointer.
    func cheapComputableIndex(width int64) bool {
    	family := ssagen.Arch.LinkArch.Family
    	// MIPS does not have R+R addressing
    	// Arm64 may lack ability to generate this code in our assembler,
    	// but the architecture supports it.
    	if family == sys.PPC64 || family == sys.S390X {
    		return width == 1
    	}
    	if family == sys.AMD64 || family == sys.I386 || family == sys.ARM64 || family == sys.ARM {
    		return width == 1 || width == 2 || width == 4 || width == 8
    	}
    	return false
    }
    
    
    // walkRange transforms various forms of ORANGE into

    // simpler forms.  The result must be assigned back to n.
    // Node n may also be modified in place, and may also be
    // the returned node.
    //
    // NOTE(review): this copy of walkRange is missing several lines
    // relative to a well-formed function (e.g. the declarations of t,
    // a, ha, lno, init and body, plus some if-guards and closing
    // braces appear elided). Confirm against upstream cmd/compile
    // before making behavioral edits.

    func walkRange(nrange *ir.RangeStmt) ir.Node {

    	base.Assert(!nrange.DistinctVars) // Should all be rewritten before escape analysis

    		return mapRangeClear(nrange)

    	// Replacement loop; its Init/Cond/Post/Body are filled in per-kind below.
    	nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil, nrange.DistinctVars)

    	// variable name conventions:
    	//	ohv1, hv1, hv2: hidden (old) val 1, 2
    	//	ha, hit: hidden aggregate, iterator
    	//	hn, hp: hidden len, pointer
    	//	hb: hidden bool
    	//	a, v1, v2: not hidden aggregate, val 1, 2


    	v1, v2 := nrange.Key, nrange.Value

    	if ir.IsBlank(v1) && v2 == nil {

    		base.Fatalf("walkRange: v2 != nil while v1 == nil")

    	// Dispatch on the kind of the ranged expression's type t.
    	switch k := t.Kind(); {

    	case types.IsInt[k]:

    		// "for v1 := range n" over an integer: count hv1 from 0 up to hn.
    		if nn := arrayRangeClear(nrange, v1, v2, a); nn != nil {
    			base.Pos = lno
    			return nn
    		}

    		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, t)
    		hn := typecheck.TempAt(base.Pos, ir.CurFunc, t)

    		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
    		init = append(init, ir.NewAssignStmt(base.Pos, hn, a))

    		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
    		nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1)))

    		if v1 != nil {
    			body = []ir.Node{rangeAssign(nrange, hv1)}
    		}

    	case k == types.TARRAY, k == types.TSLICE, k == types.TPTR: // TPTR is pointer-to-array

    		if nn := arrayRangeClear(nrange, v1, v2, a); nn != nil {

    		// Element type of the iteration
    		var elem *types.Type
    		switch t.Kind() {
    		case types.TSLICE, types.TARRAY:
    			elem = t.Elem()
    		case types.TPTR:
    			elem = t.Elem().Elem()
    		}


    		// order.stmt arranged for a copy of the array/slice variable if needed.

    		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
    		hn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])

    		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
    		init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)))

    		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)

    		nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1)))

    			break
    		}

    		// for v1 := range ha { body }
    		if v2 == nil {

    			body = []ir.Node{rangeAssign(nrange, hv1)}

    		// When indexing is cheap, load the element with ha[hv1] directly
    		// instead of maintaining a moving pointer (see below).
    		if cheapComputableIndex(elem.Size()) {

    			tmp := ir.NewIndexExpr(base.Pos, ha, hv1)

    			body = []ir.Node{rangeAssign2(nrange, hv1, tmp)}

    		// Slice to iterate over
    		var hs ir.Node
    		if t.IsSlice() {
    			hs = ha
    		} else {
    			var arr ir.Node
    			if t.IsPtr() {
    				arr = ha
    			} else {
    				arr = typecheck.NodAddr(ha)
    				arr.SetType(t.PtrTo())
    				arr.SetTypecheck(1)
    			}
    			hs = ir.NewSliceExpr(base.Pos, ir.OSLICEARR, arr, nil, nil, nil)
    			// old typechecker doesn't know OSLICEARR, so we set types explicitly
    			hs.SetType(types.NewSlice(elem))
    			hs.SetTypecheck(1)
    		}


    		// We use a "pointer" to keep track of where we are in the backing array
    		// of the slice hs. This pointer starts at hs.ptr and gets incremented
    		// by the element size each time through the loop.
    		//
    		// It's tricky, though, as on the last iteration this pointer gets
    		// incremented to point past the end of the backing array. We can't
    		// let the garbage collector see that final out-of-bounds pointer.
    		//
    		// To avoid this, we keep the "pointer" alternately in 2 variables, one
    		// pointer typed and one uintptr typed. Most of the time it lives in the
    		// regular pointer variable, but when it might be out of bounds (after it
    		// has been incremented, but before the loop condition has been checked)
    		// it lives briefly in the uintptr variable.
    		//
    		// hp contains the pointer version (of type *T, where T is the element type).
    		// It is guaranteed to always be in range, keeps the backing store alive,
    		// and is updated on stack copies. If a GC occurs when this function is
    		// suspended at any safepoint, this variable ensures correct operation.
    		//
    		// hu contains the equivalent uintptr version. It may point past the
    		// end, but doesn't keep the backing store alive and doesn't get updated
    		// on a stack copy. If a GC occurs while this function is on the top of
    		// the stack, then the last frame is scanned conservatively and hu will
    		// act as a reference to the backing array to ensure it is not collected.
    		//
    		// The "pointer" we're moving across the backing array lives in one
    		// or the other of hp and hu as the loop proceeds.
    		//
    		// hp is live during most of the body of the loop. But it isn't live
    		// at the very top of the loop, when we haven't checked i<n yet, and
    		// it could point off the end of the backing store.
    		// hu is live only at the very top and very bottom of the loop.
    		// In particular, only when it cannot possibly be live across a call.
    		//
    		// So we do
    		//   hu = uintptr(unsafe.Pointer(hs.ptr))
    		//   for i := 0; i < hs.len; i++ {
    		//     hp = (*T)(unsafe.Pointer(hu))
    		//     v1, v2 = i, *hp
    		//     ... body of loop ...
    		//     hu = uintptr(unsafe.Pointer(hp)) + elemsize
    		//   }
    		//
    		// Between the assignments to hu and the assignment back to hp, there
    		// must not be any calls.

    		// Pointer to current iteration position. Start on entry to the loop
    		// with the pointer in hu.
    		ptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, hs)

    		huVal := ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], ptr)
    		huVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUINTPTR], huVal)

    		hu := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])

    		init = append(init, ir.NewAssignStmt(base.Pos, hu, huVal))


    		// Convert hu to hp at the top of the loop (after the condition has been checked).

    		hpVal := ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], hu)
    		hpVal.SetCheckPtr(true) // disable checkptr on this conversion
    		hpVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, elem.PtrTo(), hpVal)

    		hp := typecheck.TempAt(base.Pos, ir.CurFunc, elem.PtrTo())

    		body = append(body, ir.NewAssignStmt(base.Pos, hp, hpVal))

    		// Assign variables on the LHS of the range statement. Use *hp to get the element.
    		e := ir.NewStarExpr(base.Pos, hp)
    		e.SetBounded(true)
    		a := rangeAssign2(nrange, hv1, e)

    		// Advance pointer for next iteration of the loop.

    		// This reads from hp and writes to hu.
    		huVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], hp)
    		huVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUINTPTR], huVal)

    		as := ir.NewAssignStmt(base.Pos, hu, ir.NewBinaryExpr(base.Pos, ir.OADD, huVal, ir.NewInt(base.Pos, elem.Size())))

    		nfor.Post = ir.NewBlockStmt(base.Pos, []ir.Node{nfor.Post, as})

    	case k == types.TMAP:

    		// order.stmt allocated the iterator for us.

    		// we only use a once, so no copy needed.

    		hit := nrange.Prealloc

    		// depends on layout of iterator struct.
    		// See cmd/compile/internal/reflectdata/reflect.go:MapIterType

    		var keysym, elemsym *types.Sym

    		var iterInit, iterNext string

    		// Runtime entry points differ between the swiss-map and the
    		// old map implementations.
    		if buildcfg.Experiment.SwissMap {
    			keysym = th.Field(0).Sym
    			elemsym = th.Field(1).Sym // ditto

    			iterInit = "mapIterStart"
    			iterNext = "mapIterNext"

    		} else {
    			keysym = th.Field(0).Sym
    			elemsym = th.Field(1).Sym // ditto

    			iterInit = "mapiterinit"
    			iterNext = "mapiternext"

    		// init: iterInit(maptype, ha, &hit); cond: hit.key != nil; post: iterNext(&hit).
    		fn := typecheck.LookupRuntime(iterInit, t.Key(), t.Elem(), th)

    		init = append(init, mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), ha, typecheck.NodAddr(hit)))

    		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())

    		fn = typecheck.LookupRuntime(iterNext, th)

    		nfor.Post = mkcallstmt1(fn, typecheck.NodAddr(hit))

    		key := ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), types.NewPtr(t.Key())))

    		if v1 == nil {
    			body = nil
    		} else if v2 == nil {

    			body = []ir.Node{rangeAssign(nrange, key)}

    			elem := ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym), types.NewPtr(t.Elem())))

    			body = []ir.Node{rangeAssign2(nrange, key, elem)}

    	case k == types.TCHAN:

    		// order.stmt arranged for a copy of the channel variable.

    		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, t.Elem())

    			init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))

    		hb := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])

    		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(base.Pos, false))

    		// hv1, hb = <-ha; the loop runs while hb (receive succeeded,
    		// i.e. channel not closed).
    		lhs := []ir.Node{hv1, hb}
    		rhs := []ir.Node{ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)}
    		a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, lhs, rhs)

    		nfor.Cond = ir.InitExpr([]ir.Node{a}, nfor.Cond)

    			body = []ir.Node{rangeAssign(nrange, hv1)}

    		// Zero hv1. This prevents hv1 from being the sole, inaccessible
    		// reference to an otherwise GC-able value during the next channel receive.
    		// See issue 15281.

    		body = append(body, ir.NewAssignStmt(base.Pos, hv1, nil))

    	case k == types.TSTRING:

    		// Transform string range statements like "for v1, v2 = range a" into
    		//
    		// ha := a
    		// for hv1 := 0; hv1 < len(ha); {

    		//   hv2 := rune(ha[hv1])
    		//   if hv2 < utf8.RuneSelf {
    		//      hv1++
    		//   } else {

    		//      hv2, hv1 = decoderune(ha, hv1)

    		// order.stmt arranged for a copy of the string variable.

    		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
    		hv1t := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
    		hv2 := typecheck.TempAt(base.Pos, ir.CurFunc, types.RuneType)

    		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))

    		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))

    			body = append(body, ir.NewAssignStmt(base.Pos, hv1t, hv1))

    		nind := ir.NewIndexExpr(base.Pos, ha, hv1)

    		body = append(body, ir.NewAssignStmt(base.Pos, hv2, typecheck.Conv(nind, types.RuneType)))

    		nif := ir.NewIfStmt(base.Pos, nil, nil, nil)


    		// On x86, hv2 <= 127 is shorter to encode than hv2 < 128
    		// Doesn't hurt other archs.
    		nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLE, hv2, ir.NewInt(base.Pos, utf8.RuneSelf-1))

    		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1)))}

    		// hv2, hv1 = decoderune(ha, hv1)

    		fn := typecheck.LookupRuntime("decoderune")

    		call := mkcall1(fn, fn.Type().ResultsTuple(), &nif.Else, ha, hv1)

    		a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{hv2, hv1}, []ir.Node{call})
    		nif.Else.Append(a)

    		if v1 != nil {
    			if v2 != nil {
    				// v1, v2 = hv1t, hv2

    				body = append(body, rangeAssign2(nrange, hv1t, hv2))

    				body = append(body, rangeAssign(nrange, hv1t))

    	// Attach the accumulated init statements and typecheck the pieces
    	// built above before splicing in the user's loop body.
    	nfor.PtrInit().Append(init...)

    	typecheck.Stmts(nfor.Cond.Init())

    	nfor.Cond = typecheck.Expr(nfor.Cond)
    	nfor.Cond = typecheck.DefaultLit(nfor.Cond, nil)
    	nfor.Post = typecheck.Stmt(nfor.Post)
    	typecheck.Stmts(body)

    	nfor.Body.Append(nrange.Body...)
    
    // rangeAssign builds the single-assignment statement "n.Key = key",
    // first converting key to the key variable's type when necessary.
    func rangeAssign(n *ir.RangeStmt, key ir.Node) ir.Node {
    	converted := rangeConvert(n, n.Key.Type(), key, n.KeyTypeWord, n.KeySrcRType)
    	return ir.NewAssignStmt(n.Pos(), n.Key, converted)
    }
    
    // rangeAssign2 builds the statement "n.Key, n.Value = key, value",
    // converting each operand to its destination type when necessary.
    func rangeAssign2(n *ir.RangeStmt, key, value ir.Node) ir.Node {
    	// Use OAS2 to correctly handle assignments
    	// of the form "v1, a[v1] = range".
    	lhs := []ir.Node{n.Key, n.Value}
    	rhs := []ir.Node{
    		rangeConvert(n, n.Key.Type(), key, n.KeyTypeWord, n.KeySrcRType),
    		rangeConvert(n, n.Value.Type(), value, n.ValueTypeWord, n.ValueSrcRType),
    	}
    	return ir.NewAssignListStmt(n.Pos(), ir.OAS2, lhs, rhs)
    }
    
    
    // rangeConvert returns src converted to dst where a conversion is
    // required. No conversion is emitted when dst is the blank type or
    // already identical to src's type; otherwise typeWord and srcRType
    // are attached to the resulting ConvExpr for the walk phase.
    func rangeConvert(nrange *ir.RangeStmt, dst *types.Type, src, typeWord, srcRType ir.Node) ir.Node {
    	src = typecheck.Expr(src)
    	if dst.Kind() == types.TBLANK {
    		return src
    	}
    	if types.Identical(dst, src.Type()) {
    		return src
    	}
    	conv := ir.NewConvExpr(nrange.Pos(), ir.OCONV, dst, src)
    	conv.TypeWord = typeWord
    	conv.SrcRType = srcRType
    	return typecheck.Expr(conv)
    }
    
    
    // isMapClear checks if n is of the form:
    //

    //	for k := range m {
    //		delete(m, k)
    //	}

    //
    // where == for keys of map m is reflexive.
    // NOTE(review): this copy is missing the definitions of m and k and
    // the "return false" bodies of several guards — verify against the
    // upstream cmd/compile source before editing.

    func isMapClear(n *ir.RangeStmt) bool {

    	// The rewrite is an optimization: skip it under -N or when
    	// instrumenting, so the executed code matches the source shape.
    	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {

    	t := n.X.Type()
    	if n.Op() != ir.ORANGE || t.Kind() != types.TMAP || n.Key == nil || n.Value != nil {

    	// Require k to be a new variable name.

    	stmt := n.Body[0] // only stmt in body

    	if stmt == nil || stmt.Op() != ir.ODELETE {

    	// The single delete must target exactly the ranged map m with the
    	// loop key k as its argument.
    	if delete := stmt.(*ir.CallExpr); !ir.SameSafeExpr(delete.Args[0], m) || !ir.SameSafeExpr(delete.Args[1], k) {

    		return false
    	}

    	// Keys where equality is not reflexive can not be deleted from maps.

    	if !types.IsReflexive(t.Key()) {
    
    // mapRangeClear lowers the "for k := range m { delete(m, k) }" idiom
    // into a single runtime.mapclear call for the ranged map.
    func mapRangeClear(nrange *ir.RangeStmt) ir.Node {
    	mapExpr := nrange.X
    	// Point base.Pos at the map expression while building the call,
    	// restoring the previous position on the way out.
    	savedPos := ir.SetPos(mapExpr)
    	defer func() { base.Pos = savedPos }()
    	return mapClear(mapExpr, reflectdata.RangeMapRType(base.Pos, nrange))
    }
    
    // mapClear constructs a call to runtime.mapclear for the map m.
    // rtyp is the map's runtime type expression, passed through as the
    // call's first argument.
    // NOTE(review): this copy appears to be missing the declaration of t
    // (presumably t := m.Type()) and the function's closing brace —
    // verify against upstream before editing.
    func mapClear(m, rtyp ir.Node) ir.Node {


    	// instantiate mapclear(typ *type, hmap map[any]any)

    	fn := typecheck.LookupRuntime("mapclear", t.Key(), t.Elem())

    	n := mkcallstmt1(fn, rtyp, m)

    	// Typecheck and walk the call statement before handing it back.
    	return walkStmt(typecheck.Stmt(n))
    // Lower n into runtime·memclr if possible, for
    // fast zeroing of slices and arrays (issue 5373).
    // Look for instances of
    //

    //	for i := range a {
    //		a[i] = zero
    //	}

    //
    // in which the evaluation of a is side-effect-free.
    //

    // Parameters are as in walkRange: "for v1, v2 = range a".
    // NOTE(review): this copy is missing the definitions of x and lhs
    // (presumably the indexed expression a[i] and its parts) and the
    // bodies of several early-exit guards — verify against upstream
    // before editing.

    func arrayRangeClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {

    	// Pattern rewrite disabled under -N and when instrumenting.
    	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {

    	if len(loop.Body) != 1 || loop.Body[0] == nil {

    	stmt1 := loop.Body[0] // only stmt in body

    	if stmt1.Op() != ir.OAS {
    		return nil
    	}
    	stmt := stmt1.(*ir.AssignStmt)


    	// Get constant number of iterations for int and array cases.
    	n := int64(-1)
    	if ir.IsConst(a, constant.Int) {
    		n = ir.Int64Val(a)
    	} else if a.Type().IsArray() {
    		n = a.Type().NumElem()
    	} else if a.Type().IsPtr() && a.Type().Elem().IsArray() {
    		n = a.Type().Elem().NumElem()
    	}

    	if n >= 0 {
    		// Int/Array case.
    		// The cleared array must have exactly the iteration count n.
    		if !x.Type().IsArray() {
    			return nil
    		}
    		if x.Type().NumElem() != n {
    			return nil
    		}
    	} else {
    		// Slice case.
    		if !ir.SameSafeExpr(x, a) {
    			return nil

    	// The index written to must be the loop variable itself.
    	if !ir.SameSafeExpr(lhs.Index, v1) {

    	// Only an assignment of the zero value qualifies for memclr.
    	if !ir.IsZero(stmt.Y) {
    		return nil
    	}


    	return arrayClear(stmt.Pos(), x, loop)

    }
    
    // arrayClear constructs a call to runtime.memclr for fast zeroing of slices and arrays.
    func arrayClear(wbPos src.XPos, a ir.Node, nrange *ir.RangeStmt) ir.Node {
    	elemsize := typecheck.RangeExprType(a.Type()).Elem().Size()
    	if elemsize <= 0 {
    
    	}
    
    	// Convert to
    	// if len(a) != 0 {
    	// 	hp = &a[0]
    	// 	hn = len(a)*sizeof(elem(a))
    
    	// 	memclr{NoHeap,Has}Pointers(hp, hn)
    
    	n := ir.NewIfStmt(base.Pos, nil, nil, nil)
    
    	n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, 0))
    
    	hp := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUNSAFEPTR])
    
    	ix := ir.NewIndexExpr(base.Pos, a, ir.NewInt(base.Pos, 0))
    
    	addr := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])
    
    	n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr))
    
    	hn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
    
    	mul := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, elemsize)), types.Types[types.TUINTPTR])
    
    	n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul))
    
    	if a.Type().Elem().HasPointers() {
    
    		// memclrHasPointers(hp, hn)
    
    		ir.CurFunc.SetWBPos(wbPos)
    
    		fn = mkcallstmt("memclrHasPointers", hp, hn)
    
    	} else {
    		// memclrNoHeapPointers(hp, hn)
    
    		fn = mkcallstmt("memclrNoHeapPointers", hp, hn)
    
    	// For array range clear, also set "i = len(a) - 1"
    	if nrange != nil {
    
    		idx := ir.NewAssignStmt(base.Pos, nrange.Key, typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, 1)), nrange.Key.Type()))
    
    		n.Body.Append(idx)
    	}
    
    	n.Cond = typecheck.Expr(n.Cond)
    	n.Cond = typecheck.DefaultLit(n.Cond, nil)
    	typecheck.Stmts(n.Body)