Skip to content
Snippets Groups Projects
range.go 12.9 KiB
Newer Older
  • Learn to ignore specific revisions
  • // Copyright 2009 The Go Authors. All rights reserved.
    // Use of this source code is governed by a BSD-style
    // license that can be found in the LICENSE file.
    
    
    	"cmd/compile/internal/reflectdata"
    
    	"cmd/compile/internal/ssagen"
    
    	"cmd/compile/internal/typecheck"
    
    func cheapComputableIndex(width int64) bool {
    
    	switch ssagen.Arch.LinkArch.Family {
    
    	// MIPS does not have R+R addressing
    	// Arm64 may lack ability to generate this code in our assembler,
    	// but the architecture supports it.
    	case sys.PPC64, sys.S390X:
    		return width == 1
    	case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
    		switch width {
    		case 1, 2, 4, 8:
    			return true
    		}
    	}
    	return false
    }
    
    
// walkRange transforms various forms of ORANGE into
// simpler forms.  The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
//
// NOTE(review): this listing is a fragmentary scrape of the original
// file — the type switch on the range expression's kind, several
// closing braces, and the declarations of a, ha, init, body, and th
// are missing, so the body below does not compile as shown. Comments
// mark the per-kind sections that remain visible.
func walkRange(nrange *ir.RangeStmt) ir.Node {

	// The plain for-loop that will replace the range statement.
	nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil)

	// variable name conventions:
	//	ohv1, hv1, hv2: hidden (old) val 1, 2
	//	ha, hit: hidden aggregate, iterator
	//	hn, hp: hidden len, pointer
	//	hb: hidden bool
	//	a, v1, v2: not hidden aggregate, val 1, 2


	// NOTE(review): a and ha are used below but their declarations are
	// not visible in this fragment.
	t := typecheck.RangeExprType(a.Type())

	v1, v2 := nrange.Key, nrange.Value

	if ir.IsBlank(v1) && v2 == nil {

		// NOTE(review): the guard above and this message disagree;
		// lines appear to be missing between them.
		base.Fatalf("walkRange: v2 != nil while v1 == nil")

	// — array/slice case —
	case types.TARRAY, types.TSLICE:

		// Try to rewrite "for i := range a { a[i] = zero }" into a
		// memclr call; see arrayClear below.
		if nn := arrayClear(nrange, v1, v2, a); nn != nil {

		// order.stmt arranged for a copy of the array/slice variable if needed.

		// hv1 is the hidden index, hn the hidden length.
		hv1 := typecheck.Temp(types.Types[types.TINT])
		hn := typecheck.Temp(types.Types[types.TINT])

		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
		init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)))

		// for hv1 := 0; hv1 < hn; hv1++ { ... }
		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)

		nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1)))

			break
		}

		// for v1 := range ha { body }
		if v2 == nil {

			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}

		// If the element indexes cheaply on this target, load it via
		// a direct (bounds-check-free) index instead of a pointer walk.
		if cheapComputableIndex(t.Elem().Width) {

			tmp := ir.NewIndexExpr(base.Pos, ha, hv1)

			// The loop condition hv1 < hn already proves the index
			// is in range.
			tmp.SetBounded(true)
			// Use OAS2 to correctly handle assignments
			// of the form "v1, a[v1] := range".

			a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)

			a.Lhs = []ir.Node{v1, v2}
			a.Rhs = []ir.Node{hv1, tmp}

		// TODO(austin): OFORUNTIL is a strange beast, but is
		// necessary for expressing the control flow we need
		// while also making "break" and "continue" work. It
		// would be nice to just lower ORANGE during SSA, but
		// racewalk needs to see many of the operations
		// involved in ORANGE's implementation. If racewalk
		// moves into SSA, consider moving ORANGE into SSA and
		// eliminating OFORUNTIL.

		// TODO(austin): OFORUNTIL inhibits bounds-check
		// elimination on the index variable (see #20711).
		// Enhance the prove pass to understand this.

		// Guard the loop with hv1 < hn so the element pointer below
		// is only formed when there is at least one element.
		ifGuard = ir.NewIfStmt(base.Pos, nil, nil, nil)

		ifGuard.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)

		// hp walks the elements by pointer rather than re-indexing.
		hp := typecheck.Temp(types.NewPtr(t.Elem()))

		tmp := ir.NewIndexExpr(base.Pos, ha, ir.NewInt(0))

		init = append(init, ir.NewAssignStmt(base.Pos, hp, typecheck.NodAddr(tmp)))


		// Use OAS2 to correctly handle assignments
		// of the form "v1, a[v1] := range".

		a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)

		a.Lhs = []ir.Node{v1, v2}
		a.Rhs = []ir.Node{hv1, ir.NewStarExpr(base.Pos, hp)}

		// Advance pointer as part of the late increment.
		//
		// This runs *after* the condition check, so we know
		// advancing the pointer is safe and won't go past the
		// end of the allocation.

		as := ir.NewAssignStmt(base.Pos, hp, addptr(hp, t.Elem().Width))

		nfor.Late = []ir.Node{typecheck.Stmt(as)}

	// — map case —
		// order.stmt allocated the iterator for us.

		// we only use a once, so no copy needed.

		hit := nrange.Prealloc

		keysym := th.Field(0).Sym  // depends on layout of iterator struct.  See reflect.go:MapIterType

		elemsym := th.Field(1).Sym // ditto

		// Init: mapiterinit(typ, ha, &hit) seeds the iterator.
		fn := typecheck.LookupRuntime("mapiterinit")

		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th)

		init = append(init, mkcall1(fn, nil, nil, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit)))

		// Loop while the iterator's key pointer is non-nil.
		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())

		// Post: mapiternext(&hit).
		fn = typecheck.LookupRuntime("mapiternext")
		fn = typecheck.SubstArgTypes(fn, th)
		nfor.Post = mkcall1(fn, nil, nil, typecheck.NodAddr(hit))

		key := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym))

		if v1 == nil {
			body = nil
		} else if v2 == nil {

			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, key)}

			elem := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym))
			a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)

			a.Lhs = []ir.Node{v1, v2}
			a.Rhs = []ir.Node{key, elem}

	// — channel case —
		// order.stmt arranged for a copy of the channel variable.

		// hv1 receives each element; hb records whether the receive
		// succeeded (channel still open).
		hv1 := typecheck.Temp(t.Elem())

			init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))

		hb := typecheck.Temp(types.Types[types.TBOOL])

		// Loop while hb != false, i.e. while receives succeed.
		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(false))

		// hv1, hb = <-ha
		a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, nil, nil)

		a.Lhs = []ir.Node{hv1, hb}
		a.Rhs = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)}

		// The receive executes as part of evaluating the condition.
		nfor.Cond = ir.InitExpr([]ir.Node{a}, nfor.Cond)

			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}

		// Zero hv1. This prevents hv1 from being the sole, inaccessible
		// reference to an otherwise GC-able value during the next channel receive.
		// See issue 15281.

		body = append(body, ir.NewAssignStmt(base.Pos, hv1, nil))

	// — string case —
		// Transform string range statements like "for v1, v2 = range a" into
		//
		// ha := a
		// for hv1 := 0; hv1 < len(ha); {

		//   hv2 := rune(ha[hv1])
		//   if hv2 < utf8.RuneSelf {
		//      hv1++
		//   } else {

		//      hv2, hv1 = decoderune(ha, hv1)

		// order.stmt arranged for a copy of the string variable.

		// hv1: current byte index; hv1t: index at the start of the
		// current rune; hv2: the decoded rune.
		hv1 := typecheck.Temp(types.Types[types.TINT])
		hv1t := typecheck.Temp(types.Types[types.TINT])
		hv2 := typecheck.Temp(types.RuneType)

		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))

		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))

			body = append(body, ir.NewAssignStmt(base.Pos, hv1t, hv1))

		nind := ir.NewIndexExpr(base.Pos, ha, hv1)

		body = append(body, ir.NewAssignStmt(base.Pos, hv2, typecheck.Conv(nind, types.RuneType)))

		// Fast path: a single-byte (< utf8.RuneSelf) rune just
		// advances hv1 by one.
		nif := ir.NewIfStmt(base.Pos, nil, nil, nil)

		nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, ir.NewInt(utf8.RuneSelf))

		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1)))}

		// Slow path: call the runtime's UTF-8 decoder.
		eif := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)

		nif.Else = []ir.Node{eif}

		// hv2, hv1 = decoderune(ha, hv1)

		eif.Lhs = []ir.Node{hv2, hv1}

		fn := typecheck.LookupRuntime("decoderune")

		eif.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, ha, hv1)}

		if v1 != nil {
			if v2 != nil {
				// v1, v2 = hv1t, hv2

				a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)

				a.Lhs = []ir.Node{v1, v2}
				a.Rhs = []ir.Node{hv1t, hv2}

				body = append(body, a)
			} else {
				// v1 = hv1t

				body = append(body, ir.NewAssignStmt(base.Pos, v1, hv1t))

	// — common tail: typecheck the assembled loop pieces —
		ifGuard.PtrInit().Append(init...)

		ifGuard = typecheck.Stmt(ifGuard).(*ir.IfStmt)

		nfor.PtrInit().Append(init...)

	typecheck.Stmts(nfor.Cond.Init())

	nfor.Cond = typecheck.Expr(nfor.Cond)
	nfor.Cond = typecheck.DefaultLit(nfor.Cond, nil)
	nfor.Post = typecheck.Stmt(nfor.Post)
	typecheck.Stmts(body)

	nfor.Body.Append(nrange.Body...)

		ifGuard.Body = []ir.Node{n}
    
// isMapClear checks if n is of the form:
//
// for k := range m {
//   delete(m, k)
// }
//
// where == for keys of map m is reflexive.
//
// NOTE(review): this fragment is missing several "return false" bodies
// and closing braces, and the declarations of m and k; the guards
// below are truncated.
func isMapClear(n *ir.RangeStmt) bool {

	// Pattern matching is disabled when optimizations are off or the
	// build is instrumented.
	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {

	// Must be a range over a map with a key variable and no value.
	t := n.X.Type()
	if n.Op() != ir.ORANGE || t.Kind() != types.TMAP || n.Key == nil || n.Value != nil {

	// Require k to be a new variable name.

	stmt := n.Body[0] // only stmt in body

	if stmt == nil || stmt.Op() != ir.ODELETE {

	// The single delete must target exactly this map and this key.
	if delete := stmt.(*ir.CallExpr); !ir.SameSafeExpr(delete.Args[0], m) || !ir.SameSafeExpr(delete.Args[1], k) {

		return false
	}

	// Keys where equality is not reflexive can not be deleted from maps.

	if !types.IsReflexive(t.Key()) {

		return false
	}

	return true
}
    
// mapClear constructs a call to runtime.mapclear for the map m.
//
// NOTE(review): the func declaration and parameter list were dropped
// from this fragment; only the body survives, so m and t are used
// below without visible declarations.


	// instantiate mapclear(typ *type, hmap map[any]any)

	fn := typecheck.LookupRuntime("mapclear")
	fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())

	// Build the call with the map's type descriptor and the map value.
	n := mkcall1(fn, nil, nil, reflectdata.TypePtr(t), m)

	// Typecheck and walk the freshly built statement so later passes
	// see it in lowered form.
	return walkStmt(typecheck.Stmt(n))
    
// Lower n into runtime·memclr if possible, for
// fast zeroing of slices and arrays (issue 5373).
// Look for instances of
//
// for i := range a {
// 	a[i] = zero
// }
//
// in which the evaluation of a is side-effect-free.
//

// Parameters are as in walkRange: "for v1, v2 = range a".

// NOTE(review): this fragment is missing several early-return bodies,
// the declarations of lhs and fn, and the function's closing lines.
func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {

	// Pattern matching is disabled when optimizations are off or the
	// build is instrumented.
	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {

	// Only a loop whose body is a single statement can match.
	if len(loop.Body) != 1 || loop.Body[0] == nil {

	stmt1 := loop.Body[0] // only stmt in body

	if stmt1.Op() != ir.OAS {
		return nil
	}
	stmt := stmt1.(*ir.AssignStmt)

	// The assignment must index exactly this aggregate with exactly
	// the loop's index variable.
	if !ir.SameSafeExpr(lhs.X, a) || !ir.SameSafeExpr(lhs.Index, v1) {

	elemsize := typecheck.RangeExprType(loop.X.Type()).Elem().Width

	// Bail out unless the element has positive size and the assigned
	// value is the zero value.
	if elemsize <= 0 || !ir.IsZero(stmt.Y) {

	}

	// Convert to
	// if len(a) != 0 {
	// 	hp = &a[0]
	// 	hn = len(a)*sizeof(elem(a))

	// 	memclr{NoHeap,Has}Pointers(hp, hn)

	n := ir.NewIfStmt(base.Pos, nil, nil, nil)

	// Guard: only clear when len(a) != 0, so &a[0] is safe to form.
	n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(0))

	// hp = unsafe pointer to the first element.
	hp := typecheck.Temp(types.Types[types.TUNSAFEPTR])

	ix := ir.NewIndexExpr(base.Pos, a, ir.NewInt(0))

	addr := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])

	n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr))

	// hn = len(a) * elemsize — the byte count to clear.
	hn := typecheck.Temp(types.Types[types.TUINTPTR])
	mul := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(elemsize)), types.Types[types.TUINTPTR])

	n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul))

	// Pick the clearing routine based on whether the elements contain
	// pointers the GC must know about.
	if a.Type().Elem().HasPointers() {

		// memclrHasPointers(hp, hn)

		// Record a write-barrier position for the pointer-clearing call.
		ir.CurFunc.SetWBPos(stmt.Pos())

		fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
	} else {
		// memclrNoHeapPointers(hp, hn)
		fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
	}

	// v1 = len(a) - 1: the index variable's value after a full loop.
	v1 = ir.NewAssignStmt(base.Pos, v1, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(1)))

	n.Cond = typecheck.Expr(n.Cond)
	n.Cond = typecheck.DefaultLit(n.Cond, nil)
	typecheck.Stmts(n.Body)
    
    
    // addptr returns (*T)(uintptr(p) + n).
    
    func addptr(p ir.Node, n int64) ir.Node {
    
    	p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
    
    	p.SetType(types.Types[types.TUINTPTR])
    
    	p = ir.NewBinaryExpr(base.Pos, ir.OADD, p, ir.NewInt(n))
    
    	p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)