solve.go

// Copyright 2018 The Go Authors. All rights reserved.
    // Use of this source code is governed by a BSD-style
    // license that can be found in the LICENSE file.
    
    package escape
    
    import (
    	"cmd/compile/internal/base"
    	"cmd/compile/internal/ir"
    	"cmd/compile/internal/logopt"
    	"cmd/internal/src"
    	"fmt"
    	"strings"
    )
    
    // walkAll computes the minimal dereferences between all pairs of
    // locations.
    func (b *batch) walkAll() {
    	// We use a work queue to keep track of locations that we need
    	// to visit, and repeatedly walk until we reach a fixed point.
    	//
	// We walk once from each location (including the heap), and
	// then re-enqueue each location on its transition from
	// !persists->persists and !escapes->escapes, which can each
	// happen at most once. So we take Θ(len(b.allLocs)) walks.
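	// (Each location is thus walked from at most three times: its
	// initial visit plus one re-enqueue per attribute transition.)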
    
	// LIFO queue, has enough room for b.allLocs and b.heapLoc.
    	todo := make([]*location, 0, len(b.allLocs)+1)
    	enqueue := func(loc *location) {
    		if !loc.queued {
    			todo = append(todo, loc)
    			loc.queued = true
    		}
    	}
    
    	for _, loc := range b.allLocs {
    		enqueue(loc)
	}
	enqueue(&b.mutatorLoc)
	enqueue(&b.calleeLoc)
	enqueue(&b.heapLoc)

    	var walkgen uint32
    	for len(todo) > 0 {
    		root := todo[len(todo)-1]
    		todo = todo[:len(todo)-1]
    		root.queued = false
    
    		walkgen++
    		b.walkOne(root, walkgen, enqueue)
    	}
    }
    
    // walkOne computes the minimal number of dereferences from root to
    // all other locations.
    func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
    	// The data flow graph has negative edges (from addressing
    	// operations), so we use the Bellman-Ford algorithm. However,
    	// we don't have to worry about infinite negative cycles since
    	// we bound intermediate dereference counts to 0.
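	//
	// Illustrative (sign convention used by this package's flow edges):
	// an assignment "p = &x" adds an edge from x to p with derefs = -1
	// (one address-of), while "y = *p" adds an edge from p to y with
	// derefs = +1; a path's derefs is the sum over its edges.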
    
    	root.walkgen = walkgen
    	root.derefs = 0
	root.dst = nil

    	if root.hasAttr(attrCalls) {
    		if clo, ok := root.n.(*ir.ClosureExpr); ok {
    			if fn := clo.Func; b.inMutualBatch(fn.Nname) && !fn.ClosureResultsLost() {
    				fn.SetClosureResultsLost(true)
    
				// Re-flow from the closure's results, now that we're aware
				// we lost track of them.
				for _, result := range fn.Type().Results() {
					enqueue(b.oldLoc(result.Nname.(*ir.Name)))
    				}
    			}
    		}
	}

	todo := []*location{root} // LIFO queue
    	for len(todo) > 0 {
    		l := todo[len(todo)-1]
    		todo = todo[:len(todo)-1]
    
		derefs := l.derefs
		var newAttrs locAttr

    		// If l.derefs < 0, then l's address flows to root.
    		addressOf := derefs < 0
    		if addressOf {
    			// For a flow path like "root = &l; l = x",
    			// l's address flows to root, but x's does
    			// not. We recognize this by lower bounding
    			// derefs at 0.
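			// Concretely: l reaches root at derefs = -1; clamping to 0
			// before relaxing the edge from x keeps x at derefs = 0, so
			// only l, not x, is treated as address-taken here.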
			derefs = 0

    			// If l's address flows somewhere that
    			// outlives it, then l needs to be heap
    			// allocated.
    			if b.outlives(root, l) {
    				if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) {
    					if base.Flag.LowerM >= 2 {
    						fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
    					}
    					explanation := b.explainPath(root, l)
    					if logopt.Enabled() {
    						var e_curfn *ir.Func // TODO(mdempsky): Fix.
    						logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
    					}
    				}
    				newAttrs |= attrEscapes | attrPersists | attrMutates | attrCalls
			} else
			// If l's address flows to a persistent location, then l needs
			// to persist too.
			if root.hasAttr(attrPersists) {
				newAttrs |= attrPersists
			}
		}

		if derefs == 0 {
			newAttrs |= root.attrs & (attrMutates | attrCalls)
		}
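		// Illustrative: derefs == 0 means root holds the same value as
		// l (e.g., "root = l"), so root's mutate/call attributes carry
		// over to l directly.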
    
		// l's value flows to root. If l is a function
		// parameter and root is the heap or a
		// corresponding result parameter, then record
		// that value flow for tagging the function
		// later.
		if l.isName(ir.PPARAM) {
			if b.outlives(root, l) {
				if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) {
					if base.Flag.LowerM >= 2 {
    						fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
    					}
    					explanation := b.explainPath(root, l)
    					if logopt.Enabled() {
    						var e_curfn *ir.Func // TODO(mdempsky): Fix.
    						logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
    							fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
    					}
    				}
    				l.leakTo(root, derefs)
    			}
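			// Illustrative: for "func f(p *int) *int { return p }", p
			// leaks to the result with derefs = 0, which leakTo records
			// in p's escape tag for use at call sites.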
    
    			if root.hasAttr(attrMutates) {
    				l.paramEsc.AddMutator(derefs)
    			}
    			if root.hasAttr(attrCalls) {
    				l.paramEsc.AddCallee(derefs)
    			}
    		}
    
		if newAttrs&^l.attrs != 0 {
			l.attrs |= newAttrs
			enqueue(l)
			if l.attrs&attrEscapes != 0 {
				continue
			}
		}

		for i, edge := range l.edges {
			if edge.src.hasAttr(attrEscapes) {
				continue
			}
			d := derefs + edge.derefs
			if edge.src.walkgen != walkgen || edge.src.derefs > d {
				edge.src.walkgen = walkgen
				edge.src.derefs = d
				edge.src.dst = l
				edge.src.dstEdgeIdx = i
				todo = append(todo, edge.src)
			}
		}
    	}
    }
    
    // explainPath prints an explanation of how src flows to the walk root.
    func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
    	visited := make(map[*location]bool)
    	pos := base.FmtPos(src.n.Pos())
    	var explanation []*logopt.LoggedOpt
    	for {
    		// Prevent infinite loop.
    		if visited[src] {
    			if base.Flag.LowerM >= 2 {
    				fmt.Printf("%s:   warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
    			}
    			break
    		}
    		visited[src] = true
    		dst := src.dst
    		edge := &dst.edges[src.dstEdgeIdx]
    		if edge.src != src {
    			base.Fatalf("path inconsistency: %v != %v", edge.src, src)
    		}
    
    		explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
    
    		if dst == root {
    			break
    		}
    		src = dst
    	}
    
    	return explanation
    }
    
    func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
    	ops := "&"
    	if derefs >= 0 {
    		ops = strings.Repeat("*", derefs)
    	}
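	// A negative derefs denotes a single address-of step, shown as
	// "&"; otherwise the flow passes through derefs dereferences,
	// shown as one "*" each.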
    	print := base.Flag.LowerM >= 2
    
    	flow := fmt.Sprintf("   flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
    	if print {
    		fmt.Printf("%s:%s\n", pos, flow)
    	}
    	if logopt.Enabled() {
    		var epos src.XPos
    		if notes != nil {
    			epos = notes.where.Pos()
    		} else if srcloc != nil && srcloc.n != nil {
    			epos = srcloc.n.Pos()
    		}
		var e_curfn *ir.Func // TODO(mdempsky): Fix.
		explanation = append(explanation, logopt.NewLoggedOpt(epos, epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
	}
    
    	for note := notes; note != nil; note = note.next {
    		if print {
    			fmt.Printf("%s:     from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
    		}
    		if logopt.Enabled() {
			var e_curfn *ir.Func // TODO(mdempsky): Fix.
			notePos := note.where.Pos()
			explanation = append(explanation, logopt.NewLoggedOpt(notePos, notePos, "escflow", "escape", ir.FuncName(e_curfn),
				fmt.Sprintf("     from %v (%v)", note.where, note.why)))
    		}
    	}
    	return explanation
    }
    
    func (b *batch) explainLoc(l *location) string {
    	if l == &b.heapLoc {
    		return "{heap}"
    	}
    	if l.n == nil {
    		// TODO(mdempsky): Omit entirely.
    		return "{temp}"
    	}
    	if l.n.Op() == ir.ONAME {
    		return fmt.Sprintf("%v", l.n)
    	}
    	return fmt.Sprintf("{storage for %v}", l.n)
    }
    
    // outlives reports whether values stored in l may survive beyond
    // other's lifetime if stack allocated.
    func (b *batch) outlives(l, other *location) bool {
	// The heap outlives everything.
	if l.hasAttr(attrEscapes) {
		return true
	}

	// Pseudo-locations that don't really exist.
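	// (These stand in for "pointee is mutated" / "value is called"
	// facts rather than real storage, so they outlive nothing.)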
    	if l == &b.mutatorLoc || l == &b.calleeLoc {
    		return false
	}

	// We don't know what callers do with returned values, so
	// pessimistically we need to assume they flow to the heap and
	// outlive everything too.
	if l.isName(ir.PPARAMOUT) {
		// Exception: Closures can return locations allocated outside of
		// them without forcing them to the heap, if we can statically
		// identify all call sites. For example:
		//
		//	var u int  // okay to stack allocate
		//	fn := func() *int { return &u }
		//	*fn() = 42
		if ir.ContainsClosure(other.curfn, l.curfn) && !l.curfn.ClosureResultsLost() {
			return false
    		}
    
    		return true
    	}
    
    	// If l and other are within the same function, then l
    	// outlives other if it was declared outside other's loop
    	// scope. For example:
	//
	//	var l *int
	//	for {
	//		l = new(int) // must heap allocate: outlives for loop
	//	}
	if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
    		return true
    	}
    
    	// If other is declared within a child closure of where l is
    	// declared, then l outlives it. For example:
	//
	//	var l *int
	//	func() {
	//		l = new(int) // must heap allocate: outlives call frame (if not inlined)
	//	}()
	if ir.ContainsClosure(l.curfn, other.curfn) {