diff --git a/src/cmd/5g/cgen.go b/src/cmd/5g/cgen.go
index bdee52aca64dc8ef6f24d72ae79492e055733060..638c5a677c1f121d23979e1786e7a37f2442af18 100644
--- a/src/cmd/5g/cgen.go
+++ b/src/cmd/5g/cgen.go
@@ -20,26 +20,18 @@ import "cmd/internal/gc"
  * simplifies and calls gmove.
  */
 func cgen(n *gc.Node, res *gc.Node) {
-	var nl *gc.Node
-	var nr *gc.Node
-	var r *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var f0 gc.Node
-	var f1 gc.Node
-	var a int
-	var w int
-	var rg int
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var p3 *obj.Prog
-	var addr obj.Addr
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\ncgen-n", n)
 		gc.Dump("cgen-res", res)
 	}
 
+	var n1 gc.Node
+	var nr *gc.Node
+	var nl *gc.Node
+	var a int
+	var f1 gc.Node
+	var f0 gc.Node
+	var n2 gc.Node
 	if n == nil || n.Type == nil {
 		goto ret
 	}
@@ -55,6 +47,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		gc.OSLICE3,
 		gc.OSLICE3ARR:
 		if res.Op != gc.ONAME || res.Addable == 0 {
+			var n1 gc.Node
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_slice(n, &n1)
 			cgen(&n1, res)
@@ -65,6 +58,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 	case gc.OEFACE:
 		if res.Op != gc.ONAME || res.Addable == 0 {
+			var n1 gc.Node
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_eface(n, &n1)
 			cgen(&n1, res)
@@ -83,6 +77,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 			gc.Fatal("cgen: this is going to misscompile")
 		}
 		if res.Ullman >= gc.UINF {
+			var n1 gc.Node
 			gc.Tempname(&n1, n.Type)
 			cgen(n, &n1)
 			cgen(&n1, res)
@@ -122,6 +117,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Op == gc.OREGISTER || res.Op == gc.OREGISTER || gc.Iscomplex[n.Type.Etype] != 0 || gc.Iscomplex[res.Type.Etype] != 0 {
 			gmove(n, res)
 		} else {
+			var n1 gc.Node
 			regalloc(&n1, n.Type, nil)
 			gmove(n, &n1)
 			cgen(&n1, res)
@@ -135,6 +131,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 	if n.Addable == 0 && res.Addable == 0 {
 		// could use regalloc here sometimes,
 		// but have to check for ullman >= UINF.
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 
 		cgen(n, &n1)
@@ -145,6 +142,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 	// if result is not addressable directly but n is,
 	// compute its address and then store via the address.
 	if res.Addable == 0 {
+		var n1 gc.Node
 		igen(res, &n1, nil)
 		cgen(n, &n1)
 		regfree(&n1)
@@ -158,11 +156,14 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 	// if n is sudoaddable generate addr and move
 	if !gc.Is64(n.Type) && !gc.Is64(res.Type) && gc.Iscomplex[n.Type.Etype] == 0 && gc.Iscomplex[res.Type.Etype] == 0 {
-		a = optoas(gc.OAS, n.Type)
+		a := optoas(gc.OAS, n.Type)
+		var w int
+		var addr obj.Addr
 		if sudoaddable(a, n, &addr, &w) {
 			if res.Op != gc.OREGISTER {
+				var n2 gc.Node
 				regalloc(&n2, res.Type, nil)
-				p1 = gins(a, nil, &n2)
+				p1 := gins(a, nil, &n2)
 				p1.From = addr
 				if gc.Debug['g'] != 0 {
 					fmt.Printf("%v [ignore previous line]\n", p1)
@@ -170,7 +171,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 				gmove(&n2, res)
 				regfree(&n2)
 			} else {
-				p1 = gins(a, nil, res)
+				p1 := gins(a, nil, res)
 				p1.From = addr
 				if gc.Debug['g'] != 0 {
 					fmt.Printf("%v [ignore previous line]\n", p1)
@@ -191,9 +192,10 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 	if nl != nil && nl.Ullman >= gc.UINF {
 		if nr != nil && nr.Ullman >= gc.UINF {
+			var n1 gc.Node
 			gc.Tempname(&n1, nl.Type)
 			cgen(nl, &n1)
-			n2 = *n
+			n2 := *n
 			n2.Left = &n1
 			cgen(&n2, res)
 			goto ret
@@ -244,11 +246,11 @@ func cgen(n *gc.Node, res *gc.Node) {
 		gc.OGE,
 		gc.OGT,
 		gc.ONOT:
-		p1 = gc.Gbranch(arm.AB, nil, 0)
+		p1 := gc.Gbranch(arm.AB, nil, 0)
 
-		p2 = gc.Pc
+		p2 := gc.Pc
 		gmove(gc.Nodbool(true), res)
-		p3 = gc.Gbranch(arm.AB, nil, 0)
+		p3 := gc.Gbranch(arm.AB, nil, 0)
 		gc.Patch(p1, gc.Pc)
 		bgen(n, true, 0, p2)
 		gmove(gc.Nodbool(false), res)
@@ -261,7 +263,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 		// unary
 	case gc.OCOM:
-		a = optoas(gc.OXOR, nl.Type)
+		a := optoas(gc.OXOR, nl.Type)
 
 		regalloc(&n1, nl.Type, nil)
 		cgen(nl, &n1)
@@ -306,6 +308,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 			break
 		}
 
+		var n1 gc.Node
 		if nl.Addable != 0 && !gc.Is64(nl.Type) {
 			regalloc(&n1, nl.Type, res)
 			gmove(nl, &n1)
@@ -318,6 +321,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 			cgen(nl, &n1)
 		}
 
+		var n2 gc.Node
 		if n.Type.Width > int64(gc.Widthptr) || gc.Is64(n.Type) || gc.Isfloat[n.Type.Etype] != 0 {
 			gc.Tempname(&n2, n.Type)
 		} else {
@@ -337,6 +341,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		gc.OINDEX,
 		gc.OIND,
 		gc.ONAME: // PHEAP or PPARAMREF var
+		var n1 gc.Node
 		igen(n, &n1, res)
 
 		gmove(&n1, res)
@@ -344,6 +349,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 		// interface table is first word of interface value
 	case gc.OITAB:
+		var n1 gc.Node
 		igen(nl, &n1, res)
 
 		n1.Type = n.Type
@@ -353,14 +359,16 @@ func cgen(n *gc.Node, res *gc.Node) {
 		// pointer is the first word of string or slice.
 	case gc.OSPTR:
 		if gc.Isconst(nl, gc.CTSTR) {
+			var n1 gc.Node
 			regalloc(&n1, gc.Types[gc.Tptr], res)
-			p1 = gins(arm.AMOVW, nil, &n1)
+			p1 := gins(arm.AMOVW, nil, &n1)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
 			gmove(&n1, res)
 			regfree(&n1)
 			break
 		}
 
+		var n1 gc.Node
 		igen(nl, &n1, res)
 		n1.Type = n.Type
 		gmove(&n1, res)
@@ -370,13 +378,15 @@ func cgen(n *gc.Node, res *gc.Node) {
 		if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
 			// map has len in the first 32-bit word.
 			// a zero pointer means zero length
+			var n1 gc.Node
 			regalloc(&n1, gc.Types[gc.Tptr], res)
 
 			cgen(nl, &n1)
 
+			var n2 gc.Node
 			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
 			gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
-			p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+			p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
 
 			n2 = n1
 			n2.Op = gc.OINDREG
@@ -392,6 +402,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 		if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
 			// both slice and string store their len field one pointer into the struct.
+			var n1 gc.Node
 			igen(nl, &n1, res)
 
 			n1.Type = gc.Types[gc.TUINT32]
@@ -407,13 +418,15 @@ func cgen(n *gc.Node, res *gc.Node) {
 		if gc.Istype(nl.Type, gc.TCHAN) {
 			// chan has cap in the second 32-bit word.
 			// a zero pointer means zero length
+			var n1 gc.Node
 			regalloc(&n1, gc.Types[gc.Tptr], res)
 
 			cgen(nl, &n1)
 
+			var n2 gc.Node
 			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
 			gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
-			p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+			p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
 
 			n2 = n1
 			n2.Op = gc.OINDREG
@@ -429,6 +442,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		}
 
 		if gc.Isslice(nl.Type) {
+			var n1 gc.Node
 			igen(nl, &n1, res)
 			n1.Type = gc.Types[gc.TUINT32]
 			n1.Xoffset += int64(gc.Array_cap)
@@ -446,7 +460,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 	// Pick it up again after the call.
 	case gc.OCALLMETH,
 		gc.OCALLFUNC:
-		rg = -1
+		rg := -1
 
 		if n.Ullman >= gc.UINF {
 			if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
@@ -479,7 +493,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 sbop: // symmetric binary
 	if nl.Ullman < nr.Ullman {
-		r = nl
+		r := nl
 		nl = nr
 		nr = r
 	}
@@ -601,20 +615,16 @@ ret:
 * returns the *obj.Prog to patch to the panic call.
  */
 func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
-	var tmp gc.Node
-	var lo gc.Node
-	var hi gc.Node
-	var zero gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-
 	if !gc.Is64(n.Type) {
 		cgen(n, res)
 		return nil
 	}
 
+	var tmp gc.Node
 	gc.Tempname(&tmp, gc.Types[gc.TINT64])
 	cgen(n, &tmp)
+	var lo gc.Node
+	var hi gc.Node
 	split64(&tmp, &lo, &hi)
 	gmove(&lo, res)
 	if bounded {
@@ -622,8 +632,11 @@ func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
 		return nil
 	}
 
+	var n1 gc.Node
 	regalloc(&n1, gc.Types[gc.TINT32], nil)
+	var n2 gc.Node
 	regalloc(&n2, gc.Types[gc.TINT32], nil)
+	var zero gc.Node
 	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
 	gmove(&hi, &n1)
 	gmove(&zero, &n2)
@@ -640,12 +653,6 @@ func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
  * The generated code checks that the result is not nil.
  */
 func agen(n *gc.Node, res *gc.Node) {
-	var nl *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
-	var r int
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nagen-res", res)
 		gc.Dump("agen-r", n)
@@ -659,15 +666,18 @@ func agen(n *gc.Node, res *gc.Node) {
 		n = n.Left
 	}
 
+	var nl *gc.Node
 	if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
 		// Use of a nil interface or nil slice.
 		// Create a temporary we can take the address of and read.
 		// The generated code is just going to panic, so it need not
 		// be terribly efficient. See issue 3670.
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 
 		gc.Gvardef(&n1)
 		clearfat(&n1)
+		var n2 gc.Node
 		regalloc(&n2, gc.Types[gc.Tptr], res)
 		gins(arm.AMOVW, &n1, &n2)
 		gmove(&n2, res)
@@ -676,9 +686,10 @@ func agen(n *gc.Node, res *gc.Node) {
 	}
 
 	if n.Addable != 0 {
-		n1 = gc.Node{}
+		n1 := gc.Node{}
 		n1.Op = gc.OADDR
 		n1.Left = n
+		var n2 gc.Node
 		regalloc(&n2, gc.Types[gc.Tptr], res)
 		gins(arm.AMOVW, &n1, &n2)
 		gmove(&n2, res)
@@ -696,7 +707,7 @@ func agen(n *gc.Node, res *gc.Node) {
 	// Pick it up again after the call.
 	case gc.OCALLMETH,
 		gc.OCALLFUNC:
-		r = -1
+		r := -1
 
 		if n.Ullman >= gc.UINF {
 			if res.Op == gc.OREGISTER || res.Op == gc.OINDREG {
@@ -724,16 +735,19 @@ func agen(n *gc.Node, res *gc.Node) {
 		gc.OSLICESTR,
 		gc.OSLICE3,
 		gc.OSLICE3ARR:
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 		gc.Cgen_slice(n, &n1)
 		agen(&n1, res)
 
 	case gc.OEFACE:
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 		gc.Cgen_eface(n, &n1)
 		agen(&n1, res)
 
 	case gc.OINDEX:
+		var n1 gc.Node
 		agenr(n, &n1, res)
 		gmove(&n1, res)
 		regfree(&n1)
@@ -753,8 +767,11 @@ func agen(n *gc.Node, res *gc.Node) {
 
 		cgen(n.Heapaddr, res)
 		if n.Xoffset != 0 {
+			var n1 gc.Node
 			gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
+			var n2 gc.Node
 			regalloc(&n2, n1.Type, nil)
+			var n3 gc.Node
 			regalloc(&n3, gc.Types[gc.TINT32], nil)
 			gmove(&n1, &n2)
 			gmove(res, &n3)
@@ -771,8 +788,11 @@ func agen(n *gc.Node, res *gc.Node) {
 	case gc.ODOT:
 		agen(nl, res)
 		if n.Xoffset != 0 {
+			var n1 gc.Node
 			gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
+			var n2 gc.Node
 			regalloc(&n2, n1.Type, nil)
+			var n3 gc.Node
 			regalloc(&n3, gc.Types[gc.TINT32], nil)
 			gmove(&n1, &n2)
 			gmove(res, &n3)
@@ -786,8 +806,11 @@ func agen(n *gc.Node, res *gc.Node) {
 		cgen(nl, res)
 		gc.Cgen_checknil(res)
 		if n.Xoffset != 0 {
+			var n1 gc.Node
 			gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
+			var n2 gc.Node
 			regalloc(&n2, n1.Type, nil)
+			var n3 gc.Node
 			regalloc(&n3, gc.Types[gc.Tptr], nil)
 			gmove(&n1, &n2)
 			gmove(res, &n3)
@@ -811,9 +834,6 @@ ret:
  * The generated code checks that the result is not *nil.
  */
 func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
-	var n1 gc.Node
-	var r int
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nigen-n", n)
 	}
@@ -844,6 +864,7 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
 	case gc.ODOTPTR:
 		if n.Left.Addable != 0 || n.Left.Op == gc.OCALLFUNC || n.Left.Op == gc.OCALLMETH || n.Left.Op == gc.OCALLINTER {
 			// igen-able nodes.
+			var n1 gc.Node
 			igen(n.Left, &n1, res)
 
 			regalloc(a, gc.Types[gc.Tptr], &n1)
@@ -865,7 +886,7 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
 	case gc.OCALLMETH,
 		gc.OCALLFUNC,
 		gc.OCALLINTER:
-		r = -1
+		r := -1
 
 		if n.Ullman >= gc.UINF {
 			if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
@@ -906,8 +927,6 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
  * The caller must call regfree(a).
  */
 func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
-	var n1 gc.Node
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("cgenr-n", n)
 	}
@@ -930,6 +949,7 @@ func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 		gc.OCALLFUNC,
 		gc.OCALLMETH,
 		gc.OCALLINTER:
+		var n1 gc.Node
 		igen(n, &n1, res)
 		regalloc(a, gc.Types[gc.Tptr], &n1)
 		gmove(&n1, a)
@@ -949,25 +969,12 @@ func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
  * The generated code checks that the result is not nil.
  */
 func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
-	var nl *gc.Node
-	var nr *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
-	var n4 gc.Node
-	var tmp gc.Node
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var w uint32
-	var v uint64
-	var bounded bool
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("agenr-n", n)
 	}
 
-	nl = n.Left
-	nr = n.Right
+	nl := n.Left
+	nr := n.Right
 
 	switch n.Op {
 	case gc.ODOT,
@@ -975,6 +982,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 		gc.OCALLFUNC,
 		gc.OCALLMETH,
 		gc.OCALLINTER:
+		var n1 gc.Node
 		igen(n, &n1, res)
 		regalloc(a, gc.Types[gc.Tptr], &n1)
 		agen(&n1, a)
@@ -985,10 +993,13 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 		gc.Cgen_checknil(a)
 
 	case gc.OINDEX:
-		p2 = nil // to be patched to panicindex.
-		w = uint32(n.Type.Width)
-		bounded = gc.Debug['B'] != 0 || n.Bounded
+		p2 := (*obj.Prog)(nil) // to be patched to panicindex.
+		w := uint32(n.Type.Width)
+		bounded := gc.Debug['B'] != 0 || n.Bounded
+		var n1 gc.Node
+		var n3 gc.Node
 		if nr.Addable != 0 {
+			var tmp gc.Node
 			if !gc.Isconst(nr, gc.CTINT) {
 				gc.Tempname(&tmp, gc.Types[gc.TINT32])
 			}
@@ -1002,6 +1013,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 			}
 		} else if nl.Addable != 0 {
 			if !gc.Isconst(nr, gc.CTINT) {
+				var tmp gc.Node
 				gc.Tempname(&tmp, gc.Types[gc.TINT32])
 				p2 = cgenindex(nr, &tmp, bounded)
 				regalloc(&n1, tmp.Type, nil)
@@ -1012,6 +1024,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 				agenr(nl, &n3, res)
 			}
 		} else {
+			var tmp gc.Node
 			gc.Tempname(&tmp, gc.Types[gc.TINT32])
 			p2 = cgenindex(nr, &tmp, bounded)
 			nr = &tmp
@@ -1031,19 +1044,21 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 			if gc.Isconst(nl, gc.CTSTR) {
 				gc.Fatal("constant string constant index")
 			}
-			v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+			v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
+			var n2 gc.Node
 			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
 				if gc.Debug['B'] == 0 && !n.Bounded {
 					n1 = n3
 					n1.Op = gc.OINDREG
 					n1.Type = gc.Types[gc.Tptr]
 					n1.Xoffset = int64(gc.Array_nel)
+					var n4 gc.Node
 					regalloc(&n4, n1.Type, nil)
 					gmove(&n1, &n4)
 					gc.Nodconst(&n2, gc.Types[gc.TUINT32], int64(v))
 					gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n4, &n2)
 					regfree(&n4)
-					p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
+					p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
 					ginscall(gc.Panicindex, 0)
 					gc.Patch(p1, gc.Pc)
 				}
@@ -1061,10 +1076,12 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 			break
 		}
 
+		var n2 gc.Node
 		regalloc(&n2, gc.Types[gc.TINT32], &n1) // i
 		gmove(&n1, &n2)
 		regfree(&n1)
 
+		var n4 gc.Node
 		if gc.Debug['B'] == 0 && !n.Bounded {
 			// check bounds
 			if gc.Isconst(nl, gc.CTSTR) {
@@ -1084,7 +1101,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 			if n4.Op == gc.OREGISTER {
 				regfree(&n4)
 			}
-			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
 			if p2 != nil {
 				gc.Patch(p2, gc.Pc)
 			}
@@ -1094,7 +1111,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 
 		if gc.Isconst(nl, gc.CTSTR) {
 			regalloc(&n3, gc.Types[gc.Tptr], res)
-			p1 = gins(arm.AMOVW, nil, &n3)
+			p1 := gins(arm.AMOVW, nil, &n3)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
 			p1.From.Type = obj.TYPE_ADDR
 		} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
@@ -1141,15 +1158,14 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 
 func gencmp0(n *gc.Node, t *gc.Type, o int, likely int, to *obj.Prog) {
 	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
-	var a int
 
 	regalloc(&n1, t, nil)
 	cgen(n, &n1)
-	a = optoas(gc.OCMP, t)
+	a := optoas(gc.OCMP, t)
 	if a != arm.ACMP {
+		var n2 gc.Node
 		gc.Nodconst(&n2, t, 0)
+		var n3 gc.Node
 		regalloc(&n3, t, nil)
 		gmove(&n2, &n3)
 		gcmp(a, &n1, &n3)
@@ -1167,19 +1183,6 @@ func gencmp0(n *gc.Node, t *gc.Type, o int, likely int, to *obj.Prog) {
 *	if n == true { goto to }
  */
 func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
-	var et int
-	var a int
-	var nl *gc.Node
-	var nr *gc.Node
-	var r *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
-	var tmp gc.Node
-	var ll *gc.NodeList
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nbgen", n)
 	}
@@ -1192,6 +1195,9 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		gc.Genlist(n.Ninit)
 	}
 
+	var et int
+	var nl *gc.Node
+	var nr *gc.Node
 	if n.Type == nil {
 		gc.Convlit(&n, gc.Types[gc.TBOOL])
 		if n.Type == nil {
@@ -1210,7 +1216,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 
 	switch n.Op {
 	default:
-		a = gc.ONE
+		a := gc.ONE
 		if !true_ {
 			a = gc.OEQ
 		}
@@ -1227,8 +1233,8 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 	case gc.OANDAND,
 		gc.OOROR:
 		if (n.Op == gc.OANDAND) == true_ {
-			p1 = gc.Gbranch(obj.AJMP, nil, 0)
-			p2 = gc.Gbranch(obj.AJMP, nil, 0)
+			p1 := gc.Gbranch(obj.AJMP, nil, 0)
+			p2 := gc.Gbranch(obj.AJMP, nil, 0)
 			gc.Patch(p1, gc.Pc)
 			bgen(n.Left, !true_, -likely, p2)
 			bgen(n.Right, !true_, -likely, p2)
@@ -1273,15 +1279,15 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		gc.OGT,
 		gc.OLE,
 		gc.OGE:
-		a = int(n.Op)
+		a := int(n.Op)
 		if !true_ {
 			if gc.Isfloat[nl.Type.Etype] != 0 {
 				// brcom is not valid on floats when NaN is involved.
-				p1 = gc.Gbranch(arm.AB, nil, 0)
+				p1 := gc.Gbranch(arm.AB, nil, 0)
 
-				p2 = gc.Gbranch(arm.AB, nil, 0)
+				p2 := gc.Gbranch(arm.AB, nil, 0)
 				gc.Patch(p1, gc.Pc)
-				ll = n.Ninit
+				ll := n.Ninit
 				n.Ninit = nil
 				bgen(n, true, -likely, p2)
 				n.Ninit = ll
@@ -1297,7 +1303,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		// make simplest on right
 		if nl.Op == gc.OLITERAL || (nl.Ullman < gc.UINF && nl.Ullman < nr.Ullman) {
 			a = gc.Brrev(a)
-			r = nl
+			r := nl
 			nl = nr
 			nr = r
 		}
@@ -1309,6 +1315,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 				break
 			}
 
+			var n1 gc.Node
 			igen(nl, &n1, nil)
 			n1.Xoffset += int64(gc.Array_array)
 			n1.Type = gc.Types[gc.Tptr]
@@ -1324,6 +1331,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 				break
 			}
 
+			var n1 gc.Node
 			igen(nl, &n1, nil)
 			n1.Type = gc.Types[gc.Tptr]
 			n1.Xoffset += 0
@@ -1339,12 +1347,14 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 
 		if gc.Is64(nr.Type) {
 			if nl.Addable == 0 {
+				var n1 gc.Node
 				gc.Tempname(&n1, nl.Type)
 				cgen(nl, &n1)
 				nl = &n1
 			}
 
 			if nr.Addable == 0 {
+				var n2 gc.Node
 				gc.Tempname(&n2, nr.Type)
 				cgen(nr, &n2)
 				nr = &n2
@@ -1369,13 +1379,16 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		a = optoas(a, nr.Type)
 
 		if nr.Ullman >= gc.UINF {
+			var n1 gc.Node
 			regalloc(&n1, nl.Type, nil)
 			cgen(nl, &n1)
 
+			var tmp gc.Node
 			gc.Tempname(&tmp, nl.Type)
 			gmove(&n1, &tmp)
 			regfree(&n1)
 
+			var n2 gc.Node
 			regalloc(&n2, nr.Type, nil)
 			cgen(nr, &n2)
 
@@ -1390,26 +1403,30 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 			break
 		}
 
+		var n3 gc.Node
 		gc.Tempname(&n3, nl.Type)
 		cgen(nl, &n3)
 
+		var tmp gc.Node
 		gc.Tempname(&tmp, nr.Type)
 		cgen(nr, &tmp)
 
+		var n1 gc.Node
 		regalloc(&n1, nl.Type, nil)
 		gmove(&n3, &n1)
 
+		var n2 gc.Node
 		regalloc(&n2, nr.Type, nil)
 		gmove(&tmp, &n2)
 
 		gcmp(optoas(gc.OCMP, nr.Type), &n1, &n2)
 		if gc.Isfloat[nl.Type.Etype] != 0 {
 			if n.Op == gc.ONE {
-				p1 = gc.Gbranch(arm.ABVS, nr.Type, likely)
+				p1 := gc.Gbranch(arm.ABVS, nr.Type, likely)
 				gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
 				gc.Patch(p1, to)
 			} else {
-				p1 = gc.Gbranch(arm.ABVS, nr.Type, -likely)
+				p1 := gc.Gbranch(arm.ABVS, nr.Type, -likely)
 				gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
 				gc.Patch(p1, gc.Pc)
 			}
@@ -1432,31 +1449,27 @@ ret:
  * return n's offset from SP.
  */
 func stkof(n *gc.Node) int32 {
-	var t *gc.Type
-	var flist gc.Iter
-	var off int32
-
 	switch n.Op {
 	case gc.OINDREG:
 		return int32(n.Xoffset)
 
 	case gc.ODOT:
-		t = n.Left.Type
+		t := n.Left.Type
 		if gc.Isptr[t.Etype] != 0 {
 			break
 		}
-		off = stkof(n.Left)
+		off := stkof(n.Left)
 		if off == -1000 || off == 1000 {
 			return off
 		}
 		return int32(int64(off) + n.Xoffset)
 
 	case gc.OINDEX:
-		t = n.Left.Type
+		t := n.Left.Type
 		if !gc.Isfixedarray(t) {
 			break
 		}
-		off = stkof(n.Left)
+		off := stkof(n.Left)
 		if off == -1000 || off == 1000 {
 			return off
 		}
@@ -1468,11 +1481,12 @@ func stkof(n *gc.Node) int32 {
 	case gc.OCALLMETH,
 		gc.OCALLINTER,
 		gc.OCALLFUNC:
-		t = n.Left.Type
+		t := n.Left.Type
 		if gc.Isptr[t.Etype] != 0 {
 			t = t.Type
 		}
 
+		var flist gc.Iter
 		t = gc.Structfirst(&flist, gc.Getoutarg(t))
 		if t != nil {
 			return int32(t.Width + 4) // correct for LR
@@ -1490,24 +1504,6 @@ func stkof(n *gc.Node) int32 {
 * NB: character copy assumes a little-endian architecture
  */
 func sgen(n *gc.Node, res *gc.Node, w int64) {
-	var dst gc.Node
-	var src gc.Node
-	var tmp gc.Node
-	var nend gc.Node
-	var r0 gc.Node
-	var r1 gc.Node
-	var r2 gc.Node
-	var f *gc.Node
-	var c int32
-	var odst int32
-	var osrc int32
-	var dir int
-	var align int
-	var op int
-	var p *obj.Prog
-	var ploop *obj.Prog
-	var l *gc.NodeList
-
 	if gc.Debug['g'] != 0 {
 		fmt.Printf("\nsgen w=%d\n", w)
 		gc.Dump("r", n)
@@ -1528,6 +1524,7 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 
 	if w == 0 {
 		// evaluate side effects only.
+		var dst gc.Node
 		regalloc(&dst, gc.Types[gc.Tptr], nil)
 
 		agen(res, &dst)
@@ -1539,7 +1536,7 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 	// If copying .args, that's all the results, so record definition sites
 	// for them for the liveness analysis.
 	if res.Op == gc.ONAME && res.Sym.Name == ".args" {
-		for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+		for l := gc.Curfn.Dcl; l != nil; l = l.Next {
 			if l.N.Class == gc.PPARAMOUT {
 				gc.Gvardef(l.N)
 			}
@@ -1555,8 +1552,9 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 	// want to avoid unaligned access, so have to use
 	// smaller operations for less aligned types.
 	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
-	align = int(n.Type.Align)
+	align := int(n.Type.Align)
 
+	var op int
 	switch align {
 	default:
 		gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
@@ -1574,17 +1572,18 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 	if w%int64(align) != 0 {
 		gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
 	}
-	c = int32(w / int64(align))
+	c := int32(w / int64(align))
 
 	// offset on the stack
-	osrc = stkof(n)
+	osrc := stkof(n)
 
-	odst = stkof(res)
+	odst := stkof(res)
 	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
 		// osrc and odst both on stack, and at least one is in
 		// an unknown position.  Could generate code to test
 		// for forward/backward copy, but instead just copy
 		// to a temporary location first.
+		var tmp gc.Node
 		gc.Tempname(&tmp, n.Type)
 
 		sgen(n, &tmp, w)
@@ -1598,21 +1597,26 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 
 	// if we are copying forward on the stack and
 	// the src and dst overlap, then reverse direction
-	dir = align
+	dir := align
 
 	if osrc < odst && int64(odst) < int64(osrc)+w {
 		dir = -dir
 	}
 
 	if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
+		var r0 gc.Node
 		r0.Op = gc.OREGISTER
 		r0.Val.U.Reg = REGALLOC_R0
+		var r1 gc.Node
 		r1.Op = gc.OREGISTER
 		r1.Val.U.Reg = REGALLOC_R0 + 1
+		var r2 gc.Node
 		r2.Op = gc.OREGISTER
 		r2.Val.U.Reg = REGALLOC_R0 + 2
 
+		var src gc.Node
 		regalloc(&src, gc.Types[gc.Tptr], &r1)
+		var dst gc.Node
 		regalloc(&dst, gc.Types[gc.Tptr], &r2)
 		if n.Ullman >= res.Ullman {
 			// eval n first
@@ -1631,9 +1635,10 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 			agen(n, &src)
 		}
 
+		var tmp gc.Node
 		regalloc(&tmp, gc.Types[gc.Tptr], &r0)
-		f = gc.Sysfunc("duffcopy")
-		p = gins(obj.ADUFFCOPY, nil, f)
+		f := gc.Sysfunc("duffcopy")
+		p := gins(obj.ADUFFCOPY, nil, f)
 		gc.Afunclit(&p.To, f)
 
 		// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
@@ -1645,6 +1650,8 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 		return
 	}
 
+	var dst gc.Node
+	var src gc.Node
 	if n.Ullman >= res.Ullman {
 		agenr(n, &dst, res) // temporarily use dst
 		regalloc(&src, gc.Types[gc.Tptr], nil)
@@ -1661,15 +1668,16 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 		agenr(n, &src, nil)
 	}
 
+	var tmp gc.Node
 	regalloc(&tmp, gc.Types[gc.TUINT32], nil)
 
 	// set up end marker
-	nend = gc.Node{}
+	nend := gc.Node{}
 
 	if c >= 4 {
 		regalloc(&nend, gc.Types[gc.TUINT32], nil)
 
-		p = gins(arm.AMOVW, &src, &nend)
+		p := gins(arm.AMOVW, &src, &nend)
 		p.From.Type = obj.TYPE_ADDR
 		if dir < 0 {
 			p.From.Offset = int64(dir)
@@ -1680,7 +1688,7 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 
 	// move src and dest to the end of block if necessary
 	if dir < 0 {
-		p = gins(arm.AMOVW, &src, &src)
+		p := gins(arm.AMOVW, &src, &src)
 		p.From.Type = obj.TYPE_ADDR
 		p.From.Offset = w + int64(dir)
 
@@ -1691,11 +1699,11 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 
 	// move
 	if c >= 4 {
-		p = gins(op, &src, &tmp)
+		p := gins(op, &src, &tmp)
 		p.From.Type = obj.TYPE_MEM
 		p.From.Offset = int64(dir)
 		p.Scond |= arm.C_PBIT
-		ploop = p
+		ploop := p
 
 		p = gins(op, &tmp, &dst)
 		p.To.Type = obj.TYPE_MEM
@@ -1708,6 +1716,7 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
 		regfree(&nend)
 	} else {
+		var p *obj.Prog
 		for {
 			tmp14 := c
 			c--
@@ -1757,23 +1766,16 @@ func cadable(n *gc.Node) bool {
 func componentgen(nr *gc.Node, nl *gc.Node) bool {
 	var nodl gc.Node
 	var nodr gc.Node
-	var tmp gc.Node
-	var t *gc.Type
-	var freel int
-	var freer int
-	var fldcount int64
-	var loffset int64
-	var roffset int64
 
-	freel = 0
-	freer = 0
+	freel := 0
+	freer := 0
 
 	switch nl.Type.Etype {
 	default:
 		goto no
 
 	case gc.TARRAY:
-		t = nl.Type
+		t := nl.Type
 
 		// Slices are ok.
 		if gc.Isslice(t) {
@@ -1790,9 +1792,9 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		// Small structs with non-fat types are ok.
 	// Zero-sized structs are treated separately elsewhere.
 	case gc.TSTRUCT:
-		fldcount = 0
+		fldcount := int64(0)
 
-		for t = nl.Type.Type; t != nil; t = t.Down {
+		for t := nl.Type.Type; t != nil; t = t.Down {
 			if gc.Isfat(t.Type) {
 				goto no
 			}
@@ -1828,6 +1830,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		}
 	} else {
 		// When zeroing, prepare a register containing zero.
+		var tmp gc.Node
 		gc.Nodconst(&tmp, nl.Type, 0)
 
 		regalloc(&nodr, gc.Types[gc.TUINT], nil)
@@ -1849,11 +1852,11 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		if nl.Op == gc.ONAME {
 			gc.Gvardef(nl)
 		}
-		t = nl.Type
+		t := nl.Type
 		if !gc.Isslice(t) {
 			nodl.Type = t.Type
 			nodr.Type = nodl.Type
-			for fldcount = 0; fldcount < t.Bound; fldcount++ {
+			for fldcount := int64(0); fldcount < t.Bound; fldcount++ {
 				if nr == nil {
 					gc.Clearslim(&nodl)
 				} else {
@@ -1956,8 +1959,8 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		if nl.Op == gc.ONAME {
 			gc.Gvardef(nl)
 		}
-		loffset = nodl.Xoffset
-		roffset = nodr.Xoffset
+		loffset := nodl.Xoffset
+		roffset := nodr.Xoffset
 
 		// funarg structs may not begin at offset zero.
 		if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
@@ -1967,7 +1970,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 			roffset -= nr.Type.Type.Width
 		}
 
-		for t = nl.Type.Type; t != nil; t = t.Down {
+		for t := nl.Type.Type; t != nil; t = t.Down {
 			nodl.Xoffset = loffset + t.Width
 			nodl.Type = t.Type
 
diff --git a/src/cmd/5g/cgen64.go b/src/cmd/5g/cgen64.go
index f89c21cf08a7117197ca58a776c3cfb3b19dfdce..b9e5b7cc955c738b6c4b05eb5463bb0c83825e84 100644
--- a/src/cmd/5g/cgen64.go
+++ b/src/cmd/5g/cgen64.go
@@ -16,61 +16,43 @@ import "cmd/internal/gc"
 * Unhandled ops are fatal errors; there is no failure return.
  */
 func cgen64(n *gc.Node, res *gc.Node) {
-	var t1 gc.Node
-	var t2 gc.Node
-	var l *gc.Node
-	var r *gc.Node
-	var lo1 gc.Node
-	var lo2 gc.Node
-	var hi1 gc.Node
-	var hi2 gc.Node
-	var al gc.Node
-	var ah gc.Node
-	var bl gc.Node
-	var bh gc.Node
-	var cl gc.Node
-	var ch gc.Node
-	var s gc.Node
-	var n1 gc.Node
-	var creg gc.Node
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var p3 *obj.Prog
-	var p4 *obj.Prog
-	var p5 *obj.Prog
-	var p6 *obj.Prog
-	var v uint64
-
 	if res.Op != gc.OINDREG && res.Op != gc.ONAME {
 		gc.Dump("n", n)
 		gc.Dump("res", res)
 		gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
 	}
 
-	l = n.Left
+	l := n.Left
+	var t1 gc.Node
 	if l.Addable == 0 {
 		gc.Tempname(&t1, l.Type)
 		cgen(l, &t1)
 		l = &t1
 	}
 
+	var hi1 gc.Node
+	var lo1 gc.Node
 	split64(l, &lo1, &hi1)
 	switch n.Op {
 	default:
 		gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
 
 	case gc.OMINUS:
+		var lo2 gc.Node
+		var hi2 gc.Node
 		split64(res, &lo2, &hi2)
 
 		regalloc(&t1, lo1.Type, nil)
+		var al gc.Node
 		regalloc(&al, lo1.Type, nil)
+		var ah gc.Node
 		regalloc(&ah, hi1.Type, nil)
 
 		gins(arm.AMOVW, &lo1, &al)
 		gins(arm.AMOVW, &hi1, &ah)
 
 		gmove(ncon(0), &t1)
-		p1 = gins(arm.ASUB, &al, &t1)
+		p1 := gins(arm.ASUB, &al, &t1)
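+		// t1 = 0 - lo; the S bit set below keeps the borrow for negating the high word.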
 		p1.Scond |= arm.C_SBIT
 		gins(arm.AMOVW, &t1, &lo2)
 
@@ -89,7 +71,10 @@ func cgen64(n *gc.Node, res *gc.Node) {
 		regalloc(&t1, lo1.Type, nil)
 		gmove(ncon(^uint32(0)), &t1)
 
+		var lo2 gc.Node
+		var hi2 gc.Node
 		split64(res, &lo2, &hi2)
+		var n1 gc.Node
 		regalloc(&n1, lo1.Type, nil)
 
 		gins(arm.AMOVW, &lo1, &n1)
@@ -121,19 +106,24 @@ func cgen64(n *gc.Node, res *gc.Node) {
 	}
 
 	// setup for binary operators
-	r = n.Right
+	r := n.Right
 
 	if r != nil && r.Addable == 0 {
+		var t2 gc.Node
 		gc.Tempname(&t2, r.Type)
 		cgen(r, &t2)
 		r = &t2
 	}
 
+	var hi2 gc.Node
+	var lo2 gc.Node
 	if gc.Is64(r.Type) {
 		split64(r, &lo2, &hi2)
 	}
 
+	var al gc.Node
 	regalloc(&al, lo1.Type, nil)
+	var ah gc.Node
 	regalloc(&ah, hi1.Type, nil)
 
 	// Do op.  Leave result in ah:al.
@@ -143,14 +133,16 @@ func cgen64(n *gc.Node, res *gc.Node) {
 
 		// TODO: Constants
 	case gc.OADD:
+		var bl gc.Node
 		regalloc(&bl, gc.Types[gc.TPTR32], nil)
 
+		var bh gc.Node
 		regalloc(&bh, gc.Types[gc.TPTR32], nil)
 		gins(arm.AMOVW, &hi1, &ah)
 		gins(arm.AMOVW, &lo1, &al)
 		gins(arm.AMOVW, &hi2, &bh)
 		gins(arm.AMOVW, &lo2, &bl)
-		p1 = gins(arm.AADD, &bl, &al)
+		p1 := gins(arm.AADD, &bl, &al)
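+		// ADD with the S bit set produces the carry that the AADC below consumes for the high words.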
 		p1.Scond |= arm.C_SBIT
 		gins(arm.AADC, &bh, &ah)
 		regfree(&bl)
@@ -158,14 +150,16 @@ func cgen64(n *gc.Node, res *gc.Node) {
 
 		// TODO: Constants.
 	case gc.OSUB:
+		var bl gc.Node
 		regalloc(&bl, gc.Types[gc.TPTR32], nil)
 
+		var bh gc.Node
 		regalloc(&bh, gc.Types[gc.TPTR32], nil)
 		gins(arm.AMOVW, &lo1, &al)
 		gins(arm.AMOVW, &hi1, &ah)
 		gins(arm.AMOVW, &lo2, &bl)
 		gins(arm.AMOVW, &hi2, &bh)
-		p1 = gins(arm.ASUB, &bl, &al)
+		p1 := gins(arm.ASUB, &bl, &al)
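+		// SUB with the S bit set leaves the borrow that the ASBC below consumes for the high words.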
 		p1.Scond |= arm.C_SBIT
 		gins(arm.ASBC, &bh, &ah)
 		regfree(&bl)
@@ -173,10 +167,14 @@ func cgen64(n *gc.Node, res *gc.Node) {
 
 		// TODO(kaib): this can be done with 4 regs and does not need 6
 	case gc.OMUL:
+		var bl gc.Node
 		regalloc(&bl, gc.Types[gc.TPTR32], nil)
 
+		var bh gc.Node
 		regalloc(&bh, gc.Types[gc.TPTR32], nil)
+		var cl gc.Node
 		regalloc(&cl, gc.Types[gc.TPTR32], nil)
+		var ch gc.Node
 		regalloc(&ch, gc.Types[gc.TPTR32], nil)
 
 		// load args into bh:bl and ch:cl.
@@ -187,7 +185,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 		gins(arm.AMOVW, &lo2, &cl)
 
 		// bl * cl -> ah al
-		p1 = gins(arm.AMULLU, nil, nil)
+		p1 := gins(arm.AMULLU, nil, nil)
 
 		p1.From.Type = obj.TYPE_REG
 		p1.From.Reg = bl.Val.U.Reg
@@ -239,9 +237,11 @@ func cgen64(n *gc.Node, res *gc.Node) {
 	//	shld hi:lo, c
 	//	shld lo:t, c
 	case gc.OLROT:
-		v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+		v := uint64(gc.Mpgetfix(r.Val.U.Xval))
 
+		var bl gc.Node
 		regalloc(&bl, lo1.Type, nil)
+		var bh gc.Node
 		regalloc(&bh, hi1.Type, nil)
 		if v >= 32 {
 			// reverse during load to do the first 32 bits of rotate
@@ -274,13 +274,24 @@ func cgen64(n *gc.Node, res *gc.Node) {
 		regfree(&bh)
 
 	case gc.OLSH:
+		var bl gc.Node
 		regalloc(&bl, lo1.Type, nil)
+		var bh gc.Node
 		regalloc(&bh, hi1.Type, nil)
 		gins(arm.AMOVW, &hi1, &bh)
 		gins(arm.AMOVW, &lo1, &bl)
 
+		var p6 *obj.Prog
+		var s gc.Node
+		var n1 gc.Node
+		var creg gc.Node
+		var p1 *obj.Prog
+		var p2 *obj.Prog
+		var p3 *obj.Prog
+		var p4 *obj.Prog
+		var p5 *obj.Prog
 		if r.Op == gc.OLITERAL {
-			v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+			v := uint64(gc.Mpgetfix(r.Val.U.Xval))
 			if v >= 64 {
 				// TODO(kaib): replace with gins(AMOVW, nodintconst(0), &al)
 				// here and below (verify it optimizes to EOR)
@@ -316,6 +327,8 @@ func cgen64(n *gc.Node, res *gc.Node) {
 		regalloc(&creg, gc.Types[gc.TUINT32], nil)
 		if gc.Is64(r.Type) {
 			// shift is >= 1<<32
+			var cl gc.Node
+			var ch gc.Node
 			split64(r, &cl, &ch)
 
 			gmove(&ch, &s)
@@ -422,13 +435,24 @@ func cgen64(n *gc.Node, res *gc.Node) {
 		regfree(&bh)
 
 	case gc.ORSH:
+		var bl gc.Node
 		regalloc(&bl, lo1.Type, nil)
+		var bh gc.Node
 		regalloc(&bh, hi1.Type, nil)
 		gins(arm.AMOVW, &hi1, &bh)
 		gins(arm.AMOVW, &lo1, &bl)
 
+		var p4 *obj.Prog
+		var p5 *obj.Prog
+		var n1 gc.Node
+		var p6 *obj.Prog
+		var s gc.Node
+		var p1 *obj.Prog
+		var p2 *obj.Prog
+		var creg gc.Node
+		var p3 *obj.Prog
 		if r.Op == gc.OLITERAL {
-			v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+			v := uint64(gc.Mpgetfix(r.Val.U.Xval))
 			if v >= 64 {
 				if bh.Type.Etype == gc.TINT32 {
 					//	MOVW	bh->31, al
@@ -487,10 +511,13 @@ func cgen64(n *gc.Node, res *gc.Node) {
 		regalloc(&creg, gc.Types[gc.TUINT32], nil)
 		if gc.Is64(r.Type) {
 			// shift is >= 1<<32
+			var ch gc.Node
+			var cl gc.Node
 			split64(r, &cl, &ch)
 
 			gmove(&ch, &s)
 			gins(arm.ATST, &s, nil)
+			var p1 *obj.Prog
 			if bh.Type.Etype == gc.TINT32 {
 				p1 = gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
 			} else {
@@ -578,12 +605,12 @@ func cgen64(n *gc.Node, res *gc.Node) {
 
 		if bh.Type.Etype == gc.TINT32 {
 			//	MOVW	bh->(s-32), al
-			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &al)
+			p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &al)
 
 			p1.Scond = arm.C_SCOND_LO
 		} else {
 			//	MOVW	bh>>(v-32), al
-			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &al)
+			p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &al)
 
 			p1.Scond = arm.C_SCOND_LO
 		}
@@ -708,6 +735,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 	case gc.OXOR,
 		gc.OAND,
 		gc.OOR:
+		var n1 gc.Node
 		regalloc(&n1, lo1.Type, nil)
 
 		gins(arm.AMOVW, &lo1, &al)
@@ -746,15 +774,13 @@ func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
 	var hi2 gc.Node
 	var r1 gc.Node
 	var r2 gc.Node
-	var br *obj.Prog
-	var t *gc.Type
 
 	split64(nl, &lo1, &hi1)
 	split64(nr, &lo2, &hi2)
 
 	// compare most significant word;
 	// if they differ, we're done.
-	t = hi1.Type
+	t := hi1.Type
 
 	regalloc(&r1, gc.Types[gc.TINT32], nil)
 	regalloc(&r2, gc.Types[gc.TINT32], nil)
@@ -764,7 +790,7 @@ func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
 	regfree(&r1)
 	regfree(&r2)
 
-	br = nil
+	br := (*obj.Prog)(nil)
 	switch op {
 	default:
 		gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
diff --git a/src/cmd/5g/ggen.go b/src/cmd/5g/ggen.go
index 3b007d8484c35ce290db2e68726aba9784b5b071..8b7010f7728362a1c704d4426fab88de43e2a71b 100644
--- a/src/cmd/5g/ggen.go
+++ b/src/cmd/5g/ggen.go
@@ -11,30 +11,24 @@ import (
 import "cmd/internal/gc"
 
 func defframe(ptxt *obj.Prog) {
-	var frame uint32
-	var r0 uint32
-	var p *obj.Prog
-	var hi int64
-	var lo int64
-	var l *gc.NodeList
 	var n *gc.Node
 
 	// fill in argument size, stack size
 	ptxt.To.Type = obj.TYPE_TEXTSIZE
 
 	ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
-	frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
 	ptxt.To.Offset = int64(frame)
 
 	// insert code to contain ambiguously live variables
 	// so that the garbage collector only sees initialized values
 	// when it looks for pointers.
-	p = ptxt
+	p := ptxt
 
-	hi = 0
-	lo = hi
-	r0 = 0
-	for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+	hi := int64(0)
+	lo := hi
+	r0 := uint32(0)
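+	// hi and lo delimit the current run of stack words to be zeroed; r0 records whether R0 already holds zero.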
+	for l := gc.Curfn.Dcl; l != nil; l = l.Next {
 		n = l.N
 		if n.Needzero == 0 {
 			continue
@@ -66,12 +60,7 @@ func defframe(ptxt *obj.Prog) {
 }
 
 func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
-	var cnt int64
-	var i int64
-	var p1 *obj.Prog
-	var f *gc.Node
-
-	cnt = hi - lo
+	cnt := hi - lo
 	if cnt == 0 {
 		return p
 	}
@@ -81,14 +70,14 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Pr
 	}
 
 	if cnt < int64(4*gc.Widthptr) {
-		for i = 0; i < cnt; i += int64(gc.Widthptr) {
+		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
 			p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
 		}
 	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
 		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
 		p.Reg = arm.REGSP
 		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
-		f = gc.Sysfunc("duffzero")
+		f := gc.Sysfunc("duffzero")
 		gc.Naddr(f, &p.To, 1)
 		gc.Afunclit(&p.To, f)
 		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
@@ -98,7 +87,7 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Pr
 		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
 		p.Reg = arm.REG_R1
 		p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
-		p1 = p
+		p1 := p
 		p.Scond |= arm.C_PBIT
 		p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
 		p.Reg = arm.REG_R2
@@ -110,9 +99,7 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Pr
 }
 
 func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int, treg int, toffset int32) *obj.Prog {
-	var q *obj.Prog
-
-	q = gc.Ctxt.NewProg()
+	q := gc.Ctxt.NewProg()
 	gc.Clearp(q)
 	q.As = int16(as)
 	q.Lineno = p.Lineno
@@ -137,14 +124,8 @@ func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int
   *	proc=3	normal call to C pointer (not Go func value)
 */
 func ginscall(f *gc.Node, proc int) {
-	var p *obj.Prog
-	var r gc.Node
-	var r1 gc.Node
-	var con gc.Node
-	var extra int32
-
 	if f.Type != nil {
-		extra = 0
+		extra := int32(0)
 		if proc == 1 || proc == 2 {
 			extra = 2 * int32(gc.Widthptr)
 		}
@@ -168,13 +149,14 @@ func ginscall(f *gc.Node, proc int) {
 				// ARM NOP 0x00000000 is really AND.EQ R0, R0, R0.
 				// Use the latter form because the NOP pseudo-instruction
 				// would be removed by the linker.
+				var r gc.Node
 				gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
 
-				p = gins(arm.AAND, &r, &r)
+				p := gins(arm.AAND, &r, &r)
 				p.Scond = arm.C_SCOND_EQ
 			}
 
-			p = gins(arm.ABL, nil, f)
+			p := gins(arm.ABL, nil, f)
 			gc.Afunclit(&p.To, f)
 			if proc == -1 || gc.Noreturn(p) {
 				gins(obj.AUNDEF, nil, nil)
@@ -182,7 +164,9 @@ func ginscall(f *gc.Node, proc int) {
 			break
 		}
 
+		var r gc.Node
 		gc.Nodreg(&r, gc.Types[gc.Tptr], arm.REG_R7)
+		var r1 gc.Node
 		gc.Nodreg(&r1, gc.Types[gc.Tptr], arm.REG_R1)
 		gmove(f, &r)
 		r.Op = gc.OINDREG
@@ -196,11 +180,13 @@ func ginscall(f *gc.Node, proc int) {
 
 	case 1, // call in new proc (go)
 		2: // deferred call (defer)
+		var r gc.Node
 		regalloc(&r, gc.Types[gc.Tptr], nil)
 
+		var con gc.Node
 		gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
 		gins(arm.AMOVW, &con, &r)
-		p = gins(arm.AMOVW, &r, nil)
+		p := gins(arm.AMOVW, &r, nil)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = arm.REGSP
 		p.To.Offset = 4
@@ -221,7 +207,7 @@ func ginscall(f *gc.Node, proc int) {
 
 		if proc == 2 {
 			gc.Nodconst(&con, gc.Types[gc.TINT32], 0)
-			p = gins(arm.ACMP, &con, nil)
+			p := gins(arm.ACMP, &con, nil)
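+			// deferproc reports via R0: a non-zero value makes the code below return immediately.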
 			p.Reg = arm.REG_R0
 			p = gc.Gbranch(arm.ABEQ, nil, +1)
 			cgen_ret(nil)
@@ -235,21 +221,12 @@ func ginscall(f *gc.Node, proc int) {
  * generate res = n.
  */
 func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
-	var r int
-	var i *gc.Node
-	var f *gc.Node
-	var tmpi gc.Node
-	var nodo gc.Node
-	var nodr gc.Node
-	var nodsp gc.Node
-	var p *obj.Prog
-
-	i = n.Left
+	i := n.Left
 	if i.Op != gc.ODOTINTER {
 		gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
 	}
 
-	f = i.Right // field
+	f := i.Right // field
 	if f.Op != gc.ONAME {
 		gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
 	}
@@ -258,7 +235,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 
 	// Release res register during genlist and cgen,
 	// which might have their own function calls.
-	r = -1
+	r := -1
 
 	if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
 		r = int(res.Val.U.Reg)
@@ -266,6 +243,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 	}
 
 	if i.Addable == 0 {
+		var tmpi gc.Node
 		gc.Tempname(&tmpi, i.Type)
 		cgen(i, &tmpi)
 		i = &tmpi
@@ -276,12 +254,15 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 		reg[r]++
 	}
 
+	var nodr gc.Node
 	regalloc(&nodr, gc.Types[gc.Tptr], res)
+	var nodo gc.Node
 	regalloc(&nodo, gc.Types[gc.Tptr], &nodr)
 	nodo.Op = gc.OINDREG
 
 	agen(i, &nodr) // REG = &inter
 
+	var nodsp gc.Node
 	gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], arm.REGSP)
 
 	nodsp.Xoffset = int64(gc.Widthptr)
@@ -305,7 +286,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 		proc = 3
 	} else {
 		// go/defer. generate go func value.
-		p = gins(arm.AMOVW, &nodo, &nodr)
+		p := gins(arm.AMOVW, &nodo, &nodr)
 
 		p.From.Type = obj.TYPE_ADDR // REG = &(20+offset(REG)) -- i.tab->fun[f]
 	}
@@ -324,14 +305,11 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
  *	proc=2	defer call save away stack
  */
 func cgen_call(n *gc.Node, proc int) {
-	var t *gc.Type
-	var nod gc.Node
-	var afun gc.Node
-
 	if n == nil {
 		return
 	}
 
+	var afun gc.Node
 	if n.Left.Ullman >= gc.UINF {
 		// if name involves a fn call
 		// precompute the address of the fn
@@ -341,10 +319,11 @@ func cgen_call(n *gc.Node, proc int) {
 	}
 
 	gc.Genlist(n.List) // assign the args
-	t = n.Left.Type
+	t := n.Left.Type
 
 	// call tempname pointer
 	if n.Left.Ullman >= gc.UINF {
+		var nod gc.Node
 		regalloc(&nod, gc.Types[gc.Tptr], nil)
 		gc.Cgen_as(&nod, &afun)
 		nod.Type = t
@@ -355,6 +334,7 @@ func cgen_call(n *gc.Node, proc int) {
 
 	// call pointer
 	if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+		var nod gc.Node
 		regalloc(&nod, gc.Types[gc.Tptr], nil)
 		gc.Cgen_as(&nod, n.Left)
 		nod.Type = t
@@ -377,22 +357,18 @@ ret:
  *	res = return value from call.
  */
 func cgen_callret(n *gc.Node, res *gc.Node) {
-	var nod gc.Node
-	var fp *gc.Type
-	var t *gc.Type
-	var flist gc.Iter
-
-	t = n.Left.Type
+	t := n.Left.Type
 	if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
 		t = t.Type
 	}
 
-	fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+	var flist gc.Iter
+	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
 	if fp == nil {
 		gc.Fatal("cgen_callret: nil")
 	}
 
-	nod = gc.Node{}
+	nod := gc.Node{}
 	nod.Op = gc.OINDREG
 	nod.Val.U.Reg = arm.REGSP
 	nod.Addable = 1
@@ -408,23 +384,18 @@ func cgen_callret(n *gc.Node, res *gc.Node) {
  *	res = &return value from call.
  */
 func cgen_aret(n *gc.Node, res *gc.Node) {
-	var nod1 gc.Node
-	var nod2 gc.Node
-	var fp *gc.Type
-	var t *gc.Type
-	var flist gc.Iter
-
-	t = n.Left.Type
+	t := n.Left.Type
 	if gc.Isptr[t.Etype] != 0 {
 		t = t.Type
 	}
 
-	fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+	var flist gc.Iter
+	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
 	if fp == nil {
 		gc.Fatal("cgen_aret: nil")
 	}
 
-	nod1 = gc.Node{}
+	nod1 := gc.Node{}
 	nod1.Op = gc.OINDREG
 	nod1.Val.U.Reg = arm.REGSP
 	nod1.Addable = 1
@@ -433,6 +404,7 @@ func cgen_aret(n *gc.Node, res *gc.Node) {
 	nod1.Type = fp.Type
 
 	if res.Op != gc.OREGISTER {
+		var nod2 gc.Node
 		regalloc(&nod2, gc.Types[gc.Tptr], res)
 		agen(&nod1, &nod2)
 		gins(arm.AMOVW, &nod2, res)
@@ -447,8 +419,6 @@ func cgen_aret(n *gc.Node, res *gc.Node) {
  * n->left is assignments to return values.
  */
 func cgen_ret(n *gc.Node) {
-	var p *obj.Prog
-
 	if n != nil {
 		gc.Genlist(n.List) // copy out args
 	}
@@ -456,7 +426,7 @@ func cgen_ret(n *gc.Node) {
 		ginscall(gc.Deferreturn, 0)
 	}
 	gc.Genlist(gc.Curfn.Exit)
-	p = gins(obj.ARET, nil, nil)
+	p := gins(obj.ARET, nil, nil)
 	if n != nil && n.Op == gc.ORETJMP {
 		p.To.Name = obj.NAME_EXTERN
 		p.To.Type = obj.TYPE_ADDR
@@ -469,23 +439,18 @@ func cgen_ret(n *gc.Node) {
  *  res = (nl * nr) >> wordsize
  */
 func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	var w int
-	var n1 gc.Node
-	var n2 gc.Node
-	var tmp *gc.Node
-	var t *gc.Type
-	var p *obj.Prog
-
 	if nl.Ullman < nr.Ullman {
-		tmp = nl
+		tmp := nl
 		nl = nr
 		nr = tmp
 	}
 
-	t = nl.Type
-	w = int(t.Width * 8)
+	t := nl.Type
+	w := int(t.Width * 8)
+	var n1 gc.Node
 	regalloc(&n1, t, res)
 	cgen(nl, &n1)
+	var n2 gc.Node
 	regalloc(&n2, t, nil)
 	cgen(nr, &n2)
 	switch gc.Simtype[t.Etype] {
@@ -502,6 +467,7 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
 		// perform a long multiplication.
 	case gc.TINT32,
 		gc.TUINT32:
+		var p *obj.Prog
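+		// a long multiply yields a 64-bit product; res gets the high word, per res = (nl*nr)>>w.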
 		if gc.Issigned[t.Etype] != 0 {
 			p = gins(arm.AMULL, &n2, nil)
 		} else {
@@ -530,34 +496,21 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
  *	res = nl >> nr
  */
 func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
-	var nt gc.Node
-	var t gc.Node
-	var lo gc.Node
-	var hi gc.Node
-	var w int
-	var v int
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var p3 *obj.Prog
-	var tr *gc.Type
-	var sc uint64
-
 	if nl.Type.Width > 4 {
 		gc.Fatal("cgen_shift %v", gc.Tconv(nl.Type, 0))
 	}
 
-	w = int(nl.Type.Width * 8)
+	w := int(nl.Type.Width * 8)
 
 	if op == gc.OLROT {
-		v = int(gc.Mpgetfix(nr.Val.U.Xval))
+		v := int(gc.Mpgetfix(nr.Val.U.Xval))
+		var n1 gc.Node
 		regalloc(&n1, nl.Type, res)
 		if w == 32 {
 			cgen(nl, &n1)
 			gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
 		} else {
+			var n2 gc.Node
 			regalloc(&n2, nl.Type, nil)
 			cgen(nl, &n2)
 			gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
@@ -574,9 +527,10 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	}
 
 	if nr.Op == gc.OLITERAL {
+		var n1 gc.Node
 		regalloc(&n1, nl.Type, res)
 		cgen(nl, &n1)
-		sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+		sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
 		if sc == 0 {
 			// nothing to do
 		} else if sc >= uint64(nl.Type.Width*8) {
@@ -603,8 +557,13 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 		return
 	}
 
-	tr = nr.Type
+	tr := nr.Type
+	var t gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
+	var n3 gc.Node
 	if tr.Width > 4 {
+		var nt gc.Node
 		gc.Tempname(&nt, nr.Type)
 		if nl.Ullman >= nr.Ullman {
 			regalloc(&n2, nl.Type, res)
@@ -617,6 +576,8 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 			cgen(nl, &n2)
 		}
 
+		var hi gc.Node
+		var lo gc.Node
 		split64(&nt, &lo, &hi)
 		regalloc(&n1, gc.Types[gc.TUINT32], nil)
 		regalloc(&n3, gc.Types[gc.TUINT32], nil)
@@ -625,7 +586,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 		splitclean()
 		gins(arm.ATST, &n3, nil)
 		gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
-		p1 = gins(arm.AMOVW, &t, &n1)
+		p1 := gins(arm.AMOVW, &t, &n1)
 		p1.Scond = arm.C_SCOND_NE
 		tr = gc.Types[gc.TUINT32]
 		regfree(&n3)
@@ -646,7 +607,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	// test for shift being 0
 	gins(arm.ATST, &n1, nil)
 
-	p3 = gc.Gbranch(arm.ABEQ, nil, -1)
+	p3 := gc.Gbranch(arm.ABEQ, nil, -1)
 
 	// test and fix up large shifts
 	// TODO: if(!bounded), don't emit some of this.
@@ -656,6 +617,8 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	gmove(&t, &n3)
 	gcmp(arm.ACMP, &n1, &n3)
 	if op == gc.ORSH {
+		var p1 *obj.Prog
+		var p2 *obj.Prog
 		if gc.Issigned[nl.Type.Etype] != 0 {
 			p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
 			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
@@ -667,8 +630,8 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 		p1.Scond = arm.C_SCOND_HS
 		p2.Scond = arm.C_SCOND_LO
 	} else {
-		p1 = gins(arm.AEOR, &n2, &n2)
-		p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
+		p1 := gins(arm.AEOR, &n2, &n2)
+		p2 := gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
 		p1.Scond = arm.C_SCOND_HS
 		p2.Scond = arm.C_SCOND_LO
 	}
@@ -688,48 +651,41 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 }
 
 func clearfat(nl *gc.Node) {
-	var w uint32
-	var c uint32
-	var q uint32
-	var dst gc.Node
-	var nc gc.Node
-	var nz gc.Node
-	var end gc.Node
-	var r0 gc.Node
-	var r1 gc.Node
-	var f *gc.Node
-	var p *obj.Prog
-	var pl *obj.Prog
-
 	/* clear a fat object */
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nclearfat", nl)
 	}
 
-	w = uint32(nl.Type.Width)
+	w := uint32(nl.Type.Width)
 
 	// Avoid taking the address for simple enough types.
 	if componentgen(nil, nl) {
 		return
 	}
 
-	c = w % 4 // bytes
-	q = w / 4 // quads
+	c := w % 4 // bytes
+	q := w / 4 // quads
 
+	var r0 gc.Node
 	r0.Op = gc.OREGISTER
 
 	r0.Val.U.Reg = REGALLOC_R0
+	var r1 gc.Node
 	r1.Op = gc.OREGISTER
 	r1.Val.U.Reg = REGALLOC_R0 + 1
+	var dst gc.Node
 	regalloc(&dst, gc.Types[gc.Tptr], &r1)
 	agen(nl, &dst)
+	var nc gc.Node
 	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
+	var nz gc.Node
 	regalloc(&nz, gc.Types[gc.TUINT32], &r0)
 	cgen(&nc, &nz)
 
 	if q > 128 {
+		var end gc.Node
 		regalloc(&end, gc.Types[gc.Tptr], nil)
-		p = gins(arm.AMOVW, &dst, &end)
+		p := gins(arm.AMOVW, &dst, &end)
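+		// end = dst + q*4: the store loop below advances dst until it reaches end.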
 		p.From.Type = obj.TYPE_ADDR
 		p.From.Offset = int64(q) * 4
 
@@ -737,7 +693,7 @@ func clearfat(nl *gc.Node) {
 		p.To.Type = obj.TYPE_MEM
 		p.To.Offset = 4
 		p.Scond |= arm.C_PBIT
-		pl = p
+		pl := p
 
 		p = gins(arm.ACMP, &dst, nil)
 		raddr(&end, p)
@@ -745,13 +701,14 @@ func clearfat(nl *gc.Node) {
 
 		regfree(&end)
 	} else if q >= 4 && !gc.Nacl {
-		f = gc.Sysfunc("duffzero")
-		p = gins(obj.ADUFFZERO, nil, f)
+		f := gc.Sysfunc("duffzero")
+		p := gins(obj.ADUFFZERO, nil, f)
 		gc.Afunclit(&p.To, f)
 
 		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
 		p.To.Offset = 4 * (128 - int64(q))
 	} else {
+		var p *obj.Prog
 		for q > 0 {
 			p = gins(arm.AMOVW, &nz, &dst)
 			p.To.Type = obj.TYPE_MEM
@@ -763,6 +720,7 @@ func clearfat(nl *gc.Node) {
 		}
 	}
 
+	var p *obj.Prog
 	for c > 0 {
 		p = gins(arm.AMOVB, &nz, &dst)
 		p.To.Type = obj.TYPE_MEM
@@ -781,10 +739,9 @@ func clearfat(nl *gc.Node) {
 // Expand CHECKNIL pseudo-op into actual nil pointer check.
 func expandchecks(firstp *obj.Prog) {
 	var reg int
-	var p *obj.Prog
 	var p1 *obj.Prog
 
-	for p = firstp; p != nil; p = p.Link {
+	for p := firstp; p != nil; p = p.Link {
 		if p.As != obj.ACHECKNIL {
 			continue
 		}
diff --git a/src/cmd/5g/gsubr.go b/src/cmd/5g/gsubr.go
index 857bafaf6483fdd2b50628b0cb1e69adf5274142..a8b8ed554d77466b250c27283cbe21802dada519 100644
--- a/src/cmd/5g/gsubr.go
+++ b/src/cmd/5g/gsubr.go
@@ -49,24 +49,20 @@ var resvd = []int{
 }
 
 func ginit() {
-	var i int
-
-	for i = 0; i < len(reg); i++ {
+	for i := 0; i < len(reg); i++ {
 		reg[i] = 0
 	}
-	for i = 0; i < len(resvd); i++ {
+	for i := 0; i < len(resvd); i++ {
 		reg[resvd[i]]++
 	}
 }
 
 func gclean() {
-	var i int
-
-	for i = 0; i < len(resvd); i++ {
+	for i := 0; i < len(resvd); i++ {
 		reg[resvd[i]]--
 	}
 
-	for i = 0; i < len(reg); i++ {
+	for i := 0; i < len(reg); i++ {
 		if reg[i] != 0 {
 			gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
 		}
@@ -74,10 +70,9 @@ func gclean() {
 }
 
 func anyregalloc() bool {
-	var i int
 	var j int
 
-	for i = 0; i < len(reg); i++ {
+	for i := 0; i < len(reg); i++ {
 		if reg[i] == 0 {
 			goto ok
 		}
@@ -101,20 +96,15 @@ var regpc [REGALLOC_FMAX + 1]uint32
  * caller must regfree(n).
  */
 func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
-	var i int
-	var et int
-	var fixfree int
-	var floatfree int
-
 	if false && gc.Debug['r'] != 0 {
-		fixfree = 0
-		for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
+		fixfree := 0
+		for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
 			if reg[i] == 0 {
 				fixfree++
 			}
 		}
-		floatfree = 0
-		for i = REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
+		floatfree := 0
+		for i := REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
 			if reg[i] == 0 {
 				floatfree++
 			}
@@ -125,11 +115,12 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
 	if t == nil {
 		gc.Fatal("regalloc: t nil")
 	}
-	et = int(gc.Simtype[t.Etype])
+	et := int(gc.Simtype[t.Etype])
 	if gc.Is64(t) {
 		gc.Fatal("regalloc: 64 bit type %v")
 	}
 
+	var i int
 	switch et {
 	case gc.TINT8,
 		gc.TUINT8,
@@ -154,7 +145,7 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
 		}
 
 		fmt.Printf("registers allocated at\n")
-		for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
+		for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
 			fmt.Printf("%d %p\n", i, regpc[i])
 		}
 		gc.Fatal("out of fixed registers")
@@ -195,19 +186,15 @@ out:
 }
 
 func regfree(n *gc.Node) {
-	var i int
-	var fixfree int
-	var floatfree int
-
 	if false && gc.Debug['r'] != 0 {
-		fixfree = 0
-		for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
+		fixfree := 0
+		for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
 			if reg[i] == 0 {
 				fixfree++
 			}
 		}
-		floatfree = 0
-		for i = REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
+		floatfree := 0
+		for i := REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
 			if reg[i] == 0 {
 				floatfree++
 			}
@@ -221,7 +208,7 @@ func regfree(n *gc.Node) {
 	if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
 		gc.Fatal("regfree: not a register")
 	}
-	i = int(n.Val.U.Reg)
+	i := int(n.Val.U.Reg)
 	if i == arm.REGSP {
 		return
 	}
@@ -260,9 +247,6 @@ var nsclean int
  * n is a 64-bit value.  fill in lo and hi to refer to its 32-bit halves.
  */
 func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
-	var n1 gc.Node
-	var i int64
-
 	if !gc.Is64(n.Type) {
 		gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
 	}
@@ -276,6 +260,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
 	default:
 		switch n.Op {
 		default:
+			var n1 gc.Node
 			if !dotaddable(n, &n1) {
 				igen(n, &n1, nil)
 				sclean[nsclean-1] = n1
@@ -285,6 +270,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
 
 		case gc.ONAME:
 			if n.Class == gc.PPARAMREF {
+				var n1 gc.Node
 				cgen(n.Heapaddr, &n1)
 				sclean[nsclean-1] = n1
 				n = &n1
@@ -306,8 +292,9 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
 		hi.Xoffset += 4
 
 	case gc.OLITERAL:
+		var n1 gc.Node
 		gc.Convconst(&n1, n.Type, &n.Val)
-		i = gc.Mpgetfix(n1.Val.U.Xval)
+		i := gc.Mpgetfix(n1.Val.U.Xval)
 		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
 		i >>= 32
 		if n.Type.Etype == gc.TINT64 {
@@ -329,28 +316,13 @@ func splitclean() {
 }
 
 func gmove(f *gc.Node, t *gc.Node) {
-	var a int
-	var ft int
-	var tt int
-	var fa int
-	var ta int
-	var cvt *gc.Type
-	var r1 gc.Node
-	var r2 gc.Node
-	var flo gc.Node
-	var fhi gc.Node
-	var tlo gc.Node
-	var thi gc.Node
-	var con gc.Node
-	var p1 *obj.Prog
-
 	if gc.Debug['M'] != 0 {
 		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0))
 	}
 
-	ft = gc.Simsimtype(f.Type)
-	tt = gc.Simsimtype(t.Type)
-	cvt = t.Type
+	ft := gc.Simsimtype(f.Type)
+	tt := gc.Simsimtype(t.Type)
+	cvt := t.Type
 
 	if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
 		gc.Complexmove(f, t)
@@ -359,19 +331,26 @@ func gmove(f *gc.Node, t *gc.Node) {
 
 	// cannot have two memory operands;
 	// except 64-bit, which always copies via registers anyway.
+	var flo gc.Node
+	var a int
+	var r1 gc.Node
+	var fhi gc.Node
 	if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
 	}
 
 	// convert constant to desired type
 	if f.Op == gc.OLITERAL {
+		var con gc.Node
 		switch tt {
 		default:
 			gc.Convconst(&con, t.Type, &f.Val)
 
 		case gc.TINT16,
 			gc.TINT8:
+			var con gc.Node
 			gc.Convconst(&con, gc.Types[gc.TINT32], &f.Val)
+			var r1 gc.Node
 			regalloc(&r1, con.Type, t)
 			gins(arm.AMOVW, &con, &r1)
 			gmove(&r1, t)
@@ -380,7 +359,9 @@ func gmove(f *gc.Node, t *gc.Node) {
 
 		case gc.TUINT16,
 			gc.TUINT8:
+			var con gc.Node
 			gc.Convconst(&con, gc.Types[gc.TUINT32], &f.Val)
+			var r1 gc.Node
 			regalloc(&r1, con.Type, t)
 			gins(arm.AMOVW, &con, &r1)
 			gmove(&r1, t)
@@ -495,8 +476,11 @@ func gmove(f *gc.Node, t *gc.Node) {
 		gc.TUINT64<<16 | gc.TINT32,
 		gc.TINT64<<16 | gc.TUINT32,
 		gc.TUINT64<<16 | gc.TUINT32:
+		var flo gc.Node
+		var fhi gc.Node
 		split64(f, &flo, &fhi)
 
+		var r1 gc.Node
 		regalloc(&r1, t.Type, nil)
 		gins(arm.AMOVW, &flo, &r1)
 		gins(arm.AMOVW, &r1, t)
@@ -508,10 +492,16 @@ func gmove(f *gc.Node, t *gc.Node) {
 		gc.TINT64<<16 | gc.TUINT64,
 		gc.TUINT64<<16 | gc.TINT64,
 		gc.TUINT64<<16 | gc.TUINT64:
+		var fhi gc.Node
+		var flo gc.Node
 		split64(f, &flo, &fhi)
 
+		var tlo gc.Node
+		var thi gc.Node
 		split64(t, &tlo, &thi)
+		var r1 gc.Node
 		regalloc(&r1, flo.Type, nil)
+		var r2 gc.Node
 		regalloc(&r2, fhi.Type, nil)
 		gins(arm.AMOVW, &flo, &r1)
 		gins(arm.AMOVW, &fhi, &r2)
@@ -580,12 +570,16 @@ func gmove(f *gc.Node, t *gc.Node) {
 
 	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
 		gc.TINT32<<16 | gc.TUINT64:
+		var tlo gc.Node
+		var thi gc.Node
 		split64(t, &tlo, &thi)
 
+		var r1 gc.Node
 		regalloc(&r1, tlo.Type, nil)
+		var r2 gc.Node
 		regalloc(&r2, thi.Type, nil)
 		gmove(f, &r1)
-		p1 = gins(arm.AMOVW, &r1, &r2)
+		p1 := gins(arm.AMOVW, &r1, &r2)
 		p1.From.Type = obj.TYPE_SHIFT
 		p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Val.U.Reg)&15 // r1->31
 		p1.From.Reg = 0
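
The shift operand built just above packs an ARM flexible second operand. On the bit layout assumed here, bits 0-3 hold the source register, bits 5-6 the shift kind (0 LSL, 1 LSR, 2 ASR, 3 ROR), and bits 7-11 the immediate count, so 2<<5 | 31<<7 | r reads as "Rr ASR #31", broadcasting the sign bit to form the high word. A decoder sketch under that assumption:

package main

import "fmt"

func decodeShift(op int64) string {
	kinds := []string{"LSL", "LSR", "ASR", "ROR"}
	reg := op & 15           // bits 0-3: register
	kind := (op >> 5) & 3    // bits 5-6: shift type
	amount := (op >> 7) & 31 // bits 7-11: immediate count
	return fmt.Sprintf("R%d %s #%d", reg, kinds[kind], amount)
}

func main() {
	fmt.Println(decodeShift(2<<5 | 31<<7 | 1)) // R1 ASR #31
}
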
@@ -601,9 +595,12 @@ func gmove(f *gc.Node, t *gc.Node) {
 
 	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
 		gc.TUINT32<<16 | gc.TUINT64:
+		var thi gc.Node
+		var tlo gc.Node
 		split64(t, &tlo, &thi)
 
 		gmove(f, &tlo)
+		var r1 gc.Node
 		regalloc(&r1, thi.Type, nil)
 		gins(arm.AMOVW, ncon(0), &r1)
 		gins(arm.AMOVW, &r1, &thi)
@@ -630,15 +627,15 @@ func gmove(f *gc.Node, t *gc.Node) {
 		gc.TFLOAT64<<16 | gc.TUINT16,
 		gc.TFLOAT64<<16 | gc.TINT32,
 		gc.TFLOAT64<<16 | gc.TUINT32:
-		fa = arm.AMOVF
+		fa := arm.AMOVF
 
-		a = arm.AMOVFW
+		a := arm.AMOVFW
 		if ft == gc.TFLOAT64 {
 			fa = arm.AMOVD
 			a = arm.AMOVDW
 		}
 
-		ta = arm.AMOVW
+		ta := arm.AMOVW
 		switch tt {
 		case gc.TINT8:
 			ta = arm.AMOVBS
@@ -653,10 +650,12 @@ func gmove(f *gc.Node, t *gc.Node) {
 			ta = arm.AMOVHU
 		}
 
+		var r1 gc.Node
 		regalloc(&r1, gc.Types[ft], f)
+		var r2 gc.Node
 		regalloc(&r2, gc.Types[tt], t)
-		gins(fa, f, &r1)       // load to fpu
-		p1 = gins(a, &r1, &r1) // convert to w
+		gins(fa, f, &r1)        // load to fpu
+		p1 := gins(a, &r1, &r1) // convert to w
 		switch tt {
 		case gc.TUINT8,
 			gc.TUINT16,
@@ -685,7 +684,7 @@ func gmove(f *gc.Node, t *gc.Node) {
 		gc.TUINT16<<16 | gc.TFLOAT64,
 		gc.TINT32<<16 | gc.TFLOAT64,
 		gc.TUINT32<<16 | gc.TFLOAT64:
-		fa = arm.AMOVW
+		fa := arm.AMOVW
 
 		switch ft {
 		case gc.TINT8:
@@ -701,18 +700,20 @@ func gmove(f *gc.Node, t *gc.Node) {
 			fa = arm.AMOVHU
 		}
 
-		a = arm.AMOVWF
-		ta = arm.AMOVF
+		a := arm.AMOVWF
+		ta := arm.AMOVF
 		if tt == gc.TFLOAT64 {
 			a = arm.AMOVWD
 			ta = arm.AMOVD
 		}
 
+		var r1 gc.Node
 		regalloc(&r1, gc.Types[ft], f)
+		var r2 gc.Node
 		regalloc(&r2, gc.Types[tt], t)
 		gins(fa, f, &r1)          // load to cpu
 		gins(arm.AMOVW, &r1, &r2) // copy to fpu
-		p1 = gins(a, &r2, &r2)    // convert
+		p1 := gins(a, &r2, &r2)   // convert
 		switch ft {
 		case gc.TUINT8,
 			gc.TUINT16,
@@ -740,6 +741,7 @@ func gmove(f *gc.Node, t *gc.Node) {
 		a = arm.AMOVD
 
 	case gc.TFLOAT32<<16 | gc.TFLOAT64:
+		var r1 gc.Node
 		regalloc(&r1, gc.Types[gc.TFLOAT64], t)
 		gins(arm.AMOVF, f, &r1)
 		gins(arm.AMOVFD, &r1, &r1)
@@ -748,6 +750,7 @@ func gmove(f *gc.Node, t *gc.Node) {
 		return
 
 	case gc.TFLOAT64<<16 | gc.TFLOAT32:
+		var r1 gc.Node
 		regalloc(&r1, gc.Types[gc.TFLOAT64], t)
 		gins(arm.AMOVD, f, &r1)
 		gins(arm.AMOVDF, &r1, &r1)
@@ -816,13 +819,9 @@ func samaddr(f *gc.Node, t *gc.Node) bool {
  *	as f, t
  */
 func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
-	var p *obj.Prog
-	var af obj.Addr
 	//	Node nod;
 	//	int32 v;
 
-	var at obj.Addr
-
 	if f != nil && f.Op == gc.OINDEX {
 		gc.Fatal("gins OINDEX not implemented")
 	}
@@ -843,16 +842,16 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 	//		constnode.vconst = v;
 	//		idx.reg = nod.reg;
 	//		regfree(&nod);
-	af = obj.Addr{}
+	af := obj.Addr{}
 
-	at = obj.Addr{}
+	at := obj.Addr{}
 	if f != nil {
 		gc.Naddr(f, &af, 1)
 	}
 	if t != nil {
 		gc.Naddr(t, &at, 1)
 	}
-	p = gc.Prog(as)
+	p := gc.Prog(as)
 	if f != nil {
 		p.From = af
 	}
@@ -888,13 +887,11 @@ func raddr(n *gc.Node, p *obj.Prog) {
 TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
 */
 func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
-	var p *obj.Prog
-
 	if lhs.Op != gc.OREGISTER {
 		gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
 	}
 
-	p = gins(as, rhs, nil)
+	p := gins(as, rhs, nil)
 	raddr(lhs, p)
 	return p
 }
@@ -903,15 +900,13 @@ func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
  * arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
  */
 func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
-	var p *obj.Prog
-
 	if sval <= 0 || sval > 32 {
 		gc.Fatal("bad shift value: %d", sval)
 	}
 
 	sval = sval & 0x1f
 
-	p = gins(as, nil, rhs)
+	p := gins(as, nil, rhs)
 	p.From.Type = obj.TYPE_SHIFT
 	p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Val.U.Reg)&15
 	return p
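
A small sketch of why gshift accepts 1..32 but stores sval & 0x1f: the immediate count field is only 5 bits wide, and for the right-shift kinds the hardware reads a stored 0 as a shift by 32, so 32 must be encoded as 0 and a requested 0 is rejected as illegal. The shift-type value below mirrors the bit layout used above and is an assumption:

package main

import "fmt"

func encodeImmShift(stype, sval, reg int64) int64 {
	if sval <= 0 || sval > 32 {
		panic("bad shift value")
	}
	return stype | (sval&0x1f)<<7 | reg&15
}

func main() {
	// a shift by 32 stores 0 in the 5-bit count field
	fmt.Printf("%#x\n", encodeImmShift(1<<5, 32, 2)) // 0x22
}
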
@@ -920,8 +915,7 @@ func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Pr
 /* generate a register shift
  */
 func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
-	var p *obj.Prog
-	p = gins(as, nil, rhs)
+	p := gins(as, nil, rhs)
 	p.From.Type = obj.TYPE_SHIFT
 	p.From.Offset = int64(stype) | (int64(reg.Val.U.Reg)&15)<<8 | 1<<4 | int64(lhs.Val.U.Reg)&15
 	return p
@@ -931,13 +925,11 @@ func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *o
  * return Axxx for Oxxx on type t.
  */
 func optoas(op int, t *gc.Type) int {
-	var a int
-
 	if t == nil {
 		gc.Fatal("optoas: t is nil")
 	}
 
-	a = obj.AXXX
+	a := obj.AXXX
 	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
 	default:
 		gc.Fatal("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0), gc.Tconv(gc.Types[t.Etype], 0), gc.Tconv(gc.Types[gc.Simtype[t.Etype]], 0))
@@ -1246,15 +1238,13 @@ func sudoclean() {
 }
 
 func dotaddable(n *gc.Node, n1 *gc.Node) bool {
-	var o int
-	var oary [10]int64
-	var nn *gc.Node
-
 	if n.Op != gc.ODOT {
 		return false
 	}
 
-	o = gc.Dotoffset(n, oary[:], &nn)
+	var oary [10]int64
+	var nn *gc.Node
+	o := gc.Dotoffset(n, oary[:], &nn)
 	if nn != nil && nn.Addable != 0 && o == 1 && oary[0] >= 0 {
 		*n1 = *nn
 		n1.Type = n.Type
@@ -1277,35 +1267,24 @@ func dotaddable(n *gc.Node, n1 *gc.Node) bool {
  * to release the register used for a.
  */
 func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
-	var o int
-	var i int
-	var oary [10]int64
-	var v int64
-	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
-	var n4 gc.Node
-	var nn *gc.Node
-	var l *gc.Node
-	var r *gc.Node
-	var reg *gc.Node
-	var reg1 *gc.Node
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var t *gc.Type
-
 	if n.Type == nil {
 		return false
 	}
 
 	*a = obj.Addr{}
 
+	var oary [10]int64
+	var nn *gc.Node
+	var reg *gc.Node
+	var n1 gc.Node
+	var reg1 *gc.Node
+	var o int
 	switch n.Op {
 	case gc.OLITERAL:
 		if !gc.Isconst(n, gc.CTINT) {
 			break
 		}
-		v = gc.Mpgetfix(n.Val.U.Xval)
+		v := gc.Mpgetfix(n.Val.U.Xval)
 		if v >= 32000 || v <= -32000 {
 			break
 		}
@@ -1315,25 +1294,13 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
 		gc.ODOTPTR:
 		cleani += 2
 		reg = &clean[cleani-1]
-		reg1 = &clean[cleani-2]
+		reg1 := &clean[cleani-2]
 		reg.Op = gc.OEMPTY
 		reg1.Op = gc.OEMPTY
 		goto odot
 
 	case gc.OINDEX:
 		return false
-
-		// disabled: OINDEX case is now covered by agenr
-		// for a more suitable register allocation pattern.
-		if n.Left.Type.Etype == gc.TSTRING {
-			return false
-		}
-		cleani += 2
-		reg = &clean[cleani-1]
-		reg1 = &clean[cleani-2]
-		reg.Op = gc.OEMPTY
-		reg1.Op = gc.OEMPTY
-		goto oindex
 	}
 
 	return false
@@ -1374,7 +1341,7 @@ odot:
 
 	if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
 		// directly addressable set of DOTs
-		n1 = *nn
+		n1 := *nn
 
 		n1.Type = n.Type
 		n1.Xoffset += oary[0]
@@ -1394,7 +1361,7 @@ odot:
 		n1.Xoffset = -(oary[0] + 1)
 	}
 
-	for i = 1; i < o; i++ {
+	for i := 1; i < o; i++ {
 		if oary[i] >= 0 {
 			gc.Fatal("can't happen")
 		}
@@ -1409,187 +1376,6 @@ odot:
 	gc.Naddr(&n1, a, 1)
 	goto yes
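
The odot path above folds a run of field selectors into one base node plus a summed byte offset (gc.Dotoffset fills oary with the per-component offsets). The same arithmetic can be sanity-checked with reflect; the struct layout below is only an illustration:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type inner struct{ a, b int32 }
type outer struct {
	x int64
	y inner
}

func main() {
	// offset of v.y.b is offsetof(y) + offsetof(b), the sum odot accumulates
	off := reflect.TypeOf(outer{}).Field(1).Offset +
		reflect.TypeOf(inner{}).Field(1).Offset
	v := outer{y: inner{b: 42}}
	p := (*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + off))
	fmt.Println(*p) // 42
}
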
 
-oindex:
-	l = n.Left
-	r = n.Right
-	if l.Ullman >= gc.UINF && r.Ullman >= gc.UINF {
-		goto no
-	}
-
-	// set o to type of array
-	o = 0
-
-	if gc.Isptr[l.Type.Etype] != 0 {
-		o += OPtrto
-		if l.Type.Type.Etype != gc.TARRAY {
-			gc.Fatal("not ptr ary")
-		}
-		if l.Type.Type.Bound < 0 {
-			o += ODynam
-		}
-	} else {
-		if l.Type.Etype != gc.TARRAY {
-			gc.Fatal("not ary")
-		}
-		if l.Type.Bound < 0 {
-			o += ODynam
-		}
-	}
-
-	*w = int(n.Type.Width)
-	if gc.Isconst(r, gc.CTINT) {
-		goto oindex_const
-	}
-
-	switch *w {
-	default:
-		goto no
-
-	case 1,
-		2,
-		4,
-		8:
-		break
-	}
-
-	// load the array (reg)
-	if l.Ullman > r.Ullman {
-		regalloc(reg, gc.Types[gc.Tptr], nil)
-		if o&OPtrto != 0 {
-			cgen(l, reg)
-			gc.Cgen_checknil(reg)
-		} else {
-			agen(l, reg)
-		}
-	}
-
-	// load the index (reg1)
-	t = gc.Types[gc.TUINT32]
-
-	if gc.Issigned[r.Type.Etype] != 0 {
-		t = gc.Types[gc.TINT32]
-	}
-	regalloc(reg1, t, nil)
-	regalloc(&n3, gc.Types[gc.TINT32], reg1)
-	p2 = cgenindex(r, &n3, gc.Debug['B'] != 0 || n.Bounded)
-	gmove(&n3, reg1)
-	regfree(&n3)
-
-	// load the array (reg)
-	if l.Ullman <= r.Ullman {
-		regalloc(reg, gc.Types[gc.Tptr], nil)
-		if o&OPtrto != 0 {
-			cgen(l, reg)
-			gc.Cgen_checknil(reg)
-		} else {
-			agen(l, reg)
-		}
-	}
-
-	// check bounds
-	if gc.Debug['B'] == 0 {
-		if o&ODynam != 0 {
-			n2 = *reg
-			n2.Op = gc.OINDREG
-			n2.Type = gc.Types[gc.Tptr]
-			n2.Xoffset = int64(gc.Array_nel)
-		} else {
-			if o&OPtrto != 0 {
-				gc.Nodconst(&n2, gc.Types[gc.TUINT32], l.Type.Type.Bound)
-			} else {
-				gc.Nodconst(&n2, gc.Types[gc.TUINT32], l.Type.Bound)
-			}
-		}
-
-		regalloc(&n3, n2.Type, nil)
-		cgen(&n2, &n3)
-		gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), reg1, &n3)
-		regfree(&n3)
-		p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
-		if p2 != nil {
-			gc.Patch(p2, gc.Pc)
-		}
-		ginscall(gc.Panicindex, 0)
-		gc.Patch(p1, gc.Pc)
-	}
-
-	if o&ODynam != 0 {
-		n2 = *reg
-		n2.Op = gc.OINDREG
-		n2.Type = gc.Types[gc.Tptr]
-		n2.Xoffset = int64(gc.Array_array)
-		gmove(&n2, reg)
-	}
-
-	switch *w {
-	case 1:
-		gins(arm.AADD, reg1, reg)
-
-	case 2:
-		gshift(arm.AADD, reg1, arm.SHIFT_LL, 1, reg)
-
-	case 4:
-		gshift(arm.AADD, reg1, arm.SHIFT_LL, 2, reg)
-
-	case 8:
-		gshift(arm.AADD, reg1, arm.SHIFT_LL, 3, reg)
-	}
-
-	gc.Naddr(reg1, a, 1)
-	a.Type = obj.TYPE_MEM
-	a.Reg = reg.Val.U.Reg
-	a.Offset = 0
-	goto yes
-
-	// index is constant
-	// can check statically and
-	// can multiply by width statically
-
-oindex_const:
-	regalloc(reg, gc.Types[gc.Tptr], nil)
-
-	if o&OPtrto != 0 {
-		cgen(l, reg)
-		gc.Cgen_checknil(reg)
-	} else {
-		agen(l, reg)
-	}
-
-	v = gc.Mpgetfix(r.Val.U.Xval)
-	if o&ODynam != 0 {
-		if gc.Debug['B'] == 0 && !n.Bounded {
-			n1 = *reg
-			n1.Op = gc.OINDREG
-			n1.Type = gc.Types[gc.Tptr]
-			n1.Xoffset = int64(gc.Array_nel)
-			gc.Nodconst(&n2, gc.Types[gc.TUINT32], v)
-			regalloc(&n3, gc.Types[gc.TUINT32], nil)
-			cgen(&n2, &n3)
-			regalloc(&n4, n1.Type, nil)
-			cgen(&n1, &n4)
-			gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n4, &n3)
-			regfree(&n4)
-			regfree(&n3)
-			p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
-			ginscall(gc.Panicindex, 0)
-			gc.Patch(p1, gc.Pc)
-		}
-
-		n1 = *reg
-		n1.Op = gc.OINDREG
-		n1.Type = gc.Types[gc.Tptr]
-		n1.Xoffset = int64(gc.Array_array)
-		gmove(&n1, reg)
-	}
-
-	n2 = *reg
-	n2.Op = gc.OINDREG
-	n2.Xoffset = v * int64(*w)
-	a.Type = obj.TYPE_NONE
-	a.Name = obj.NAME_NONE
-	gc.Naddr(&n2, a, 1)
-	goto yes
-
 yes:
 	return true
 
diff --git a/src/cmd/5g/peep.go b/src/cmd/5g/peep.go
index 2fbb1e5285a2266ed8f07b9f92ec4d9a1066d077..e28ec024d873932d55d67a76d2310fbb0af1c44f 100644
--- a/src/cmd/5g/peep.go
+++ b/src/cmd/5g/peep.go
@@ -41,17 +41,15 @@ var gactive uint32
 
 // UNUSED
 func peep(firstp *obj.Prog) {
-	var r *gc.Flow
-	var g *gc.Graph
-	var p *obj.Prog
-	var t int
-
-	g = gc.Flowstart(firstp, nil)
+	g := (*gc.Graph)(gc.Flowstart(firstp, nil))
 	if g == nil {
 		return
 	}
 	gactive = 0
 
+	var r *gc.Flow
+	var p *obj.Prog
+	var t int
 loop1:
 	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 		gc.Dumpit("loop1", g.Start, 0)
@@ -121,7 +119,7 @@ loop1:
 		goto loop1
 	}
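
The loop1 label above implements a fixed point: each pass tallies its rewrites in t and reruns from the top until nothing changes. The same control shape, reduced to a toy pass that deletes zero "instructions":

package main

import "fmt"

func main() {
	prog := []int{3, 0, 5, 0, 2} // toy instruction stream; 0 is a NOP
	for t := 1; t != 0; {        // rerun until a pass makes no change, as loop1 does
		t = 0
		for i := 0; i < len(prog); i++ {
			if prog[i] == 0 {
				prog = append(prog[:i], prog[i+1:]...)
				t++
			}
		}
	}
	fmt.Println(prog) // [3 5 2]
}
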
 
-	for r = g.Start; r != nil; r = r.Link {
+	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
 		p = r.Prog
 		switch p.As {
 		/*
@@ -141,7 +139,7 @@ loop1:
 		}
 	}
 
-	for r = g.Start; r != nil; r = r.Link {
+	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
 		p = r.Prog
 		switch p.As {
 		case arm.AMOVW,
@@ -250,22 +248,17 @@ func regtyp(a *obj.Addr) bool {
  * will be eliminated by copy propagation.
  */
 func subprop(r0 *gc.Flow) bool {
-	var p *obj.Prog
-	var v1 *obj.Addr
-	var v2 *obj.Addr
-	var r *gc.Flow
-	var t int
-	var info gc.ProgInfo
-
-	p = r0.Prog
-	v1 = &p.From
+	p := (*obj.Prog)(r0.Prog)
+	v1 := (*obj.Addr)(&p.From)
 	if !regtyp(v1) {
 		return false
 	}
-	v2 = &p.To
+	v2 := (*obj.Addr)(&p.To)
 	if !regtyp(v2) {
 		return false
 	}
+	var r *gc.Flow
+	var info gc.ProgInfo
 	for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
 		if gc.Uniqs(r) == nil {
 			break
@@ -332,7 +325,7 @@ gotit:
 		}
 	}
 
-	t = int(v1.Reg)
+	t := int(v1.Reg)
 	v1.Reg = v2.Reg
 	v2.Reg = int16(t)
 	if gc.Debug['P'] != 0 {
@@ -354,13 +347,9 @@ gotit:
  *	set v2	return success
  */
 func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
-	var p *obj.Prog
-	var v1 *obj.Addr
-	var v2 *obj.Addr
-
-	p = r0.Prog
-	v1 = &p.From
-	v2 = &p.To
+	p := (*obj.Prog)(r0.Prog)
+	v1 := (*obj.Addr)(&p.From)
+	v2 := (*obj.Addr)(&p.To)
 	if copyas(v1, v2) {
 		return true
 	}
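
What copy1 checks from here, in a reduced model: after a copy v2 = v1, later reads of v2 may be redirected to v1 until either register is written again. A toy three-address form, with instruction and register names that are purely illustrative:

package main

import "fmt"

type ins struct{ op, dst, src string }

// propagate rewrites reads of v2 into reads of v1, stopping at the first
// redefinition of either register, the condition copy1 enforces.
func propagate(prog []ins, v1, v2 string) {
	for i := range prog {
		p := &prog[i]
		if p.src == v2 {
			p.src = v1
		}
		if p.dst == v1 || p.dst == v2 {
			return
		}
	}
}

func main() {
	prog := []ins{{"add", "r3", "r2"}, {"mul", "r4", "r3"}}
	propagate(prog, "r1", "r2") // assumes an earlier "r2 = r1"
	fmt.Println(prog)           // [{add r3 r1} {mul r4 r3}]
}
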
@@ -369,9 +358,6 @@ func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
 }
 
 func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
-	var t int
-	var p *obj.Prog
-
 	if uint32(r.Active) == gactive {
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("act set; return 1\n")
@@ -383,6 +369,8 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
 	if gc.Debug['P'] != 0 {
 		fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
 	}
+	var t int
+	var p *obj.Prog
 	for ; r != nil; r = r.S1 {
 		p = r.Prog
 		if gc.Debug['P'] != 0 {
@@ -473,11 +461,10 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
  * The v1->v2 should be eliminated by copy propagation.
  */
 func constprop(c1 *obj.Addr, v1 *obj.Addr, r *gc.Flow) {
-	var p *obj.Prog
-
 	if gc.Debug['P'] != 0 {
 		fmt.Printf("constprop %v->%v\n", gc.Ctxt.Dconv(c1), gc.Ctxt.Dconv(v1))
 	}
+	var p *obj.Prog
 	for ; r != nil; r = r.S1 {
 		p = r.Prog
 		if gc.Debug['P'] != 0 {
@@ -527,17 +514,13 @@ func constprop(c1 *obj.Addr, v1 *obj.Addr, r *gc.Flow) {
  * MOVBS above can be a MOVBS, MOVBU, MOVHS or MOVHU.
  */
 func shortprop(r *gc.Flow) bool {
-	var p *obj.Prog
-	var p1 *obj.Prog
-	var r1 *gc.Flow
-
-	p = r.Prog
-	r1 = findpre(r, &p.From)
+	p := (*obj.Prog)(r.Prog)
+	r1 := (*gc.Flow)(findpre(r, &p.From))
 	if r1 == nil {
 		return false
 	}
 
-	p1 = r1.Prog
+	p1 := (*obj.Prog)(r1.Prog)
 	if p1.As == p.As {
 		// Two consecutive extensions.
 		goto gotit
@@ -583,15 +566,7 @@ gotit:
  * ..
  */
 func shiftprop(r *gc.Flow) bool {
-	var r1 *gc.Flow
-	var p *obj.Prog
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var n int
-	var o int
-	var a obj.Addr
-
-	p = r.Prog
+	p := (*obj.Prog)(r.Prog)
 	if p.To.Type != obj.TYPE_REG {
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
@@ -599,8 +574,8 @@ func shiftprop(r *gc.Flow) bool {
 		return false
 	}
 
-	n = int(p.To.Reg)
-	a = obj.Addr{}
+	n := int(p.To.Reg)
+	a := obj.Addr{}
 	if p.Reg != 0 && p.Reg != p.To.Reg {
 		a.Type = obj.TYPE_REG
 		a.Reg = p.Reg
@@ -609,7 +584,8 @@ func shiftprop(r *gc.Flow) bool {
 	if gc.Debug['P'] != 0 {
 		fmt.Printf("shiftprop\n%v", p)
 	}
-	r1 = r
+	r1 := (*gc.Flow)(r)
+	var p1 *obj.Prog
 	for {
 		/* find first use of shift result; abort if shift operands or result are changed */
 		r1 = gc.Uniqs(r1)
@@ -736,9 +712,10 @@ func shiftprop(r *gc.Flow) bool {
 	}
 
 	/* check whether shift result is used subsequently */
-	p2 = p1
+	p2 := (*obj.Prog)(p1)
 
 	if int(p1.To.Reg) != n {
+		var p1 *obj.Prog
 		for {
 			r1 = gc.Uniqs(r1)
 			if r1 == nil {
@@ -773,7 +750,7 @@ func shiftprop(r *gc.Flow) bool {
 	/* make the substitution */
 	p2.From.Reg = 0
 
-	o = int(p.Reg)
+	o := int(p.Reg)
 	if o == 0 {
 		o = int(p.To.Reg)
 	}
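
The substitution shiftprop has just committed to, sketched at the instruction level: a move of a shifted register whose only consumer is a following data-processing op is folded into that op's second operand, and the move is excised. A toy two-instruction window (mnemonics and operand strings hypothetical):

package main

import "fmt"

type ins struct{ op, src, dst string }

func foldShift(p []ins) []ins {
	// p[0] computes dst from a shifted source; p[1] reads that dst
	if len(p) == 2 && p[0].op == "MOVW" && p[1].src == p[0].dst {
		p[1].src = p[0].src // substitute the shifted operand directly
		return p[1:]        // excise the move
	}
	return p
}

func main() {
	prog := []ins{{"MOVW", "R1<<2", "R2"}, {"AND", "R2", "R4"}}
	fmt.Println(foldShift(prog)) // [{AND R1<<2 R4}]
}
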
@@ -870,14 +847,11 @@ func findinc(r *gc.Flow, r2 *gc.Flow, v *obj.Addr) *gc.Flow {
 }
 
 func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) bool {
-	var a [3]obj.Addr
-	var i int
-	var n int
-
 	if r == r2 {
 		return true
 	}
-	n = 0
+	n := 0
+	var a [3]obj.Addr
 	if p.Reg != 0 && p.Reg != p.To.Reg {
 		a[n].Type = obj.TYPE_REG
 		a[n].Reg = p.Reg
@@ -900,6 +874,7 @@ func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) bool {
 	if n == 0 {
 		return true
 	}
+	var i int
 	for ; r != nil && r != r2; r = gc.Uniqs(r) {
 		p = r.Prog
 		for i = 0; i < n; i++ {
@@ -939,9 +914,7 @@ func findu1(r *gc.Flow, v *obj.Addr) bool {
 }
 
 func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) bool {
-	var r1 *gc.Flow
-
-	for r1 = g.Start; r1 != nil; r1 = r1.Link {
+	for r1 := (*gc.Flow)(g.Start); r1 != nil; r1 = r1.Link {
 		r1.Active = 0
 	}
 	return findu1(r, v)
@@ -961,19 +934,12 @@ func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) bool {
  *   MOVBU  R0<<0(R1),R0
  */
 func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
-	var r1 *gc.Flow
-	var r2 *gc.Flow
-	var r3 *gc.Flow
-	var p *obj.Prog
-	var p1 *obj.Prog
-	var v obj.Addr
-
-	p = r.Prog
-	v = *a
+	p := (*obj.Prog)(r.Prog)
+	v := obj.Addr(*a)
 	v.Type = obj.TYPE_REG
-	r1 = findpre(r, &v)
+	r1 := (*gc.Flow)(findpre(r, &v))
 	if r1 != nil {
-		p1 = r1.Prog
+		p1 := r1.Prog
 		if p1.To.Type == obj.TYPE_REG && p1.To.Reg == v.Reg {
 			switch p1.As {
 			case arm.AADD:
@@ -1030,13 +996,14 @@ func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
 
 			case arm.AMOVW:
 				if p1.From.Type == obj.TYPE_REG {
-					r2 = findinc(r1, r, &p1.From)
+					r2 := (*gc.Flow)(findinc(r1, r, &p1.From))
 					if r2 != nil {
+						var r3 *gc.Flow
 						for r3 = gc.Uniqs(r2); r3.Prog.As == obj.ANOP; r3 = gc.Uniqs(r3) {
 						}
 						if r3 == r {
 							/* post-indexing */
-							p1 = r2.Prog
+							p1 := r2.Prog
 
 							a.Reg = p1.To.Reg
 							a.Offset = p1.From.Offset
@@ -1054,10 +1021,10 @@ func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
 	}
 
 	if a != &p.From || a.Reg != p.To.Reg {
-		r1 = findinc(r, nil, &v)
+		r1 := (*gc.Flow)(findinc(r, nil, &v))
 		if r1 != nil {
 			/* post-indexing */
-			p1 = r1.Prog
+			p1 := r1.Prog
 
 			a.Offset = p1.From.Offset
 			p.Scond |= arm.C_PBIT
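
Both rewrites in xtramodes collapse an address computation into the memory operation itself. The post-indexed form synthesized just above (C_PBIT) behaves like this model: use the base address first, then advance it as a side effect, one instruction instead of a MOVW plus an ADD:

package main

import "fmt"

// loadPostIndex models a post-indexed load: read mem[*base] first,
// then advance *base by inc.
func loadPostIndex(mem []uint32, base *int, inc int) uint32 {
	v := mem[*base]
	*base += inc
	return v
}

func main() {
	mem := []uint32{10, 20, 30}
	p := 0
	fmt.Println(loadPostIndex(mem, &p, 1), loadPostIndex(mem, &p, 1)) // 10 20
}
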
@@ -1775,19 +1742,17 @@ func successor(r *gc.Flow) *gc.Flow {
 }
 
 func applypred(rstart *gc.Flow, j *Joininfo, cond int, branch int) {
-	var pred int
-	var r *gc.Flow
-
 	if j.len == 0 {
 		return
 	}
+	var pred int
 	if cond == Truecond {
 		pred = predinfo[rstart.Prog.As-arm.ABEQ].scond
 	} else {
 		pred = predinfo[rstart.Prog.As-arm.ABEQ].notscond
 	}
 
-	for r = j.start; ; r = successor(r) {
+	for r := (*gc.Flow)(j.start); ; r = successor(r) {
 		if r.Prog.As == arm.AB {
 			if r != j.last || branch == Delbranch {
 				excise(r)
@@ -1813,13 +1778,12 @@ func applypred(rstart *gc.Flow, j *Joininfo, cond int, branch int) {
 }
 
 func predicate(g *gc.Graph) {
-	var r *gc.Flow
 	var t1 int
 	var t2 int
 	var j1 Joininfo
 	var j2 Joininfo
 
-	for r = g.Start; r != nil; r = r.Link {
+	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
 		if isbranch(r.Prog) {
 			t1 = joinsplit(r.S1, &j1)
 			t2 = joinsplit(r.S2, &j2)
@@ -1861,8 +1825,6 @@ func smallindir(a *obj.Addr, reg *obj.Addr) bool {
 }
 
 func excise(r *gc.Flow) {
-	var p *obj.Prog
-
-	p = r.Prog
+	p := (*obj.Prog)(r.Prog)
 	obj.Nopout(p)
 }
diff --git a/src/cmd/6g/cgen.go b/src/cmd/6g/cgen.go
index 36fa62c46945c9c0e2e92435fe8645b1c5a22580..58deffa6edfe229ec43338f865c64002da2e0ddb 100644
--- a/src/cmd/6g/cgen.go
+++ b/src/cmd/6g/cgen.go
@@ -24,23 +24,16 @@ import "cmd/internal/gc"
  * simplifies and calls gmove.
  */
 func cgen(n *gc.Node, res *gc.Node) {
-	var nl *gc.Node
-	var nr *gc.Node
-	var r *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var a int
-	var f int
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var p3 *obj.Prog
-	var addr obj.Addr
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\ncgen-n", n)
 		gc.Dump("cgen-res", res)
 	}
 
+	var nl *gc.Node
+	var n1 gc.Node
+	var nr *gc.Node
+	var n2 gc.Node
+	var a int
 	if n == nil || n.Type == nil {
 		goto ret
 	}
@@ -60,6 +53,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		gc.OSLICE3,
 		gc.OSLICE3ARR:
 		if res.Op != gc.ONAME || res.Addable == 0 {
+			var n1 gc.Node
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_slice(n, &n1)
 			cgen(&n1, res)
@@ -70,6 +64,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 	case gc.OEFACE:
 		if res.Op != gc.ONAME || res.Addable == 0 {
+			var n1 gc.Node
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_eface(n, &n1)
 			cgen(&n1, res)
@@ -84,6 +79,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 			gc.Fatal("cgen: this is going to misscompile")
 		}
 		if res.Ullman >= gc.UINF {
+			var n1 gc.Node
 			gc.Tempname(&n1, n.Type)
 			cgen(n, &n1)
 			cgen(&n1, res)
@@ -101,6 +97,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 	if res.Addable == 0 {
 		if n.Ullman > res.Ullman {
+			var n1 gc.Node
 			regalloc(&n1, n.Type, res)
 			cgen(n, &n1)
 			if n1.Ullman > res.Ullman {
@@ -114,6 +111,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 			goto ret
 		}
 
+		var f int
 		if res.Ullman >= gc.UINF {
 			goto gen
 		}
@@ -135,9 +133,12 @@ func cgen(n *gc.Node, res *gc.Node) {
 		}
 
 		if gc.Iscomplex[n.Type.Etype] == 0 {
-			a = optoas(gc.OAS, res.Type)
+			a := optoas(gc.OAS, res.Type)
+			var addr obj.Addr
 			if sudoaddable(a, res, &addr) {
+				var p1 *obj.Prog
 				if f != 0 {
+					var n2 gc.Node
 					regalloc(&n2, res.Type, nil)
 					cgen(n, &n2)
 					p1 = gins(a, &n2, nil)
@@ -155,6 +156,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		}
 
 	gen:
+		var n1 gc.Node
 		igen(res, &n1, nil)
 		cgen(n, &n1)
 		regfree(&n1)
@@ -195,9 +197,10 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 	if nl != nil && nl.Ullman >= gc.UINF {
 		if nr != nil && nr.Ullman >= gc.UINF {
+			var n1 gc.Node
 			gc.Tempname(&n1, nl.Type)
 			cgen(nl, &n1)
-			n2 = *n
+			n2 := *n
 			n2.Left = &n1
 			cgen(&n2, res)
 			goto ret
@@ -205,14 +208,16 @@ func cgen(n *gc.Node, res *gc.Node) {
 	}
 
 	if gc.Iscomplex[n.Type.Etype] == 0 {
-		a = optoas(gc.OAS, n.Type)
+		a := optoas(gc.OAS, n.Type)
+		var addr obj.Addr
 		if sudoaddable(a, n, &addr) {
 			if res.Op == gc.OREGISTER {
-				p1 = gins(a, nil, res)
+				p1 := gins(a, nil, res)
 				p1.From = addr
 			} else {
+				var n2 gc.Node
 				regalloc(&n2, n.Type, nil)
-				p1 = gins(a, nil, &n2)
+				p1 := gins(a, nil, &n2)
 				p1.From = addr
 				gins(a, &n2, res)
 				regfree(&n2)
@@ -238,11 +243,11 @@ func cgen(n *gc.Node, res *gc.Node) {
 		gc.OGE,
 		gc.OGT,
 		gc.ONOT:
-		p1 = gc.Gbranch(obj.AJMP, nil, 0)
+		p1 := gc.Gbranch(obj.AJMP, nil, 0)
 
-		p2 = gc.Pc
+		p2 := gc.Pc
 		gmove(gc.Nodbool(true), res)
-		p3 = gc.Gbranch(obj.AJMP, nil, 0)
+		p3 := gc.Gbranch(obj.AJMP, nil, 0)
 		gc.Patch(p1, gc.Pc)
 		bgen(n, true, 0, p2)
 		gmove(gc.Nodbool(false), res)
@@ -255,10 +260,12 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 		// unary
 	case gc.OCOM:
-		a = optoas(gc.OXOR, nl.Type)
+		a := optoas(gc.OXOR, nl.Type)
 
+		var n1 gc.Node
 		regalloc(&n1, nl.Type, nil)
 		cgen(nl, &n1)
+		var n2 gc.Node
 		gc.Nodconst(&n2, nl.Type, -1)
 		gins(a, &n2, &n1)
 		gmove(&n1, res)
@@ -310,7 +317,9 @@ func cgen(n *gc.Node, res *gc.Node) {
 				gc.OINDEX,
 				gc.OIND,
 				gc.ONAME:
+				var n1 gc.Node
 				igen(nl, &n1, res)
+				var n2 gc.Node
 				regalloc(&n2, n.Type, res)
 				gmove(&n1, &n2)
 				gmove(&n2, res)
@@ -320,7 +329,9 @@ func cgen(n *gc.Node, res *gc.Node) {
 			}
 		}
 
+		var n1 gc.Node
 		regalloc(&n1, nl.Type, res)
+		var n2 gc.Node
 		regalloc(&n2, n.Type, &n1)
 		cgen(nl, &n1)
 
@@ -338,6 +349,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		gc.OINDEX,
 		gc.OIND,
 		gc.ONAME: // PHEAP or PPARAMREF var
+		var n1 gc.Node
 		igen(n, &n1, res)
 
 		gmove(&n1, res)
@@ -345,6 +357,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 		// interface table is first word of interface value
 	case gc.OITAB:
+		var n1 gc.Node
 		igen(nl, &n1, res)
 
 		n1.Type = n.Type
@@ -354,14 +367,16 @@ func cgen(n *gc.Node, res *gc.Node) {
 		// pointer is the first word of string or slice.
 	case gc.OSPTR:
 		if gc.Isconst(nl, gc.CTSTR) {
+			var n1 gc.Node
 			regalloc(&n1, gc.Types[gc.Tptr], res)
-			p1 = gins(x86.ALEAQ, nil, &n1)
+			p1 := gins(x86.ALEAQ, nil, &n1)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
 			gmove(&n1, res)
 			regfree(&n1)
 			break
 		}
 
+		var n1 gc.Node
 		igen(nl, &n1, res)
 		n1.Type = n.Type
 		gmove(&n1, res)
@@ -371,13 +386,15 @@ func cgen(n *gc.Node, res *gc.Node) {
 		if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
 			// map and chan have len in the first int-sized word.
 			// a zero pointer means zero length
+			var n1 gc.Node
 			regalloc(&n1, gc.Types[gc.Tptr], res)
 
 			cgen(nl, &n1)
 
+			var n2 gc.Node
 			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
 			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
-			p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+			p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
 
 			n2 = n1
 			n2.Op = gc.OINDREG
@@ -394,6 +411,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
 			// both slice and string have len one pointer into the struct.
 			// a zero pointer means zero length
+			var n1 gc.Node
 			igen(nl, &n1, res)
 
 			n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
@@ -409,13 +427,15 @@ func cgen(n *gc.Node, res *gc.Node) {
 		if gc.Istype(nl.Type, gc.TCHAN) {
 			// chan has cap in the second int-sized word.
 			// a zero pointer means zero length
+			var n1 gc.Node
 			regalloc(&n1, gc.Types[gc.Tptr], res)
 
 			cgen(nl, &n1)
 
+			var n2 gc.Node
 			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
 			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
-			p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+			p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
 
 			n2 = n1
 			n2.Op = gc.OINDREG
@@ -431,6 +451,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		}
 
 		if gc.Isslice(nl.Type) {
+			var n1 gc.Node
 			igen(nl, &n1, res)
 			n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
 			n1.Xoffset += int64(gc.Array_cap)
@@ -470,11 +491,13 @@ func cgen(n *gc.Node, res *gc.Node) {
 		}
 
 		if nl.Ullman >= nr.Ullman {
+			var n1 gc.Node
 			regalloc(&n1, nl.Type, res)
 			cgen(nl, &n1)
 			cgen_div(int(n.Op), &n1, nr, res)
 			regfree(&n1)
 		} else {
+			var n2 gc.Node
 			if !gc.Smallintconst(nr) {
 				regalloc(&n2, nr.Type, res)
 				cgen(nr, &n2)
@@ -514,7 +537,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 	 */
 sbop: // symmetric binary
 	if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
-		r = nl
+		r := nl
 		nl = nr
 		nr = r
 	}
@@ -585,8 +608,6 @@ ret:
  * The caller must call regfree(a).
  */
 func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
-	var n1 gc.Node
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("cgenr-n", n)
 	}
@@ -609,6 +630,7 @@ func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 		gc.OCALLFUNC,
 		gc.OCALLMETH,
 		gc.OCALLINTER:
+		var n1 gc.Node
 		igen(n, &n1, res)
 		regalloc(a, gc.Types[gc.Tptr], &n1)
 		gmove(&n1, a)
@@ -627,27 +649,12 @@ func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
  * The generated code checks that the result is not nil.
  */
 func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
-	var nl *gc.Node
-	var nr *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
-	var n5 gc.Node
-	var tmp gc.Node
-	var tmp2 gc.Node
-	var nlen gc.Node
-	var p1 *obj.Prog
-	var t *gc.Type
-	var w uint64
-	var v uint64
-	var freelen int
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nagenr-n", n)
 	}
 
-	nl = n.Left
-	nr = n.Right
+	nl := n.Left
+	nr := n.Right
 
 	switch n.Op {
 	case gc.ODOT,
@@ -655,6 +662,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 		gc.OCALLFUNC,
 		gc.OCALLMETH,
 		gc.OCALLINTER:
+		var n1 gc.Node
 		igen(n, &n1, res)
 		regalloc(a, gc.Types[gc.Tptr], &n1)
 		agen(&n1, a)
@@ -665,10 +673,14 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 		gc.Cgen_checknil(a)
 
 	case gc.OINDEX:
-		freelen = 0
-		w = uint64(n.Type.Width)
+		freelen := 0
+		w := uint64(n.Type.Width)
 
 		// Generate the non-addressable child first.
+		var n3 gc.Node
+		var nlen gc.Node
+		var tmp gc.Node
+		var n1 gc.Node
 		if nr.Addable != 0 {
 			goto irad
 		}
@@ -703,6 +715,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 			} else {
 				if nl.Addable == 0 {
 					// igen will need an addressable node.
+					var tmp2 gc.Node
 					gc.Tempname(&tmp2, nl.Type)
 
 					cgen(nl, &tmp2)
@@ -737,9 +750,10 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 			if gc.Isconst(nl, gc.CTSTR) {
 				gc.Fatal("constant string constant index") // front end should handle
 			}
-			v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+			v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
 			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
 				if gc.Debug['B'] == 0 && !n.Bounded {
+					var n2 gc.Node
 					gc.Nodconst(&n2, gc.Types[gc.Simtype[gc.TUINT]], int64(v))
 					if gc.Smallintconst(nr) {
 						gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &n2)
@@ -750,7 +764,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 						regfree(&tmp)
 					}
 
-					p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
+					p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
 					ginscall(gc.Panicindex, -1)
 					gc.Patch(p1, gc.Pc)
 				}
@@ -766,12 +780,13 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 		}
 
 		// type of the index
-		t = gc.Types[gc.TUINT64]
+		t := gc.Types[gc.TUINT64]
 
 		if gc.Issigned[n1.Type.Etype] != 0 {
 			t = gc.Types[gc.TINT64]
 		}
 
+		var n2 gc.Node
 		regalloc(&n2, t, &n1) // i
 		gmove(&n1, &n2)
 		regfree(&n1)
@@ -787,6 +802,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 				gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval.S)))
 			} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
 				if gc.Is64(nr.Type) {
+					var n5 gc.Node
 					regalloc(&n5, t, nil)
 					gmove(&nlen, &n5)
 					regfree(&nlen)
@@ -795,6 +811,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 			} else {
 				gc.Nodconst(&nlen, t, nl.Type.Bound)
 				if !gc.Smallintconst(&nlen) {
+					var n5 gc.Node
 					regalloc(&n5, t, nil)
 					gmove(&nlen, &n5)
 					nlen = n5
@@ -803,14 +820,14 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 			}
 
 			gins(optoas(gc.OCMP, t), &n2, &nlen)
-			p1 = gc.Gbranch(optoas(gc.OLT, t), nil, +1)
+			p1 := gc.Gbranch(optoas(gc.OLT, t), nil, +1)
 			ginscall(gc.Panicindex, -1)
 			gc.Patch(p1, gc.Pc)
 		}
 
 		if gc.Isconst(nl, gc.CTSTR) {
 			regalloc(&n3, gc.Types[gc.Tptr], res)
-			p1 = gins(x86.ALEAQ, nil, &n3)
+			p1 := gins(x86.ALEAQ, nil, &n3)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
 			gins(x86.AADDQ, &n2, &n3)
 			goto indexdone
@@ -819,7 +836,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 		if w == 0 {
 		} else // nothing to do
 		if w == 1 || w == 2 || w == 4 || w == 8 {
-			p1 = gins(x86.ALEAQ, &n2, &n3)
+			p1 := gins(x86.ALEAQ, &n2, &n3)
 			p1.From.Type = obj.TYPE_MEM
 			p1.From.Scale = int8(w)
 			p1.From.Index = p1.From.Reg
@@ -848,10 +865,6 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
  * The generated code checks that the result is not nil.
  */
 func agen(n *gc.Node, res *gc.Node) {
-	var nl *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nagen-res", res)
 		gc.Dump("agen-r", n)
@@ -865,15 +878,18 @@ func agen(n *gc.Node, res *gc.Node) {
 		n = n.Left
 	}
 
+	var nl *gc.Node
 	if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
 		// Use of a nil interface or nil slice.
 		// Create a temporary we can take the address of and read.
 		// The generated code is just going to panic, so it need not
 		// be terribly efficient. See issue 3670.
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 
 		gc.Gvardef(&n1)
 		clearfat(&n1)
+		var n2 gc.Node
 		regalloc(&n2, gc.Types[gc.Tptr], res)
 		gins(x86.ALEAQ, &n1, &n2)
 		gmove(&n2, res)
@@ -882,6 +898,7 @@ func agen(n *gc.Node, res *gc.Node) {
 	}
 
 	if n.Addable != 0 {
+		var n1 gc.Node
 		regalloc(&n1, gc.Types[gc.Tptr], res)
 		gins(x86.ALEAQ, n, &n1)
 		gmove(&n1, res)
@@ -912,16 +929,19 @@ func agen(n *gc.Node, res *gc.Node) {
 		gc.OSLICESTR,
 		gc.OSLICE3,
 		gc.OSLICE3ARR:
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 		gc.Cgen_slice(n, &n1)
 		agen(&n1, res)
 
 	case gc.OEFACE:
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 		gc.Cgen_eface(n, &n1)
 		agen(&n1, res)
 
 	case gc.OINDEX:
+		var n1 gc.Node
 		agenr(n, &n1, res)
 		gmove(&n1, res)
 		regfree(&n1)
@@ -975,10 +995,6 @@ ret:
  * The generated code checks that the result is not *nil.
  */
 func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
-	var fp *gc.Type
-	var flist gc.Iter
-	var n1 gc.Node
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nigen-n", n)
 	}
@@ -1030,7 +1046,8 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
 			cgen_callinter(n, nil, 0)
 		}
 
-		fp = gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
+		var flist gc.Iter
+		fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
 		*a = gc.Node{}
 		a.Op = gc.OINDREG
 		a.Val.U.Reg = x86.REG_SP
@@ -1050,6 +1067,7 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
 				if gc.Isptr[n.Left.Type.Etype] == 0 {
 					igen(n.Left, a, res)
 				} else {
+					var n1 gc.Node
 					igen(n.Left, &n1, res)
 					gc.Cgen_checknil(&n1)
 					regalloc(a, gc.Types[gc.Tptr], res)
@@ -1078,19 +1096,6 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
  *	if(n == true) goto to;
  */
 func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
-	var et int
-	var a int
-	var nl *gc.Node
-	var nr *gc.Node
-	var l *gc.Node
-	var r *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var tmp gc.Node
-	var ll *gc.NodeList
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nbgen", n)
 	}
@@ -1103,6 +1108,12 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		gc.Genlist(n.Ninit)
 	}
 
+	var a int
+	var et int
+	var nl *gc.Node
+	var n1 gc.Node
+	var nr *gc.Node
+	var n2 gc.Node
 	if n.Type == nil {
 		gc.Convlit(&n, gc.Types[gc.TBOOL])
 		if n.Type == nil {
@@ -1141,9 +1152,10 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		if n.Addable == 0 {
 			goto def
 		}
+		var n1 gc.Node
 		gc.Nodconst(&n1, n.Type, 0)
 		gins(optoas(gc.OCMP, n.Type), n, &n1)
-		a = x86.AJNE
+		a := x86.AJNE
 		if !true_ {
 			a = x86.AJEQ
 		}
@@ -1153,8 +1165,8 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 	case gc.OANDAND,
 		gc.OOROR:
 		if (n.Op == gc.OANDAND) == true_ {
-			p1 = gc.Gbranch(obj.AJMP, nil, 0)
-			p2 = gc.Gbranch(obj.AJMP, nil, 0)
+			p1 := gc.Gbranch(obj.AJMP, nil, 0)
+			p2 := gc.Gbranch(obj.AJMP, nil, 0)
 			gc.Patch(p1, gc.Pc)
 			bgen(n.Left, !true_, -likely, p2)
 			bgen(n.Right, !true_, -likely, p2)
@@ -1199,15 +1211,15 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		gc.OGT,
 		gc.OLE,
 		gc.OGE:
-		a = int(n.Op)
+		a := int(n.Op)
 		if !true_ {
 			if gc.Isfloat[nr.Type.Etype] != 0 {
 				// brcom is not valid on floats when NaN is involved.
-				p1 = gc.Gbranch(obj.AJMP, nil, 0)
+				p1 := gc.Gbranch(obj.AJMP, nil, 0)
 
-				p2 = gc.Gbranch(obj.AJMP, nil, 0)
+				p2 := gc.Gbranch(obj.AJMP, nil, 0)
 				gc.Patch(p1, gc.Pc)
-				ll = n.Ninit // avoid re-genning ninit
+				ll := n.Ninit // avoid re-genning ninit
 				n.Ninit = nil
 				bgen(n, true, -likely, p2)
 				n.Ninit = ll
@@ -1223,7 +1235,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		// make simplest on right
 		if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
 			a = gc.Brrev(a)
-			r = nl
+			r := nl
 			nl = nr
 			nr = r
 		}
@@ -1236,9 +1248,11 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 			}
 
 			a = optoas(a, gc.Types[gc.Tptr])
+			var n1 gc.Node
 			igen(nl, &n1, nil)
 			n1.Xoffset += int64(gc.Array_array)
 			n1.Type = gc.Types[gc.Tptr]
+			var tmp gc.Node
 			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
 			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
 			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
@@ -1254,8 +1268,10 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 			}
 
 			a = optoas(a, gc.Types[gc.Tptr])
+			var n1 gc.Node
 			igen(nl, &n1, nil)
 			n1.Type = gc.Types[gc.Tptr]
+			var tmp gc.Node
 			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
 			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
 			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
@@ -1268,10 +1284,13 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 			break
 		}
 
+		var n2 gc.Node
+		var n1 gc.Node
 		if nr.Ullman >= gc.UINF {
 			regalloc(&n1, nl.Type, nil)
 			cgen(nl, &n1)
 
+			var tmp gc.Node
 			gc.Tempname(&tmp, nl.Type)
 			gmove(&n1, &tmp)
 			regfree(&n1)
@@ -1300,9 +1319,9 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 
 		// only < and <= work right with NaN; reverse if needed
 	cmp:
-		l = &n1
+		l := &n1
 
-		r = &n2
+		r := &n2
 		if gc.Isfloat[nl.Type.Etype] != 0 && (a == gc.OGT || a == gc.OGE) {
 			l = &n2
 			r = &n1
@@ -1314,9 +1333,9 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		if gc.Isfloat[nr.Type.Etype] != 0 && (n.Op == gc.OEQ || n.Op == gc.ONE) {
 			if n.Op == gc.OEQ {
 				// neither NE nor P
-				p1 = gc.Gbranch(x86.AJNE, nil, -likely)
+				p1 := gc.Gbranch(x86.AJNE, nil, -likely)
 
-				p2 = gc.Gbranch(x86.AJPS, nil, -likely)
+				p2 := gc.Gbranch(x86.AJPS, nil, -likely)
 				gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
 				gc.Patch(p1, gc.Pc)
 				gc.Patch(p2, gc.Pc)
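
The JNE/JPS pair exists because x86 floating-point compares report NaN operands through the parity flag: equality must mean "not unequal and not unordered" (the "neither NE nor P" comment above), while inequality may take either exit. The semantics being preserved, in plain Go:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	fmt.Println(nan == nan) // false: unordered, so OEQ must not branch
	fmt.Println(nan != nan) // true: ONE branches on NE or on parity
}
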
@@ -1357,31 +1376,27 @@ ret:
  * return n's offset from SP.
  */
 func stkof(n *gc.Node) int64 {
-	var t *gc.Type
-	var flist gc.Iter
-	var off int64
-
 	switch n.Op {
 	case gc.OINDREG:
 		return n.Xoffset
 
 	case gc.ODOT:
-		t = n.Left.Type
+		t := n.Left.Type
 		if gc.Isptr[t.Etype] != 0 {
 			break
 		}
-		off = stkof(n.Left)
+		off := stkof(n.Left)
 		if off == -1000 || off == 1000 {
 			return off
 		}
 		return off + n.Xoffset
 
 	case gc.OINDEX:
-		t = n.Left.Type
+		t := n.Left.Type
 		if !gc.Isfixedarray(t) {
 			break
 		}
-		off = stkof(n.Left)
+		off := stkof(n.Left)
 		if off == -1000 || off == 1000 {
 			return off
 		}
@@ -1393,11 +1408,12 @@ func stkof(n *gc.Node) int64 {
 	case gc.OCALLMETH,
 		gc.OCALLINTER,
 		gc.OCALLFUNC:
-		t = n.Left.Type
+		t := n.Left.Type
 		if gc.Isptr[t.Etype] != 0 {
 			t = t.Type
 		}
 
+		var flist gc.Iter
 		t = gc.Structfirst(&flist, gc.Getoutarg(t))
 		if t != nil {
 			return t.Width
@@ -1414,20 +1430,6 @@ func stkof(n *gc.Node) int64 {
  *	memmove(&ns, &n, w);
  */
 func sgen(n *gc.Node, ns *gc.Node, w int64) {
-	var nodl gc.Node
-	var nodr gc.Node
-	var nodsi gc.Node
-	var noddi gc.Node
-	var cx gc.Node
-	var oldcx gc.Node
-	var tmp gc.Node
-	var c int64
-	var q int64
-	var odst int64
-	var osrc int64
-	var l *gc.NodeList
-	var p *obj.Prog
-
 	if gc.Debug['g'] != 0 {
 		fmt.Printf("\nsgen w=%d\n", w)
 		gc.Dump("r", n)
@@ -1445,7 +1447,7 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 	// If copying .args, that's all the results, so record definition sites
 	// for them for the liveness analysis.
 	if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
-		for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+		for l := gc.Curfn.Dcl; l != nil; l = l.Next {
 			if l.N.Class == gc.PPARAMOUT {
 				gc.Gvardef(l.N)
 			}
@@ -1459,6 +1461,7 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 
 	if w == 0 {
 		// evaluate side effects only
+		var nodr gc.Node
 		regalloc(&nodr, gc.Types[gc.Tptr], nil)
 
 		agen(ns, &nodr)
@@ -1468,15 +1471,16 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 	}
 
 	// offset on the stack
-	osrc = stkof(n)
+	osrc := stkof(n)
 
-	odst = stkof(ns)
+	odst := stkof(ns)
 
 	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
 		// osrc and odst both on stack, and at least one is in
 		// an unknown position.  Could generate code to test
 		// for forward/backward copy, but instead just copy
 		// to a temporary location first.
+		var tmp gc.Node
 		gc.Tempname(&tmp, n.Type)
 
 		sgen(n, &tmp, w)
@@ -1484,9 +1488,13 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 		return
 	}
 
+	var noddi gc.Node
 	gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
+	var nodsi gc.Node
 	gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)
 
+	var nodl gc.Node
+	var nodr gc.Node
 	if n.Ullman >= ns.Ullman {
 		agenr(n, &nodr, &nodsi)
 		if ns.Op == gc.ONAME {
@@ -1510,9 +1518,11 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 	regfree(&nodl)
 	regfree(&nodr)
 
-	c = w % 8 // bytes
-	q = w / 8 // quads
+	c := w % 8 // bytes
+	q := w / 8 // quads
 
+	var oldcx gc.Node
+	var cx gc.Node
 	savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])
 
 	// if we are copying forward on the stack and
@@ -1552,7 +1562,7 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 			gins(x86.AREP, nil, nil)   // repeat
 			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
 		} else if q >= 4 {
-			p = gins(obj.ADUFFCOPY, nil, nil)
+			p := gins(obj.ADUFFCOPY, nil, nil)
 			p.To.Type = obj.TYPE_ADDR
 			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
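
By this point sgen has split the width into q 8-byte quads and c trailing bytes (q := w / 8, c := w % 8) and picks a strategy by q: REP MOVSQ in the branch just above, the runtime's duffcopy entry for q >= 4, and (in the remaining branch, not shown here) unrolled moves for tiny copies. A sketch of the planning arithmetic, with the q >= 4 cutoff taken from the code:

package main

import "fmt"

func copyPlan(w int64) string {
	c := w % 8 // trailing bytes
	q := w / 8 // 8-byte quads
	if q >= 4 {
		return fmt.Sprintf("DUFFCOPY: %d quads + %d bytes", q, c)
	}
	return fmt.Sprintf("unrolled: %d quads + %d bytes", q, c)
}

func main() {
	fmt.Println(copyPlan(27)) // unrolled: 3 quads + 3 bytes
	fmt.Println(copyPlan(64)) // DUFFCOPY: 8 quads + 0 bytes
}
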
 
@@ -1643,23 +1653,16 @@ func cadable(n *gc.Node) bool {
 func componentgen(nr *gc.Node, nl *gc.Node) bool {
 	var nodl gc.Node
 	var nodr gc.Node
-	var tmp gc.Node
-	var t *gc.Type
-	var freel int
-	var freer int
-	var fldcount int64
-	var loffset int64
-	var roffset int64
 
-	freel = 0
-	freer = 0
+	freel := 0
+	freer := 0
 
 	switch nl.Type.Etype {
 	default:
 		goto no
 
 	case gc.TARRAY:
-		t = nl.Type
+		t := nl.Type
 
 		// Slices are ok.
 		if gc.Isslice(t) {
@@ -1676,9 +1679,9 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		// Small structs with non-fat types are ok.
 	// Zero-sized structs are treated separately elsewhere.
 	case gc.TSTRUCT:
-		fldcount = 0
+		fldcount := int64(0)
 
-		for t = nl.Type.Type; t != nil; t = t.Down {
+		for t := nl.Type.Type; t != nil; t = t.Down {
 			if gc.Isfat(t.Type) {
 				goto no
 			}
@@ -1714,6 +1717,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		}
 	} else {
 		// When zeroing, prepare a register containing zero.
+		var tmp gc.Node
 		gc.Nodconst(&tmp, nl.Type, 0)
 
 		regalloc(&nodr, gc.Types[gc.TUINT], nil)
@@ -1735,11 +1739,11 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		if nl.Op == gc.ONAME {
 			gc.Gvardef(nl)
 		}
-		t = nl.Type
+		t := nl.Type
 		if !gc.Isslice(t) {
 			nodl.Type = t.Type
 			nodr.Type = nodl.Type
-			for fldcount = 0; fldcount < t.Bound; fldcount++ {
+			for fldcount := int64(0); fldcount < t.Bound; fldcount++ {
 				if nr == nil {
 					gc.Clearslim(&nodl)
 				} else {
@@ -1842,8 +1846,8 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		if nl.Op == gc.ONAME {
 			gc.Gvardef(nl)
 		}
-		loffset = nodl.Xoffset
-		roffset = nodr.Xoffset
+		loffset := nodl.Xoffset
+		roffset := nodr.Xoffset
 
 		// funarg structs may not begin at offset zero.
 		if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
@@ -1853,7 +1857,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 			roffset -= nr.Type.Type.Width
 		}
 
-		for t = nl.Type.Type; t != nil; t = t.Down {
+		for t := nl.Type.Type; t != nil; t = t.Down {
 			nodl.Xoffset = loffset + t.Width
 			nodl.Type = t.Type
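
componentgen unrolls qualifying copies into one move per component instead of a block copy; for a slice that is exactly three word-sized moves, and for a small struct the per-field walk shown above. A sketch of the slice case, with the three-word header layout stated as an assumption:

package main

import "fmt"

type sliceHdr struct { // assumed 3-word slice header layout
	ptr      *byte
	len, cap int
}

func copyComponents(dst, src *sliceHdr) {
	dst.ptr = src.ptr // one move per component, no memmove
	dst.len = src.len
	dst.cap = src.cap
}

func main() {
	b := byte(7)
	s := sliceHdr{&b, 1, 1}
	var d sliceHdr
	copyComponents(&d, &s)
	fmt.Println(d.len, d.cap) // 1 1
}
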
 
diff --git a/src/cmd/6g/ggen.go b/src/cmd/6g/ggen.go
index be6ff2152e8e46c5902ec86bb38fd9e39b401f0f..d0c43d6db907a1aed264edace3e7c176fbeb7f41 100644
--- a/src/cmd/6g/ggen.go
+++ b/src/cmd/6g/ggen.go
@@ -11,32 +11,26 @@ import (
 import "cmd/internal/gc"
 
 func defframe(ptxt *obj.Prog) {
-	var frame uint32
-	var ax uint32
-	var p *obj.Prog
-	var hi int64
-	var lo int64
-	var l *gc.NodeList
 	var n *gc.Node
 
 	// fill in argument size, stack size
 	ptxt.To.Type = obj.TYPE_TEXTSIZE
 
 	ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
-	frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
 	ptxt.To.Offset = int64(frame)
 
 	// insert code to zero ambiguously live variables
 	// so that the garbage collector only sees initialized values
 	// when it looks for pointers.
-	p = ptxt
+	p := ptxt
 
-	hi = 0
-	lo = hi
-	ax = 0
+	hi := int64(0)
+	lo := hi
+	ax := uint32(0)
 
 	// iterate through declarations - they are sorted in decreasing xoffset order.
-	for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+	for l := gc.Curfn.Dcl; l != nil; l = l.Next {
 		n = l.N
 		if n.Needzero == 0 {
 			continue
@@ -69,10 +63,7 @@ func defframe(ptxt *obj.Prog) {
 }
 
 func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
-	var cnt int64
-	var i int64
-
-	cnt = hi - lo
+	cnt := hi - lo
 	if cnt == 0 {
 		return p
 	}
@@ -92,7 +83,7 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Pr
 	}
 
 	if cnt <= int64(4*gc.Widthreg) {
-		for i = 0; i < cnt; i += int64(gc.Widthreg) {
+		for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
 			p = appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i)
 		}
 	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthreg)) {
@@ -110,8 +101,7 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Pr
 }
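
Restated in plain Go, the strategy ladder zerorange chooses from, with the thresholds taken from the code and Widthreg being 8 on amd64. Only the cutoffs are from the source; the mid branch being DUFFZERO and the fallback being REP STOSQ are assumptions here, and the NaCl special case is omitted:

package main

import "fmt"

func zeroStrategy(cnt int64) string {
	const widthreg = 8
	switch {
	case cnt == 0:
		return "nothing"
	case cnt <= 4*widthreg:
		return "unrolled MOVQ stores"
	case cnt <= 128*widthreg:
		return "DUFFZERO"
	default:
		return "REP STOSQ"
	}
}

func main() {
	fmt.Println(zeroStrategy(24))   // unrolled MOVQ stores
	fmt.Println(zeroStrategy(512))  // DUFFZERO
	fmt.Println(zeroStrategy(4096)) // REP STOSQ
}
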
 
 func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
-	var q *obj.Prog
-	q = gc.Ctxt.NewProg()
+	q := gc.Ctxt.NewProg()
 	gc.Clearp(q)
 	q.As = int16(as)
 	q.Lineno = p.Lineno
@@ -136,14 +126,8 @@ func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int
   *	proc=3	normal call to C pointer (not Go func value)
 */
 func ginscall(f *gc.Node, proc int) {
-	var p *obj.Prog
-	var reg gc.Node
-	var stk gc.Node
-	var r1 gc.Node
-	var extra int32
-
 	if f.Type != nil {
-		extra = 0
+		extra := int32(0)
 		if proc == 1 || proc == 2 {
 			extra = 2 * int32(gc.Widthptr)
 		}
@@ -167,12 +151,13 @@ func ginscall(f *gc.Node, proc int) {
 				// x86 NOP 0x90 is really XCHG AX, AX; use that description
 				// because the NOP pseudo-instruction would be removed by
 				// the linker.
+				var reg gc.Node
 				gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
 
 				gins(x86.AXCHGL, &reg, &reg)
 			}
 
-			p = gins(obj.ACALL, nil, f)
+			p := gins(obj.ACALL, nil, f)
 			gc.Afunclit(&p.To, f)
 			if proc == -1 || gc.Noreturn(p) {
 				gins(obj.AUNDEF, nil, nil)
@@ -180,7 +165,9 @@ func ginscall(f *gc.Node, proc int) {
 			break
 		}
 
+		var reg gc.Node
 		gc.Nodreg(&reg, gc.Types[gc.Tptr], x86.REG_DX)
+		var r1 gc.Node
 		gc.Nodreg(&r1, gc.Types[gc.Tptr], x86.REG_BX)
 		gmove(f, &reg)
 		reg.Op = gc.OINDREG
@@ -193,12 +180,13 @@ func ginscall(f *gc.Node, proc int) {
 
 	case 1, // call in new proc (go)
 		2: // deferred call (defer)
-		stk = gc.Node{}
+		stk := gc.Node{}
 
 		stk.Op = gc.OINDREG
 		stk.Val.U.Reg = x86.REG_SP
 		stk.Xoffset = 0
 
+		var reg gc.Node
 		if gc.Widthptr == 8 {
 			// size of arguments at 0(SP)
 			ginscon(x86.AMOVQ, int64(gc.Argsize(f.Type)), &stk)
@@ -233,7 +221,7 @@ func ginscall(f *gc.Node, proc int) {
 		if proc == 2 {
 			gc.Nodreg(&reg, gc.Types[gc.TINT32], x86.REG_AX)
 			gins(x86.ATESTL, &reg, &reg)
-			p = gc.Gbranch(x86.AJEQ, nil, +1)
+			p := gc.Gbranch(x86.AJEQ, nil, +1)
 			cgen_ret(nil)
 			gc.Patch(p, gc.Pc)
 		}
@@ -245,20 +233,12 @@ func ginscall(f *gc.Node, proc int) {
  * generate res = n.
  */
 func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
-	var i *gc.Node
-	var f *gc.Node
-	var tmpi gc.Node
-	var nodi gc.Node
-	var nodo gc.Node
-	var nodr gc.Node
-	var nodsp gc.Node
-
-	i = n.Left
+	i := n.Left
 	if i.Op != gc.ODOTINTER {
 		gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
 	}
 
-	f = i.Right // field
+	f := i.Right // field
 	if f.Op != gc.ONAME {
 		gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
 	}
@@ -266,6 +246,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 	i = i.Left // interface
 
 	if i.Addable == 0 {
+		var tmpi gc.Node
 		gc.Tempname(&tmpi, i.Type)
 		cgen(i, &tmpi)
 		i = &tmpi
@@ -275,8 +256,10 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 
 	// i is now addable, prepare an indirected
 	// register to hold its address.
+	var nodi gc.Node
 	igen(i, &nodi, res) // REG = &inter
 
+	var nodsp gc.Node
 	gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], x86.REG_SP)
 
 	nodsp.Xoffset = 0
@@ -287,6 +270,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 	nodi.Xoffset += int64(gc.Widthptr)
 	cgen(&nodi, &nodsp) // {0, 8(nacl), or 16}(SP) = 8(REG) -- i.data
 
+	var nodo gc.Node
 	regalloc(&nodo, gc.Types[gc.Tptr], res)
 
 	nodi.Type = gc.Types[gc.Tptr]
@@ -294,6 +278,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 	cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
 	regfree(&nodi)
 
+	var nodr gc.Node
 	regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
 	if n.Left.Xoffset == gc.BADWIDTH {
 		gc.Fatal("cgen_callinter: badwidth")
@@ -324,14 +309,11 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
  *	proc=2	defer call save away stack
  */
 func cgen_call(n *gc.Node, proc int) {
-	var t *gc.Type
-	var nod gc.Node
-	var afun gc.Node
-
 	if n == nil {
 		return
 	}
 
+	var afun gc.Node
 	if n.Left.Ullman >= gc.UINF {
 		// if name involves a fn call
 		// precompute the address of the fn
@@ -341,10 +323,11 @@ func cgen_call(n *gc.Node, proc int) {
 	}
 
 	gc.Genlist(n.List) // assign the args
-	t = n.Left.Type
+	t := n.Left.Type
 
 	// call tempname pointer
 	if n.Left.Ullman >= gc.UINF {
+		var nod gc.Node
 		regalloc(&nod, gc.Types[gc.Tptr], nil)
 		gc.Cgen_as(&nod, &afun)
 		nod.Type = t
@@ -355,6 +338,7 @@ func cgen_call(n *gc.Node, proc int) {
 
 	// call pointer
 	if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+		var nod gc.Node
 		regalloc(&nod, gc.Types[gc.Tptr], nil)
 		gc.Cgen_as(&nod, n.Left)
 		nod.Type = t
@@ -375,22 +359,18 @@ func cgen_call(n *gc.Node, proc int) {
  *	res = return value from call.
  */
 func cgen_callret(n *gc.Node, res *gc.Node) {
-	var nod gc.Node
-	var fp *gc.Type
-	var t *gc.Type
-	var flist gc.Iter
-
-	t = n.Left.Type
+	t := n.Left.Type
 	if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
 		t = t.Type
 	}
 
-	fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+	var flist gc.Iter
+	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
 	if fp == nil {
 		gc.Fatal("cgen_callret: nil")
 	}
 
-	nod = gc.Node{}
+	nod := gc.Node{}
 	nod.Op = gc.OINDREG
 	nod.Val.U.Reg = x86.REG_SP
 	nod.Addable = 1
@@ -406,23 +386,18 @@ func cgen_callret(n *gc.Node, res *gc.Node) {
  *	res = &return value from call.
  */
 func cgen_aret(n *gc.Node, res *gc.Node) {
-	var nod1 gc.Node
-	var nod2 gc.Node
-	var fp *gc.Type
-	var t *gc.Type
-	var flist gc.Iter
-
-	t = n.Left.Type
+	t := n.Left.Type
 	if gc.Isptr[t.Etype] != 0 {
 		t = t.Type
 	}
 
-	fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+	var flist gc.Iter
+	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
 	if fp == nil {
 		gc.Fatal("cgen_aret: nil")
 	}
 
-	nod1 = gc.Node{}
+	nod1 := gc.Node{}
 	nod1.Op = gc.OINDREG
 	nod1.Val.U.Reg = x86.REG_SP
 	nod1.Addable = 1
@@ -431,6 +406,7 @@ func cgen_aret(n *gc.Node, res *gc.Node) {
 	nod1.Type = fp.Type
 
 	if res.Op != gc.OREGISTER {
+		var nod2 gc.Node
 		regalloc(&nod2, gc.Types[gc.Tptr], res)
 		gins(leaptr, &nod1, &nod2)
 		gins(movptr, &nod2, res)
@@ -445,8 +421,6 @@ func cgen_aret(n *gc.Node, res *gc.Node) {
  * n->left is assignments to return values.
  */
 func cgen_ret(n *gc.Node) {
-	var p *obj.Prog
-
 	if n != nil {
 		gc.Genlist(n.List) // copy out args
 	}
@@ -454,7 +428,7 @@ func cgen_ret(n *gc.Node) {
 		ginscall(gc.Deferreturn, 0)
 	}
 	gc.Genlist(gc.Curfn.Exit)
-	p = gins(obj.ARET, nil, nil)
+	p := gins(obj.ARET, nil, nil)
 	if n != nil && n.Op == gc.ORETJMP {
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
@@ -470,21 +444,6 @@ func cgen_ret(n *gc.Node) {
  * according to op.
  */
 func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	var a int
-	var check int
-	var n3 gc.Node
-	var n4 gc.Node
-	var t *gc.Type
-	var t0 *gc.Type
-	var ax gc.Node
-	var dx gc.Node
-	var ax1 gc.Node
-	var n31 gc.Node
-	var oldax gc.Node
-	var olddx gc.Node
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-
 	// Have to be careful about handling
 	// most negative int divided by -1 correctly.
 	// The hardware will trap.
@@ -493,10 +452,10 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	// Easiest way to avoid for int8, int16: use int32.
 	// For int32 and int64, use explicit test.
 	// Could use int64 hw for int32.
-	t = nl.Type
+	t := nl.Type
 
-	t0 = t
-	check = 0
+	t0 := t
+	check := 0
 	if gc.Issigned[t.Etype] != 0 {
 		check = 1
 		if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
@@ -515,9 +474,12 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 		check = 0
 	}
 
-	a = optoas(op, t)
+	a := optoas(op, t)
 
+	var n3 gc.Node
 	regalloc(&n3, t0, nil)
+	var ax gc.Node
+	var oldax gc.Node
 	if nl.Ullman >= nr.Ullman {
 		savex(x86.REG_AX, &ax, &oldax, res, t0)
 		cgen(nl, &ax)
@@ -532,16 +494,17 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 
 	if t != t0 {
 		// Convert
-		ax1 = ax
+		ax1 := ax
 
-		n31 = n3
+		n31 := n3
 		ax.Type = t
 		n3.Type = t
 		gmove(&ax1, &ax)
 		gmove(&n31, &n3)
 	}
 
-	p2 = nil
+	p2 := (*obj.Prog)(nil)
+	var n4 gc.Node
 	if gc.Nacl {
 		// Native Client does not relay the divide-by-zero trap
 		// to the executing program, so we must insert a check
@@ -549,7 +512,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 		gc.Nodconst(&n4, t, 0)
 
 		gins(optoas(gc.OCMP, t), &n3, &n4)
-		p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
 		if panicdiv == nil {
 			panicdiv = gc.Sysfunc("panicdivide")
 		}
@@ -560,7 +523,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	if check != 0 {
 		gc.Nodconst(&n4, t, -1)
 		gins(optoas(gc.OCMP, t), &n3, &n4)
-		p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
 		if op == gc.ODIV {
 			// a / (-1) is -a.
 			gins(optoas(gc.OMINUS, t), nil, &ax)
@@ -577,6 +540,8 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 		gc.Patch(p1, gc.Pc)
 	}
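
The check path above preserves Go's defined wraparound for the one case where the hardware divide would trap: the most negative integer divided by -1. The identities the generated code relies on, verifiable in plain Go:

package main

import (
	"fmt"
	"math"
)

func main() {
	a, b := int64(math.MinInt64), int64(-1)
	fmt.Println(a/b == -a) // true: a / (-1) is -a, which wraps back to MinInt64
	fmt.Println(a % b)     // 0: a % (-1) is always 0
}
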
 
+	var olddx gc.Node
+	var dx gc.Node
 	savex(x86.REG_DX, &dx, &olddx, res, t)
 	if gc.Issigned[t.Etype] == 0 {
 		gc.Nodconst(&n4, t, 0)
@@ -609,9 +574,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
  * known to be dead.
  */
 func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
-	var r int
-
-	r = int(reg[dr])
+	r := int(reg[dr])
 
 	// save current ax and dx if they are live
 	// and not the destination
@@ -643,12 +606,7 @@ func restx(x *gc.Node, oldx *gc.Node) {
  *	res = nl % nr
  */
 func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
 	var w int
-	var a int
-	var m gc.Magic
 
 	if nr.Op != gc.OLITERAL {
 		goto longdiv
@@ -663,6 +621,7 @@ func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 		goto longdiv
 
 	case gc.TUINT64:
+		var m gc.Magic
 		m.W = w
 		m.Ud = uint64(gc.Mpgetfix(nr.Val.U.Xval))
 		gc.Umagic(&m)
@@ -673,8 +632,11 @@ func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 			goto longmod
 		}
 
+		var n1 gc.Node
 		cgenr(nl, &n1, nil)
+		var n2 gc.Node
 		gc.Nodconst(&n2, nl.Type, int64(m.Um))
+		var n3 gc.Node
 		regalloc(&n3, nl.Type, res)
 		cgen_hmul(&n1, &n2, &n3)
 
@@ -697,6 +659,7 @@ func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 		return
 
 	case gc.TINT64:
+		var m gc.Magic
 		m.W = w
 		m.Sd = gc.Mpgetfix(nr.Val.U.Xval)
 		gc.Smagic(&m)
@@ -707,8 +670,11 @@ func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 			goto longmod
 		}
 
+		var n1 gc.Node
 		cgenr(nl, &n1, res)
+		var n2 gc.Node
 		gc.Nodconst(&n2, nl.Type, m.Sm)
+		var n3 gc.Node
 		regalloc(&n3, nl.Type, nil)
 		cgen_hmul(&n1, &n2, &n3)
 
@@ -748,12 +714,14 @@ longdiv:
 	// mod using formula A%B = A-(A/B*B) but
 	// we know that there is a fast algorithm for A/B
 longmod:
+	var n1 gc.Node
 	regalloc(&n1, nl.Type, res)
 
 	cgen(nl, &n1)
+	var n2 gc.Node
 	regalloc(&n2, nl.Type, nil)
 	cgen_div(gc.ODIV, &n1, nr, &n2)
-	a = optoas(gc.OMUL, nl.Type)
+	a := optoas(gc.OMUL, nl.Type)
 	if w == 8 {
 		// use 2-operand 16-bit multiply
 		// because there is no 2-operand 8-bit multiply
@@ -761,6 +729,7 @@ longmod:
 	}
 
 	if !gc.Smallintconst(nr) {
+		var n3 gc.Node
 		regalloc(&n3, nl.Type, nil)
 		cgen(nr, &n3)
 		gins(a, &n3, &n2)
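
Aside: Umagic/Smagic turn division by a constant into a multiply-high plus shifts, and longmod then recovers the remainder from A%B = A-(A/B*B). A source-level sketch for divisor 3 (0xAAAAAAAB = ceil(2^33/3); helper names are illustrative, not from this CL):

	// unsigned divide by the constant 3 with no DIV instruction
	func div3(x uint32) uint32 {
		return uint32(uint64(x) * 0xAAAAAAAB >> 33)
	}

	// remainder via the formula above: A%B = A - (A/B)*B
	func mod3(x uint32) uint32 {
		return x - div3(x)*3
	}
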
@@ -779,30 +748,26 @@ longmod:
  *   res = (nl*nr) >> width
  */
 func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	var t *gc.Type
-	var a int
-	var n1 gc.Node
-	var n2 gc.Node
-	var ax gc.Node
-	var dx gc.Node
-	var tmp *gc.Node
-
-	t = nl.Type
-	a = optoas(gc.OHMUL, t)
+	t := nl.Type
+	a := optoas(gc.OHMUL, t)
 	if nl.Ullman < nr.Ullman {
-		tmp = nl
+		tmp := nl
 		nl = nr
 		nr = tmp
 	}
 
+	var n1 gc.Node
 	cgenr(nl, &n1, res)
+	var n2 gc.Node
 	cgenr(nr, &n2, nil)
+	var ax gc.Node
 	gc.Nodreg(&ax, t, x86.REG_AX)
 	gmove(&n1, &ax)
 	gins(a, &n2, nil)
 	regfree(&n2)
 	regfree(&n1)
 
+	var dx gc.Node
 	if t.Width == 1 {
 		// byte multiply behaves differently.
 		gc.Nodreg(&ax, t, x86.REG_AH)
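
Aside: OHMUL is the "high multiply" named in the function comment — the upper half of the double-width product. In source terms (sketch):

	// high 32 bits of the 64-bit product of two 32-bit values
	func hmul32(x, y uint32) uint32 {
		return uint32(uint64(x) * uint64(y) >> 32)
	}
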
@@ -824,24 +789,21 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	var n1 gc.Node
 	var n2 gc.Node
 	var n3 gc.Node
-	var n4 gc.Node
-	var n5 gc.Node
 	var cx gc.Node
 	var oldcx gc.Node
-	var a int
 	var rcx int
-	var p1 *obj.Prog
-	var sc uint64
 	var tcount *gc.Type
 
-	a = optoas(op, nl.Type)
+	a := optoas(op, nl.Type)
 
 	if nr.Op == gc.OLITERAL {
+		var n1 gc.Node
 		regalloc(&n1, nl.Type, res)
 		cgen(nl, &n1)
-		sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+		sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
 		if sc >= uint64(nl.Type.Width*8) {
 			// large shift gets 2 shifts by width-1
+			var n3 gc.Node
 			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
 
 			gins(a, &n3, &n1)
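
Aside: two shifts are needed because x86 masks shift counts (mod 32 for 32-bit operands), so a single shift by >= width would leave the value partly intact instead of clearing or sign-filling it. Sketch:

	// what the pair of width-1 shifts computes for an unsigned value
	func shiftAllOut(x uint32) uint32 {
		return x >> 31 >> 31 // 0, which is what a shift by >= 32 should give
	}
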
@@ -855,12 +817,14 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	}
 
 	if nl.Ullman >= gc.UINF {
+		var n4 gc.Node
 		gc.Tempname(&n4, nl.Type)
 		cgen(nl, &n4)
 		nl = &n4
 	}
 
 	if nr.Ullman >= gc.UINF {
+		var n5 gc.Node
 		gc.Tempname(&n5, nr.Type)
 		cgen(nr, &n5)
 		nr = &n5
@@ -912,7 +876,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	if !bounded {
 		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
 		gins(optoas(gc.OCMP, tcount), &n1, &n3)
-		p1 = gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
+		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
 		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
 			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
 			gins(a, &n3, &n2)
@@ -947,37 +911,33 @@ ret:
  * we do a full-width multiplication and truncate afterwards.
  */
 func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	var n1 gc.Node
-	var n2 gc.Node
-	var n1b gc.Node
-	var n2b gc.Node
-	var tmp *gc.Node
-	var t *gc.Type
-	var a int
-
 	// largest ullman on left.
 	if nl.Ullman < nr.Ullman {
-		tmp = nl
+		tmp := nl
 		nl = nr
 		nr = tmp
 	}
 
 	// generate operands in "8-bit" registers.
+	var n1b gc.Node
 	regalloc(&n1b, nl.Type, res)
 
 	cgen(nl, &n1b)
+	var n2b gc.Node
 	regalloc(&n2b, nr.Type, nil)
 	cgen(nr, &n2b)
 
 	// perform full-width multiplication.
-	t = gc.Types[gc.TUINT64]
+	t := gc.Types[gc.TUINT64]
 
 	if gc.Issigned[nl.Type.Etype] != 0 {
 		t = gc.Types[gc.TINT64]
 	}
+	var n1 gc.Node
 	gc.Nodreg(&n1, t, int(n1b.Val.U.Reg))
+	var n2 gc.Node
 	gc.Nodreg(&n2, t, int(n2b.Val.U.Reg))
-	a = optoas(op, t)
+	a := optoas(op, t)
 	gins(a, &n2, &n1)
 
 	// truncate.
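
Aside: the same discipline at source level (sketch): widen, multiply at full width, truncate — the low bits of the product are identical either way.

	func mul8(x, y int8) int8 {
		return int8(int16(x) * int16(y))
	}
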
@@ -988,31 +948,20 @@ func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 }
 
 func clearfat(nl *gc.Node) {
-	var w int64
-	var c int64
-	var q int64
-	var n1 gc.Node
-	var oldn1 gc.Node
-	var ax gc.Node
-	var oldax gc.Node
-	var di gc.Node
-	var z gc.Node
-	var p *obj.Prog
-
 	/* clear a fat object */
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nclearfat", nl)
 	}
 
-	w = nl.Type.Width
+	w := nl.Type.Width
 
 	// Avoid taking the address for simple enough types.
 	if componentgen(nil, nl) {
 		return
 	}
 
-	c = w % 8 // bytes
-	q = w / 8 // quads
+	c := w % 8 // bytes
+	q := w / 8 // quads
 
 	if q < 4 {
 		// Write sequence of MOV 0, off(base) instead of using STOSQ.
@@ -1021,9 +970,11 @@ func clearfat(nl *gc.Node) {
 		// than the unrolled STOSQ loop.
 		// NOTE: Must use agen, not igen, so that optimizer sees address
 		// being taken. We are not writing on field boundaries.
+		var n1 gc.Node
 		agenr(nl, &n1, nil)
 
 		n1.Op = gc.OINDREG
+		var z gc.Node
 		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
 		for {
 			tmp14 := q
@@ -1060,9 +1011,13 @@ func clearfat(nl *gc.Node) {
 		return
 	}
 
+	var oldn1 gc.Node
+	var n1 gc.Node
 	savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
 	agen(nl, &n1)
 
+	var ax gc.Node
+	var oldax gc.Node
 	savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
 	gconreg(x86.AMOVL, 0, x86.REG_AX)
 
@@ -1071,7 +1026,7 @@ func clearfat(nl *gc.Node) {
 		gins(x86.AREP, nil, nil)   // repeat
 		gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
 	} else {
-		p = gins(obj.ADUFFZERO, nil, nil)
+		p := gins(obj.ADUFFZERO, nil, nil)
 		p.To.Type = obj.TYPE_ADDR
 		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
 
@@ -1079,22 +1034,22 @@ func clearfat(nl *gc.Node) {
 		p.To.Offset = 2 * (128 - q)
 	}
 
-	z = ax
-	di = n1
+	z := ax
+	di := n1
 	if w >= 8 && c >= 4 {
 		di.Op = gc.OINDREG
 		z.Type = gc.Types[gc.TINT64]
 		di.Type = z.Type
-		p = gins(x86.AMOVQ, &z, &di)
+		p := gins(x86.AMOVQ, &z, &di)
 		p.To.Scale = 1
 		p.To.Offset = c - 8
 	} else if c >= 4 {
 		di.Op = gc.OINDREG
 		z.Type = gc.Types[gc.TINT32]
 		di.Type = z.Type
-		p = gins(x86.AMOVL, &z, &di)
+		gins(x86.AMOVL, &z, &di)
 		if c > 4 {
-			p = gins(x86.AMOVL, &z, &di)
+			p := gins(x86.AMOVL, &z, &di)
 			p.To.Scale = 1
 			p.To.Offset = c - 4
 		}
@@ -1112,11 +1067,10 @@ func clearfat(nl *gc.Node) {
 // Called after regopt and peep have run.
 // Expand CHECKNIL pseudo-op into actual nil pointer check.
 func expandchecks(firstp *obj.Prog) {
-	var p *obj.Prog
 	var p1 *obj.Prog
 	var p2 *obj.Prog
 
-	for p = firstp; p != nil; p = p.Link {
+	for p := firstp; p != nil; p = p.Link {
 		if p.As != obj.ACHECKNIL {
 			continue
 		}
diff --git a/src/cmd/6g/gsubr.go b/src/cmd/6g/gsubr.go
index c440f8c5f7a5cb4fb67fa3da496970a98e0cc7e0..a8e47d3fd7520a05129aa014c4ad80fbd7652d9d 100644
--- a/src/cmd/6g/gsubr.go
+++ b/src/cmd/6g/gsubr.go
@@ -53,19 +53,17 @@ var resvd = []int{
 }
 
 func ginit() {
-	var i int
-
-	for i = 0; i < len(reg); i++ {
+	for i := 0; i < len(reg); i++ {
 		reg[i] = 1
 	}
-	for i = x86.REG_AX; i <= x86.REG_R15; i++ {
+	for i := x86.REG_AX; i <= x86.REG_R15; i++ {
 		reg[i] = 0
 	}
-	for i = x86.REG_X0; i <= x86.REG_X15; i++ {
+	for i := x86.REG_X0; i <= x86.REG_X15; i++ {
 		reg[i] = 0
 	}
 
-	for i = 0; i < len(resvd); i++ {
+	for i := 0; i < len(resvd); i++ {
 		reg[resvd[i]]++
 	}
 
@@ -79,9 +77,7 @@ func ginit() {
 }
 
 func gclean() {
-	var i int
-
-	for i = 0; i < len(resvd); i++ {
+	for i := 0; i < len(resvd); i++ {
 		reg[resvd[i]]--
 	}
 	if gc.Nacl {
@@ -91,12 +87,12 @@ func gclean() {
 		reg[x86.REG_BP]--
 	}
 
-	for i = x86.REG_AX; i <= x86.REG_R15; i++ {
+	for i := x86.REG_AX; i <= x86.REG_R15; i++ {
 		if reg[i] != 0 {
 			gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
 		}
 	}
-	for i = x86.REG_X0; i <= x86.REG_X15; i++ {
+	for i := x86.REG_X0; i <= x86.REG_X15; i++ {
 		if reg[i] != 0 {
 			gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
 		}
@@ -104,10 +100,9 @@ func gclean() {
 }
 
 func anyregalloc() bool {
-	var i int
 	var j int
 
-	for i = x86.REG_AX; i <= x86.REG_R15; i++ {
+	for i := x86.REG_AX; i <= x86.REG_R15; i++ {
 		if reg[i] == 0 {
 			goto ok
 		}
@@ -131,14 +126,12 @@ var regpc [x86.REG_R15 + 1 - x86.REG_AX]uint32
  * caller must regfree(n).
  */
 func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
-	var i int
-	var et int
-
 	if t == nil {
 		gc.Fatal("regalloc: t nil")
 	}
-	et = int(gc.Simtype[t.Etype])
+	et := int(gc.Simtype[t.Etype])
 
+	var i int
 	switch et {
 	case gc.TINT8,
 		gc.TUINT8,
@@ -166,7 +159,7 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
 		}
 
 		gc.Flusherrors()
-		for i = 0; i+x86.REG_AX <= x86.REG_R15; i++ {
+		for i := 0; i+x86.REG_AX <= x86.REG_R15; i++ {
 			fmt.Printf("%d %p\n", i, regpc[i])
 		}
 		gc.Fatal("out of fixed registers")
@@ -202,15 +195,13 @@ out:
 }
 
 func regfree(n *gc.Node) {
-	var i int
-
 	if n.Op == gc.ONAME {
 		return
 	}
 	if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
 		gc.Fatal("regfree: not a register")
 	}
-	i = int(n.Val.U.Reg)
+	i := int(n.Val.U.Reg)
 	if i == x86.REG_SP {
 		return
 	}
@@ -252,7 +243,6 @@ func gconreg(as int, c int64, reg int) {
  */
 func ginscon(as int, c int64, n2 *gc.Node) {
 	var n1 gc.Node
-	var ntmp gc.Node
 
 	switch as {
 	case x86.AADDL,
@@ -267,6 +257,7 @@ func ginscon(as int, c int64, n2 *gc.Node) {
 	if as != x86.AMOVQ && (c < -(1<<31) || c >= 1<<31) {
 		// cannot have 64-bit immediate in ADD, etc.
 		// instead, MOV into register first.
+		var ntmp gc.Node
 		regalloc(&ntmp, gc.Types[gc.TINT64], nil)
 
 		gins(x86.AMOVQ, &n1, &ntmp)
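
Aside: the guard above is exactly the encodability test for x86-64 ALU immediates, which are sign-extended from 32 bits. As a predicate (sketch, illustrative name):

	// true if c can be an immediate operand of ADDQ/CMPQ/etc.
	func fitsImm32(c int64) bool {
		return c >= -(1 << 31) && c < 1<<31
	}
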
@@ -309,27 +300,13 @@ func bignodes() {
  * hard part is conversions.
  */
 func gmove(f *gc.Node, t *gc.Node) {
-	var a int
-	var ft int
-	var tt int
-	var cvt *gc.Type
-	var r1 gc.Node
-	var r2 gc.Node
-	var r3 gc.Node
-	var r4 gc.Node
-	var zero gc.Node
-	var one gc.Node
-	var con gc.Node
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-
 	if gc.Debug['M'] != 0 {
 		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
 	}
 
-	ft = gc.Simsimtype(f.Type)
-	tt = gc.Simsimtype(t.Type)
-	cvt = t.Type
+	ft := gc.Simsimtype(f.Type)
+	tt := gc.Simsimtype(t.Type)
+	cvt := t.Type
 
 	if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
 		gc.Complexmove(f, t)
@@ -337,12 +314,15 @@ func gmove(f *gc.Node, t *gc.Node) {
 	}
 
 	// cannot have two memory operands
+	var r1 gc.Node
+	var a int
 	if gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
 	}
 
 	// convert constant to desired type
 	if f.Op == gc.OLITERAL {
+		var con gc.Node
 		gc.Convconst(&con, t.Type, &f.Val)
 		f = &con
 		ft = tt // so big switch will choose a simple mov
@@ -550,21 +530,25 @@ func gmove(f *gc.Node, t *gc.Node) {
 	//	otherwise, subtract 2^63, convert, and add it back.
 	case gc.TFLOAT32<<16 | gc.TUINT64,
 		gc.TFLOAT64<<16 | gc.TUINT64:
-		a = x86.ACVTTSS2SQ
+		a := x86.ACVTTSS2SQ
 
 		if ft == gc.TFLOAT64 {
 			a = x86.ACVTTSD2SQ
 		}
 		bignodes()
+		var r1 gc.Node
 		regalloc(&r1, gc.Types[ft], nil)
+		var r2 gc.Node
 		regalloc(&r2, gc.Types[tt], t)
+		var r3 gc.Node
 		regalloc(&r3, gc.Types[ft], nil)
+		var r4 gc.Node
 		regalloc(&r4, gc.Types[tt], nil)
 		gins(optoas(gc.OAS, f.Type), f, &r1)
 		gins(optoas(gc.OCMP, f.Type), &bigf, &r1)
-		p1 = gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
+		p1 := gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
 		gins(a, &r1, &r2)
-		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		p2 := gc.Gbranch(obj.AJMP, nil, 0)
 		gc.Patch(p1, gc.Pc)
 		gins(optoas(gc.OAS, f.Type), &bigf, &r3)
 		gins(optoas(gc.OSUB, f.Type), &r3, &r1)
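
Aside: the conversion the comment describes, in source terms (sketch; two63 is 2^63 as a float64, names illustrative):

	const two63 = 9223372036854775808.0

	func ftou64(f float64) uint64 {
		if f < two63 {
			return uint64(int64(f)) // fits int64: one truncating convert
		}
		// too big for int64: subtract 2^63, convert, add it back
		return uint64(int64(f-two63)) + 1<<63
	}
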
@@ -624,22 +608,28 @@ func gmove(f *gc.Node, t *gc.Node) {
 	//	otherwise, halve (rounding to odd?), convert, and double.
 	case gc.TUINT64<<16 | gc.TFLOAT32,
 		gc.TUINT64<<16 | gc.TFLOAT64:
-		a = x86.ACVTSQ2SS
+		a := x86.ACVTSQ2SS
 
 		if tt == gc.TFLOAT64 {
 			a = x86.ACVTSQ2SD
 		}
+		var zero gc.Node
 		gc.Nodconst(&zero, gc.Types[gc.TUINT64], 0)
+		var one gc.Node
 		gc.Nodconst(&one, gc.Types[gc.TUINT64], 1)
+		var r1 gc.Node
 		regalloc(&r1, f.Type, f)
+		var r2 gc.Node
 		regalloc(&r2, t.Type, t)
+		var r3 gc.Node
 		regalloc(&r3, f.Type, nil)
+		var r4 gc.Node
 		regalloc(&r4, f.Type, nil)
 		gmove(f, &r1)
 		gins(x86.ACMPQ, &r1, &zero)
-		p1 = gc.Gbranch(x86.AJLT, nil, +1)
+		p1 := gc.Gbranch(x86.AJLT, nil, +1)
 		gins(a, &r1, &r2)
-		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		p2 := gc.Gbranch(obj.AJMP, nil, 0)
 		gc.Patch(p1, gc.Pc)
 		gmove(&r1, &r3)
 		gins(x86.ASHRQ, &one, &r3)
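
Aside: the reverse direction per the comment above — halve with the low bit folded back in (round to odd), convert, double. Sketch:

	func u64tof(u uint64) float64 {
		if int64(u) >= 0 {
			return float64(int64(u)) // top bit clear: convert directly
		}
		half := u>>1 | u&1 // keep the low bit so the final doubling rounds correctly
		return 2 * float64(int64(half))
	}
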
@@ -717,13 +707,8 @@ func samaddr(f *gc.Node, t *gc.Node) bool {
  *	as f, t
  */
 func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
-	var w int32
-	var p *obj.Prog
 	//	Node nod;
 
-	var af obj.Addr
-	var at obj.Addr
-
 	//	if(f != N && f->op == OINDEX) {
 	//		regalloc(&nod, &regnode, Z);
 	//		v = constnode.vconst;
@@ -758,15 +743,15 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 		}
 	}
 
-	af = obj.Addr{}
-	at = obj.Addr{}
+	af := obj.Addr{}
+	at := obj.Addr{}
 	if f != nil {
 		gc.Naddr(f, &af, 1)
 	}
 	if t != nil {
 		gc.Naddr(t, &at, 1)
 	}
-	p = gc.Prog(as)
+	p := gc.Prog(as)
 	if f != nil {
 		p.From = af
 	}
@@ -777,7 +762,7 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 		fmt.Printf("%v\n", p)
 	}
 
-	w = 0
+	w := int32(0)
 	switch as {
 	case x86.AMOVB:
 		w = 1
@@ -806,8 +791,6 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 }
 
 func fixlargeoffset(n *gc.Node) {
-	var a gc.Node
-
 	if n == nil {
 		return
 	}
@@ -819,7 +802,7 @@ func fixlargeoffset(n *gc.Node) {
 	}
 	if n.Xoffset != int64(int32(n.Xoffset)) {
 		// offset too large, add to register instead.
-		a = *n
+		a := *n
 
 		a.Op = gc.OREGISTER
 		a.Type = gc.Types[gc.Tptr]
@@ -834,13 +817,11 @@ func fixlargeoffset(n *gc.Node) {
  * return Axxx for Oxxx on type t.
  */
 func optoas(op int, t *gc.Type) int {
-	var a int
-
 	if t == nil {
 		gc.Fatal("optoas: t is nil")
 	}
 
-	a = obj.AXXX
+	a := obj.AXXX
 	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
 	default:
 		gc.Fatal("optoas: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
@@ -1362,35 +1343,24 @@ func sudoclean() {
  * to release the register used for a.
  */
 func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
-	var o int
-	var i int
-	var oary [10]int64
-	var v int64
-	var w int64
-	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
-	var n4 gc.Node
-	var nn *gc.Node
-	var l *gc.Node
-	var r *gc.Node
-	var reg *gc.Node
-	var reg1 *gc.Node
-	var p1 *obj.Prog
-	var t *gc.Type
-
 	if n.Type == nil {
 		return false
 	}
 
 	*a = obj.Addr{}
 
+	var o int
+	var n1 gc.Node
+	var oary [10]int64
+	var nn *gc.Node
+	var reg *gc.Node
+	var reg1 *gc.Node
 	switch n.Op {
 	case gc.OLITERAL:
 		if !gc.Isconst(n, gc.CTINT) {
 			break
 		}
-		v = gc.Mpgetfix(n.Val.U.Xval)
+		v := gc.Mpgetfix(n.Val.U.Xval)
 		if v >= 32000 || v <= -32000 {
 			break
 		}
@@ -1400,20 +1370,13 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
 		gc.ODOTPTR:
 		cleani += 2
 		reg = &clean[cleani-1]
 		reg1 = &clean[cleani-2]
 		reg.Op = gc.OEMPTY
 		reg1.Op = gc.OEMPTY
 		goto odot
 
 	case gc.OINDEX:
 		return false
-
-		// disabled: OINDEX case is now covered by agenr
-		// for a more suitable register allocation pattern.
-		if n.Left.Type.Etype == gc.TSTRING {
-			return false
-		}
-		goto oindex
 	}
 
 	return false
@@ -1474,7 +1437,7 @@ odot:
 
 	if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
 		// directly addressable set of DOTs
-		n1 = *nn
+		n1 := *nn
 
 		n1.Type = n.Type
 		n1.Xoffset += oary[0]
@@ -1494,7 +1457,7 @@ odot:
 		n1.Xoffset = -(oary[0] + 1)
 	}
 
-	for i = 1; i < o; i++ {
+	for i := 1; i < o; i++ {
 		if oary[i] >= 0 {
 			gc.Fatal("can't happen")
 		}
@@ -1509,240 +1472,6 @@ odot:
 	gc.Naddr(&n1, a, 1)
 	goto yes
 
-oindex:
-	l = n.Left
-	r = n.Right
-	if l.Ullman >= gc.UINF && r.Ullman >= gc.UINF {
-		return false
-	}
-
-	// set o to type of array
-	o = 0
-
-	if gc.Isptr[l.Type.Etype] != 0 {
-		gc.Fatal("ptr ary")
-	}
-	if l.Type.Etype != gc.TARRAY {
-		gc.Fatal("not ary")
-	}
-	if l.Type.Bound < 0 {
-		o |= ODynam
-	}
-
-	w = n.Type.Width
-	if gc.Isconst(r, gc.CTINT) {
-		goto oindex_const
-	}
-
-	switch w {
-	default:
-		return false
-
-	case 1,
-		2,
-		4,
-		8:
-		break
-	}
-
-	cleani += 2
-	reg = &clean[cleani-1]
-	reg1 = &clean[cleani-2]
-	reg.Op = gc.OEMPTY
-	reg1.Op = gc.OEMPTY
-
-	// load the array (reg)
-	if l.Ullman > r.Ullman {
-		if xgen(l, reg, o) {
-			o |= OAddable
-		}
-	}
-
-	// load the index (reg1)
-	t = gc.Types[gc.TUINT64]
-
-	if gc.Issigned[r.Type.Etype] != 0 {
-		t = gc.Types[gc.TINT64]
-	}
-	regalloc(reg1, t, nil)
-	regalloc(&n3, r.Type, reg1)
-	cgen(r, &n3)
-	gmove(&n3, reg1)
-	regfree(&n3)
-
-	// load the array (reg)
-	if l.Ullman <= r.Ullman {
-		if xgen(l, reg, o) {
-			o |= OAddable
-		}
-	}
-
-	// check bounds
-	if gc.Debug['B'] == 0 && !n.Bounded {
-		// check bounds
-		n4.Op = gc.OXXX
-
-		t = gc.Types[gc.Simtype[gc.TUINT]]
-		if o&ODynam != 0 {
-			if o&OAddable != 0 {
-				n2 = *l
-				n2.Xoffset += int64(gc.Array_nel)
-				n2.Type = gc.Types[gc.Simtype[gc.TUINT]]
-			} else {
-				n2 = *reg
-				n2.Xoffset = int64(gc.Array_nel)
-				n2.Op = gc.OINDREG
-				n2.Type = gc.Types[gc.Simtype[gc.TUINT]]
-			}
-		} else {
-			if gc.Is64(r.Type) {
-				t = gc.Types[gc.TUINT64]
-			}
-			gc.Nodconst(&n2, gc.Types[gc.TUINT64], l.Type.Bound)
-		}
-
-		gins(optoas(gc.OCMP, t), reg1, &n2)
-		p1 = gc.Gbranch(optoas(gc.OLT, t), nil, +1)
-		if n4.Op != gc.OXXX {
-			regfree(&n4)
-		}
-		ginscall(gc.Panicindex, -1)
-		gc.Patch(p1, gc.Pc)
-	}
-
-	if o&ODynam != 0 {
-		if o&OAddable != 0 {
-			n2 = *l
-			n2.Xoffset += int64(gc.Array_array)
-			n2.Type = gc.Types[gc.Tptr]
-			gmove(&n2, reg)
-		} else {
-			n2 = *reg
-			n2.Op = gc.OINDREG
-			n2.Xoffset = int64(gc.Array_array)
-			n2.Type = gc.Types[gc.Tptr]
-			gmove(&n2, reg)
-		}
-	}
-
-	if o&OAddable != 0 {
-		gc.Naddr(reg1, a, 1)
-		a.Offset = 0
-		a.Scale = int8(w)
-		a.Index = a.Reg
-		a.Type = obj.TYPE_MEM
-		a.Reg = reg.Val.U.Reg
-	} else {
-		gc.Naddr(reg1, a, 1)
-		a.Offset = 0
-		a.Scale = int8(w)
-		a.Index = a.Reg
-		a.Type = obj.TYPE_MEM
-		a.Reg = reg.Val.U.Reg
-	}
-
-	goto yes
-
-	// index is constant
-	// can check statically and
-	// can multiply by width statically
-
-oindex_const:
-	v = gc.Mpgetfix(r.Val.U.Xval)
-
-	if sudoaddable(as, l, a) {
-		goto oindex_const_sudo
-	}
-
-	cleani += 2
-	reg = &clean[cleani-1]
-	reg1 = &clean[cleani-2]
-	reg.Op = gc.OEMPTY
-	reg1.Op = gc.OEMPTY
-
-	if o&ODynam != 0 {
-		regalloc(reg, gc.Types[gc.Tptr], nil)
-		agen(l, reg)
-
-		if gc.Debug['B'] == 0 && !n.Bounded {
-			n1 = *reg
-			n1.Op = gc.OINDREG
-			n1.Type = gc.Types[gc.Tptr]
-			n1.Xoffset = int64(gc.Array_nel)
-			gc.Nodconst(&n2, gc.Types[gc.TUINT64], v)
-			gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &n1, &n2)
-			p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
-			ginscall(gc.Panicindex, -1)
-			gc.Patch(p1, gc.Pc)
-		}
-
-		n1 = *reg
-		n1.Op = gc.OINDREG
-		n1.Type = gc.Types[gc.Tptr]
-		n1.Xoffset = int64(gc.Array_array)
-		gmove(&n1, reg)
-
-		n2 = *reg
-		n2.Op = gc.OINDREG
-		n2.Xoffset = v * w
-		fixlargeoffset(&n2)
-		a.Type = obj.TYPE_NONE
-		a.Index = obj.TYPE_NONE
-		gc.Naddr(&n2, a, 1)
-		goto yes
-	}
-
-	igen(l, &n1, nil)
-	if n1.Op == gc.OINDREG {
-		*reg = n1
-		reg.Op = gc.OREGISTER
-	}
-
-	n1.Xoffset += v * w
-	fixlargeoffset(&n1)
-	a.Type = obj.TYPE_NONE
-	a.Index = obj.TYPE_NONE
-	gc.Naddr(&n1, a, 1)
-	goto yes
-
-oindex_const_sudo:
-	if o&ODynam == 0 {
-		// array indexed by a constant
-		a.Offset += v * w
-
-		goto yes
-	}
-
-	// slice indexed by a constant
-	if gc.Debug['B'] == 0 && !n.Bounded {
-		a.Offset += int64(gc.Array_nel)
-		gc.Nodconst(&n2, gc.Types[gc.TUINT64], v)
-		p1 = gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), nil, &n2)
-		p1.From = *a
-		p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
-		ginscall(gc.Panicindex, -1)
-		gc.Patch(p1, gc.Pc)
-		a.Offset -= int64(gc.Array_nel)
-	}
-
-	a.Offset += int64(gc.Array_array)
-	reg = &clean[cleani-1]
-	if reg.Op == gc.OEMPTY {
-		regalloc(reg, gc.Types[gc.Tptr], nil)
-	}
-
-	p1 = gins(movptr, nil, reg)
-	p1.From = *a
-
-	n2 = *reg
-	n2.Op = gc.OINDREG
-	n2.Xoffset = v * w
-	fixlargeoffset(&n2)
-	a.Type = obj.TYPE_NONE
-	a.Index = obj.TYPE_NONE
-	gc.Naddr(&n2, a, 1)
-	goto yes
-
 yes:
 	return true
 
diff --git a/src/cmd/6g/peep.go b/src/cmd/6g/peep.go
index 9870ca5e4e792ebf0a6c2aa0d29c712342f7bde2..ed582d7befdb9fc00ee154c48c01a94f699736e6 100644
--- a/src/cmd/6g/peep.go
+++ b/src/cmd/6g/peep.go
@@ -62,10 +62,9 @@ func needc(p *obj.Prog) bool {
 }
 
 func rnops(r *gc.Flow) *gc.Flow {
-	var p *obj.Prog
-	var r1 *gc.Flow
-
 	if r != nil {
+		var p *obj.Prog
+		var r1 *gc.Flow
 		for {
 			p = r.Prog
 			if p.As != obj.ANOP || p.From.Type != obj.TYPE_NONE || p.To.Type != obj.TYPE_NONE {
@@ -83,14 +82,7 @@ func rnops(r *gc.Flow) *gc.Flow {
 }
 
 func peep(firstp *obj.Prog) {
-	var r *gc.Flow
-	var r1 *gc.Flow
-	var g *gc.Graph
-	var p *obj.Prog
-	var p1 *obj.Prog
-	var t int
-
-	g = gc.Flowstart(firstp, nil)
+	g := (*gc.Graph)(gc.Flowstart(firstp, nil))
 	if g == nil {
 		return
 	}
@@ -103,7 +95,8 @@ func peep(firstp *obj.Prog) {
 	// find MOV $con,R followed by
 	// another MOV $con,R without
 	// setting R in the interim
-	for r = g.Start; r != nil; r = r.Link {
+	var p *obj.Prog
+	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
 		p = r.Prog
 		switch p.As {
 		case x86.ALEAL,
@@ -130,6 +123,10 @@ func peep(firstp *obj.Prog) {
 		}
 	}
 
+	var r *gc.Flow
+	var r1 *gc.Flow
+	var p1 *obj.Prog
+	var t int
 loop1:
 	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 		gc.Dumpit("loop1", g.Start, 0)
@@ -266,7 +263,7 @@ loop1:
 	// can be replaced by MOVAPD, which moves the pair of float64s
 	// instead of just the lower one.  We only use the lower one, but
 	// the processor can do better if we do moves using both.
-	for r = g.Start; r != nil; r = r.Link {
+	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
 		p = r.Prog
 		if p.As == x86.AMOVLQZX {
 			if regtyp(&p.From) {
@@ -290,7 +287,7 @@ loop1:
 	// load pipelining
 	// push any load from memory as early as possible
 	// to give it time to complete before use.
-	for r = g.Start; r != nil; r = r.Link {
+	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
 		p = r.Prog
 		switch p.As {
 		case x86.AMOVB,
@@ -309,13 +306,10 @@ loop1:
 
 func pushback(r0 *gc.Flow) {
 	var r *gc.Flow
-	var b *gc.Flow
-	var p0 *obj.Prog
 	var p *obj.Prog
-	var t obj.Prog
 
-	b = nil
-	p0 = r0.Prog
+	b := (*gc.Flow)(nil)
+	p0 := (*obj.Prog)(r0.Prog)
 	for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
 		p = r.Prog
 		if p.As != obj.ANOP {
@@ -346,7 +340,7 @@ func pushback(r0 *gc.Flow) {
 
 	if gc.Debug['v'] != 0 {
 		fmt.Printf("pushback\n")
-		for r = b; ; r = r.Link {
+		for r := (*gc.Flow)(b); ; r = r.Link {
 			fmt.Printf("\t%v\n", r.Prog)
 			if r == r0 {
 				break
@@ -354,7 +348,7 @@ func pushback(r0 *gc.Flow) {
 		}
 	}
 
-	t = *r0.Prog
+	t := obj.Prog(*r0.Prog)
 	for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) {
 		p0 = r.Link.Prog
 		p = r.Prog
@@ -376,7 +370,7 @@ func pushback(r0 *gc.Flow) {
 
 	if gc.Debug['v'] != 0 {
 		fmt.Printf("\tafter\n")
-		for r = b; ; r = r.Link {
+		for r := (*gc.Flow)(b); ; r = r.Link {
 			fmt.Printf("\t%v\n", r.Prog)
 			if r == r0 {
 				break
@@ -386,9 +380,7 @@ func pushback(r0 *gc.Flow) {
 }
 
 func excise(r *gc.Flow) {
-	var p *obj.Prog
-
-	p = r.Prog
+	p := (*obj.Prog)(r.Prog)
 	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 		fmt.Printf("%v ===delete===\n", p)
 	}
@@ -414,9 +406,8 @@ func regtyp(a *obj.Addr) bool {
 // seems unnecessary, and it makes the instructions longer.
 func elimshortmov(g *gc.Graph) {
 	var p *obj.Prog
-	var r *gc.Flow
 
-	for r = g.Start; r != nil; r = r.Link {
+	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
 		p = r.Prog
 		if regtyp(&p.To) {
 			switch p.As {
@@ -518,10 +509,9 @@ func regconsttyp(a *obj.Addr) bool {
 // is reg guaranteed to be truncated by a previous L instruction?
 func prevl(r0 *gc.Flow, reg int) bool {
 	var p *obj.Prog
-	var r *gc.Flow
 	var info gc.ProgInfo
 
-	for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+	for r := (*gc.Flow)(gc.Uniqp(r0)); r != nil; r = gc.Uniqp(r) {
 		p = r.Prog
 		if p.To.Type == obj.TYPE_REG && int(p.To.Reg) == reg {
 			proginfo(&info, p)
@@ -552,18 +542,11 @@ func prevl(r0 *gc.Flow, reg int) bool {
  * will be eliminated by copy propagation.
  */
 func subprop(r0 *gc.Flow) bool {
-	var p *obj.Prog
-	var info gc.ProgInfo
-	var v1 *obj.Addr
-	var v2 *obj.Addr
-	var r *gc.Flow
-	var t int
-
 	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 		fmt.Printf("subprop %v\n", r0.Prog)
 	}
-	p = r0.Prog
-	v1 = &p.From
+	p := (*obj.Prog)(r0.Prog)
+	v1 := (*obj.Addr)(&p.From)
 	if !regtyp(v1) {
 		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 			fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v1))
@@ -571,7 +554,7 @@ func subprop(r0 *gc.Flow) bool {
 		return false
 	}
 
-	v2 = &p.To
+	v2 := (*obj.Addr)(&p.To)
 	if !regtyp(v2) {
 		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 			fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v2))
@@ -579,6 +562,8 @@ func subprop(r0 *gc.Flow) bool {
 		return false
 	}
 
+	var info gc.ProgInfo
+	var r *gc.Flow
 	for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
 		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 			fmt.Printf("\t? %v\n", r.Prog)
@@ -652,7 +637,7 @@ gotit:
 		}
 	}
 
-	t = int(v1.Reg)
+	t := int(v1.Reg)
 	v1.Reg = v2.Reg
 	v2.Reg = int16(t)
 	if gc.Debug['P'] != 0 {
@@ -674,16 +659,12 @@ gotit:
  *	set v2	return success
  */
 func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
-	var p *obj.Prog
-	var v1 *obj.Addr
-	var v2 *obj.Addr
-
 	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 		fmt.Printf("copyprop %v\n", r0.Prog)
 	}
-	p = r0.Prog
-	v1 = &p.From
-	v2 = &p.To
+	p := (*obj.Prog)(r0.Prog)
+	v1 := (*obj.Addr)(&p.From)
+	v2 := (*obj.Addr)(&p.To)
 	if copyas(v1, v2) {
 		return true
 	}
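
Aside: the rewrite copyprop is after, at source level (sketch):

	func before(a, c int) int {
		b := a // the copy r0: v1=a, v2=b
		return c + b
	}

	func after(a, c int) int {
		return c + a // every use of v2 replaced by v1; the copy is now dead
	}
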
@@ -692,9 +673,6 @@ func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
 }
 
 func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
-	var t int
-	var p *obj.Prog
-
 	if uint32(r.Active) == gactive {
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("act set; return 1\n")
@@ -706,6 +684,8 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
 	if gc.Debug['P'] != 0 {
 		fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
 	}
+	var t int
+	var p *obj.Prog
 	for ; r != nil; r = r.S1 {
 		p = r.Prog
 		if gc.Debug['P'] != 0 {
@@ -796,8 +776,6 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
  * 0 otherwise (not touched)
  */
 func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
-	var info gc.ProgInfo
-
 	switch p.As {
 	case obj.AJMP:
 		if s != nil {
@@ -851,6 +829,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
 	if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
 		return 0
 	}
+	var info gc.ProgInfo
 	proginfo(&info, p)
 
 	if (info.Reguse|info.Regset)&RtoB(int(v.Reg)) != 0 {
@@ -977,10 +956,8 @@ func copyau(a *obj.Addr, v *obj.Addr) bool {
  * return failure to substitute
  */
 func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
-	var reg int
-
 	if copyas(a, v) {
-		reg = int(s.Reg)
+		reg := int(s.Reg)
 		if reg >= x86.REG_AX && reg <= x86.REG_R15 || reg >= x86.REG_X0 && reg <= x86.REG_X0+15 {
 			if f != 0 {
 				a.Reg = int16(reg)
@@ -991,7 +968,7 @@ func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
 	}
 
 	if regtyp(v) {
-		reg = int(v.Reg)
+		reg := int(v.Reg)
 		if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
 			if (s.Reg == x86.REG_BP || s.Reg == x86.REG_R13) && a.Index != x86.REG_NONE {
 				return 1 /* can't use BP-base with index */
@@ -1016,15 +993,12 @@ func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
 }
 
 func conprop(r0 *gc.Flow) {
-	var r *gc.Flow
 	var p *obj.Prog
-	var p0 *obj.Prog
 	var t int
-	var v0 *obj.Addr
 
-	p0 = r0.Prog
-	v0 = &p0.To
-	r = r0
+	p0 := (*obj.Prog)(r0.Prog)
+	v0 := (*obj.Addr)(&p0.To)
+	r := (*gc.Flow)(r0)
 
 loop:
 	r = gc.Uniqs(r)
diff --git a/src/cmd/6g/reg.go b/src/cmd/6g/reg.go
index 0629a6248d19ef965197fbfa3976ebd39d871545..3c5a69945f917a8e9905d6e279dab417e780c11e 100644
--- a/src/cmd/6g/reg.go
+++ b/src/cmd/6g/reg.go
@@ -85,9 +85,7 @@ func excludedregs() uint64 {
 }
 
 func doregbits(r int) uint64 {
-	var b uint64
-
-	b = 0
+	b := uint64(0)
 	if r >= x86.REG_AX && r <= x86.REG_R15 {
 		b |= RtoB(r)
 	} else if r >= x86.REG_AL && r <= x86.REG_R15B {
diff --git a/src/cmd/8g/cgen.go b/src/cmd/8g/cgen.go
index 9f736b17451c2d2e0463e86d10dc5b104e626f87..d36bef7f01311ac667112ed7c8e08410b594351d 100644
--- a/src/cmd/8g/cgen.go
+++ b/src/cmd/8g/cgen.go
@@ -19,8 +19,6 @@ import "cmd/internal/gc"
  * peep.c
  */
 func mgen(n *gc.Node, n1 *gc.Node, rg *gc.Node) {
-	var n2 gc.Node
-
 	n1.Op = gc.OEMPTY
 
 	if n.Addable != 0 {
@@ -34,7 +32,7 @@ func mgen(n *gc.Node, n1 *gc.Node, rg *gc.Node) {
 	gc.Tempname(n1, n.Type)
 	cgen(n, n1)
 	if n.Type.Width <= int64(gc.Widthptr) || gc.Isfloat[n.Type.Etype] != 0 {
-		n2 = *n1
+		n2 := *n1
 		regalloc(n1, n.Type, rg)
 		gmove(&n2, n1)
 	}
@@ -55,17 +53,6 @@ func mfree(n *gc.Node) {
  *	sudoaddable
  */
 func cgen(n *gc.Node, res *gc.Node) {
-	var nl *gc.Node
-	var nr *gc.Node
-	var r *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var nt gc.Node
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var p3 *obj.Prog
-	var a int
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\ncgen-n", n)
 		gc.Dump("cgen-res", res)
@@ -85,6 +72,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		gc.OSLICE3,
 		gc.OSLICE3ARR:
 		if res.Op != gc.ONAME || res.Addable == 0 {
+			var n1 gc.Node
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_slice(n, &n1)
 			cgen(&n1, res)
@@ -95,6 +83,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 	case gc.OEFACE:
 		if res.Op != gc.ONAME || res.Addable == 0 {
+			var n1 gc.Node
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_eface(n, &n1)
 			cgen(&n1, res)
@@ -110,6 +99,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 	// function calls on both sides?  introduce temporary
 	if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF {
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 		cgen(n, &n1)
 		cgen(&n1, res)
@@ -154,6 +144,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 	if n.Addable == 0 && res.Addable == 0 {
 		// could use regalloc here sometimes,
 		// but have to check for ullman >= UINF.
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 
 		cgen(n, &n1)
@@ -164,6 +155,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 	// if result is not addressable directly but n is,
 	// compute its address and then store via the address.
 	if res.Addable == 0 {
+		var n1 gc.Node
 		igen(res, &n1, nil)
 		cgen(n, &n1)
 		regfree(&n1)
@@ -180,16 +172,17 @@ func cgen(n *gc.Node, res *gc.Node) {
 	// let's do some computation.
 
 	// use ullman to pick operand to eval first.
-	nl = n.Left
+	nl := n.Left
 
-	nr = n.Right
+	nr := n.Right
 	if nl != nil && nl.Ullman >= gc.UINF {
 		if nr != nil && nr.Ullman >= gc.UINF {
 			// both are hard
+			var n1 gc.Node
 			gc.Tempname(&n1, nl.Type)
 
 			cgen(nl, &n1)
-			n2 = *n
+			n2 := *n
 			n2.Left = &n1
 			cgen(&n2, res)
 			return
@@ -222,6 +215,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		return
 	}
 
+	var a int
 	switch n.Op {
 	default:
 		gc.Dump("cgen", n)
@@ -243,11 +237,11 @@ func cgen(n *gc.Node, res *gc.Node) {
 		gc.OGE,
 		gc.OGT,
 		gc.ONOT:
-		p1 = gc.Gbranch(obj.AJMP, nil, 0)
+		p1 := gc.Gbranch(obj.AJMP, nil, 0)
 
-		p2 = gc.Pc
+		p2 := gc.Pc
 		gmove(gc.Nodbool(true), res)
-		p3 = gc.Gbranch(obj.AJMP, nil, 0)
+		p3 := gc.Gbranch(obj.AJMP, nil, 0)
 		gc.Patch(p1, gc.Pc)
 		bgen(n, true, 0, p2)
 		gmove(gc.Nodbool(false), res)
@@ -293,7 +287,9 @@ func cgen(n *gc.Node, res *gc.Node) {
 			break
 		}
 
+		var n2 gc.Node
 		gc.Tempname(&n2, n.Type)
+		var n1 gc.Node
 		mgen(nl, &n1, res)
 		gmove(&n1, &n2)
 		gmove(&n2, res)
@@ -304,12 +300,14 @@ func cgen(n *gc.Node, res *gc.Node) {
 		gc.OINDEX,
 		gc.OIND,
 		gc.ONAME: // PHEAP or PPARAMREF var
+		var n1 gc.Node
 		igen(n, &n1, res)
 
 		gmove(&n1, res)
 		regfree(&n1)
 
 	case gc.OITAB:
+		var n1 gc.Node
 		igen(nl, &n1, res)
 		n1.Type = gc.Ptrto(gc.Types[gc.TUINTPTR])
 		gmove(&n1, res)
@@ -318,14 +316,16 @@ func cgen(n *gc.Node, res *gc.Node) {
 		// pointer is the first word of string or slice.
 	case gc.OSPTR:
 		if gc.Isconst(nl, gc.CTSTR) {
+			var n1 gc.Node
 			regalloc(&n1, gc.Types[gc.Tptr], res)
-			p1 = gins(i386.ALEAL, nil, &n1)
+			p1 := gins(i386.ALEAL, nil, &n1)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
 			gmove(&n1, res)
 			regfree(&n1)
 			break
 		}
 
+		var n1 gc.Node
 		igen(nl, &n1, res)
 		n1.Type = n.Type
 		gmove(&n1, res)
@@ -335,16 +335,18 @@ func cgen(n *gc.Node, res *gc.Node) {
 		if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
 			// map has len in the first 32-bit word.
 			// a zero pointer means zero length
+			var n1 gc.Node
 			gc.Tempname(&n1, gc.Types[gc.Tptr])
 
 			cgen(nl, &n1)
+			var n2 gc.Node
 			regalloc(&n2, gc.Types[gc.Tptr], nil)
 			gmove(&n1, &n2)
 			n1 = n2
 
 			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
 			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
-			p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+			p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
 
 			n2 = n1
 			n2.Op = gc.OINDREG
@@ -360,6 +362,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 		if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
 			// both slice and string have len one pointer into the struct.
+			var n1 gc.Node
 			igen(nl, &n1, res)
 
 			n1.Type = gc.Types[gc.TUINT32]
@@ -375,16 +378,18 @@ func cgen(n *gc.Node, res *gc.Node) {
 		if gc.Istype(nl.Type, gc.TCHAN) {
 			// chan has cap in the second 32-bit word.
 			// a zero pointer means zero length
+			var n1 gc.Node
 			gc.Tempname(&n1, gc.Types[gc.Tptr])
 
 			cgen(nl, &n1)
+			var n2 gc.Node
 			regalloc(&n2, gc.Types[gc.Tptr], nil)
 			gmove(&n1, &n2)
 			n1 = n2
 
 			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
 			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
-			p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+			p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
 
 			n2 = n1
 			n2.Op = gc.OINDREG
@@ -400,6 +405,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		}
 
 		if gc.Isslice(nl.Type) {
+			var n1 gc.Node
 			igen(nl, &n1, res)
 			n1.Type = gc.Types[gc.TUINT32]
 			n1.Xoffset += int64(gc.Array_cap)
@@ -439,14 +445,16 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 sbop: // symmetric binary
 	if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
-		r = nl
+		r := nl
 		nl = nr
 		nr = r
 	}
 
 abop: // asymmetric binary
 	if gc.Smallintconst(nr) {
+		var n1 gc.Node
 		mgen(nl, &n1, res)
+		var n2 gc.Node
 		regalloc(&n2, nl.Type, &n1)
 		gmove(&n1, &n2)
 		gins(a, nr, &n2)
@@ -454,9 +462,12 @@ abop: // asymmetric binary
 		regfree(&n2)
 		mfree(&n1)
 	} else if nl.Ullman >= nr.Ullman {
+		var nt gc.Node
 		gc.Tempname(&nt, nl.Type)
 		cgen(nl, &nt)
+		var n2 gc.Node
 		mgen(nr, &n2, nil)
+		var n1 gc.Node
 		regalloc(&n1, nl.Type, res)
 		gmove(&nt, &n1)
 		gins(a, &n2, &n1)
@@ -464,8 +475,10 @@ abop: // asymmetric binary
 		regfree(&n1)
 		mfree(&n2)
 	} else {
+		var n2 gc.Node
 		regalloc(&n2, nr.Type, res)
 		cgen(nr, &n2)
+		var n1 gc.Node
 		regalloc(&n1, nl.Type, nil)
 		cgen(nl, &n1)
 		gins(a, &n2, &n1)
@@ -477,6 +490,7 @@ abop: // asymmetric binary
 	return
 
 uop: // unary
+	var n1 gc.Node
 	gc.Tempname(&n1, nl.Type)
 
 	cgen(nl, &n1)
@@ -491,11 +505,6 @@ uop: // unary
  * returns Prog* to patch to panic call.
  */
 func igenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
-	var tmp gc.Node
-	var lo gc.Node
-	var hi gc.Node
-	var zero gc.Node
-
 	if !gc.Is64(n.Type) {
 		if n.Addable != 0 {
 			// nothing to do.
@@ -508,8 +517,11 @@ func igenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
 		return nil
 	}
 
+	var tmp gc.Node
 	gc.Tempname(&tmp, gc.Types[gc.TINT64])
 	cgen(n, &tmp)
+	var lo gc.Node
+	var hi gc.Node
 	split64(&tmp, &lo, &hi)
 	gc.Tempname(res, gc.Types[gc.TUINT32])
 	gmove(&lo, res)
@@ -518,6 +530,7 @@ func igenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
 		return nil
 	}
 
+	var zero gc.Node
 	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
 	gins(i386.ACMPL, &hi, &zero)
 	splitclean()
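
Aside: the CMPL of the high word implements the bounds idea at source level (sketch): a 64-bit index can take the 32-bit path only when its top half is zero; anything else is out of range for any object on 386.

	func indexFits32(i int64) bool {
		return i>>32 == 0 // in [0, 1<<32): high word zero; negatives fail too
	}
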
@@ -530,20 +543,6 @@ func igenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
  * The generated code checks that the result is not nil.
  */
 func agen(n *gc.Node, res *gc.Node) {
-	var nl *gc.Node
-	var nr *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
-	var tmp gc.Node
-	var nlen gc.Node
-	var t *gc.Type
-	var w uint32
-	var v uint64
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var bounded bool
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nagen-res", res)
 		gc.Dump("agen-r", n)
@@ -562,10 +561,12 @@ func agen(n *gc.Node, res *gc.Node) {
 		// Create a temporary we can take the address of and read.
 		// The generated code is just going to panic, so it need not
 		// be terribly efficient. See issue 3670.
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 
 		gc.Gvardef(&n1)
 		clearfat(&n1)
+		var n2 gc.Node
 		regalloc(&n2, gc.Types[gc.Tptr], res)
 		gins(i386.ALEAL, &n1, &n2)
 		gmove(&n2, res)
@@ -578,6 +579,7 @@ func agen(n *gc.Node, res *gc.Node) {
 		if n.Op == gc.OREGISTER {
 			gc.Fatal("agen OREGISTER")
 		}
+		var n1 gc.Node
 		regalloc(&n1, gc.Types[gc.Tptr], res)
 		gins(i386.ALEAL, n, &n1)
 		gmove(&n1, res)
@@ -586,9 +588,9 @@ func agen(n *gc.Node, res *gc.Node) {
 	}
 
 	// let's compute
-	nl = n.Left
+	nl := n.Left
 
-	nr = n.Right
+	nr := n.Right
 
 	switch n.Op {
 	default:
@@ -611,19 +613,24 @@ func agen(n *gc.Node, res *gc.Node) {
 		gc.OSLICESTR,
 		gc.OSLICE3,
 		gc.OSLICE3ARR:
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 		gc.Cgen_slice(n, &n1)
 		agen(&n1, res)
 
 	case gc.OEFACE:
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 		gc.Cgen_eface(n, &n1)
 		agen(&n1, res)
 
 	case gc.OINDEX:
-		p2 = nil // to be patched to panicindex.
-		w = uint32(n.Type.Width)
-		bounded = gc.Debug['B'] != 0 || n.Bounded
+		p2 := (*obj.Prog)(nil) // to be patched to panicindex.
+		w := uint32(n.Type.Width)
+		bounded := gc.Debug['B'] != 0 || n.Bounded
+		var n3 gc.Node
+		var tmp gc.Node
+		var n1 gc.Node
 		if nr.Addable != 0 {
 			// Generate &nl first, and move nr into register.
 			if !gc.Isconst(nl, gc.CTSTR) {
@@ -656,6 +663,7 @@ func agen(n *gc.Node, res *gc.Node) {
 		}
 
 		// For fixed array we really want the pointer in n3.
+		var n2 gc.Node
 		if gc.Isfixedarray(nl.Type) {
 			regalloc(&n2, gc.Types[gc.Tptr], &n3)
 			agen(&n3, &n2)
@@ -673,15 +681,15 @@ func agen(n *gc.Node, res *gc.Node) {
 			if gc.Isconst(nl, gc.CTSTR) {
 				gc.Fatal("constant string constant index") // front end should handle
 			}
-			v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+			v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
 			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
 				if gc.Debug['B'] == 0 && !n.Bounded {
-					nlen = n3
+					nlen := n3
 					nlen.Type = gc.Types[gc.TUINT32]
 					nlen.Xoffset += int64(gc.Array_nel)
 					gc.Nodconst(&n2, gc.Types[gc.TUINT32], int64(v))
 					gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &nlen, &n2)
-					p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
+					p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
 					ginscall(gc.Panicindex, -1)
 					gc.Patch(p1, gc.Pc)
 				}
@@ -705,7 +713,7 @@ func agen(n *gc.Node, res *gc.Node) {
 		}
 
 		// i is in register n1, extend to 32 bits.
-		t = gc.Types[gc.TUINT32]
+		t := gc.Types[gc.TUINT32]
 
 		if gc.Issigned[n1.Type.Etype] != 0 {
 			t = gc.Types[gc.TINT32]
@@ -717,8 +725,9 @@ func agen(n *gc.Node, res *gc.Node) {
 
 		if gc.Debug['B'] == 0 && !n.Bounded {
 			// check bounds
-			t = gc.Types[gc.TUINT32]
+			t := gc.Types[gc.TUINT32]
 
+			var nlen gc.Node
 			if gc.Isconst(nl, gc.CTSTR) {
 				gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval.S)))
 			} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
@@ -730,7 +739,7 @@ func agen(n *gc.Node, res *gc.Node) {
 			}
 
 			gins(optoas(gc.OCMP, t), &n2, &nlen)
-			p1 = gc.Gbranch(optoas(gc.OLT, t), nil, +1)
+			p1 := gc.Gbranch(optoas(gc.OLT, t), nil, +1)
 			if p2 != nil {
 				gc.Patch(p2, gc.Pc)
 			}
@@ -740,7 +749,7 @@ func agen(n *gc.Node, res *gc.Node) {
 
 		if gc.Isconst(nl, gc.CTSTR) {
 			regalloc(&n3, gc.Types[gc.Tptr], res)
-			p1 = gins(i386.ALEAL, nil, &n3)
+			p1 := gins(i386.ALEAL, nil, &n3)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
 			p1.From.Scale = 1
 			p1.From.Index = n2.Val.U.Reg
@@ -763,7 +772,7 @@ func agen(n *gc.Node, res *gc.Node) {
 		} else // nothing to do
 		if w == 1 || w == 2 || w == 4 || w == 8 {
 			// LEAL (n3)(n2*w), n3
-			p1 = gins(i386.ALEAL, &n2, &n3)
+			p1 := gins(i386.ALEAL, &n2, &n3)
 
 			p1.From.Scale = int8(w)
 			p1.From.Type = obj.TYPE_MEM
@@ -795,6 +804,7 @@ func agen(n *gc.Node, res *gc.Node) {
 
 		cgen(n.Heapaddr, res)
 		if n.Xoffset != 0 {
+			var n1 gc.Node
 			gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
 			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
 		}
@@ -806,18 +816,20 @@ func agen(n *gc.Node, res *gc.Node) {
 	case gc.ODOT:
 		agen(nl, res)
 		if n.Xoffset != 0 {
+			var n1 gc.Node
 			gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
 			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
 		}
 
 	case gc.ODOTPTR:
-		t = nl.Type
+		t := nl.Type
 		if gc.Isptr[t.Etype] == 0 {
 			gc.Fatal("agen: not ptr %v", gc.Nconv(n, 0))
 		}
 		cgen(nl, res)
 		gc.Cgen_checknil(res)
 		if n.Xoffset != 0 {
+			var n1 gc.Node
 			gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
 			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
 		}
@@ -834,10 +846,6 @@ func agen(n *gc.Node, res *gc.Node) {
  * The generated code checks that the result is not *nil.
  */
 func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
-	var fp *gc.Type
-	var flist gc.Iter
-	var n1 gc.Node
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nigen-n", n)
 	}
@@ -873,6 +881,7 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
 			gc.OCALLFUNC,
 			gc.OCALLMETH,
 			gc.OCALLINTER:
+			var n1 gc.Node
 			igen(n.Left, &n1, res)
 
 			regalloc(a, gc.Types[gc.Tptr], &n1)
@@ -904,7 +913,8 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
 			cgen_callinter(n, nil, 0)
 		}
 
-		fp = gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
+		var flist gc.Iter
+		fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
 		*a = gc.Node{}
 		a.Op = gc.OINDREG
 		a.Val.U.Reg = i386.REG_SP
@@ -924,6 +934,7 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
 				if gc.Isptr[n.Left.Type.Etype] == 0 {
 					igen(n.Left, a, res)
 				} else {
+					var n1 gc.Node
 					igen(n.Left, &n1, res)
 					gc.Cgen_checknil(&n1)
 					regalloc(a, gc.Types[gc.Tptr], res)
@@ -946,6 +957,7 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
 	if res != nil && res.Op == gc.OREGISTER {
 		reg[res.Val.U.Reg]--
 	}
+	var n1 gc.Node
 	gc.Tempname(&n1, gc.Types[gc.Tptr])
 	agen(n, &n1)
 	if res != nil && res.Op == gc.OREGISTER {
@@ -962,17 +974,6 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
  *	if(n == true) goto to;
  */
 func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
-	var et int
-	var a int
-	var nl *gc.Node
-	var nr *gc.Node
-	var r *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var tmp gc.Node
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nbgen", n)
 	}
@@ -992,7 +993,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		}
 	}
 
-	et = int(n.Type.Etype)
+	et := int(n.Type.Etype)
 	if et != gc.TBOOL {
 		gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
 		gc.Patch(gins(obj.AEND, nil, nil), to)
@@ -1006,8 +1007,8 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		}
 	}
 
-	nl = n.Left
-	nr = nil
+	nl := n.Left
+	nr := (*gc.Node)(nil)
 
 	if nl != nil && gc.Isfloat[nl.Type.Etype] != 0 {
 		bgen_float(n, bool2int(true_), likely, to)
@@ -1029,9 +1030,10 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		if n.Addable == 0 {
 			goto def
 		}
+		var n1 gc.Node
 		gc.Nodconst(&n1, n.Type, 0)
 		gins(optoas(gc.OCMP, n.Type), n, &n1)
-		a = i386.AJNE
+		a := i386.AJNE
 		if !true_ {
 			a = i386.AJEQ
 		}
@@ -1041,8 +1043,8 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 	case gc.OANDAND,
 		gc.OOROR:
 		if (n.Op == gc.OANDAND) == true_ {
-			p1 = gc.Gbranch(obj.AJMP, nil, 0)
-			p2 = gc.Gbranch(obj.AJMP, nil, 0)
+			p1 := gc.Gbranch(obj.AJMP, nil, 0)
+			p2 := gc.Gbranch(obj.AJMP, nil, 0)
 			gc.Patch(p1, gc.Pc)
 			bgen(n.Left, !true_, -likely, p2)
 			bgen(n.Right, !true_, -likely, p2)
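
Aside: the two inverted sub-branches implement short-circuit evaluation: each operand that fails jumps straight past the code for the true case. Source-level shape (sketch):

	func andBranch(a, b bool) bool {
		if !a {
			return false // bgen(n.Left, !true_, p2): first early exit
		}
		if !b {
			return false // bgen(n.Right, !true_, p2): second early exit
		}
		return true // control falls through to the 'to' target
	}
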
@@ -1086,7 +1088,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		gc.OGT,
 		gc.OLE,
 		gc.OGE:
-		a = int(n.Op)
+		a := int(n.Op)
 		if !true_ {
 			a = gc.Brcom(a)
 			true_ = !true_
@@ -1095,7 +1097,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		// make simplest on right
 		if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
 			a = gc.Brrev(a)
-			r = nl
+			r := nl
 			nl = nr
 			nr = r
 		}
@@ -1108,9 +1110,11 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 			}
 
 			a = optoas(a, gc.Types[gc.Tptr])
+			var n1 gc.Node
 			igen(nl, &n1, nil)
 			n1.Xoffset += int64(gc.Array_array)
 			n1.Type = gc.Types[gc.Tptr]
+			var tmp gc.Node
 			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
 			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
 			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
@@ -1126,8 +1130,10 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 			}
 
 			a = optoas(a, gc.Types[gc.Tptr])
+			var n1 gc.Node
 			igen(nl, &n1, nil)
 			n1.Type = gc.Types[gc.Tptr]
+			var tmp gc.Node
 			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
 			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
 			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
@@ -1142,12 +1148,14 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 
 		if gc.Is64(nr.Type) {
 			if nl.Addable == 0 || gc.Isconst(nl, gc.CTINT) {
+				var n1 gc.Node
 				gc.Tempname(&n1, nl.Type)
 				cgen(nl, &n1)
 				nl = &n1
 			}
 
 			if nr.Addable == 0 {
+				var n2 gc.Node
 				gc.Tempname(&n2, nr.Type)
 				cgen(nr, &n2)
 				nr = &n2
@@ -1157,19 +1165,23 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 			break
 		}
 
+		var n2 gc.Node
 		if nr.Ullman >= gc.UINF {
 			if nl.Addable == 0 {
+				var n1 gc.Node
 				gc.Tempname(&n1, nl.Type)
 				cgen(nl, &n1)
 				nl = &n1
 			}
 
 			if nr.Addable == 0 {
+				var tmp gc.Node
 				gc.Tempname(&tmp, nr.Type)
 				cgen(nr, &tmp)
 				nr = &tmp
 			}
 
+			var n2 gc.Node
 			regalloc(&n2, nr.Type, nil)
 			cgen(nr, &n2)
 			nr = &n2
@@ -1177,6 +1189,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		}
 
 		if nl.Addable == 0 {
+			var n1 gc.Node
 			gc.Tempname(&n1, nl.Type)
 			cgen(nl, &n1)
 			nl = &n1
@@ -1189,6 +1202,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		}
 
 		if nr.Addable == 0 {
+			var tmp gc.Node
 			gc.Tempname(&tmp, nr.Type)
 			cgen(nr, &tmp)
 			nr = &tmp
@@ -1211,11 +1225,13 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 	return
 
 def:
+	var n1 gc.Node
 	regalloc(&n1, n.Type, nil)
 	cgen(n, &n1)
+	var n2 gc.Node
 	gc.Nodconst(&n2, n.Type, 0)
 	gins(optoas(gc.OCMP, n.Type), &n1, &n2)
-	a = i386.AJNE
+	a := i386.AJNE
 	if !true_ {
 		a = i386.AJEQ
 	}
@@ -1230,31 +1246,27 @@ def:
  * return n's offset from SP.
  */
 func stkof(n *gc.Node) int32 {
-	var t *gc.Type
-	var flist gc.Iter
-	var off int32
-
 	switch n.Op {
 	case gc.OINDREG:
 		return int32(n.Xoffset)
 
 	case gc.ODOT:
-		t = n.Left.Type
+		t := n.Left.Type
 		if gc.Isptr[t.Etype] != 0 {
 			break
 		}
-		off = stkof(n.Left)
+		off := stkof(n.Left)
 		if off == -1000 || off == 1000 {
 			return off
 		}
 		return int32(int64(off) + n.Xoffset)
 
 	case gc.OINDEX:
-		t = n.Left.Type
+		t := n.Left.Type
 		if !gc.Isfixedarray(t) {
 			break
 		}
-		off = stkof(n.Left)
+		off := stkof(n.Left)
 		if off == -1000 || off == 1000 {
 			return off
 		}
@@ -1266,11 +1278,12 @@ func stkof(n *gc.Node) int32 {
 	case gc.OCALLMETH,
 		gc.OCALLINTER,
 		gc.OCALLFUNC:
-		t = n.Left.Type
+		t := n.Left.Type
 		if gc.Isptr[t.Etype] != 0 {
 			t = t.Type
 		}
 
+		var flist gc.Iter
 		t = gc.Structfirst(&flist, gc.Getoutarg(t))
 		if t != nil {
 			return int32(t.Width)
@@ -1287,18 +1300,6 @@ func stkof(n *gc.Node) int32 {
  *	memmove(&res, &n, w);
  */
 func sgen(n *gc.Node, res *gc.Node, w int64) {
-	var dst gc.Node
-	var src gc.Node
-	var tdst gc.Node
-	var tsrc gc.Node
-	var cx gc.Node
-	var c int32
-	var q int32
-	var odst int32
-	var osrc int32
-	var l *gc.NodeList
-	var p *obj.Prog
-
 	if gc.Debug['g'] != 0 {
 		fmt.Printf("\nsgen w=%d\n", w)
 		gc.Dump("r", n)
@@ -1315,6 +1316,7 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 
 	if w == 0 {
 		// evaluate side effects only.
+		var tdst gc.Node
 		gc.Tempname(&tdst, gc.Types[gc.Tptr])
 
 		agen(res, &tdst)
@@ -1325,7 +1327,7 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 	// If copying .args, that's all the results, so record definition sites
 	// for them for the liveness analysis.
 	if res.Op == gc.ONAME && res.Sym.Name == ".args" {
-		for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+		for l := gc.Curfn.Dcl; l != nil; l = l.Next {
 			if l.N.Class == gc.PPARAMOUT {
 				gc.Gvardef(l.N)
 			}
@@ -1338,15 +1340,16 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 	}
 
 	// offset on the stack
-	osrc = stkof(n)
+	osrc := stkof(n)
 
-	odst = stkof(res)
+	odst := stkof(res)
 
 	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
 		// osrc and odst both on stack, and at least one is in
 		// an unknown position.  Could generate code to test
 		// for forward/backward copy, but instead just copy
 		// to a temporary location first.
+		var tsrc gc.Node
 		gc.Tempname(&tsrc, n.Type)
 
 		sgen(n, &tsrc, w)
@@ -1354,10 +1357,14 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 		return
 	}
 
+	var dst gc.Node
 	gc.Nodreg(&dst, gc.Types[gc.Tptr], i386.REG_DI)
+	var src gc.Node
 	gc.Nodreg(&src, gc.Types[gc.Tptr], i386.REG_SI)
 
+	var tsrc gc.Node
 	gc.Tempname(&tsrc, gc.Types[gc.Tptr])
+	var tdst gc.Node
 	gc.Tempname(&tdst, gc.Types[gc.Tptr])
 	if n.Addable == 0 {
 		agen(n, &tsrc)
@@ -1381,8 +1388,8 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 		gmove(&tdst, &dst)
 	}
 
-	c = int32(w % 4) // bytes
-	q = int32(w / 4) // doublewords
+	c := int32(w % 4) // bytes
+	q := int32(w / 4) // doublewords
 
 	// if we are copying forward on the stack and
 	// the src and dst overlap, then reverse direction
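
Aside: the overlap condition in question, as a predicate (sketch):

	// a forward copy of w bytes clobbers not-yet-read source bytes
	// exactly when dst falls inside [src, src+w)
	func mustCopyBackward(src, dst, w int64) bool {
		return dst > src && dst < src+w
	}
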
@@ -1423,13 +1430,14 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
 			gins(i386.AREP, nil, nil)   // repeat
 			gins(i386.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
 		} else if q >= 4 {
-			p = gins(obj.ADUFFCOPY, nil, nil)
+			p := gins(obj.ADUFFCOPY, nil, nil)
 			p.To.Type = obj.TYPE_ADDR
 			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
 
 			// 10 and 128 = magic constants: see ../../runtime/asm_386.s
 			p.To.Offset = 10 * (128 - int64(q))
 		} else if !gc.Nacl && c == 0 {
+			var cx gc.Node
 			gc.Nodreg(&cx, gc.Types[gc.TINT32], i386.REG_CX)
 
 			// We don't need the MOVSL side-effect of updating SI and DI,
@@ -1484,23 +1492,16 @@ func cadable(n *gc.Node) bool {
 func componentgen(nr *gc.Node, nl *gc.Node) bool {
 	var nodl gc.Node
 	var nodr gc.Node
-	var tmp gc.Node
-	var t *gc.Type
-	var freel int
-	var freer int
-	var fldcount int64
-	var loffset int64
-	var roffset int64
 
-	freel = 0
-	freer = 0
+	freel := 0
+	freer := 0
 
 	switch nl.Type.Etype {
 	default:
 		goto no
 
 	case gc.TARRAY:
-		t = nl.Type
+		t := nl.Type
 
 		// Slices are ok.
 		if gc.Isslice(t) {
@@ -1517,9 +1518,9 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		// Small structs with non-fat types are ok.
 	// Zero-sized structs are treated separately elsewhere.
 	case gc.TSTRUCT:
-		fldcount = 0
+		fldcount := int64(0)
 
-		for t = nl.Type.Type; t != nil; t = t.Down {
+		for t := nl.Type.Type; t != nil; t = t.Down {
 			if gc.Isfat(t.Type) {
 				goto no
 			}
@@ -1555,6 +1556,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		}
 	} else {
 		// When zeroing, prepare a register containing zero.
+		var tmp gc.Node
 		gc.Nodconst(&tmp, nl.Type, 0)
 
 		regalloc(&nodr, gc.Types[gc.TUINT], nil)
@@ -1576,11 +1578,11 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		if nl.Op == gc.ONAME {
 			gc.Gvardef(nl)
 		}
-		t = nl.Type
+		t := nl.Type
 		if !gc.Isslice(t) {
 			nodl.Type = t.Type
 			nodr.Type = nodl.Type
-			for fldcount = 0; fldcount < t.Bound; fldcount++ {
+			for fldcount := int64(0); fldcount < t.Bound; fldcount++ {
 				if nr == nil {
 					gc.Clearslim(&nodl)
 				} else {
@@ -1683,8 +1685,8 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		if nl.Op == gc.ONAME {
 			gc.Gvardef(nl)
 		}
-		loffset = nodl.Xoffset
-		roffset = nodr.Xoffset
+		loffset := nodl.Xoffset
+		roffset := nodr.Xoffset
 
 		// funarg structs may not begin at offset zero.
 		if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
@@ -1694,7 +1696,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 			roffset -= nr.Type.Type.Width
 		}
 
-		for t = nl.Type.Type; t != nil; t = t.Down {
+		for t := nl.Type.Type; t != nil; t = t.Down {
 			nodl.Xoffset = loffset + t.Width
 			nodl.Type = t.Type
 
diff --git a/src/cmd/8g/cgen64.go b/src/cmd/8g/cgen64.go
index 1937ae0941a0ee121cea6ebc3ee7fb4310a87dda..0755f0e1b7b8cec725ab39402d4bc80856303440 100644
--- a/src/cmd/8g/cgen64.go
+++ b/src/cmd/8g/cgen64.go
@@ -16,25 +16,6 @@ import "cmd/internal/gc"
  * return 1 on success, 0 if op not handled.
  */
 func cgen64(n *gc.Node, res *gc.Node) {
-	var t1 gc.Node
-	var t2 gc.Node
-	var ax gc.Node
-	var dx gc.Node
-	var cx gc.Node
-	var ex gc.Node
-	var fx gc.Node
-	var l *gc.Node
-	var r *gc.Node
-	var lo1 gc.Node
-	var lo2 gc.Node
-	var hi1 gc.Node
-	var hi2 gc.Node
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var v uint64
-	var lv uint32
-	var hv uint32
-
 	if res.Op != gc.OINDREG && res.Op != gc.ONAME {
 		gc.Dump("n", n)
 		gc.Dump("res", res)
@@ -47,6 +28,8 @@ func cgen64(n *gc.Node, res *gc.Node) {
 
 	case gc.OMINUS:
 		cgen(n.Left, res)
+		var hi1 gc.Node
+		var lo1 gc.Node
 		split64(res, &lo1, &hi1)
 		gins(i386.ANEGL, nil, &lo1)
 		gins(i386.AADCL, ncon(0), &hi1)
@@ -56,6 +39,8 @@ func cgen64(n *gc.Node, res *gc.Node) {
 
 	case gc.OCOM:
 		cgen(n.Left, res)
+		var lo1 gc.Node
+		var hi1 gc.Node
 		split64(res, &lo1, &hi1)
 		gins(i386.ANOTL, nil, &lo1)
 		gins(i386.ANOTL, nil, &hi1)
@@ -76,27 +61,36 @@ func cgen64(n *gc.Node, res *gc.Node) {
 		break
 	}
 
-	l = n.Left
-	r = n.Right
+	l := n.Left
+	r := n.Right
 	if l.Addable == 0 {
+		var t1 gc.Node
 		gc.Tempname(&t1, l.Type)
 		cgen(l, &t1)
 		l = &t1
 	}
 
 	if r != nil && r.Addable == 0 {
+		var t2 gc.Node
 		gc.Tempname(&t2, r.Type)
 		cgen(r, &t2)
 		r = &t2
 	}
 
+	var ax gc.Node
 	gc.Nodreg(&ax, gc.Types[gc.TINT32], i386.REG_AX)
+	var cx gc.Node
 	gc.Nodreg(&cx, gc.Types[gc.TINT32], i386.REG_CX)
+	var dx gc.Node
 	gc.Nodreg(&dx, gc.Types[gc.TINT32], i386.REG_DX)
 
 	// Setup for binary operation.
+	var hi1 gc.Node
+	var lo1 gc.Node
 	split64(l, &lo1, &hi1)
 
+	var lo2 gc.Node
+	var hi2 gc.Node
 	if gc.Is64(r.Type) {
 		split64(r, &lo2, &hi2)
 	}
@@ -121,8 +115,10 @@ func cgen64(n *gc.Node, res *gc.Node) {
 
 		// let's call the next two EX and FX.
 	case gc.OMUL:
+		var ex gc.Node
 		regalloc(&ex, gc.Types[gc.TPTR32], nil)
 
+		var fx gc.Node
 		regalloc(&fx, gc.Types[gc.TPTR32], nil)
 
 		// load args into DX:AX and EX:CX.
@@ -136,9 +132,9 @@ func cgen64(n *gc.Node, res *gc.Node) {
 		gins(i386.AMOVL, &dx, &fx)
 
 		gins(i386.AORL, &ex, &fx)
-		p1 = gc.Gbranch(i386.AJNE, nil, 0)
+		p1 := gc.Gbranch(i386.AJNE, nil, 0)
 		gins(i386.AMULL, &cx, nil) // implicit &ax
-		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		p2 := gc.Gbranch(obj.AJMP, nil, 0)
 		gc.Patch(p1, gc.Pc)
 
 		// full 64x64 -> 64, from 32x32 -> 64.
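
Aside: the identity behind it (sketch): with x = xh<<32|xl and y = yh<<32|yl, the low 64 bits of x*y are xl*yl + ((xh*yl + xl*yh) << 32); the xh*yh term lies entirely above bit 63 and drops out.

	func mul64lo(xl, xh, yl, yh uint32) uint64 {
		lo := uint64(xl) * uint64(yl) // the one full 32x32->64 multiply
		cross := uint64(xh)*uint64(yl) + uint64(xl)*uint64(yh)
		return lo + cross<<32 // carries above bit 63 vanish mod 2^64
	}
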
@@ -166,7 +162,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 	//	shld hi:lo, c
 	//	shld lo:t, c
 	case gc.OLROT:
-		v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+		v := uint64(gc.Mpgetfix(r.Val.U.Xval))
 
 		if v >= 32 {
 			// reverse during load to do the first 32 bits of rotate
@@ -183,7 +179,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 		} else // done
 		{
 			gins(i386.AMOVL, &dx, &cx)
-			p1 = gins(i386.ASHLL, ncon(uint32(v)), &dx)
+			p1 := gins(i386.ASHLL, ncon(uint32(v)), &dx)
 			p1.From.Index = i386.REG_AX // double-width shift
 			p1.From.Scale = 0
 			p1 = gins(i386.ASHLL, ncon(uint32(v)), &ax)
@@ -193,7 +189,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 
 	case gc.OLSH:
 		if r.Op == gc.OLITERAL {
-			v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+			v := uint64(gc.Mpgetfix(r.Val.U.Xval))
 			if v >= 64 {
 				if gc.Is64(r.Type) {
 					splitclean()
@@ -226,7 +222,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 			gins(i386.AMOVL, &lo1, &ax)
 
 			gins(i386.AMOVL, &hi1, &dx)
-			p1 = gins(i386.ASHLL, ncon(uint32(v)), &dx)
+			p1 := gins(i386.ASHLL, ncon(uint32(v)), &dx)
 			p1.From.Index = i386.REG_AX // double-width shift
 			p1.From.Scale = 0
 			gins(i386.ASHLL, ncon(uint32(v)), &ax)
@@ -240,7 +236,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 
 		// load shift value into register.
 		// if high bits are set, zero value.
-		p1 = nil
+		p1 := (*obj.Prog)(nil)
 
 		if gc.Is64(r.Type) {
 			gins(i386.ACMPL, &hi2, ncon(0))
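The `p1 := (*obj.Prog)(nil)` rewrite above is the standard workaround when converting `p1 = nil` to a short declaration: an untyped `nil` has no default type, so a conversion must supply one. A minimal sketch (the `prog` type here is made up for illustration):

```go
package main

import "fmt"

type prog struct{ link *prog }

func main() {
	// p := nil          // does not compile: untyped nil
	p := (*prog)(nil)    // the conversion gives := a concrete type
	fmt.Println(p == nil) // true
}
```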
@@ -254,7 +250,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 		// if shift count is >=64, zero value
 		gins(i386.ACMPL, &cx, ncon(64))
 
-		p2 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+		p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
 		if p1 != nil {
 			gc.Patch(p1, gc.Pc)
 		}
@@ -282,7 +278,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 
 	case gc.ORSH:
 		if r.Op == gc.OLITERAL {
-			v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+			v := uint64(gc.Mpgetfix(r.Val.U.Xval))
 			if v >= 64 {
 				if gc.Is64(r.Type) {
 					splitclean()
@@ -327,7 +323,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 			gins(i386.AMOVL, &lo1, &ax)
 
 			gins(i386.AMOVL, &hi1, &dx)
-			p1 = gins(i386.ASHRL, ncon(uint32(v)), &ax)
+			p1 := gins(i386.ASHRL, ncon(uint32(v)), &ax)
 			p1.From.Index = i386.REG_DX // double-width shift
 			p1.From.Scale = 0
 			gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v)), &dx)
@@ -341,7 +337,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 
 		// load shift value into register.
 		// if high bits are set, zero value.
-		p1 = nil
+		p1 := (*obj.Prog)(nil)
 
 		if gc.Is64(r.Type) {
 			gins(i386.ACMPL, &hi2, ncon(0))
@@ -355,7 +351,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 		// if shift count is >=64, zero or sign-extend value
 		gins(i386.ACMPL, &cx, ncon(64))
 
-		p2 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+		p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
 		if p1 != nil {
 			gc.Patch(p1, gc.Pc)
 		}
@@ -404,9 +400,9 @@ func cgen64(n *gc.Node, res *gc.Node) {
 
 		if lo2.Op == gc.OLITERAL {
 			// special cases for constants.
-			lv = uint32(gc.Mpgetfix(lo2.Val.U.Xval))
+			lv := uint32(gc.Mpgetfix(lo2.Val.U.Xval))
 
-			hv = uint32(gc.Mpgetfix(hi2.Val.U.Xval))
+			hv := uint32(gc.Mpgetfix(hi2.Val.U.Xval))
 			splitclean() // right side
 			split64(res, &lo2, &hi2)
 			switch n.Op {
@@ -518,15 +514,13 @@ func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
 	var lo2 gc.Node
 	var hi2 gc.Node
 	var rr gc.Node
-	var br *obj.Prog
-	var t *gc.Type
 
 	split64(nl, &lo1, &hi1)
 	split64(nr, &lo2, &hi2)
 
 	// compare most significant word;
 	// if they differ, we're done.
-	t = hi1.Type
+	t := hi1.Type
 
 	if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
 		gins(i386.ACMPL, &hi1, &hi2)
@@ -537,7 +531,7 @@ func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
 		regfree(&rr)
 	}
 
-	br = nil
+	br := (*obj.Prog)(nil)
 	switch op {
 	default:
 		gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
diff --git a/src/cmd/8g/ggen.go b/src/cmd/8g/ggen.go
index f72beda21a3b782fc300c68c5285ffa322f920f7..ca2a79fb7926cc39d05052b09437afff009cc5c1 100644
--- a/src/cmd/8g/ggen.go
+++ b/src/cmd/8g/ggen.go
@@ -11,30 +11,24 @@ import (
 import "cmd/internal/gc"
 
 func defframe(ptxt *obj.Prog) {
-	var frame uint32
-	var ax uint32
-	var p *obj.Prog
-	var lo int64
-	var hi int64
-	var l *gc.NodeList
 	var n *gc.Node
 
 	// fill in argument size, stack size
 	ptxt.To.Type = obj.TYPE_TEXTSIZE
 
 	ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
-	frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
 	ptxt.To.Offset = int64(frame)
 
 	// insert code to zero ambiguously live variables
 	// so that the garbage collector only sees initialized values
 	// when it looks for pointers.
-	p = ptxt
+	p := ptxt
 
-	hi = 0
-	lo = hi
-	ax = 0
-	for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+	hi := int64(0)
+	lo := hi
+	ax := uint32(0)
+	for l := gc.Curfn.Dcl; l != nil; l = l.Next {
 		n = l.N
 		if n.Needzero == 0 {
 			continue
@@ -66,10 +60,7 @@ func defframe(ptxt *obj.Prog) {
 }
 
 func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
-	var cnt int64
-	var i int64
-
-	cnt = hi - lo
+	cnt := hi - lo
 	if cnt == 0 {
 		return p
 	}
@@ -79,7 +70,7 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Pr
 	}
 
 	if cnt <= int64(4*gc.Widthreg) {
-		for i = 0; i < cnt; i += int64(gc.Widthreg) {
+		for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
 			p = appendpp(p, i386.AMOVL, obj.TYPE_REG, i386.REG_AX, 0, obj.TYPE_MEM, i386.REG_SP, frame+lo+i)
 		}
 	} else if !gc.Nacl && cnt <= int64(128*gc.Widthreg) {
@@ -97,8 +88,7 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Pr
 }
 
 func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
-	var q *obj.Prog
-	q = gc.Ctxt.NewProg()
+	q := gc.Ctxt.NewProg()
 	gc.Clearp(q)
 	q.As = int16(as)
 	q.Lineno = p.Lineno
@@ -114,27 +104,20 @@ func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int
 }
 
 func clearfat(nl *gc.Node) {
-	var w uint32
-	var c uint32
-	var q uint32
-	var n1 gc.Node
-	var z gc.Node
-	var p *obj.Prog
-
 	/* clear a fat object */
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nclearfat", nl)
 	}
 
-	w = uint32(nl.Type.Width)
+	w := uint32(nl.Type.Width)
 
 	// Avoid taking the address for simple enough types.
 	if componentgen(nil, nl) {
 		return
 	}
 
-	c = w % 4 // bytes
-	q = w / 4 // quads
+	c := w % 4 // bytes
+	q := w / 4 // quads
 
 	if q < 4 {
 		// Write sequence of MOV 0, off(base) instead of using STOSL.
@@ -143,10 +126,12 @@ func clearfat(nl *gc.Node) {
 		// than the unrolled STOSL loop.
 		// NOTE: Must use agen, not igen, so that optimizer sees address
 		// being taken. We are not writing on field boundaries.
+		var n1 gc.Node
 		regalloc(&n1, gc.Types[gc.Tptr], nil)
 
 		agen(nl, &n1)
 		n1.Op = gc.OINDREG
+		var z gc.Node
 		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
 		for {
 			tmp14 := q
@@ -175,6 +160,7 @@ func clearfat(nl *gc.Node) {
 		return
 	}
 
+	var n1 gc.Node
 	gc.Nodreg(&n1, gc.Types[gc.Tptr], i386.REG_DI)
 	agen(nl, &n1)
 	gconreg(i386.AMOVL, 0, i386.REG_AX)
@@ -184,7 +170,7 @@ func clearfat(nl *gc.Node) {
 		gins(i386.AREP, nil, nil)   // repeat
 		gins(i386.ASTOSL, nil, nil) // STOSL EAX,*(DI)+
 	} else if q >= 4 {
-		p = gins(obj.ADUFFZERO, nil, nil)
+		p := gins(obj.ADUFFZERO, nil, nil)
 		p.To.Type = obj.TYPE_ADDR
 		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
 
@@ -213,15 +199,8 @@ func clearfat(nl *gc.Node) {
   *	proc=3	normal call to C pointer (not Go func value)
 */
 func ginscall(f *gc.Node, proc int) {
-	var p *obj.Prog
-	var reg gc.Node
-	var r1 gc.Node
-	var con gc.Node
-	var stk gc.Node
-	var extra int32
-
 	if f.Type != nil {
-		extra = 0
+		extra := int32(0)
 		if proc == 1 || proc == 2 {
 			extra = 2 * int32(gc.Widthptr)
 		}
@@ -245,12 +224,13 @@ func ginscall(f *gc.Node, proc int) {
 				// x86 NOP 0x90 is really XCHG AX, AX; use that description
 				// because the NOP pseudo-instruction will be removed by
 				// the linker.
+				var reg gc.Node
 				gc.Nodreg(&reg, gc.Types[gc.TINT], i386.REG_AX)
 
 				gins(i386.AXCHGL, &reg, &reg)
 			}
 
-			p = gins(obj.ACALL, nil, f)
+			p := gins(obj.ACALL, nil, f)
 			gc.Afunclit(&p.To, f)
 			if proc == -1 || gc.Noreturn(p) {
 				gins(obj.AUNDEF, nil, nil)
@@ -258,7 +238,9 @@ func ginscall(f *gc.Node, proc int) {
 			break
 		}
 
+		var reg gc.Node
 		gc.Nodreg(&reg, gc.Types[gc.Tptr], i386.REG_DX)
+		var r1 gc.Node
 		gc.Nodreg(&r1, gc.Types[gc.Tptr], i386.REG_BX)
 		gmove(f, &reg)
 		reg.Op = gc.OINDREG
@@ -271,13 +253,14 @@ func ginscall(f *gc.Node, proc int) {
 
 	case 1, // call in new proc (go)
 		2: // deferred call (defer)
-		stk = gc.Node{}
+		stk := gc.Node{}
 
 		stk.Op = gc.OINDREG
 		stk.Val.U.Reg = i386.REG_SP
 		stk.Xoffset = 0
 
 		// size of arguments at 0(SP)
+		var con gc.Node
 		gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
 
 		gins(i386.AMOVL, &con, &stk)
@@ -293,9 +276,10 @@ func ginscall(f *gc.Node, proc int) {
 			ginscall(gc.Deferproc, 0)
 		}
 		if proc == 2 {
+			var reg gc.Node
 			gc.Nodreg(&reg, gc.Types[gc.TINT32], i386.REG_AX)
 			gins(i386.ATESTL, &reg, &reg)
-			p = gc.Gbranch(i386.AJEQ, nil, +1)
+			p := gc.Gbranch(i386.AJEQ, nil, +1)
 			cgen_ret(nil)
 			gc.Patch(p, gc.Pc)
 		}
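The `proc == 2` path above shows the Gbranch/Patch idiom that runs through all of these files: emit a branch whose destination is not yet known (here, past the `cgen_ret`), then patch it once the target pc exists. A toy version of the mechanism, with a made-up instruction type:

```go
package main

import "fmt"

type inst struct {
	op     string
	target int // -1 until patched
}

func main() {
	var prog []*inst
	emit := func(op string) *inst {
		p := &inst{op: op, target: -1}
		prog = append(prog, p)
		return p
	}
	j := emit("JEQ")     // forward branch, destination unknown
	emit("RET")          // code the branch must skip
	j.target = len(prog) // patch: land just past the RET
	for pc, p := range prog {
		fmt.Println(pc, p.op, p.target)
	}
}
```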
@@ -307,20 +291,12 @@ func ginscall(f *gc.Node, proc int) {
  * generate res = n.
  */
 func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
-	var i *gc.Node
-	var f *gc.Node
-	var tmpi gc.Node
-	var nodi gc.Node
-	var nodo gc.Node
-	var nodr gc.Node
-	var nodsp gc.Node
-
-	i = n.Left
+	i := n.Left
 	if i.Op != gc.ODOTINTER {
 		gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
 	}
 
-	f = i.Right // field
+	f := i.Right // field
 	if f.Op != gc.ONAME {
 		gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
 	}
@@ -328,6 +304,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 	i = i.Left // interface
 
 	if i.Addable == 0 {
+		var tmpi gc.Node
 		gc.Tempname(&tmpi, i.Type)
 		cgen(i, &tmpi)
 		i = &tmpi
@@ -337,8 +314,10 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 
 	// i is now addable, prepare an indirected
 	// register to hold its address.
+	var nodi gc.Node
 	igen(i, &nodi, res) // REG = &inter
 
+	var nodsp gc.Node
 	gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], i386.REG_SP)
 
 	nodsp.Xoffset = 0
@@ -349,6 +328,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 	nodi.Xoffset += int64(gc.Widthptr)
 	cgen(&nodi, &nodsp) // {0 or 8}(SP) = 4(REG) -- i.data
 
+	var nodo gc.Node
 	regalloc(&nodo, gc.Types[gc.Tptr], res)
 
 	nodi.Type = gc.Types[gc.Tptr]
@@ -356,6 +336,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 	cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
 	regfree(&nodi)
 
+	var nodr gc.Node
 	regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
 	if n.Left.Xoffset == gc.BADWIDTH {
 		gc.Fatal("cgen_callinter: badwidth")
@@ -387,14 +368,11 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
  *	proc=2	defer call save away stack
  */
 func cgen_call(n *gc.Node, proc int) {
-	var t *gc.Type
-	var nod gc.Node
-	var afun gc.Node
-
 	if n == nil {
 		return
 	}
 
+	var afun gc.Node
 	if n.Left.Ullman >= gc.UINF {
 		// if name involves a fn call
 		// precompute the address of the fn
@@ -404,10 +382,11 @@ func cgen_call(n *gc.Node, proc int) {
 	}
 
 	gc.Genlist(n.List) // assign the args
-	t = n.Left.Type
+	t := n.Left.Type
 
 	// call tempname pointer
 	if n.Left.Ullman >= gc.UINF {
+		var nod gc.Node
 		regalloc(&nod, gc.Types[gc.Tptr], nil)
 		gc.Cgen_as(&nod, &afun)
 		nod.Type = t
@@ -418,6 +397,7 @@ func cgen_call(n *gc.Node, proc int) {
 
 	// call pointer
 	if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+		var nod gc.Node
 		regalloc(&nod, gc.Types[gc.Tptr], nil)
 		gc.Cgen_as(&nod, n.Left)
 		nod.Type = t
@@ -438,22 +418,18 @@ func cgen_call(n *gc.Node, proc int) {
  *	res = return value from call.
  */
 func cgen_callret(n *gc.Node, res *gc.Node) {
-	var nod gc.Node
-	var fp *gc.Type
-	var t *gc.Type
-	var flist gc.Iter
-
-	t = n.Left.Type
+	t := n.Left.Type
 	if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
 		t = t.Type
 	}
 
-	fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+	var flist gc.Iter
+	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
 	if fp == nil {
 		gc.Fatal("cgen_callret: nil")
 	}
 
-	nod = gc.Node{}
+	nod := gc.Node{}
 	nod.Op = gc.OINDREG
 	nod.Val.U.Reg = i386.REG_SP
 	nod.Addable = 1
@@ -469,23 +445,18 @@ func cgen_callret(n *gc.Node, res *gc.Node) {
  *	res = &return value from call.
  */
 func cgen_aret(n *gc.Node, res *gc.Node) {
-	var nod1 gc.Node
-	var nod2 gc.Node
-	var fp *gc.Type
-	var t *gc.Type
-	var flist gc.Iter
-
-	t = n.Left.Type
+	t := n.Left.Type
 	if gc.Isptr[t.Etype] != 0 {
 		t = t.Type
 	}
 
-	fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+	var flist gc.Iter
+	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
 	if fp == nil {
 		gc.Fatal("cgen_aret: nil")
 	}
 
-	nod1 = gc.Node{}
+	nod1 := gc.Node{}
 	nod1.Op = gc.OINDREG
 	nod1.Val.U.Reg = i386.REG_SP
 	nod1.Addable = 1
@@ -494,6 +465,7 @@ func cgen_aret(n *gc.Node, res *gc.Node) {
 	nod1.Type = fp.Type
 
 	if res.Op != gc.OREGISTER {
+		var nod2 gc.Node
 		regalloc(&nod2, gc.Types[gc.Tptr], res)
 		gins(i386.ALEAL, &nod1, &nod2)
 		gins(i386.AMOVL, &nod2, res)
@@ -508,8 +480,6 @@ func cgen_aret(n *gc.Node, res *gc.Node) {
  * n->left is assignments to return values.
  */
 func cgen_ret(n *gc.Node) {
-	var p *obj.Prog
-
 	if n != nil {
 		gc.Genlist(n.List) // copy out args
 	}
@@ -517,7 +487,7 @@ func cgen_ret(n *gc.Node) {
 		ginscall(gc.Deferreturn, 0)
 	}
 	gc.Genlist(gc.Curfn.Exit)
-	p = gins(obj.ARET, nil, nil)
+	p := gins(obj.ARET, nil, nil)
 	if n != nil && n.Op == gc.ORETJMP {
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
@@ -536,19 +506,6 @@ func cgen_ret(n *gc.Node) {
  * according to op.
  */
 func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) {
-	var check int
-	var n1 gc.Node
-	var t1 gc.Node
-	var t2 gc.Node
-	var t3 gc.Node
-	var t4 gc.Node
-	var n4 gc.Node
-	var nz gc.Node
-	var t *gc.Type
-	var t0 *gc.Type
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-
 	// Have to be careful about handling
 	// most negative int divided by -1 correctly.
 	// The hardware will trap.
@@ -557,10 +514,10 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
 	// Easiest way to avoid for int8, int16: use int32.
 	// For int32 and int64, use explicit test.
 	// Could use int64 hw for int32.
-	t = nl.Type
+	t := nl.Type
 
-	t0 = t
-	check = 0
+	t0 := t
+	check := 0
 	if gc.Issigned[t.Etype] != 0 {
 		check = 1
 		if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -1<<uint64(t.Width*8-1) {
@@ -579,10 +536,14 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
 		check = 0
 	}
 
+	var t1 gc.Node
 	gc.Tempname(&t1, t)
+	var t2 gc.Node
 	gc.Tempname(&t2, t)
 	if t0 != t {
+		var t3 gc.Node
 		gc.Tempname(&t3, t0)
+		var t4 gc.Node
 		gc.Tempname(&t4, t0)
 		cgen(nl, &t3)
 		cgen(nr, &t4)
@@ -596,6 +557,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
 		cgen(nr, &t2)
 	}
 
+	var n1 gc.Node
 	if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) {
 		regalloc(&n1, t, res)
 	} else {
@@ -603,7 +565,8 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
 	}
 	gmove(&t2, &n1)
 	gmove(&t1, ax)
-	p2 = nil
+	p2 := (*obj.Prog)(nil)
+	var n4 gc.Node
 	if gc.Nacl {
 		// Native Client does not relay the divide-by-zero trap
 		// to the executing program, so we must insert a check
@@ -611,7 +574,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
 		gc.Nodconst(&n4, t, 0)
 
 		gins(optoas(gc.OCMP, t), &n1, &n4)
-		p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
 		if panicdiv == nil {
 			panicdiv = gc.Sysfunc("panicdivide")
 		}
@@ -622,7 +585,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
 	if check != 0 {
 		gc.Nodconst(&n4, t, -1)
 		gins(optoas(gc.OCMP, t), &n1, &n4)
-		p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
 		if op == gc.ODIV {
 			// a / (-1) is -a.
 			gins(optoas(gc.OMINUS, t), nil, ax)
@@ -640,6 +603,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
 	}
 
 	if gc.Issigned[t.Etype] == 0 {
+		var nz gc.Node
 		gc.Nodconst(&nz, t, 0)
 		gmove(&nz, dx)
 	} else {
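The checks threaded through dodiv guard the case spelled out in its opening comment: the x86 IDIV instruction traps on the most negative int divided by -1, while the Go spec defines that quotient as the dividend itself (with remainder 0). At the source level the generated -1 test makes this hold:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	x := int32(math.MinInt32)
	y := int32(-1)
	// IDIV would trap here; the emitted check yields x instead,
	// matching the spec: q = x, r = 0.
	fmt.Println(x/y, x%y) // -2147483648 0
}
```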
@@ -659,9 +623,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
 }
 
 func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
-	var r int
-
-	r = int(reg[dr])
+	r := int(reg[dr])
 	gc.Nodreg(x, gc.Types[gc.TINT32], dr)
 
 	// save current ax and dx if they are live
@@ -691,22 +653,21 @@ func restx(x *gc.Node, oldx *gc.Node) {
  *	res = nl % nr
  */
 func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	var ax gc.Node
-	var dx gc.Node
-	var oldax gc.Node
-	var olddx gc.Node
-	var t *gc.Type
-
 	if gc.Is64(nl.Type) {
 		gc.Fatal("cgen_div %v", gc.Tconv(nl.Type, 0))
 	}
 
+	var t *gc.Type
 	if gc.Issigned[nl.Type.Etype] != 0 {
 		t = gc.Types[gc.TINT32]
 	} else {
 		t = gc.Types[gc.TUINT32]
 	}
+	var ax gc.Node
+	var oldax gc.Node
 	savex(i386.REG_AX, &ax, &oldax, res, t)
+	var olddx gc.Node
+	var dx gc.Node
 	savex(i386.REG_DX, &dx, &olddx, res, t)
 	dodiv(op, nl, nr, res, &ax, &dx)
 	restx(&dx, &olddx)
@@ -719,33 +680,22 @@ func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
  *	res = nl >> nr
  */
 func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	var n1 gc.Node
-	var n2 gc.Node
-	var nt gc.Node
-	var cx gc.Node
-	var oldcx gc.Node
-	var hi gc.Node
-	var lo gc.Node
-	var a int
-	var w int
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var sc uint64
-
 	if nl.Type.Width > 4 {
 		gc.Fatal("cgen_shift %v", gc.Tconv(nl.Type, 0))
 	}
 
-	w = int(nl.Type.Width * 8)
+	w := int(nl.Type.Width * 8)
 
-	a = optoas(op, nl.Type)
+	a := optoas(op, nl.Type)
 
 	if nr.Op == gc.OLITERAL {
+		var n2 gc.Node
 		gc.Tempname(&n2, nl.Type)
 		cgen(nl, &n2)
+		var n1 gc.Node
 		regalloc(&n1, nl.Type, res)
 		gmove(&n2, &n1)
-		sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+		sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
 		if sc >= uint64(nl.Type.Width*8) {
 			// large shift gets 2 shifts by width-1
 			gins(a, ncon(uint32(w)-1), &n1)
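For a constant shift count at least as wide as the operand, the code above emits two shifts by width-1 rather than one shift by the count: 32-bit x86 shifts mask the count to 5 bits, so a single wide shift would be wrong, while the doubled shift produces the zero (or all-sign-bits) result Go defines. Source-level equivalent:

```go
package main

import "fmt"

// Two shifts by width-1: the first leaves 0 or -1, the second keeps it.
func sraWide(x int32) int32 {
	x >>= 31
	x >>= 31
	return x
}

func main() {
	fmt.Println(sraWide(-8)) // -1: the sign bit fills the word
	fmt.Println(sraWide(8))  // 0
	// In Go source, x >> 100 already means this; the back end must
	// spell it out because the hardware would shift by 100 & 31.
	var s uint = 100
	fmt.Println(int32(-8)>>s, int32(8)>>s) // -1 0
}
```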
@@ -759,13 +709,16 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 		return
 	}
 
-	oldcx = gc.Node{}
+	oldcx := gc.Node{}
+	var cx gc.Node
 	gc.Nodreg(&cx, gc.Types[gc.TUINT32], i386.REG_CX)
 	if reg[i386.REG_CX] > 1 && !gc.Samereg(&cx, res) {
 		gc.Tempname(&oldcx, gc.Types[gc.TUINT32])
 		gmove(&cx, &oldcx)
 	}
 
+	var n1 gc.Node
+	var nt gc.Node
 	if nr.Type.Width > 4 {
 		gc.Tempname(&nt, nr.Type)
 		n1 = nt
@@ -774,6 +727,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 		regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
 	}
 
+	var n2 gc.Node
 	if gc.Samereg(&cx, res) {
 		regalloc(&n2, nl.Type, nil)
 	} else {
@@ -794,20 +748,25 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 			gc.Nodreg(&n1, gc.Types[gc.TUINT32], i386.REG_CX)
 
 			regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
+			var lo gc.Node
+			var hi gc.Node
 			split64(&nt, &lo, &hi)
 			gmove(&lo, &n1)
 			splitclean()
 		}
 	} else {
+		var p1 *obj.Prog
 		if nr.Type.Width > 4 {
 			// delayed reg alloc
 			gc.Nodreg(&n1, gc.Types[gc.TUINT32], i386.REG_CX)
 
 			regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
+			var lo gc.Node
+			var hi gc.Node
 			split64(&nt, &lo, &hi)
 			gmove(&lo, &n1)
 			gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &hi, ncon(0))
-			p2 = gc.Gbranch(optoas(gc.ONE, gc.Types[gc.TUINT32]), nil, +1)
+			p2 := gc.Gbranch(optoas(gc.ONE, gc.Types[gc.TUINT32]), nil, +1)
 			gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n1, ncon(uint32(w)))
 			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
 			splitclean()
@@ -845,15 +804,8 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
  * we do a full-width multiplication and truncate afterwards.
  */
 func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	var n1 gc.Node
-	var n2 gc.Node
-	var nt gc.Node
-	var tmp *gc.Node
-	var t *gc.Type
-	var a int
-
 	// copy from byte to full registers
-	t = gc.Types[gc.TUINT32]
+	t := gc.Types[gc.TUINT32]
 
 	if gc.Issigned[nl.Type.Etype] != 0 {
 		t = gc.Types[gc.TINT32]
@@ -861,18 +813,21 @@ func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 
 	// largest ullman on left.
 	if nl.Ullman < nr.Ullman {
-		tmp = nl
+		tmp := nl
 		nl = nr
 		nr = tmp
 	}
 
+	var nt gc.Node
 	gc.Tempname(&nt, nl.Type)
 	cgen(nl, &nt)
+	var n1 gc.Node
 	regalloc(&n1, t, res)
 	cgen(nr, &n1)
+	var n2 gc.Node
 	regalloc(&n2, t, nil)
 	gmove(&nt, &n2)
-	a = optoas(op, t)
+	a := optoas(op, t)
 	gins(a, &n2, &n1)
 	regfree(&n2)
 	gmove(&n1, res)
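As the comment above cgen_bmul says, byte multiplies are done by widening both operands to full registers, multiplying, and truncating: the low 8 bits of a product depend only on the low 8 bits of the operands, so the wide multiply is safe. Source-level equivalent:

```go
package main

import "fmt"

func mulByte(a, b int8) int8 {
	return int8(int32(a) * int32(b)) // widen, multiply, truncate
}

func main() {
	fmt.Println(mulByte(100, 3)) // 44: 300 wraps mod 256
}
```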
@@ -884,15 +839,13 @@ func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
  *   res = (nl*nr) >> width
  */
 func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	var t *gc.Type
-	var a int
 	var n1 gc.Node
 	var n2 gc.Node
 	var ax gc.Node
 	var dx gc.Node
 
-	t = nl.Type
-	a = optoas(gc.OHMUL, t)
+	t := nl.Type
+	a := optoas(gc.OHMUL, t)
 
 	// gen nl in n1.
 	gc.Tempname(&n1, t)
@@ -927,24 +880,17 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
  * generate floating-point operation.
  */
 func cgen_float(n *gc.Node, res *gc.Node) {
-	var nl *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var p3 *obj.Prog
-
-	nl = n.Left
+	nl := n.Left
 	switch n.Op {
 	case gc.OEQ,
 		gc.ONE,
 		gc.OLT,
 		gc.OLE,
 		gc.OGE:
-		p1 = gc.Gbranch(obj.AJMP, nil, 0)
-		p2 = gc.Pc
+		p1 := gc.Gbranch(obj.AJMP, nil, 0)
+		p2 := gc.Pc
 		gmove(gc.Nodbool(true), res)
-		p3 = gc.Gbranch(obj.AJMP, nil, 0)
+		p3 := gc.Gbranch(obj.AJMP, nil, 0)
 		gc.Patch(p1, gc.Pc)
 		bgen(n, true, 0, p2)
 		gmove(gc.Nodbool(false), res)
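The comparison cases at the top of cgen_float materialize a boolean through branches: jump forward to the test, have the test branch back to a block that stores true, and let fall-through store false. The same shape written with Go gotos, as a sketch:

```go
package main

import "fmt"

func lessAsBranches(a, b float64) bool {
	var res bool
	goto test
setTrue:
	res = true
	goto done
test:
	if a < b {
		goto setTrue
	}
	res = false
done:
	return res
}

func main() {
	fmt.Println(lessAsBranches(1, 2), lessAsBranches(2, 1)) // true false
}
```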
@@ -961,7 +907,9 @@ func cgen_float(n *gc.Node, res *gc.Node) {
 			return
 		}
 
+		var n2 gc.Node
 		gc.Tempname(&n2, n.Type)
+		var n1 gc.Node
 		mgen(nl, &n1, res)
 		gmove(&n1, &n2)
 		gmove(&n2, res)
@@ -980,11 +928,9 @@ func cgen_float(n *gc.Node, res *gc.Node) {
 func cgen_float387(n *gc.Node, res *gc.Node) {
 	var f0 gc.Node
 	var f1 gc.Node
-	var nl *gc.Node
-	var nr *gc.Node
 
-	nl = n.Left
-	nr = n.Right
+	nl := n.Left
+	nr := n.Right
 	gc.Nodreg(&f0, nl.Type, i386.REG_F0)
 	gc.Nodreg(&f1, n.Type, i386.REG_F0+1)
 	if nr != nil {
@@ -1024,16 +970,10 @@ flt2: // binary
 }
 
 func cgen_floatsse(n *gc.Node, res *gc.Node) {
-	var nl *gc.Node
-	var nr *gc.Node
-	var r *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var nt gc.Node
 	var a int
 
-	nl = n.Left
-	nr = n.Right
+	nl := n.Left
+	nr := n.Right
 	switch n.Op {
 	default:
 		gc.Dump("cgen_floatsse", n)
@@ -1065,16 +1005,19 @@ func cgen_floatsse(n *gc.Node, res *gc.Node) {
 
 sbop: // symmetric binary
 	if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
-		r = nl
+		r := nl
 		nl = nr
 		nr = r
 	}
 
 abop: // asymmetric binary
 	if nl.Ullman >= nr.Ullman {
+		var nt gc.Node
 		gc.Tempname(&nt, nl.Type)
 		cgen(nl, &nt)
+		var n2 gc.Node
 		mgen(nr, &n2, nil)
+		var n1 gc.Node
 		regalloc(&n1, nl.Type, res)
 		gmove(&nt, &n1)
 		gins(a, &n2, &n1)
@@ -1082,8 +1025,10 @@ abop: // asymmetric binary
 		regfree(&n1)
 		mfree(&n2)
 	} else {
+		var n2 gc.Node
 		regalloc(&n2, nr.Type, res)
 		cgen(nr, &n2)
+		var n1 gc.Node
 		regalloc(&n1, nl.Type, nil)
 		cgen(nl, &n1)
 		gins(a, &n2, &n1)
@@ -1096,29 +1041,14 @@ abop: // asymmetric binary
 }
 
 func bgen_float(n *gc.Node, true_ int, likely int, to *obj.Prog) {
-	var et int
-	var a int
-	var nl *gc.Node
-	var nr *gc.Node
-	var r *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
-	var tmp gc.Node
-	var t1 gc.Node
-	var t2 gc.Node
-	var ax gc.Node
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-
-	nl = n.Left
-	nr = n.Right
-	a = int(n.Op)
+	nl := n.Left
+	nr := n.Right
+	a := int(n.Op)
 	if true_ == 0 {
 		// brcom is not valid on floats when NaN is involved.
-		p1 = gc.Gbranch(obj.AJMP, nil, 0)
+		p1 := gc.Gbranch(obj.AJMP, nil, 0)
 
-		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		p2 := gc.Gbranch(obj.AJMP, nil, 0)
 		gc.Patch(p1, gc.Pc)
 
 		// No need to avoid re-genning ninit.
@@ -1129,6 +1059,10 @@ func bgen_float(n *gc.Node, true_ int, likely int, to *obj.Prog) {
 		return
 	}
 
+	var tmp gc.Node
+	var et int
+	var n2 gc.Node
+	var ax gc.Node
 	if gc.Use_sse != 0 {
 		goto sse
 	} else {
@@ -1139,7 +1073,7 @@ x87:
 	a = gc.Brrev(a) // because the args are stacked
 	if a == gc.OGE || a == gc.OGT {
 		// only < and <= work right with NaN; reverse if needed
-		r = nr
+		r := nr
 
 		nr = nl
 		nl = r
@@ -1169,8 +1103,10 @@ x87:
 		// all the other ops have the same problem.
 		// We need to figure out what the right general
 		// solution is, besides telling people to use float64.
+		var t1 gc.Node
 		gc.Tempname(&t1, gc.Types[gc.TFLOAT32])
 
+		var t2 gc.Node
 		gc.Tempname(&t2, gc.Types[gc.TFLOAT32])
 		cgen(nr, &t1)
 		cgen(nl, &t2)
@@ -1184,12 +1120,14 @@ x87:
 
 sse:
 	if nl.Addable == 0 {
+		var n1 gc.Node
 		gc.Tempname(&n1, nl.Type)
 		cgen(nl, &n1)
 		nl = &n1
 	}
 
 	if nr.Addable == 0 {
+		var tmp gc.Node
 		gc.Tempname(&tmp, nr.Type)
 		cgen(nr, &tmp)
 		nr = &tmp
@@ -1200,6 +1138,7 @@ sse:
 	nr = &n2
 
 	if nl.Op != gc.OREGISTER {
+		var n3 gc.Node
 		regalloc(&n3, nl.Type, nil)
 		gmove(nl, &n3)
 		nl = &n3
@@ -1207,7 +1146,7 @@ sse:
 
 	if a == gc.OGE || a == gc.OGT {
 		// only < and <= work right with NaN; reverse if needed
-		r = nr
+		r := nr
 
 		nr = nl
 		nl = r
@@ -1223,9 +1162,9 @@ sse:
 ret:
 	if a == gc.OEQ {
 		// neither NE nor P
-		p1 = gc.Gbranch(i386.AJNE, nil, -likely)
+		p1 := gc.Gbranch(i386.AJNE, nil, -likely)
 
-		p2 = gc.Gbranch(i386.AJPS, nil, -likely)
+		p2 := gc.Gbranch(i386.AJPS, nil, -likely)
 		gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
 		gc.Patch(p1, gc.Pc)
 		gc.Patch(p2, gc.Pc)
@@ -1242,11 +1181,10 @@ ret:
 // Called after regopt and peep have run.
 // Expand CHECKNIL pseudo-op into actual nil pointer check.
 func expandchecks(firstp *obj.Prog) {
-	var p *obj.Prog
 	var p1 *obj.Prog
 	var p2 *obj.Prog
 
-	for p = firstp; p != nil; p = p.Link {
+	for p := firstp; p != nil; p = p.Link {
 		if p.As != obj.ACHECKNIL {
 			continue
 		}
diff --git a/src/cmd/8g/gsubr.go b/src/cmd/8g/gsubr.go
index 2728c2a27658dc071a7ff25b239c63bf8a97050a..ac3ad4111cfee33ba78f74400ba90cd0c6f7e561 100644
--- a/src/cmd/8g/gsubr.go
+++ b/src/cmd/8g/gsubr.go
@@ -46,13 +46,11 @@ var unmappedzero uint32 = 4096
  * return Axxx for Oxxx on type t.
  */
 func optoas(op int, t *gc.Type) int {
-	var a int
-
 	if t == nil {
 		gc.Fatal("optoas: t is nil")
 	}
 
-	a = obj.AXXX
+	a := obj.AXXX
 	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
 	default:
 		gc.Fatal("optoas: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
@@ -401,11 +399,8 @@ func optoas(op int, t *gc.Type) int {
 }
 
 func foptoas(op int, t *gc.Type, flg int) int {
-	var et int
-	var a int
-
-	a = obj.AXXX
-	et = int(gc.Simtype[t.Etype])
+	a := obj.AXXX
+	et := int(gc.Simtype[t.Etype])
 
 	if gc.Use_sse != 0 {
 		goto sse
@@ -564,18 +559,16 @@ var resvd = []int{
 }
 
 func ginit() {
-	var i int
-
-	for i = 0; i < len(reg); i++ {
+	for i := 0; i < len(reg); i++ {
 		reg[i] = 1
 	}
-	for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+	for i := i386.REG_AX; i <= i386.REG_DI; i++ {
 		reg[i] = 0
 	}
-	for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+	for i := i386.REG_X0; i <= i386.REG_X7; i++ {
 		reg[i] = 0
 	}
-	for i = 0; i < len(resvd); i++ {
+	for i := 0; i < len(resvd); i++ {
 		reg[resvd[i]]++
 	}
 }
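ginit seeds a small reference-count table: 1 marks a register as unavailable, 0 as free, and each entry in `resvd` gets an extra increment so gclean can verify the counts return to baseline. A miniature of the scheme (register numbering invented for the sketch):

```go
package main

import "fmt"

const nreg = 4

var reg [nreg]int

func alloc() int {
	for i := range reg {
		if reg[i] == 0 {
			reg[i]++
			return i
		}
	}
	panic("out of registers")
}

func free(i int) { reg[i]-- }

func main() {
	reg[0]++ // reserve register 0, like the resvd loop above
	r := alloc()
	fmt.Println("got register", r) // 1: register 0 is reserved
	free(r)
}
```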
@@ -583,18 +576,16 @@ func ginit() {
 var regpc [i386.MAXREG]uint32
 
 func gclean() {
-	var i int
-
-	for i = 0; i < len(resvd); i++ {
+	for i := 0; i < len(resvd); i++ {
 		reg[resvd[i]]--
 	}
 
-	for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+	for i := i386.REG_AX; i <= i386.REG_DI; i++ {
 		if reg[i] != 0 {
 			gc.Yyerror("reg %v left allocated at %x", gc.Ctxt.Rconv(i), regpc[i])
 		}
 	}
-	for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+	for i := i386.REG_X0; i <= i386.REG_X7; i++ {
 		if reg[i] != 0 {
 			gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
 		}
@@ -602,10 +593,9 @@ func gclean() {
 }
 
 func anyregalloc() bool {
-	var i int
 	var j int
 
-	for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+	for i := i386.REG_AX; i <= i386.REG_DI; i++ {
 		if reg[i] == 0 {
 			goto ok
 		}
@@ -618,7 +608,7 @@ func anyregalloc() bool {
 	ok:
 	}
 
-	for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+	for i := i386.REG_X0; i <= i386.REG_X7; i++ {
 		if reg[i] != 0 {
 			return true
 		}
@@ -632,14 +622,12 @@ func anyregalloc() bool {
  * caller must regfree(n).
  */
 func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
-	var i int
-	var et int
-
 	if t == nil {
 		gc.Fatal("regalloc: t nil")
 	}
-	et = int(gc.Simtype[t.Etype])
+	et := int(gc.Simtype[t.Etype])
 
+	var i int
 	switch et {
 	case gc.TINT64,
 		gc.TUINT64:
@@ -668,7 +656,7 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
 		}
 
 		fmt.Printf("registers allocated at\n")
-		for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+		for i := i386.REG_AX; i <= i386.REG_DI; i++ {
 			fmt.Printf("\t%v\t%#x\n", gc.Ctxt.Rconv(i), regpc[i])
 		}
 		gc.Fatal("out of fixed registers")
@@ -694,7 +682,7 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
 			}
 		}
 		fmt.Printf("registers allocated at\n")
-		for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+		for i := i386.REG_X0; i <= i386.REG_X7; i++ {
 			fmt.Printf("\t%v\t%#x\n", gc.Ctxt.Rconv(i), regpc[i])
 		}
 		gc.Fatal("out of floating registers")
@@ -723,15 +711,13 @@ out:
 }
 
 func regfree(n *gc.Node) {
-	var i int
-
 	if n.Op == gc.ONAME {
 		return
 	}
 	if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
 		gc.Fatal("regfree: not a register")
 	}
-	i = int(n.Val.U.Reg)
+	i := int(n.Val.U.Reg)
 	if i == i386.REG_SP {
 		return
 	}
@@ -764,9 +750,7 @@ func gconreg(as int, c int64, reg int) {
  * swap node contents
  */
 func nswap(a *gc.Node, b *gc.Node) {
-	var t gc.Node
-
-	t = *a
+	t := *a
 	*a = *b
 	*b = t
 }
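nswap keeps the explicit temporary from the C original; idiomatic Go can express the same swap as a parallel assignment. A sketch with a stand-in node type:

```go
package main

import "fmt"

type node struct{ val int }

// Same effect as nswap, via parallel assignment:
func swap(a, b *node) {
	*a, *b = *b, *a
}

func main() {
	x, y := node{1}, node{2}
	swap(&x, &y)
	fmt.Println(x.val, y.val) // 2 1
}
```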
@@ -794,9 +778,6 @@ var nsclean int
  * n is a 64-bit value.  fill in lo and hi to refer to its 32-bit halves.
  */
 func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
-	var n1 gc.Node
-	var i int64
-
 	if !gc.Is64(n.Type) {
 		gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
 	}
@@ -810,6 +791,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
 	default:
 		switch n.Op {
 		default:
+			var n1 gc.Node
 			if !dotaddable(n, &n1) {
 				igen(n, &n1, nil)
 				sclean[nsclean-1] = n1
@@ -819,6 +801,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
 
 		case gc.ONAME:
 			if n.Class == gc.PPARAMREF {
+				var n1 gc.Node
 				cgen(n.Heapaddr, &n1)
 				sclean[nsclean-1] = n1
 				n = &n1
@@ -840,8 +823,9 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
 		hi.Xoffset += 4
 
 	case gc.OLITERAL:
+		var n1 gc.Node
 		gc.Convconst(&n1, n.Type, &n.Val)
-		i = gc.Mpgetfix(n1.Val.U.Xval)
+		i := gc.Mpgetfix(n1.Val.U.Xval)
 		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
 		i >>= 32
 		if n.Type.Etype == gc.TINT64 {
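For the OLITERAL case above, split64 peels a 64-bit constant into 32-bit halves: the low word is a plain truncation and the high word is the value shifted down 32. The same arithmetic in portable Go:

```go
package main

import "fmt"

func split(v uint64) (lo, hi uint32) {
	return uint32(v), uint32(v >> 32)
}

func main() {
	lo, hi := split(0x1122334455667788)
	fmt.Printf("lo=%#x hi=%#x\n", lo, hi) // lo=0x55667788 hi=0x11223344
}
```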
@@ -901,25 +885,13 @@ func memname(n *gc.Node, t *gc.Type) {
 }
 
 func gmove(f *gc.Node, t *gc.Node) {
-	var a int
-	var ft int
-	var tt int
-	var cvt *gc.Type
-	var r1 gc.Node
-	var r2 gc.Node
-	var flo gc.Node
-	var fhi gc.Node
-	var tlo gc.Node
-	var thi gc.Node
-	var con gc.Node
-
 	if gc.Debug['M'] != 0 {
 		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0))
 	}
 
-	ft = gc.Simsimtype(f.Type)
-	tt = gc.Simsimtype(t.Type)
-	cvt = t.Type
+	ft := gc.Simsimtype(f.Type)
+	tt := gc.Simsimtype(t.Type)
+	cvt := t.Type
 
 	if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
 		gc.Complexmove(f, t)
@@ -933,12 +905,15 @@ func gmove(f *gc.Node, t *gc.Node) {
 
 	// cannot have two integer memory operands;
 	// except 64-bit, which always copies via registers anyway.
+	var r1 gc.Node
+	var a int
 	if gc.Isint[ft] != 0 && gc.Isint[tt] != 0 && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
 	}
 
 	// convert constant to desired type
 	if f.Op == gc.OLITERAL {
+		var con gc.Node
 		gc.Convconst(&con, t.Type, &f.Val)
 		f = &con
 		ft = gc.Simsimtype(con.Type)
@@ -980,8 +955,11 @@ func gmove(f *gc.Node, t *gc.Node) {
 		gc.TUINT64<<16 | gc.TINT8,
 		gc.TINT64<<16 | gc.TUINT8,
 		gc.TUINT64<<16 | gc.TUINT8:
+		var flo gc.Node
+		var fhi gc.Node
 		split64(f, &flo, &fhi)
 
+		var r1 gc.Node
 		gc.Nodreg(&r1, t.Type, i386.REG_AX)
 		gmove(&flo, &r1)
 		gins(i386.AMOVB, &r1, t)
@@ -1006,8 +984,11 @@ func gmove(f *gc.Node, t *gc.Node) {
 		gc.TUINT64<<16 | gc.TINT16,
 		gc.TINT64<<16 | gc.TUINT16,
 		gc.TUINT64<<16 | gc.TUINT16:
+		var flo gc.Node
+		var fhi gc.Node
 		split64(f, &flo, &fhi)
 
+		var r1 gc.Node
 		gc.Nodreg(&r1, t.Type, i386.REG_AX)
 		gmove(&flo, &r1)
 		gins(i386.AMOVW, &r1, t)
@@ -1024,8 +1005,11 @@ func gmove(f *gc.Node, t *gc.Node) {
 		gc.TUINT64<<16 | gc.TINT32,
 		gc.TINT64<<16 | gc.TUINT32,
 		gc.TUINT64<<16 | gc.TUINT32:
+		var fhi gc.Node
+		var flo gc.Node
 		split64(f, &flo, &fhi)
 
+		var r1 gc.Node
 		gc.Nodreg(&r1, t.Type, i386.REG_AX)
 		gmove(&flo, &r1)
 		gins(i386.AMOVL, &r1, t)
@@ -1036,14 +1020,20 @@ func gmove(f *gc.Node, t *gc.Node) {
 		gc.TINT64<<16 | gc.TUINT64,
 		gc.TUINT64<<16 | gc.TINT64,
 		gc.TUINT64<<16 | gc.TUINT64:
+		var fhi gc.Node
+		var flo gc.Node
 		split64(f, &flo, &fhi)
 
+		var tlo gc.Node
+		var thi gc.Node
 		split64(t, &tlo, &thi)
 		if f.Op == gc.OLITERAL {
 			gins(i386.AMOVL, &flo, &tlo)
 			gins(i386.AMOVL, &fhi, &thi)
 		} else {
+			var r1 gc.Node
 			gc.Nodreg(&r1, gc.Types[gc.TUINT32], i386.REG_AX)
+			var r2 gc.Node
 			gc.Nodreg(&r2, gc.Types[gc.TUINT32], i386.REG_DX)
 			gins(i386.AMOVL, &flo, &r1)
 			gins(i386.AMOVL, &fhi, &r2)
@@ -1118,9 +1108,13 @@ func gmove(f *gc.Node, t *gc.Node) {
 
 	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
 		gc.TINT32<<16 | gc.TUINT64:
+		var thi gc.Node
+		var tlo gc.Node
 		split64(t, &tlo, &thi)
 
+		var flo gc.Node
 		gc.Nodreg(&flo, tlo.Type, i386.REG_AX)
+		var fhi gc.Node
 		gc.Nodreg(&fhi, thi.Type, i386.REG_DX)
 		gmove(f, &flo)
 		gins(i386.ACDQ, nil, nil)
@@ -1131,6 +1125,8 @@ func gmove(f *gc.Node, t *gc.Node) {
 
 	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
 		gc.TUINT32<<16 | gc.TUINT64:
+		var tlo gc.Node
+		var thi gc.Node
 		split64(t, &tlo, &thi)
 
 		gmove(f, &tlo)
@@ -1176,27 +1172,10 @@ fatal:
 
 func floatmove(f *gc.Node, t *gc.Node) {
 	var r1 gc.Node
-	var r2 gc.Node
-	var t1 gc.Node
-	var t2 gc.Node
-	var tlo gc.Node
-	var thi gc.Node
-	var con gc.Node
-	var f0 gc.Node
-	var f1 gc.Node
-	var ax gc.Node
-	var dx gc.Node
-	var cx gc.Node
-	var cvt *gc.Type
-	var ft int
-	var tt int
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var p3 *obj.Prog
 
-	ft = gc.Simsimtype(f.Type)
-	tt = gc.Simsimtype(t.Type)
-	cvt = t.Type
+	ft := gc.Simsimtype(f.Type)
+	tt := gc.Simsimtype(t.Type)
+	cvt := t.Type
 
 	// cannot have two floating point memory operands.
 	if gc.Isfloat[ft] != 0 && gc.Isfloat[tt] != 0 && gc.Ismem(f) && gc.Ismem(t) {
@@ -1205,6 +1184,7 @@ func floatmove(f *gc.Node, t *gc.Node) {
 
 	// convert constant to desired type
 	if f.Op == gc.OLITERAL {
+		var con gc.Node
 		gc.Convconst(&con, t.Type, &f.Val)
 		f = &con
 		ft = gc.Simsimtype(con.Type)
@@ -1242,6 +1222,7 @@ func floatmove(f *gc.Node, t *gc.Node) {
 			goto hardmem
 		}
 
+		var r1 gc.Node
 		gc.Nodreg(&r1, gc.Types[ft], i386.REG_F0)
 		if ft == gc.TFLOAT32 {
 			gins(i386.AFMOVF, f, &r1)
@@ -1250,8 +1231,10 @@ func floatmove(f *gc.Node, t *gc.Node) {
 		}
 
 		// set round to zero mode during conversion
+		var t1 gc.Node
 		memname(&t1, gc.Types[gc.TUINT16])
 
+		var t2 gc.Node
 		memname(&t2, gc.Types[gc.TUINT16])
 		gins(i386.AFSTCW, nil, &t1)
 		gins(i386.AMOVW, ncon(0xf7f), &t2)
@@ -1274,8 +1257,11 @@ func floatmove(f *gc.Node, t *gc.Node) {
 		}
 
 		bignodes()
+		var f0 gc.Node
 		gc.Nodreg(&f0, gc.Types[ft], i386.REG_F0)
+		var f1 gc.Node
 		gc.Nodreg(&f1, gc.Types[ft], i386.REG_F0+1)
+		var ax gc.Node
 		gc.Nodreg(&ax, gc.Types[gc.TUINT16], i386.REG_AX)
 
 		if ft == gc.TFLOAT32 {
@@ -1288,15 +1274,17 @@ func floatmove(f *gc.Node, t *gc.Node) {
 		gins(i386.AFMOVD, &zerof, &f0)
 
 		gins(i386.AFUCOMIP, &f0, &f1)
-		p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
+		p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
 
 		// if 1<<64 <= v { answer = 0 too }
 		gins(i386.AFMOVD, &two64f, &f0)
 
 		gins(i386.AFUCOMIP, &f0, &f1)
-		p2 = gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
+		p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
 		gc.Patch(p1, gc.Pc)
 		gins(i386.AFMOVVP, &f0, t) // don't care about t, but will pop the stack
+		var thi gc.Node
+		var tlo gc.Node
 		split64(t, &tlo, &thi)
 		gins(i386.AMOVL, ncon(0), &tlo)
 		gins(i386.AMOVL, ncon(0), &thi)
@@ -1309,8 +1297,10 @@ func floatmove(f *gc.Node, t *gc.Node) {
 		//	otherwise, subtract 2^63, convert, and add it back.
 
 		// set round to zero mode during conversion
+		var t1 gc.Node
 		memname(&t1, gc.Types[gc.TUINT16])
 
+		var t2 gc.Node
 		memname(&t2, gc.Types[gc.TUINT16])
 		gins(i386.AFSTCW, nil, &t1)
 		gins(i386.AMOVW, ncon(0xf7f), &t2)
@@ -1322,7 +1312,7 @@ func floatmove(f *gc.Node, t *gc.Node) {
 		gins(i386.AFUCOMIP, &f0, &f1)
 		p2 = gc.Gbranch(optoas(gc.OLE, gc.Types[tt]), nil, 0)
 		gins(i386.AFMOVVP, &f0, t)
-		p3 = gc.Gbranch(obj.AJMP, nil, 0)
+		p3 := gc.Gbranch(obj.AJMP, nil, 0)
 		gc.Patch(p2, gc.Pc)
 		gins(i386.AFMOVD, &two63f, &f0)
 		gins(i386.AFSUBDP, &f0, &f1)
@@ -1346,6 +1336,7 @@ func floatmove(f *gc.Node, t *gc.Node) {
 		if t.Op == gc.OREGISTER {
 			goto hardmem
 		}
+		var f0 gc.Node
 		gc.Nodreg(&f0, t.Type, i386.REG_F0)
 		gins(i386.AFMOVV, f, &f0)
 		if tt == gc.TFLOAT32 {
@@ -1360,17 +1351,24 @@ func floatmove(f *gc.Node, t *gc.Node) {
 	//	otherwise, halve (rounding to odd?), convert, and double.
 	case gc.TUINT64<<16 | gc.TFLOAT32,
 		gc.TUINT64<<16 | gc.TFLOAT64:
+		var ax gc.Node
 		gc.Nodreg(&ax, gc.Types[gc.TUINT32], i386.REG_AX)
 
+		var dx gc.Node
 		gc.Nodreg(&dx, gc.Types[gc.TUINT32], i386.REG_DX)
+		var cx gc.Node
 		gc.Nodreg(&cx, gc.Types[gc.TUINT32], i386.REG_CX)
+		var t1 gc.Node
 		gc.Tempname(&t1, f.Type)
+		var tlo gc.Node
+		var thi gc.Node
 		split64(&t1, &tlo, &thi)
 		gmove(f, &t1)
 		gins(i386.ACMPL, &thi, ncon(0))
-		p1 = gc.Gbranch(i386.AJLT, nil, 0)
+		p1 := gc.Gbranch(i386.AJLT, nil, 0)
 
 		// native
+		var r1 gc.Node
 		gc.Nodreg(&r1, gc.Types[tt], i386.REG_F0)
 
 		gins(i386.AFMOVV, &t1, &r1)
@@ -1379,7 +1377,7 @@ func floatmove(f *gc.Node, t *gc.Node) {
 		} else {
 			gins(i386.AFMOVDP, &r1, t)
 		}
-		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		p2 := gc.Gbranch(obj.AJMP, nil, 0)
 
 		// simulated
 		gc.Patch(p1, gc.Pc)
@@ -1396,6 +1394,7 @@ func floatmove(f *gc.Node, t *gc.Node) {
 		gmove(&dx, &thi)
 		gmove(&ax, &tlo)
 		gc.Nodreg(&r1, gc.Types[tt], i386.REG_F0)
+		var r2 gc.Node
 		gc.Nodreg(&r2, gc.Types[tt], i386.REG_F0+1)
 		gins(i386.AFMOVV, &t1, &r1)
 		gins(i386.AFMOVD, &r1, &r1)
@@ -1430,19 +1429,11 @@ hardmem:
 
 func floatmove_387(f *gc.Node, t *gc.Node) {
 	var r1 gc.Node
-	var t1 gc.Node
-	var t2 gc.Node
-	var cvt *gc.Type
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var p3 *obj.Prog
 	var a int
-	var ft int
-	var tt int
 
-	ft = gc.Simsimtype(f.Type)
-	tt = gc.Simsimtype(t.Type)
-	cvt = t.Type
+	ft := gc.Simsimtype(f.Type)
+	tt := gc.Simsimtype(t.Type)
+	cvt := t.Type
 
 	switch uint32(ft)<<16 | uint32(tt) {
 	default:
@@ -1460,6 +1451,7 @@ func floatmove_387(f *gc.Node, t *gc.Node) {
 		if t.Op == gc.OREGISTER {
 			goto hardmem
 		}
+		var r1 gc.Node
 		gc.Nodreg(&r1, gc.Types[ft], i386.REG_F0)
 		if f.Op != gc.OREGISTER {
 			if ft == gc.TFLOAT32 {
@@ -1470,8 +1462,10 @@ func floatmove_387(f *gc.Node, t *gc.Node) {
 		}
 
 		// set round to zero mode during conversion
+		var t1 gc.Node
 		memname(&t1, gc.Types[gc.TUINT16])
 
+		var t2 gc.Node
 		memname(&t2, gc.Types[gc.TUINT16])
 		gins(i386.AFSTCW, nil, &t1)
 		gins(i386.AMOVW, ncon(0xf7f), &t2)
@@ -1493,6 +1487,7 @@ func floatmove_387(f *gc.Node, t *gc.Node) {
 		gc.TFLOAT64<<16 | gc.TINT8,
 		gc.TFLOAT64<<16 | gc.TUINT16,
 		gc.TFLOAT64<<16 | gc.TUINT8:
+		var t1 gc.Node
 		gc.Tempname(&t1, gc.Types[gc.TINT32])
 
 		gmove(f, &t1)
@@ -1502,10 +1497,10 @@ func floatmove_387(f *gc.Node, t *gc.Node) {
 
 		case gc.TINT8:
 			gins(i386.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
-			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1)
+			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1)
 			gins(i386.ACMPL, &t1, ncon(0x7f))
-			p2 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1)
-			p3 = gc.Gbranch(obj.AJMP, nil, 0)
+			p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1)
+			p3 := gc.Gbranch(obj.AJMP, nil, 0)
 			gc.Patch(p1, gc.Pc)
 			gc.Patch(p2, gc.Pc)
 			gmove(ncon(-0x80&(1<<32-1)), &t1)
@@ -1514,14 +1509,14 @@ func floatmove_387(f *gc.Node, t *gc.Node) {
 
 		case gc.TUINT8:
 			gins(i386.ATESTL, ncon(0xffffff00), &t1)
-			p1 = gc.Gbranch(i386.AJEQ, nil, +1)
+			p1 := gc.Gbranch(i386.AJEQ, nil, +1)
 			gins(i386.AMOVL, ncon(0), &t1)
 			gc.Patch(p1, gc.Pc)
 			gmove(&t1, t)
 
 		case gc.TUINT16:
 			gins(i386.ATESTL, ncon(0xffff0000), &t1)
-			p1 = gc.Gbranch(i386.AJEQ, nil, +1)
+			p1 := gc.Gbranch(i386.AJEQ, nil, +1)
 			gins(i386.AMOVL, ncon(0), &t1)
 			gc.Patch(p1, gc.Pc)
 			gmove(&t1, t)
@@ -1640,6 +1635,7 @@ func floatmove_387(f *gc.Node, t *gc.Node) {
 			goto hard
 		}
 		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
+			var r1 gc.Node
 			gc.Tempname(&r1, gc.Types[gc.TFLOAT32])
 			gins(i386.AFMOVFP, f, &r1)
 			gins(i386.AFMOVF, &r1, t)
@@ -1685,11 +1681,9 @@ func floatmove_sse(f *gc.Node, t *gc.Node) {
 	var r1 gc.Node
 	var cvt *gc.Type
 	var a int
-	var ft int
-	var tt int
 
-	ft = gc.Simsimtype(f.Type)
-	tt = gc.Simsimtype(t.Type)
+	ft := gc.Simsimtype(f.Type)
+	tt := gc.Simsimtype(t.Type)
 
 	switch uint32(ft)<<16 | uint32(tt) {
 	// should not happen
@@ -1829,11 +1823,6 @@ func samaddr(f *gc.Node, t *gc.Node) bool {
  *	as f, t
  */
 func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
-	var p *obj.Prog
-	var af obj.Addr
-	var at obj.Addr
-	var w int
-
 	if as == i386.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER {
 		gc.Fatal("gins MOVF reg, reg")
 	}
@@ -1858,15 +1847,15 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 		}
 	}
 
-	af = obj.Addr{}
-	at = obj.Addr{}
+	af := obj.Addr{}
+	at := obj.Addr{}
 	if f != nil {
 		gc.Naddr(f, &af, 1)
 	}
 	if t != nil {
 		gc.Naddr(t, &at, 1)
 	}
-	p = gc.Prog(as)
+	p := gc.Prog(as)
 	if f != nil {
 		p.From = af
 	}
@@ -1877,7 +1866,7 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 		fmt.Printf("%v\n", p)
 	}
 
-	w = 0
+	w := 0
 	switch as {
 	case i386.AMOVB:
 		w = 1
@@ -1903,15 +1892,13 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 }
 
 func dotaddable(n *gc.Node, n1 *gc.Node) bool {
-	var o int
-	var oary [10]int64
-	var nn *gc.Node
-
 	if n.Op != gc.ODOT {
 		return false
 	}
 
-	o = gc.Dotoffset(n, oary[:], &nn)
+	var oary [10]int64
+	var nn *gc.Node
+	o := gc.Dotoffset(n, oary[:], &nn)
 	if nn != nil && nn.Addable != 0 && o == 1 && oary[0] >= 0 {
 		*n1 = *nn
 		n1.Type = n.Type
diff --git a/src/cmd/8g/peep.go b/src/cmd/8g/peep.go
index 0838882e384d203ff02998a7dc9ba05d75b2c1a5..8aa6e94c61cd415fb7c0f11647828db70e98f4c0 100644
--- a/src/cmd/8g/peep.go
+++ b/src/cmd/8g/peep.go
@@ -63,10 +63,9 @@ func needc(p *obj.Prog) bool {
 }
 
 func rnops(r *gc.Flow) *gc.Flow {
-	var p *obj.Prog
-	var r1 *gc.Flow
-
 	if r != nil {
+		var p *obj.Prog
+		var r1 *gc.Flow
 		for {
 			p = r.Prog
 			if p.As != obj.ANOP || p.From.Type != obj.TYPE_NONE || p.To.Type != obj.TYPE_NONE {
@@ -84,14 +83,7 @@ func rnops(r *gc.Flow) *gc.Flow {
 }
 
 func peep(firstp *obj.Prog) {
-	var r *gc.Flow
-	var r1 *gc.Flow
-	var g *gc.Graph
-	var p *obj.Prog
-	var p1 *obj.Prog
-	var t int
-
-	g = gc.Flowstart(firstp, nil)
+	g := gc.Flowstart(firstp, nil)
 	if g == nil {
 		return
 	}
@@ -104,7 +96,8 @@ func peep(firstp *obj.Prog) {
 	// find MOV $con,R followed by
 	// another MOV $con,R without
 	// setting R in the interim
-	for r = g.Start; r != nil; r = r.Link {
+	var p *obj.Prog
+	for r := g.Start; r != nil; r = r.Link {
 		p = r.Prog
 		switch p.As {
 		case i386.ALEAL:
@@ -129,6 +122,10 @@ func peep(firstp *obj.Prog) {
 		}
 	}
 
+	var r1 *gc.Flow
+	var p1 *obj.Prog
+	var r *gc.Flow
+	var t int
 loop1:
 	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 		gc.Dumpit("loop1", g.Start, 0)
@@ -229,7 +226,7 @@ loop1:
 	// can be replaced by MOVAPD, which moves the pair of float64s
 	// instead of just the lower one.  We only use the lower one, but
 	// the processor can do better if we do moves using both.
-	for r = g.Start; r != nil; r = r.Link {
+	for r := g.Start; r != nil; r = r.Link {
 		p = r.Prog
 		if p.As == i386.AMOVSD {
 			if regtyp(&p.From) {
@@ -244,9 +241,7 @@ loop1:
 }
 
 func excise(r *gc.Flow) {
-	var p *obj.Prog
-
-	p = r.Prog
+	p := r.Prog
 	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 		fmt.Printf("%v ===delete===\n", p)
 	}
@@ -269,9 +264,8 @@ func regtyp(a *obj.Addr) bool {
 // causing any trouble.
 func elimshortmov(g *gc.Graph) {
 	var p *obj.Prog
-	var r *gc.Flow
 
-	for r = g.Start; r != nil; r = r.Link {
+	for r := g.Start; r != nil; r = r.Link {
 		p = r.Prog
 		if regtyp(&p.To) {
 			switch p.As {
@@ -367,22 +361,17 @@ func elimshortmov(g *gc.Graph) {
  * will be eliminated by copy propagation.
  */
 func subprop(r0 *gc.Flow) bool {
-	var p *obj.Prog
-	var v1 *obj.Addr
-	var v2 *obj.Addr
-	var r *gc.Flow
-	var t int
-	var info gc.ProgInfo
-
-	p = r0.Prog
-	v1 = &p.From
+	p := r0.Prog
+	v1 := &p.From
 	if !regtyp(v1) {
 		return false
 	}
-	v2 = &p.To
+	v2 := &p.To
 	if !regtyp(v2) {
 		return false
 	}
+	var info gc.ProgInfo
+	var r *gc.Flow
 	for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
 		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 			fmt.Printf("\t? %v\n", r.Prog)
@@ -436,7 +425,7 @@ gotit:
 		}
 	}
 
-	t = int(v1.Reg)
+	t := int(v1.Reg)
 	v1.Reg = v2.Reg
 	v2.Reg = int16(t)
 	if gc.Debug['P'] != 0 {
@@ -458,13 +447,9 @@ gotit:
  *	set v2	return success
  */
 func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
-	var p *obj.Prog
-	var v1 *obj.Addr
-	var v2 *obj.Addr
-
-	p = r0.Prog
-	v1 = &p.From
-	v2 = &p.To
+	p := r0.Prog
+	v1 := &p.From
+	v2 := &p.To
 	if copyas(v1, v2) {
 		return true
 	}
@@ -473,9 +458,6 @@ func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
 }
 
 func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
-	var t int
-	var p *obj.Prog
-
 	if uint32(r.Active) == gactive {
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("act set; return 1\n")
@@ -487,6 +469,8 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
 	if gc.Debug['P'] != 0 {
 		fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
 	}
+	var t int
+	var p *obj.Prog
 	for ; r != nil; r = r.S1 {
 		p = r.Prog
 		if gc.Debug['P'] != 0 {
@@ -577,8 +561,6 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
  * 0 otherwise (not touched)
  */
 func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
-	var info gc.ProgInfo
-
 	switch p.As {
 	case obj.AJMP:
 		if s != nil {
@@ -632,6 +614,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
 	if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
 		return 0
 	}
+	var info gc.ProgInfo
 	proginfo(&info, p)
 
 	if (info.Reguse|info.Regset)&RtoB(int(v.Reg)) != 0 {
@@ -747,10 +730,8 @@ func copyau(a *obj.Addr, v *obj.Addr) bool {
  * return failure to substitute
  */
 func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
-	var reg int
-
 	if copyas(a, v) {
-		reg = int(s.Reg)
+		reg := int(s.Reg)
 		if reg >= i386.REG_AX && reg <= i386.REG_DI || reg >= i386.REG_X0 && reg <= i386.REG_X7 {
 			if f != 0 {
 				a.Reg = int16(reg)
@@ -761,7 +742,7 @@ func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
 	}
 
 	if regtyp(v) {
-		reg = int(v.Reg)
+		reg := int(v.Reg)
 		if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
 			if (s.Reg == i386.REG_BP) && a.Index != obj.TYPE_NONE {
 				return 1 /* can't use BP-base with index */
@@ -786,15 +767,12 @@ func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
 }
 
 func conprop(r0 *gc.Flow) {
-	var r *gc.Flow
 	var p *obj.Prog
-	var p0 *obj.Prog
 	var t int
-	var v0 *obj.Addr
 
-	p0 = r0.Prog
-	v0 = &p0.To
-	r = r0
+	p0 := r0.Prog
+	v0 := &p0.To
+	r := r0
 
 loop:
 	r = gc.Uniqs(r)
diff --git a/src/cmd/8g/reg.go b/src/cmd/8g/reg.go
index 76bd260f54ff0b6d5e5f7bd3ddea405ea5e50a1a..4d4d9a5e1ba67afb2e4fec93219514cec0a27ac1 100644
--- a/src/cmd/8g/reg.go
+++ b/src/cmd/8g/reg.go
@@ -66,9 +66,7 @@ func excludedregs() uint64 {
 }
 
 func doregbits(r int) uint64 {
-	var b uint64
-
-	b = 0
+	b := uint64(0)
 	if r >= i386.REG_AX && r <= i386.REG_DI {
 		b |= RtoB(r)
 	} else if r >= i386.REG_AL && r <= i386.REG_BL {
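doregbits accumulates registers into a uint64 bit set via RtoB, and `b := uint64(0)` again pins the accumulator's type for `:=`. The mapping in miniature (register numbers invented for the sketch):

```go
package main

import "fmt"

func rtoB(r int) uint64 { return 1 << uint(r) }

func main() {
	b := uint64(0)
	for _, r := range []int{0, 2, 5} {
		b |= rtoB(r)
	}
	fmt.Printf("%#b\n", b) // 0b100101
}
```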
diff --git a/src/cmd/9g/cgen.go b/src/cmd/9g/cgen.go
index 7a1e96726769db87b1adae525afa5d8d67d2db1d..74accf28e024f4041240aa75b6e1312d1773fa86 100644
--- a/src/cmd/9g/cgen.go
+++ b/src/cmd/9g/cgen.go
@@ -20,24 +20,17 @@ import "cmd/internal/gc"
  * simplifies and calls gmove.
  */
 func cgen(n *gc.Node, res *gc.Node) {
-	var nl *gc.Node
-	var nr *gc.Node
-	var r *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var a int
-	var f int
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var p3 *obj.Prog
-	var addr obj.Addr
-
 	//print("cgen %N(%d) -> %N(%d)\n", n, n->addable, res, res->addable);
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\ncgen-n", n)
 		gc.Dump("cgen-res", res)
 	}
 
+	var a int
+	var nr *gc.Node
+	var nl *gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
 	if n == nil || n.Type == nil {
 		goto ret
 	}
@@ -57,6 +50,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		gc.OSLICE3,
 		gc.OSLICE3ARR:
 		if res.Op != gc.ONAME || res.Addable == 0 {
+			var n1 gc.Node
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_slice(n, &n1)
 			cgen(&n1, res)
@@ -67,6 +61,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 	case gc.OEFACE:
 		if res.Op != gc.ONAME || res.Addable == 0 {
+			var n1 gc.Node
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_eface(n, &n1)
 			cgen(&n1, res)
@@ -81,6 +76,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 			gc.Fatal("cgen: this is going to misscompile")
 		}
 		if res.Ullman >= gc.UINF {
+			var n1 gc.Node
 			gc.Tempname(&n1, n.Type)
 			cgen(n, &n1)
 			cgen(&n1, res)
@@ -98,6 +94,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 	if res.Addable == 0 {
 		if n.Ullman > res.Ullman {
+			var n1 gc.Node
 			regalloc(&n1, n.Type, res)
 			cgen(n, &n1)
 			if n1.Ullman > res.Ullman {
@@ -111,6 +108,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 			goto ret
 		}
 
+		var f int
 		if res.Ullman >= gc.UINF {
 			goto gen
 		}
@@ -132,9 +130,12 @@ func cgen(n *gc.Node, res *gc.Node) {
 		}
 
 		if gc.Iscomplex[n.Type.Etype] == 0 {
-			a = optoas(gc.OAS, res.Type)
+			a := optoas(gc.OAS, res.Type)
+			var addr obj.Addr
 			if sudoaddable(a, res, &addr) {
+				var p1 *obj.Prog
 				if f != 0 {
+					var n2 gc.Node
 					regalloc(&n2, res.Type, nil)
 					cgen(n, &n2)
 					p1 = gins(a, &n2, nil)
@@ -152,6 +153,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		}
 
 	gen:
+		var n1 gc.Node
 		igen(res, &n1, nil)
 		cgen(n, &n1)
 		regfree(&n1)
@@ -187,6 +189,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		if n.Op == gc.OREGISTER || res.Op == gc.OREGISTER {
 			gmove(n, res)
 		} else {
+			var n1 gc.Node
 			regalloc(&n1, n.Type, nil)
 			gmove(n, &n1)
 			cgen(&n1, res)
@@ -201,9 +204,10 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 	if nl != nil && nl.Ullman >= gc.UINF {
 		if nr != nil && nr.Ullman >= gc.UINF {
+			var n1 gc.Node
 			gc.Tempname(&n1, nl.Type)
 			cgen(nl, &n1)
-			n2 = *n
+			n2 := *n
 			n2.Left = &n1
 			cgen(&n2, res)
 			goto ret
@@ -211,14 +215,16 @@ func cgen(n *gc.Node, res *gc.Node) {
 	}
 
 	if gc.Iscomplex[n.Type.Etype] == 0 {
-		a = optoas(gc.OAS, n.Type)
+		a := optoas(gc.OAS, n.Type)
+		var addr obj.Addr
 		if sudoaddable(a, n, &addr) {
 			if res.Op == gc.OREGISTER {
-				p1 = gins(a, nil, res)
+				p1 := gins(a, nil, res)
 				p1.From = addr
 			} else {
+				var n2 gc.Node
 				regalloc(&n2, n.Type, nil)
-				p1 = gins(a, nil, &n2)
+				p1 := gins(a, nil, &n2)
 				p1.From = addr
 				gins(a, &n2, res)
 				regfree(&n2)
@@ -248,11 +254,11 @@ func cgen(n *gc.Node, res *gc.Node) {
 		gc.OGE,
 		gc.OGT,
 		gc.ONOT:
-		p1 = gc.Gbranch(ppc64.ABR, nil, 0)
+		p1 := gc.Gbranch(ppc64.ABR, nil, 0)
 
-		p2 = gc.Pc
+		p2 := gc.Pc
 		gmove(gc.Nodbool(true), res)
-		p3 = gc.Gbranch(ppc64.ABR, nil, 0)
+		p3 := gc.Gbranch(ppc64.ABR, nil, 0)
 		gc.Patch(p1, gc.Pc)
 		bgen(n, true, 0, p2)
 		gmove(gc.Nodbool(false), res)
@@ -265,10 +271,12 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 		// unary
 	case gc.OCOM:
-		a = optoas(gc.OXOR, nl.Type)
+		a := optoas(gc.OXOR, nl.Type)
 
+		var n1 gc.Node
 		regalloc(&n1, nl.Type, nil)
 		cgen(nl, &n1)
+		var n2 gc.Node
 		gc.Nodconst(&n2, nl.Type, -1)
 		gins(a, &n2, &n1)
 		gmove(&n1, res)
@@ -315,7 +323,9 @@ func cgen(n *gc.Node, res *gc.Node) {
 				gc.OINDEX,
 				gc.OIND,
 				gc.ONAME:
+				var n1 gc.Node
 				igen(nl, &n1, res)
+				var n2 gc.Node
 				regalloc(&n2, n.Type, res)
 				gmove(&n1, &n2)
 				gmove(&n2, res)
@@ -325,7 +335,9 @@ func cgen(n *gc.Node, res *gc.Node) {
 			}
 		}
 
+		var n1 gc.Node
 		regalloc(&n1, nl.Type, res)
+		var n2 gc.Node
 		regalloc(&n2, n.Type, &n1)
 		cgen(nl, &n1)
 
@@ -343,6 +355,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		gc.OINDEX,
 		gc.OIND,
 		gc.ONAME: // PHEAP or PPARAMREF var
+		var n1 gc.Node
 		igen(n, &n1, res)
 
 		gmove(&n1, res)
@@ -350,6 +363,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 
 		// interface table is first word of interface value
 	case gc.OITAB:
+		var n1 gc.Node
 		igen(nl, &n1, res)
 
 		n1.Type = n.Type
@@ -359,14 +373,16 @@ func cgen(n *gc.Node, res *gc.Node) {
 		// pointer is the first word of string or slice.
 	case gc.OSPTR:
 		if gc.Isconst(nl, gc.CTSTR) {
+			var n1 gc.Node
 			regalloc(&n1, gc.Types[gc.Tptr], res)
-			p1 = gins(ppc64.AMOVD, nil, &n1)
+			p1 := gins(ppc64.AMOVD, nil, &n1)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
 			gmove(&n1, res)
 			regfree(&n1)
 			break
 		}
 
+		var n1 gc.Node
 		igen(nl, &n1, res)
 		n1.Type = n.Type
 		gmove(&n1, res)
@@ -376,13 +392,15 @@ func cgen(n *gc.Node, res *gc.Node) {
 		if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
 			// map and chan have len in the first int-sized word.
 			// a zero pointer means zero length
+			var n1 gc.Node
 			regalloc(&n1, gc.Types[gc.Tptr], res)
 
 			cgen(nl, &n1)
 
+			var n2 gc.Node
 			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
 			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
-			p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+			p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
 
 			n2 = n1
 			n2.Op = gc.OINDREG
@@ -399,6 +417,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
 			// both slice and string have len one pointer into the struct.
 			// a zero pointer means zero length
+			var n1 gc.Node
 			igen(nl, &n1, res)
 
 			n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
@@ -414,13 +433,15 @@ func cgen(n *gc.Node, res *gc.Node) {
 		if gc.Istype(nl.Type, gc.TCHAN) {
 			// chan has cap in the second int-sized word.
 			// a zero pointer means zero length
+			var n1 gc.Node
 			regalloc(&n1, gc.Types[gc.Tptr], res)
 
 			cgen(nl, &n1)
 
+			var n2 gc.Node
 			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
 			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
-			p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+			p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
 
 			n2 = n1
 			n2.Op = gc.OINDREG
@@ -436,6 +457,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 		}
 
 		if gc.Isslice(nl.Type) {
+			var n1 gc.Node
 			igen(nl, &n1, res)
 			n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
 			n1.Xoffset += int64(gc.Array_cap)
@@ -475,11 +497,13 @@ func cgen(n *gc.Node, res *gc.Node) {
 		}
 
 		if nl.Ullman >= nr.Ullman {
+			var n1 gc.Node
 			regalloc(&n1, nl.Type, res)
 			cgen(nl, &n1)
 			cgen_div(int(n.Op), &n1, nr, res)
 			regfree(&n1)
 		} else {
+			var n2 gc.Node
 			if !gc.Smallintconst(nr) {
 				regalloc(&n2, nr.Type, res)
 				cgen(nr, &n2)
@@ -519,7 +543,7 @@ func cgen(n *gc.Node, res *gc.Node) {
 	 */
 sbop: // symmetric binary
 	if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
-		r = nl
+		r := nl
 		nl = nr
 		nr = r
 	}
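
Reviewer note: the swap keeps the operand that needs more registers (the higher Ullman number) in nl so it is generated first; for a commutative operator that is always safe. A minimal sketch of the rule, with a hypothetical node type:

	type node struct{ ullman int } // hypothetical, for illustration

	func orderOperands(l, r *node) (first, second *node) {
		if l.ullman < r.ullman {
			return r, l // evaluate the register-hungry side first
		}
		return l, r
	}
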
@@ -606,8 +630,6 @@ ret:
  * The caller must call regfree(a).
  */
 func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
-	var n1 gc.Node
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("cgenr-n", n)
 	}
@@ -630,6 +652,7 @@ func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 		gc.OCALLFUNC,
 		gc.OCALLMETH,
 		gc.OCALLINTER:
+		var n1 gc.Node
 		igen(n, &n1, res)
 		regalloc(a, gc.Types[gc.Tptr], &n1)
 		gmove(&n1, a)
@@ -648,24 +671,12 @@ func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
  * The generated code checks that the result is not nil.
  */
 func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
-	var nl *gc.Node
-	var nr *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
-	var n4 gc.Node
-	var tmp gc.Node
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var w uint32
-	var v uint64
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("agenr-n", n)
 	}
 
-	nl = n.Left
-	nr = n.Right
+	nl := n.Left
+	nr := n.Right
 
 	switch n.Op {
 	case gc.ODOT,
@@ -673,6 +684,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 		gc.OCALLFUNC,
 		gc.OCALLMETH,
 		gc.OCALLINTER:
+		var n1 gc.Node
 		igen(n, &n1, res)
 		regalloc(a, gc.Types[gc.Tptr], &n1)
 		agen(&n1, a)
@@ -683,11 +695,14 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 		gc.Cgen_checknil(a)
 
 	case gc.OINDEX:
-		p2 = nil // to be patched to panicindex.
-		w = uint32(n.Type.Width)
+		p2 := (*obj.Prog)(nil) // to be patched to panicindex.
+		w := uint32(n.Type.Width)
 
 		//bounded = debug['B'] || n->bounded;
+		var n3 gc.Node
+		var n1 gc.Node
 		if nr.Addable != 0 {
+			var tmp gc.Node
 			if !gc.Isconst(nr, gc.CTINT) {
 				gc.Tempname(&tmp, gc.Types[gc.TINT64])
 			}
@@ -701,6 +716,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 			}
 		} else if nl.Addable != 0 {
 			if !gc.Isconst(nr, gc.CTINT) {
+				var tmp gc.Node
 				gc.Tempname(&tmp, gc.Types[gc.TINT64])
 				cgen(nr, &tmp)
 				regalloc(&n1, tmp.Type, nil)
@@ -711,6 +727,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 				agenr(nl, &n3, res)
 			}
 		} else {
+			var tmp gc.Node
 			gc.Tempname(&tmp, gc.Types[gc.TINT64])
 			cgen(nr, &tmp)
 			nr = &tmp
@@ -730,18 +747,19 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 			if gc.Isconst(nl, gc.CTSTR) {
 				gc.Fatal("constant string constant index")
 			}
-			v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+			v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
 			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
 				if gc.Debug['B'] == 0 && !n.Bounded {
 					n1 = n3
 					n1.Op = gc.OINDREG
 					n1.Type = gc.Types[gc.Tptr]
 					n1.Xoffset = int64(gc.Array_nel)
+					var n4 gc.Node
 					regalloc(&n4, n1.Type, nil)
 					gmove(&n1, &n4)
 					ginscon2(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n4, int64(v))
 					regfree(&n4)
-					p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT64]), nil, +1)
+					p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT64]), nil, +1)
 					ginscall(gc.Panicindex, 0)
 					gc.Patch(p1, gc.Pc)
 				}
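
Reviewer note: for a constant index v, the emitted check loads the length word (Array_nel), compares it against v, and falls into panicindex only when the OGT branch is not taken — i.e. exactly:

	func at(s []byte, v uint64) byte {
		if uint64(len(s)) <= v { // complement of the OGT "in bounds" branch
			panic("index out of range") // ginscall(gc.Panicindex)
		}
		return s[v]
	}
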
@@ -761,10 +779,12 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 			break
 		}
 
+		var n2 gc.Node
 		regalloc(&n2, gc.Types[gc.TINT64], &n1) // i
 		gmove(&n1, &n2)
 		regfree(&n1)
 
+		var n4 gc.Node
 		if gc.Debug['B'] == 0 && !n.Bounded {
 			// check bounds
 			if gc.Isconst(nl, gc.CTSTR) {
@@ -781,7 +801,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 					gc.Nodconst(&n4, gc.Types[gc.TUINT64], nl.Type.Bound)
 				} else {
 					regalloc(&n4, gc.Types[gc.TUINT64], nil)
-					p1 = gins(ppc64.AMOVD, nil, &n4)
+					p1 := gins(ppc64.AMOVD, nil, &n4)
 					p1.From.Type = obj.TYPE_CONST
 					p1.From.Offset = nl.Type.Bound
 				}
@@ -791,7 +811,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 			if n4.Op == gc.OREGISTER {
 				regfree(&n4)
 			}
-			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
+			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
 			if p2 != nil {
 				gc.Patch(p2, gc.Pc)
 			}
@@ -801,7 +821,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
 
 		if gc.Isconst(nl, gc.CTSTR) {
 			regalloc(&n3, gc.Types[gc.Tptr], res)
-			p1 = gins(ppc64.AMOVD, nil, &n3)
+			p1 := gins(ppc64.AMOVD, nil, &n3)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
 			p1.From.Type = obj.TYPE_ADDR
 		} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
@@ -854,11 +874,6 @@ func ginsadd(as int, off int64, dst *gc.Node) {
  * The generated code checks that the result is not nil.
  */
 func agen(n *gc.Node, res *gc.Node) {
-	var nl *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nagen-res", res)
 		gc.Dump("agen-r", n)
@@ -872,17 +887,20 @@ func agen(n *gc.Node, res *gc.Node) {
 		n = n.Left
 	}
 
+	var nl *gc.Node
 	if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
 		// Use of a nil interface or nil slice.
 		// Create a temporary we can take the address of and read.
 		// The generated code is just going to panic, so it need not
 		// be terribly efficient. See issue 3670.
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 
 		gc.Gvardef(&n1)
 		clearfat(&n1)
+		var n2 gc.Node
 		regalloc(&n2, gc.Types[gc.Tptr], res)
-		n3 = gc.Node{}
+		n3 := gc.Node{}
 		n3.Op = gc.OADDR
 		n3.Left = &n1
 		gins(ppc64.AMOVD, &n3, &n2)
@@ -892,9 +910,10 @@ func agen(n *gc.Node, res *gc.Node) {
 	}
 
 	if n.Addable != 0 {
-		n1 = gc.Node{}
+		n1 := gc.Node{}
 		n1.Op = gc.OADDR
 		n1.Left = n
+		var n2 gc.Node
 		regalloc(&n2, gc.Types[gc.Tptr], res)
 		gins(ppc64.AMOVD, &n1, &n2)
 		gmove(&n2, res)
@@ -928,16 +947,19 @@ func agen(n *gc.Node, res *gc.Node) {
 		gc.OSLICESTR,
 		gc.OSLICE3,
 		gc.OSLICE3ARR:
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 		gc.Cgen_slice(n, &n1)
 		agen(&n1, res)
 
 	case gc.OEFACE:
+		var n1 gc.Node
 		gc.Tempname(&n1, n.Type)
 		gc.Cgen_eface(n, &n1)
 		agen(&n1, res)
 
 	case gc.OINDEX:
+		var n1 gc.Node
 		agenr(n, &n1, res)
 		gmove(&n1, res)
 		regfree(&n1)
@@ -991,10 +1013,6 @@ ret:
  * The generated code checks that the result is not *nil.
  */
 func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
-	var fp *gc.Type
-	var flist gc.Iter
-	var n1 gc.Node
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nigen-n", n)
 	}
@@ -1046,7 +1064,8 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
 			cgen_callinter(n, nil, 0)
 		}
 
-		fp = gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
+		var flist gc.Iter
+		fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
 		*a = gc.Node{}
 		a.Op = gc.OINDREG
 		a.Val.U.Reg = ppc64.REGSP
@@ -1066,6 +1085,7 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
 				if gc.Isptr[n.Left.Type.Etype] == 0 {
 					igen(n.Left, a, res)
 				} else {
+					var n1 gc.Node
 					igen(n.Left, &n1, res)
 					gc.Cgen_checknil(&n1)
 					regalloc(a, gc.Types[gc.Tptr], res)
@@ -1094,19 +1114,6 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
  *	if(n == true) goto to;
  */
 func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
-	var et int
-	var a int
-	var nl *gc.Node
-	var nr *gc.Node
-	var l *gc.Node
-	var r *gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var tmp gc.Node
-	var ll *gc.NodeList
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nbgen", n)
 	}
@@ -1119,6 +1126,9 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		gc.Genlist(n.Ninit)
 	}
 
+	var et int
+	var nl *gc.Node
+	var nr *gc.Node
 	if n.Type == nil {
 		gc.Convlit(&n, gc.Types[gc.TBOOL])
 		if n.Type == nil {
@@ -1144,11 +1154,13 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 
 	switch n.Op {
 	default:
+		var n1 gc.Node
 		regalloc(&n1, n.Type, nil)
 		cgen(n, &n1)
+		var n2 gc.Node
 		gc.Nodconst(&n2, n.Type, 0)
 		gins(optoas(gc.OCMP, n.Type), &n1, &n2)
-		a = ppc64.ABNE
+		a := ppc64.ABNE
 		if !true_ {
 			a = ppc64.ABEQ
 		}
@@ -1166,8 +1178,8 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 	case gc.OANDAND,
 		gc.OOROR:
 		if (n.Op == gc.OANDAND) == true_ {
-			p1 = gc.Gbranch(obj.AJMP, nil, 0)
-			p2 = gc.Gbranch(obj.AJMP, nil, 0)
+			p1 := gc.Gbranch(obj.AJMP, nil, 0)
+			p2 := gc.Gbranch(obj.AJMP, nil, 0)
 			gc.Patch(p1, gc.Pc)
 			bgen(n.Left, !true_, -likely, p2)
 			bgen(n.Right, !true_, -likely, p2)
@@ -1212,15 +1224,15 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		gc.OGT,
 		gc.OLE,
 		gc.OGE:
-		a = int(n.Op)
+		a := int(n.Op)
 		if !true_ {
 			if gc.Isfloat[nr.Type.Etype] != 0 {
 				// brcom is not valid on floats when NaN is involved.
-				p1 = gc.Gbranch(ppc64.ABR, nil, 0)
+				p1 := gc.Gbranch(ppc64.ABR, nil, 0)
 
-				p2 = gc.Gbranch(ppc64.ABR, nil, 0)
+				p2 := gc.Gbranch(ppc64.ABR, nil, 0)
 				gc.Patch(p1, gc.Pc)
-				ll = n.Ninit // avoid re-genning ninit
+				ll := n.Ninit // avoid re-genning ninit
 				n.Ninit = nil
 				bgen(n, true, -likely, p2)
 				n.Ninit = ll
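
Reviewer note: the extra pair of branches exists because complementing a float comparison is wrong once NaN is involved; negation must reroute the branch rather than flip the operator:

	func notLess(x, y float64) bool {
		// true when either operand is NaN, whereas x >= y is false for NaN:
		// the two are not complements, so brcom cannot be used here
		return !(x < y)
	}
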
@@ -1236,7 +1248,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		// make simplest on right
 		if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
 			a = gc.Brrev(a)
-			r = nl
+			r := nl
 			nl = nr
 			nr = r
 		}
@@ -1249,10 +1261,13 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 			}
 
 			a = optoas(a, gc.Types[gc.Tptr])
+			var n1 gc.Node
 			igen(nl, &n1, nil)
 			n1.Xoffset += int64(gc.Array_array)
 			n1.Type = gc.Types[gc.Tptr]
+			var tmp gc.Node
 			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+			var n2 gc.Node
 			regalloc(&n2, gc.Types[gc.Tptr], &n1)
 			gmove(&n1, &n2)
 			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
@@ -1270,9 +1285,12 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 			}
 
 			a = optoas(a, gc.Types[gc.Tptr])
+			var n1 gc.Node
 			igen(nl, &n1, nil)
 			n1.Type = gc.Types[gc.Tptr]
+			var tmp gc.Node
 			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+			var n2 gc.Node
 			regalloc(&n2, gc.Types[gc.Tptr], &n1)
 			gmove(&n1, &n2)
 			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
@@ -1287,10 +1305,13 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 			break
 		}
 
+		var n1 gc.Node
+		var n2 gc.Node
 		if nr.Ullman >= gc.UINF {
 			regalloc(&n1, nl.Type, nil)
 			cgen(nl, &n1)
 
+			var tmp gc.Node
 			gc.Tempname(&tmp, nl.Type)
 			gmove(&n1, &tmp)
 			regfree(&n1)
@@ -1321,8 +1342,8 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
 		cgen(nr, &n2)
 
 	cmp:
-		l = &n1
-		r = &n2
+		l := &n1
+		r := &n2
 		gins(optoas(gc.OCMP, nr.Type), l, r)
 		if gc.Isfloat[nr.Type.Etype] != 0 && (a == gc.OLE || a == gc.OGE) {
 			// To get NaN right, must rewrite x <= y into separate x < y or x = y.
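
Reviewer note: OLE/OGE are decomposed into two comparisons so that NaN fails both arms, matching Go semantics:

	func le(x, y float64) bool {
		return x < y || x == y // each arm is false when x or y is NaN
	}
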
@@ -1355,31 +1376,27 @@ ret:
  * return n's offset from SP.
  */
 func stkof(n *gc.Node) int64 {
-	var t *gc.Type
-	var flist gc.Iter
-	var off int64
-
 	switch n.Op {
 	case gc.OINDREG:
 		return n.Xoffset
 
 	case gc.ODOT:
-		t = n.Left.Type
+		t := n.Left.Type
 		if gc.Isptr[t.Etype] != 0 {
 			break
 		}
-		off = stkof(n.Left)
+		off := stkof(n.Left)
 		if off == -1000 || off == 1000 {
 			return off
 		}
 		return off + n.Xoffset
 
 	case gc.OINDEX:
-		t = n.Left.Type
+		t := n.Left.Type
 		if !gc.Isfixedarray(t) {
 			break
 		}
-		off = stkof(n.Left)
+		off := stkof(n.Left)
 		if off == -1000 || off == 1000 {
 			return off
 		}
@@ -1391,11 +1408,12 @@ func stkof(n *gc.Node) int64 {
 	case gc.OCALLMETH,
 		gc.OCALLINTER,
 		gc.OCALLFUNC:
-		t = n.Left.Type
+		t := n.Left.Type
 		if gc.Isptr[t.Etype] != 0 {
 			t = t.Type
 		}
 
+		var flist gc.Iter
 		t = gc.Structfirst(&flist, gc.Getoutarg(t))
 		if t != nil {
 			return t.Width + int64(gc.Widthptr) // +widthptr: correct for saved LR
@@ -1412,19 +1430,6 @@ func stkof(n *gc.Node) int64 {
  *	memmove(&ns, &n, w);
  */
 func sgen(n *gc.Node, ns *gc.Node, w int64) {
-	var dst gc.Node
-	var src gc.Node
-	var tmp gc.Node
-	var nend gc.Node
-	var c int32
-	var odst int32
-	var osrc int32
-	var dir int
-	var align int
-	var op int
-	var p *obj.Prog
-	var ploop *obj.Prog
-	var l *gc.NodeList
 	var res *gc.Node = ns
 
 	if gc.Debug['g'] != 0 {
@@ -1444,7 +1449,7 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 	// If copying .args, that's all the results, so record definition sites
 	// for them for the liveness analysis.
 	if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
-		for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+		for l := gc.Curfn.Dcl; l != nil; l = l.Next {
 			if l.N.Class == gc.PPARAMOUT {
 				gc.Gvardef(l.N)
 			}
@@ -1456,6 +1461,7 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 	//	return;
 	if w == 0 {
 		// evaluate side effects only.
+		var dst gc.Node
 		regalloc(&dst, gc.Types[gc.Tptr], nil)
 
 		agen(res, &dst)
@@ -1468,8 +1474,9 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 	// want to avoid unaligned access, so have to use
 	// smaller operations for less aligned types.
 	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
-	align = int(n.Type.Align)
+	align := int(n.Type.Align)
 
+	var op int
 	switch align {
 	default:
 		gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
@@ -1490,17 +1497,18 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 	if w%int64(align) != 0 {
 		gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
 	}
-	c = int32(w / int64(align))
+	c := int32(w / int64(align))
 
 	// offset on the stack
-	osrc = int32(stkof(n))
+	osrc := int32(stkof(n))
 
-	odst = int32(stkof(res))
+	odst := int32(stkof(res))
 	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
 		// osrc and odst both on stack, and at least one is in
 		// an unknown position.  Could generate code to test
 		// for forward/backward copy, but instead just copy
 		// to a temporary location first.
+		var tmp gc.Node
 		gc.Tempname(&tmp, n.Type)
 
 		sgen(n, &tmp, w)
@@ -1514,12 +1522,14 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 
 	// if we are copying forward on the stack and
 	// the src and dst overlap, then reverse direction
-	dir = align
+	dir := align
 
 	if osrc < odst && int64(odst) < int64(osrc)+w {
 		dir = -dir
 	}
 
+	var dst gc.Node
+	var src gc.Node
 	if n.Ullman >= res.Ullman {
 		agenr(n, &dst, res) // temporarily use dst
 		regalloc(&src, gc.Types[gc.Tptr], nil)
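
Reviewer note: the direction flip is the standard memmove rule — copy backwards exactly when the destination starts inside the source region:

	func mustCopyBackwards(osrc, odst, w int64) bool {
		return osrc < odst && odst < osrc+w // a forward copy would clobber the source
	}
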
@@ -1536,19 +1546,20 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 		agenr(n, &src, nil)
 	}
 
+	var tmp gc.Node
 	regalloc(&tmp, gc.Types[gc.Tptr], nil)
 
 	// set up end marker
-	nend = gc.Node{}
+	nend := gc.Node{}
 
 	// move src and dest to the end of block if necessary
 	if dir < 0 {
 		if c >= 4 {
 			regalloc(&nend, gc.Types[gc.Tptr], nil)
-			p = gins(ppc64.AMOVD, &src, &nend)
+			gins(ppc64.AMOVD, &src, &nend)
 		}
 
-		p = gins(ppc64.AADD, nil, &src)
+		p := gins(ppc64.AADD, nil, &src)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = w
 
@@ -1556,7 +1567,7 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = w
 	} else {
-		p = gins(ppc64.AADD, nil, &src)
+		p := gins(ppc64.AADD, nil, &src)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = int64(-dir)
 
@@ -1566,7 +1577,7 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 
 		if c >= 4 {
 			regalloc(&nend, gc.Types[gc.Tptr], nil)
-			p = gins(ppc64.AMOVD, &src, &nend)
+			p := gins(ppc64.AMOVD, &src, &nend)
 			p.From.Type = obj.TYPE_ADDR
 			p.From.Offset = w
 		}
@@ -1575,10 +1586,10 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 	// move
 	// TODO: enable duffcopy for larger copies.
 	if c >= 4 {
-		p = gins(op, &src, &tmp)
+		p := gins(op, &src, &tmp)
 		p.From.Type = obj.TYPE_MEM
 		p.From.Offset = int64(dir)
-		ploop = p
+		ploop := p
 
 		p = gins(op, &tmp, &dst)
 		p.To.Type = obj.TYPE_MEM
@@ -1594,6 +1605,7 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
 		// generate the offsets directly and eliminate the
 		// ADDs.  That will produce shorter, more
 		// pipeline-able code.
+		var p *obj.Prog
 		for {
 			tmp14 := c
 			c--
@@ -1642,23 +1654,16 @@ func cadable(n *gc.Node) bool {
 func componentgen(nr *gc.Node, nl *gc.Node) bool {
 	var nodl gc.Node
 	var nodr gc.Node
-	var tmp gc.Node
-	var t *gc.Type
-	var freel int
-	var freer int
-	var fldcount int64
-	var loffset int64
-	var roffset int64
 
-	freel = 0
-	freer = 0
+	freel := 0
+	freer := 0
 
 	switch nl.Type.Etype {
 	default:
 		goto no
 
 	case gc.TARRAY:
-		t = nl.Type
+		t := nl.Type
 
 		// Slices are ok.
 		if gc.Isslice(t) {
@@ -1675,9 +1680,9 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		// Small structs with non-fat types are ok.
 	// Zero-sized structs are treated separately elsewhere.
 	case gc.TSTRUCT:
-		fldcount = 0
+		fldcount := int64(0)
 
-		for t = nl.Type.Type; t != nil; t = t.Down {
+		for t := nl.Type.Type; t != nil; t = t.Down {
 			if gc.Isfat(t.Type) {
 				goto no
 			}
@@ -1713,6 +1718,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		}
 	} else {
 		// When zeroing, prepare a register containing zero.
+		var tmp gc.Node
 		gc.Nodconst(&tmp, nl.Type, 0)
 
 		regalloc(&nodr, gc.Types[gc.TUINT], nil)
@@ -1734,11 +1740,11 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		if nl.Op == gc.ONAME {
 			gc.Gvardef(nl)
 		}
-		t = nl.Type
+		t := nl.Type
 		if !gc.Isslice(t) {
 			nodl.Type = t.Type
 			nodr.Type = nodl.Type
-			for fldcount = 0; fldcount < t.Bound; fldcount++ {
+			for fldcount := int64(0); fldcount < t.Bound; fldcount++ {
 				if nr == nil {
 					gc.Clearslim(&nodl)
 				} else {
@@ -1841,8 +1847,8 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 		if nl.Op == gc.ONAME {
 			gc.Gvardef(nl)
 		}
-		loffset = nodl.Xoffset
-		roffset = nodr.Xoffset
+		loffset := nodl.Xoffset
+		roffset := nodr.Xoffset
 
 		// funarg structs may not begin at offset zero.
 		if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
@@ -1852,7 +1858,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) bool {
 			roffset -= nr.Type.Type.Width
 		}
 
-		for t = nl.Type.Type; t != nil; t = t.Down {
+		for t := nl.Type.Type; t != nil; t = t.Down {
 			nodl.Xoffset = loffset + t.Width
 			nodl.Type = t.Type
 
diff --git a/src/cmd/9g/ggen.go b/src/cmd/9g/ggen.go
index 54bebdda406cd6c526d5eaa2903b99e8c5fbaa30..3197e4643db255ed338689cb7e9e823dc47ab0a2 100644
--- a/src/cmd/9g/ggen.go
+++ b/src/cmd/9g/ggen.go
@@ -12,30 +12,25 @@ import (
 import "cmd/internal/gc"
 
 func defframe(ptxt *obj.Prog) {
-	var frame uint32
-	var p *obj.Prog
-	var hi int64
-	var lo int64
-	var l *gc.NodeList
 	var n *gc.Node
 
 	// fill in argument size, stack size
 	ptxt.To.Type = obj.TYPE_TEXTSIZE
 
 	ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
-	frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
 	ptxt.To.Offset = int64(frame)
 
 	// insert code to zero ambiguously live variables
 	// so that the garbage collector only sees initialized values
 	// when it looks for pointers.
-	p = ptxt
+	p := ptxt
 
-	hi = 0
-	lo = hi
+	hi := int64(0)
+	lo := hi
 
 	// iterate through declarations - they are sorted in decreasing xoffset order.
-	for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+	for l := gc.Curfn.Dcl; l != nil; l = l.Next {
 		n = l.N
 		if n.Needzero == 0 {
 			continue
@@ -68,24 +63,19 @@ func defframe(ptxt *obj.Prog) {
 }
 
 func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
-	var cnt int64
-	var i int64
-	var p1 *obj.Prog
-	var f *gc.Node
-
-	cnt = hi - lo
+	cnt := hi - lo
 	if cnt == 0 {
 		return p
 	}
 	if cnt < int64(4*gc.Widthptr) {
-		for i = 0; i < cnt; i += int64(gc.Widthptr) {
+		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
 			p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
 		}
 	} else if cnt <= int64(128*gc.Widthptr) {
 		p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
 		p.Reg = ppc64.REGSP
 		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
-		f = gc.Sysfunc("duffzero")
+		f := gc.Sysfunc("duffzero")
 		gc.Naddr(f, &p.To, 1)
 		gc.Afunclit(&p.To, f)
 		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
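
Reviewer note: zerorange picks one of three strategies by region size, and the duffzero entry offset 4*(128-cnt/widthptr) simply skips the stores that are not needed (4 bytes per instruction). The decision, as a sketch:

	func zeroPlan(cnt, widthptr int64) string {
		switch {
		case cnt < 4*widthptr:
			return "unrolled stores" // one MOVD $0 per word
		case cnt <= 128*widthptr:
			return "duffzero" // computed jump into the Duff's-device zeroer
		default:
			return "MOVDU loop" // explicit compare-and-branch loop
		}
	}
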
@@ -97,7 +87,7 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
 		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
 		p.Reg = ppc64.REGRT1
 		p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
-		p1 = p
+		p1 := p
 		p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
 		p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
 		gc.Patch(p, p1)
@@ -107,8 +97,7 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
 }
 
 func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
-	var q *obj.Prog
-	q = gc.Ctxt.NewProg()
+	q := gc.Ctxt.NewProg()
 	gc.Clearp(q)
 	q.As = int16(as)
 	q.Lineno = p.Lineno
@@ -129,8 +118,7 @@ func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int
  * On power, f must be moved to CTR first.
  */
 func ginsBL(reg *gc.Node, f *gc.Node) {
-	var p *obj.Prog
-	p = gins(ppc64.AMOVD, f, nil)
+	p := gins(ppc64.AMOVD, f, nil)
 	p.To.Type = obj.TYPE_REG
 	p.To.Reg = ppc64.REG_CTR
 	p = gins(ppc64.ABL, reg, nil)
@@ -148,15 +136,8 @@ func ginsBL(reg *gc.Node, f *gc.Node) {
   *	proc=3	normal call to C pointer (not Go func value)
 */
 func ginscall(f *gc.Node, proc int) {
-	var p *obj.Prog
-	var reg gc.Node
-	var con gc.Node
-	var reg2 gc.Node
-	var r1 gc.Node
-	var extra int32
-
 	if f.Type != nil {
-		extra = 0
+		extra := int32(0)
 		if proc == 1 || proc == 2 {
 			extra = 2 * int32(gc.Widthptr)
 		}
@@ -180,12 +161,13 @@ func ginscall(f *gc.Node, proc int) {
 				// The ppc64 NOP is really or r0, r0, r0; use that description
 				// because the NOP pseudo-instruction would be removed by
 				// the linker.
+				var reg gc.Node
 				gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
 
 				gins(ppc64.AOR, &reg, &reg)
 			}
 
-			p = gins(ppc64.ABL, nil, f)
+			p := gins(ppc64.ABL, nil, f)
 			gc.Afunclit(&p.To, f)
 			if proc == -1 || gc.Noreturn(p) {
 				gins(obj.AUNDEF, nil, nil)
@@ -193,7 +175,9 @@ func ginscall(f *gc.Node, proc int) {
 			break
 		}
 
+		var reg gc.Node
 		gc.Nodreg(&reg, gc.Types[gc.Tptr], ppc64.REGCTXT)
+		var r1 gc.Node
 		gc.Nodreg(&r1, gc.Types[gc.Tptr], ppc64.REG_R3)
 		gmove(f, &reg)
 		reg.Op = gc.OINDREG
@@ -206,14 +190,17 @@ func ginscall(f *gc.Node, proc int) {
 
 	case 1, // call in new proc (go)
 		2: // deferred call (defer)
+		var con gc.Node
 		gc.Nodconst(&con, gc.Types[gc.TINT64], int64(gc.Argsize(f.Type)))
 
+		var reg gc.Node
 		gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
+		var reg2 gc.Node
 		gc.Nodreg(&reg2, gc.Types[gc.TINT64], ppc64.REG_R4)
 		gmove(f, &reg)
 
 		gmove(&con, &reg2)
-		p = gins(ppc64.AMOVW, &reg2, nil)
+		p := gins(ppc64.AMOVW, &reg2, nil)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = ppc64.REGSP
 		p.To.Offset = 8
@@ -234,7 +221,7 @@ func ginscall(f *gc.Node, proc int) {
 
 		if proc == 2 {
 			gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
-			p = gins(ppc64.ACMP, &reg, nil)
+			p := gins(ppc64.ACMP, &reg, nil)
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = ppc64.REG_R0
 			p = gc.Gbranch(ppc64.ABEQ, nil, +1)
@@ -249,21 +236,12 @@ func ginscall(f *gc.Node, proc int) {
  * generate res = n.
  */
 func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
-	var i *gc.Node
-	var f *gc.Node
-	var tmpi gc.Node
-	var nodi gc.Node
-	var nodo gc.Node
-	var nodr gc.Node
-	var nodsp gc.Node
-	var p *obj.Prog
-
-	i = n.Left
+	i := n.Left
 	if i.Op != gc.ODOTINTER {
 		gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
 	}
 
-	f = i.Right // field
+	f := i.Right // field
 	if f.Op != gc.ONAME {
 		gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
 	}
@@ -271,6 +249,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 	i = i.Left // interface
 
 	if i.Addable == 0 {
+		var tmpi gc.Node
 		gc.Tempname(&tmpi, i.Type)
 		cgen(i, &tmpi)
 		i = &tmpi
@@ -280,8 +259,10 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 
 	// i is now addable, prepare an indirected
 	// register to hold its address.
+	var nodi gc.Node
 	igen(i, &nodi, res) // REG = &inter
 
+	var nodsp gc.Node
 	gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], ppc64.REGSP)
 
 	nodsp.Xoffset = int64(gc.Widthptr)
@@ -292,6 +273,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 	nodi.Xoffset += int64(gc.Widthptr)
 	cgen(&nodi, &nodsp) // {8 or 24}(SP) = 8(REG) -- i.data
 
+	var nodo gc.Node
 	regalloc(&nodo, gc.Types[gc.Tptr], res)
 
 	nodi.Type = gc.Types[gc.Tptr]
@@ -299,6 +281,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 	cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
 	regfree(&nodi)
 
+	var nodr gc.Node
 	regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
 	if n.Left.Xoffset == gc.BADWIDTH {
 		gc.Fatal("cgen_callinter: badwidth")
@@ -312,7 +295,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
 		proc = 3
 	} else {
 		// go/defer. generate go func value.
-		p = gins(ppc64.AMOVD, &nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
+		p := gins(ppc64.AMOVD, &nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
 		p.From.Type = obj.TYPE_ADDR
 	}
 
@@ -330,14 +313,11 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
  *	proc=2	defer call save away stack
  */
 func cgen_call(n *gc.Node, proc int) {
-	var t *gc.Type
-	var nod gc.Node
-	var afun gc.Node
-
 	if n == nil {
 		return
 	}
 
+	var afun gc.Node
 	if n.Left.Ullman >= gc.UINF {
 		// if name involves a fn call
 		// precompute the address of the fn
@@ -347,10 +327,11 @@ func cgen_call(n *gc.Node, proc int) {
 	}
 
 	gc.Genlist(n.List) // assign the args
-	t = n.Left.Type
+	t := n.Left.Type
 
 	// call tempname pointer
 	if n.Left.Ullman >= gc.UINF {
+		var nod gc.Node
 		regalloc(&nod, gc.Types[gc.Tptr], nil)
 		gc.Cgen_as(&nod, &afun)
 		nod.Type = t
@@ -361,6 +342,7 @@ func cgen_call(n *gc.Node, proc int) {
 
 	// call pointer
 	if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+		var nod gc.Node
 		regalloc(&nod, gc.Types[gc.Tptr], nil)
 		gc.Cgen_as(&nod, n.Left)
 		nod.Type = t
@@ -381,22 +363,18 @@ func cgen_call(n *gc.Node, proc int) {
  *	res = return value from call.
  */
 func cgen_callret(n *gc.Node, res *gc.Node) {
-	var nod gc.Node
-	var fp *gc.Type
-	var t *gc.Type
-	var flist gc.Iter
-
-	t = n.Left.Type
+	t := n.Left.Type
 	if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
 		t = t.Type
 	}
 
-	fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+	var flist gc.Iter
+	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
 	if fp == nil {
 		gc.Fatal("cgen_callret: nil")
 	}
 
-	nod = gc.Node{}
+	nod := gc.Node{}
 	nod.Op = gc.OINDREG
 	nod.Val.U.Reg = ppc64.REGSP
 	nod.Addable = 1
@@ -412,23 +390,18 @@ func cgen_callret(n *gc.Node, res *gc.Node) {
  *	res = &return value from call.
  */
 func cgen_aret(n *gc.Node, res *gc.Node) {
-	var nod1 gc.Node
-	var nod2 gc.Node
-	var fp *gc.Type
-	var t *gc.Type
-	var flist gc.Iter
-
-	t = n.Left.Type
+	t := n.Left.Type
 	if gc.Isptr[t.Etype] != 0 {
 		t = t.Type
 	}
 
-	fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+	var flist gc.Iter
+	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
 	if fp == nil {
 		gc.Fatal("cgen_aret: nil")
 	}
 
-	nod1 = gc.Node{}
+	nod1 := gc.Node{}
 	nod1.Op = gc.OINDREG
 	nod1.Val.U.Reg = ppc64.REGSP
 	nod1.Addable = 1
@@ -437,6 +410,7 @@ func cgen_aret(n *gc.Node, res *gc.Node) {
 	nod1.Type = fp.Type
 
 	if res.Op != gc.OREGISTER {
+		var nod2 gc.Node
 		regalloc(&nod2, gc.Types[gc.Tptr], res)
 		agen(&nod1, &nod2)
 		gins(ppc64.AMOVD, &nod2, res)
@@ -451,8 +425,6 @@ func cgen_aret(n *gc.Node, res *gc.Node) {
  * n->left is assignments to return values.
  */
 func cgen_ret(n *gc.Node) {
-	var p *obj.Prog
-
 	if n != nil {
 		gc.Genlist(n.List) // copy out args
 	}
@@ -460,7 +432,7 @@ func cgen_ret(n *gc.Node) {
 		ginscall(gc.Deferreturn, 0)
 	}
 	gc.Genlist(gc.Curfn.Exit)
-	p = gins(obj.ARET, nil, nil)
+	p := gins(obj.ARET, nil, nil)
 	if n != nil && n.Op == gc.ORETJMP {
 		p.To.Name = obj.NAME_EXTERN
 		p.To.Type = obj.TYPE_ADDR
@@ -476,20 +448,6 @@ func cgen_ret(n *gc.Node) {
  * according to op.
  */
 func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	var a int
-	var check int
-	var t *gc.Type
-	var t0 *gc.Type
-	var tl gc.Node
-	var tr gc.Node
-	var tl2 gc.Node
-	var tr2 gc.Node
-	var nm1 gc.Node
-	var nz gc.Node
-	var tm gc.Node
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-
 	// Have to be careful about handling
 	// most negative int divided by -1 correctly.
 	// The hardware will generate undefined result.
@@ -497,10 +455,10 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	// the hardware will silently generate undefined result.
 	// DIVW will leave an unpredictable result in the upper 32 bits,
 	// so always use DIVD/DIVDU.
-	t = nl.Type
+	t := nl.Type
 
-	t0 = t
-	check = 0
+	t0 := t
+	check := 0
 	if gc.Issigned[t.Etype] != 0 {
 		check = 1
 		if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
@@ -519,9 +477,11 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 		check = 0
 	}
 
-	a = optoas(gc.ODIV, t)
+	a := optoas(gc.ODIV, t)
 
+	var tl gc.Node
 	regalloc(&tl, t0, nil)
+	var tr gc.Node
 	regalloc(&tr, t0, nil)
 	if nl.Ullman >= nr.Ullman {
 		cgen(nl, &tl)
@@ -533,9 +493,9 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 
 	if t != t0 {
 		// Convert
-		tl2 = tl
+		tl2 := tl
 
-		tr2 = tr
+		tr2 := tr
 		tl.Type = t
 		tr.Type = t
 		gmove(&tl2, &tl)
@@ -543,7 +503,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	}
 
 	// Handle divide-by-zero panic.
-	p1 = gins(optoas(gc.OCMP, t), &tr, nil)
+	p1 := gins(optoas(gc.OCMP, t), &tr, nil)
 
 	p1.To.Type = obj.TYPE_REG
 	p1.To.Reg = ppc64.REGZERO
@@ -554,10 +514,12 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	ginscall(panicdiv, -1)
 	gc.Patch(p1, gc.Pc)
 
+	var p2 *obj.Prog
 	if check != 0 {
+		var nm1 gc.Node
 		gc.Nodconst(&nm1, t, -1)
 		gins(optoas(gc.OCMP, t), &tr, &nm1)
-		p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
 		if op == gc.ODIV {
 			// a / (-1) is -a.
 			gins(optoas(gc.OMINUS, t), nil, &tl)
@@ -565,6 +527,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 			gmove(&tl, res)
 		} else {
 			// a % (-1) is 0.
+			var nz gc.Node
 			gc.Nodconst(&nz, t, 0)
 
 			gmove(&nz, res)
@@ -580,6 +543,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 		gmove(&tl, res)
 	} else {
 		// A%B = A-(A/B*B)
+		var tm gc.Node
 		regalloc(&tm, t, nil)
 
 		// patch div to use the 3 register form
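
Reviewer note: the two guards dodiv emits around the raw DIVD are exactly Go's defined semantics for the hardware's trap cases; in source form:

	func divmod(a, b int64) (q, r int64) {
		if b == 0 {
			panic("divide by zero") // ginscall(panicdiv)
		}
		if b == -1 { // the hardware result is undefined here
			return -a, 0 // -MinInt64 wraps back to MinInt64, matching Go; a % -1 is 0
		}
		return a / b, a % b
	}
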
@@ -606,135 +570,15 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
  *	res = nl % nr
  */
 func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
-	var w int
-	var a int
-	var m gc.Magic
-
 	// TODO(minux): enable division by magic multiply (also need to fix longmod below)
 	//if(nr->op != OLITERAL)
 	goto longdiv
 
-	w = int(nl.Type.Width * 8)
-
-	// Front end handled 32-bit division. We only need to handle 64-bit.
-	// try to do division by multiply by (2^w)/d
-	// see hacker's delight chapter 10
-	switch gc.Simtype[nl.Type.Etype] {
-	default:
-		goto longdiv
-
-	case gc.TUINT64:
-		m.W = w
-		m.Ud = uint64(gc.Mpgetfix(nr.Val.U.Xval))
-		gc.Umagic(&m)
-		if m.Bad != 0 {
-			break
-		}
-		if op == gc.OMOD {
-			goto longmod
-		}
-
-		cgenr(nl, &n1, nil)
-		gc.Nodconst(&n2, nl.Type, int64(m.Um))
-		regalloc(&n3, nl.Type, res)
-		cgen_hmul(&n1, &n2, &n3)
-
-		if m.Ua != 0 {
-			// need to add numerator accounting for overflow
-			gins(optoas(gc.OADD, nl.Type), &n1, &n3)
-
-			gc.Nodconst(&n2, nl.Type, 1)
-			gins(optoas(gc.ORROTC, nl.Type), &n2, &n3)
-			gc.Nodconst(&n2, nl.Type, int64(m.S)-1)
-			gins(optoas(gc.ORSH, nl.Type), &n2, &n3)
-		} else {
-			gc.Nodconst(&n2, nl.Type, int64(m.S))
-			gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift dx
-		}
-
-		gmove(&n3, res)
-		regfree(&n1)
-		regfree(&n3)
-		return
-
-	case gc.TINT64:
-		m.W = w
-		m.Sd = gc.Mpgetfix(nr.Val.U.Xval)
-		gc.Smagic(&m)
-		if m.Bad != 0 {
-			break
-		}
-		if op == gc.OMOD {
-			goto longmod
-		}
-
-		cgenr(nl, &n1, res)
-		gc.Nodconst(&n2, nl.Type, m.Sm)
-		regalloc(&n3, nl.Type, nil)
-		cgen_hmul(&n1, &n2, &n3)
-
-		if m.Sm < 0 {
-			// need to add numerator
-			gins(optoas(gc.OADD, nl.Type), &n1, &n3)
-		}
-
-		gc.Nodconst(&n2, nl.Type, int64(m.S))
-		gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift n3
-
-		gc.Nodconst(&n2, nl.Type, int64(w)-1)
-
-		gins(optoas(gc.ORSH, nl.Type), &n2, &n1) // -1 iff num is neg
-		gins(optoas(gc.OSUB, nl.Type), &n1, &n3) // added
-
-		if m.Sd < 0 {
-			// this could probably be removed
-			// by factoring it into the multiplier
-			gins(optoas(gc.OMINUS, nl.Type), nil, &n3)
-		}
-
-		gmove(&n3, res)
-		regfree(&n1)
-		regfree(&n3)
-		return
-	}
-
-	goto longdiv
-
 	// division and mod using (slow) hardware instruction
 longdiv:
 	dodiv(op, nl, nr, res)
 
 	return
-
-	// mod using formula A%B = A-(A/B*B) but
-	// we know that there is a fast algorithm for A/B
-longmod:
-	regalloc(&n1, nl.Type, res)
-
-	cgen(nl, &n1)
-	regalloc(&n2, nl.Type, nil)
-	cgen_div(gc.ODIV, &n1, nr, &n2)
-	a = optoas(gc.OMUL, nl.Type)
-	if w == 8 {
-	}
-	// use 2-operand 16-bit multiply
-	// because there is no 2-operand 8-bit multiply
-	//a = AIMULW;
-	if !gc.Smallintconst(nr) {
-		regalloc(&n3, nl.Type, nil)
-		cgen(nr, &n3)
-		gins(a, &n3, &n2)
-		regfree(&n3)
-	} else {
-		gins(a, nr, &n2)
-	}
-	gins(optoas(gc.OSUB, nl.Type), &n2, &n1)
-	gmove(&n1, res)
-	regfree(&n1)
-	regfree(&n2)
 }
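
Reviewer note: the block deleted above was the (already disabled) division-by-constant via a "magic" high multiply, per Hacker's Delight ch. 10. The idea at a narrower width, with constants worked out here for d=7 and 16-bit dividends — M = ceil(2^19/7) = 74899, and the error term 5n/(7·2^19) stays below 1/7 for all n < 2^16, so the floor is exact (brute-forcing all 65536 inputs confirms it):

	func div7(n uint16) uint16 {
		const M = 74899                    // ceil(1<<19 / 7)
		return uint16(uint64(n) * M >> 19) // one multiply and one shift, no DIV
	}
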
 
 /*
@@ -742,30 +586,25 @@ longmod:
  *   res = (nl*nr) >> width
  */
 func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	var w int
-	var n1 gc.Node
-	var n2 gc.Node
-	var tmp *gc.Node
-	var t *gc.Type
-	var p *obj.Prog
-
 	// largest ullman on left.
 	if nl.Ullman < nr.Ullman {
-		tmp = nl
+		tmp := nl
 		nl = nr
 		nr = tmp
 	}
 
-	t = nl.Type
-	w = int(t.Width * 8)
+	t := nl.Type
+	w := int(t.Width * 8)
+	var n1 gc.Node
 	cgenr(nl, &n1, res)
+	var n2 gc.Node
 	cgenr(nr, &n2, nil)
 	switch gc.Simtype[t.Etype] {
 	case gc.TINT8,
 		gc.TINT16,
 		gc.TINT32:
 		gins(optoas(gc.OMUL, t), &n2, &n1)
-		p = gins(ppc64.ASRAD, nil, &n1)
+		p := gins(ppc64.ASRAD, nil, &n1)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = int64(w)
 
@@ -773,16 +612,16 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
 		gc.TUINT16,
 		gc.TUINT32:
 		gins(optoas(gc.OMUL, t), &n2, &n1)
-		p = gins(ppc64.ASRD, nil, &n1)
+		p := gins(ppc64.ASRD, nil, &n1)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = int64(w)
 
 	case gc.TINT64,
 		gc.TUINT64:
 		if gc.Issigned[t.Etype] != 0 {
-			p = gins(ppc64.AMULHD, &n2, &n1)
+			gins(ppc64.AMULHD, &n2, &n1)
 		} else {
-			p = gins(ppc64.AMULHDU, &n2, &n1)
+			gins(ppc64.AMULHDU, &n2, &n1)
 		}
 
 	default:
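
Reviewer note: AMULHD/AMULHDU yield the high 64 bits of the 128-bit product, which is precisely hmul's contract res = (nl*nr) >> width. In portable Go (math/bits, available since Go 1.12) the unsigned case is:

	func hmulu(a, b uint64) uint64 {
		hi, _ := bits.Mul64(a, b) // full 128-bit product; keep the high word
		return hi
	}
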
@@ -803,21 +642,18 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	var n1 gc.Node
 	var n2 gc.Node
 	var n3 gc.Node
-	var n4 gc.Node
-	var n5 gc.Node
-	var a int
-	var p1 *obj.Prog
-	var sc uint64
 	var tcount *gc.Type
 
-	a = optoas(op, nl.Type)
+	a := optoas(op, nl.Type)
 
 	if nr.Op == gc.OLITERAL {
+		var n1 gc.Node
 		regalloc(&n1, nl.Type, res)
 		cgen(nl, &n1)
-		sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+		sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
 		if sc >= uint64(nl.Type.Width*8) {
 			// large shift gets 2 shifts by width-1
+			var n3 gc.Node
 			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
 
 			gins(a, &n3, &n1)
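
Reviewer note: a constant shift count of width or more cannot be encoded directly, so it becomes two shifts by width-1 — which is also the correct Go result: zero, or all sign bits for a signed right shift:

	func sar64(x int64, s uint) int64 {
		if s >= 64 {
			return (x >> 63) >> 63 // two width-1 shifts: 0 or -1
		}
		return x >> s
	}
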
@@ -831,12 +667,14 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	}
 
 	if nl.Ullman >= gc.UINF {
+		var n4 gc.Node
 		gc.Tempname(&n4, nl.Type)
 		cgen(nl, &n4)
 		nl = &n4
 	}
 
 	if nr.Ullman >= gc.UINF {
+		var n5 gc.Node
 		gc.Tempname(&n5, nr.Type)
 		cgen(nr, &n5)
 		nr = &n5
@@ -872,7 +710,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	if !bounded {
 		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
 		gins(optoas(gc.OCMP, tcount), &n1, &n3)
-		p1 = gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
+		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
 		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
 			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
 			gins(a, &n3, &n2)
@@ -895,46 +733,39 @@ ret:
 }
 
 func clearfat(nl *gc.Node) {
-	var w uint64
-	var c uint64
-	var q uint64
-	var t uint64
-	var boff uint64
-	var dst gc.Node
-	var end gc.Node
-	var r0 gc.Node
-	var f *gc.Node
-	var p *obj.Prog
-	var pl *obj.Prog
-
 	/* clear a fat object */
 	if gc.Debug['g'] != 0 {
 		fmt.Printf("clearfat %v (%v, size: %d)\n", gc.Nconv(nl, 0), gc.Tconv(nl.Type, 0), nl.Type.Width)
 	}
 
-	w = uint64(nl.Type.Width)
+	w := uint64(nl.Type.Width)
 
 	// Avoid taking the address for simple enough types.
 	//if(componentgen(N, nl))
 	//	return;
 
-	c = w % 8 // bytes
-	q = w / 8 // dwords
+	c := w % 8 // bytes
+	q := w / 8 // dwords
 
 	if reg[ppc64.REGRT1] > 0 {
 		gc.Fatal("R%d in use during clearfat", ppc64.REGRT1)
 	}
 
+	var r0 gc.Node
 	gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REG_R0) // r0 is always zero
+	var dst gc.Node
 	gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
 	reg[ppc64.REGRT1]++
 	agen(nl, &dst)
 
+	var boff uint64
+	var p *obj.Prog
 	if q > 128 {
 		p = gins(ppc64.ASUB, nil, &dst)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = 8
 
+		var end gc.Node
 		regalloc(&end, gc.Types[gc.Tptr], nil)
 		p = gins(ppc64.AMOVD, &dst, &end)
 		p.From.Type = obj.TYPE_ADDR
@@ -943,7 +774,7 @@ func clearfat(nl *gc.Node) {
 		p = gins(ppc64.AMOVDU, &r0, &dst)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Offset = 8
-		pl = p
+		pl := p
 
 		p = gins(ppc64.ACMP, &dst, &end)
 		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)
@@ -956,7 +787,7 @@ func clearfat(nl *gc.Node) {
 		p = gins(ppc64.ASUB, nil, &dst)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = 8
-		f = gc.Sysfunc("duffzero")
+		f := gc.Sysfunc("duffzero")
 		p = gins(obj.ADUFFZERO, nil, f)
 		gc.Afunclit(&p.To, f)
 
@@ -966,7 +797,7 @@ func clearfat(nl *gc.Node) {
 		// duffzero leaves R3 on the last zeroed dword
 		boff = 8
 	} else {
-		for t = 0; t < q; t++ {
+		for t := uint64(0); t < q; t++ {
 			p = gins(ppc64.AMOVD, &r0, &dst)
 			p.To.Type = obj.TYPE_MEM
 			p.To.Offset = int64(8 * t)
@@ -975,7 +806,7 @@ func clearfat(nl *gc.Node) {
 		boff = 8 * q
 	}
 
-	for t = 0; t < c; t++ {
+	for t := uint64(0); t < c; t++ {
 		p = gins(ppc64.AMOVB, &r0, &dst)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Offset = int64(t + boff)
@@ -987,11 +818,10 @@ func clearfat(nl *gc.Node) {
 // Called after regopt and peep have run.
 // Expand CHECKNIL pseudo-op into actual nil pointer check.
 func expandchecks(firstp *obj.Prog) {
-	var p *obj.Prog
 	var p1 *obj.Prog
 	var p2 *obj.Prog
 
-	for p = firstp; p != nil; p = p.Link {
+	for p := firstp; p != nil; p = p.Link {
 		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
 			fmt.Printf("expandchecks: %v\n", p)
 		}
diff --git a/src/cmd/9g/gsubr.go b/src/cmd/9g/gsubr.go
index 91e87ff015c664ea93f0e30dc254be542b17c2af..9a00434735c67a303ebaa05e753711f8f33fa0a3 100644
--- a/src/cmd/9g/gsubr.go
+++ b/src/cmd/9g/gsubr.go
@@ -63,16 +63,14 @@ var resvd = []int{
 }
 
 func ginit() {
-	var i int
-
-	for i = 0; i < len(reg); i++ {
+	for i := 0; i < len(reg); i++ {
 		reg[i] = 1
 	}
-	for i = 0; i < ppc64.NREG+ppc64.NFREG; i++ {
+	for i := 0; i < ppc64.NREG+ppc64.NFREG; i++ {
 		reg[i] = 0
 	}
 
-	for i = 0; i < len(resvd); i++ {
+	for i := 0; i < len(resvd); i++ {
 		reg[resvd[i]-ppc64.REG_R0]++
 	}
 }
@@ -80,13 +78,11 @@ func ginit() {
 var regpc [len(reg)]uint32
 
 func gclean() {
-	var i int
-
-	for i = 0; i < len(resvd); i++ {
+	for i := 0; i < len(resvd); i++ {
 		reg[resvd[i]-ppc64.REG_R0]--
 	}
 
-	for i = 0; i < len(reg); i++ {
+	for i := 0; i < len(reg); i++ {
 		if reg[i] != 0 {
 			gc.Yyerror("reg %v left allocated, %p\n", gc.Ctxt.Rconv(i+ppc64.REG_R0), regpc[i])
 		}
@@ -94,10 +90,9 @@ func gclean() {
 }
 
 func anyregalloc() bool {
-	var i int
 	var j int
 
-	for i = 0; i < len(reg); i++ {
+	for i := 0; i < len(reg); i++ {
 		if reg[i] == 0 {
 			goto ok
 		}
@@ -119,20 +114,15 @@ func anyregalloc() bool {
  * caller must regfree(n).
  */
 func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
-	var i int
-	var et int
-	var fixfree int
-	var fltfree int
-
 	if t == nil {
 		gc.Fatal("regalloc: t nil")
 	}
-	et = int(gc.Simtype[t.Etype])
+	et := int(gc.Simtype[t.Etype])
 
 	if gc.Debug['r'] != 0 {
-		fixfree = 0
-		fltfree = 0
-		for i = ppc64.REG_R0; i < ppc64.REG_F31; i++ {
+		fixfree := 0
+		fltfree := 0
+		for i := ppc64.REG_R0; i < ppc64.REG_F31; i++ {
 			if reg[i-ppc64.REG_R0] == 0 {
 				if i < ppc64.REG_F0 {
 					fixfree++
@@ -145,6 +135,7 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
 		fmt.Printf("regalloc fix %d flt %d free\n", fixfree, fltfree)
 	}
 
+	var i int
 	switch et {
 	case gc.TINT8,
 		gc.TUINT8,
@@ -172,7 +163,7 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
 		}
 
 		gc.Flusherrors()
-		for i = ppc64.REG_R0; i < ppc64.REG_R0+ppc64.NREG; i++ {
+		for i := int(ppc64.REG_R0); i < ppc64.REG_R0+ppc64.NREG; i++ {
 			fmt.Printf("R%d %p\n", i, regpc[i-ppc64.REG_R0])
 		}
 		gc.Fatal("out of fixed registers")
@@ -194,7 +185,7 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
 		}
 
 		gc.Flusherrors()
-		for i = ppc64.REG_F0; i < ppc64.REG_F0+ppc64.NREG; i++ {
+		for i := int(ppc64.REG_F0); i < ppc64.REG_F0+ppc64.NREG; i++ {
 			fmt.Printf("F%d %p\n", i, regpc[i-ppc64.REG_R0])
 		}
 		gc.Fatal("out of floating registers")
@@ -214,15 +205,13 @@ out:
 }
 
 func regfree(n *gc.Node) {
-	var i int
-
 	if n.Op == gc.ONAME {
 		return
 	}
 	if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
 		gc.Fatal("regfree: not a register")
 	}
-	i = int(n.Val.U.Reg) - ppc64.REG_R0
+	i := int(n.Val.U.Reg) - ppc64.REG_R0
 	if i == ppc64.REGSP-ppc64.REG_R0 {
 		return
 	}
@@ -244,13 +233,13 @@ func regfree(n *gc.Node) {
  */
 func ginscon(as int, c int64, n2 *gc.Node) {
 	var n1 gc.Node
-	var ntmp gc.Node
 
 	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
 
 	if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) {
 		// cannot have more than 16-bit of immediate in ADD, etc.
 		// instead, MOV into register first.
+		var ntmp gc.Node
 		regalloc(&ntmp, gc.Types[gc.TINT64], nil)
 
 		gins(ppc64.AMOVD, &n1, &ntmp)
@@ -268,7 +257,6 @@ func ginscon(as int, c int64, n2 *gc.Node) {
  */
 func ginscon2(as int, n2 *gc.Node, c int64) {
 	var n1 gc.Node
-	var ntmp gc.Node
 
 	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
 
@@ -290,6 +278,7 @@ func ginscon2(as int, n2 *gc.Node, c int64) {
 	}
 
 	// MOV n1 into register first
+	var ntmp gc.Node
 	regalloc(&ntmp, gc.Types[gc.TINT64], nil)
 
 	gins(ppc64.AMOVD, &n1, &ntmp)
@@ -328,24 +317,13 @@ func bignodes() {
  * hard part is conversions.
  */
 func gmove(f *gc.Node, t *gc.Node) {
-	var a int
-	var ft int
-	var tt int
-	var cvt *gc.Type
-	var r1 gc.Node
-	var r2 gc.Node
-	var r3 gc.Node
-	var con gc.Node
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-
 	if gc.Debug['M'] != 0 {
 		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
 	}
 
-	ft = gc.Simsimtype(f.Type)
-	tt = gc.Simsimtype(t.Type)
-	cvt = t.Type
+	ft := gc.Simsimtype(f.Type)
+	tt := gc.Simsimtype(t.Type)
+	cvt := t.Type
 
 	if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
 		gc.Complexmove(f, t)
@@ -353,12 +331,16 @@ func gmove(f *gc.Node, t *gc.Node) {
 	}
 
 	// cannot have two memory operands
+	var r2 gc.Node
+	var r1 gc.Node
+	var a int
 	if gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
 	}
 
 	// convert constant to desired type
 	if f.Op == gc.OLITERAL {
+		var con gc.Node
 		switch tt {
 		default:
 			gc.Convconst(&con, t.Type, &f.Val)
@@ -366,7 +348,9 @@ func gmove(f *gc.Node, t *gc.Node) {
 		case gc.TINT32,
 			gc.TINT16,
 			gc.TINT8:
+			var con gc.Node
 			gc.Convconst(&con, gc.Types[gc.TINT64], &f.Val)
+			var r1 gc.Node
 			regalloc(&r1, con.Type, t)
 			gins(ppc64.AMOVD, &con, &r1)
 			gmove(&r1, t)
@@ -376,7 +360,9 @@ func gmove(f *gc.Node, t *gc.Node) {
 		case gc.TUINT32,
 			gc.TUINT16,
 			gc.TUINT8:
+			var con gc.Node
 			gc.Convconst(&con, gc.Types[gc.TUINT64], &f.Val)
+			var r1 gc.Node
 			regalloc(&r1, con.Type, t)
 			gins(ppc64.AMOVD, &con, &r1)
 			gmove(&r1, t)
@@ -559,22 +545,24 @@ func gmove(f *gc.Node, t *gc.Node) {
 		gc.TFLOAT64<<16 | gc.TUINT64:
 		bignodes()
 
+		var r1 gc.Node
 		regalloc(&r1, gc.Types[ft], f)
 		gmove(f, &r1)
 		if tt == gc.TUINT64 {
 			regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
 			gmove(&bigf, &r2)
 			gins(ppc64.AFCMPU, &r1, &r2)
-			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)
+			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)
 			gins(ppc64.AFSUB, &r2, &r1)
 			gc.Patch(p1, gc.Pc)
 			regfree(&r2)
 		}
 
 		regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
+		var r3 gc.Node
 		regalloc(&r3, gc.Types[gc.TINT64], t)
 		gins(ppc64.AFCTIDZ, &r1, &r2)
-		p1 = gins(ppc64.AFMOVD, &r2, nil)
+		p1 := gins(ppc64.AFMOVD, &r2, nil)
 		p1.To.Type = obj.TYPE_MEM
 		p1.To.Reg = ppc64.REGSP
 		p1.To.Offset = -8
@@ -585,7 +573,7 @@ func gmove(f *gc.Node, t *gc.Node) {
 		regfree(&r2)
 		regfree(&r1)
 		if tt == gc.TUINT64 {
-			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1) // use CR0 here again
+			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again
 			gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
 			gins(ppc64.AMOVD, &bigi, &r1)
 			gins(ppc64.AADD, &r1, &r3)
@@ -622,21 +610,22 @@ func gmove(f *gc.Node, t *gc.Node) {
 		gc.TUINT64<<16 | gc.TFLOAT64:
 		bignodes()
 
+		var r1 gc.Node
 		regalloc(&r1, gc.Types[gc.TINT64], nil)
 		gmove(f, &r1)
 		if ft == gc.TUINT64 {
 			gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
 			gmove(&bigi, &r2)
 			gins(ppc64.ACMPU, &r1, &r2)
-			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
-			p2 = gins(ppc64.ASRD, nil, &r1)
+			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
+			p2 := gins(ppc64.ASRD, nil, &r1)
 			p2.From.Type = obj.TYPE_CONST
 			p2.From.Offset = 1
 			gc.Patch(p1, gc.Pc)
 		}
 
 		regalloc(&r2, gc.Types[gc.TFLOAT64], t)
-		p1 = gins(ppc64.AMOVD, &r1, nil)
+		p1 := gins(ppc64.AMOVD, &r1, nil)
 		p1.To.Type = obj.TYPE_MEM
 		p1.To.Reg = ppc64.REGSP
 		p1.To.Offset = -8
@@ -647,7 +636,7 @@ func gmove(f *gc.Node, t *gc.Node) {
 		gins(ppc64.AFCFID, &r2, &r2)
 		regfree(&r1)
 		if ft == gc.TUINT64 {
-			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1) // use CR0 here again
+			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1) // use CR0 here again
 			gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
 			gins(ppc64.AFMUL, &r1, &r2)
 			gc.Patch(p1, gc.Pc)
@@ -702,24 +691,19 @@ hard:
  *	as f, t
  */
 func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
-	var w int32
-	var p *obj.Prog
-	var af obj.Addr
-	var at obj.Addr
-
 	// TODO(austin): Add self-move test like in 6g (but be careful
 	// of truncation moves)
 
-	af = obj.Addr{}
+	af := obj.Addr{}
 
-	at = obj.Addr{}
+	at := obj.Addr{}
 	if f != nil {
 		gc.Naddr(f, &af, 1)
 	}
 	if t != nil {
 		gc.Naddr(t, &at, 1)
 	}
-	p = gc.Prog(as)
+	p := gc.Prog(as)
 	if f != nil {
 		p.From = af
 	}
@@ -730,7 +714,7 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 		fmt.Printf("%v\n", p)
 	}
 
-	w = 0
+	w := int32(0)
 	switch as {
 	case ppc64.AMOVB,
 		ppc64.AMOVBU,
@@ -768,8 +752,6 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 }
 
 func fixlargeoffset(n *gc.Node) {
-	var a gc.Node
-
 	if n == nil {
 		return
 	}
@@ -784,7 +766,7 @@ func fixlargeoffset(n *gc.Node) {
 		// this is used only in test/fixedbugs/issue6036.go.
 		gc.Fatal("offset too large: %v", gc.Nconv(n, 0))
 
-		a = *n
+		a := *n
 		a.Op = gc.OREGISTER
 		a.Type = gc.Types[gc.Tptr]
 		a.Xoffset = 0
@@ -798,13 +780,11 @@ func fixlargeoffset(n *gc.Node) {
  * return Axxx for Oxxx on type t.
  */
 func optoas(op int, t *gc.Type) int {
-	var a int
-
 	if t == nil {
 		gc.Fatal("optoas: t is nil")
 	}
 
-	a = obj.AXXX
+	a := obj.AXXX
 	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
 	default:
 		gc.Fatal("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
diff --git a/src/cmd/9g/peep.go b/src/cmd/9g/peep.go
index 486b316dcfcd818e0fac8e55693c101739a97998..f7c0a95819926adbba3a652377467f0d5407f786 100644
--- a/src/cmd/9g/peep.go
+++ b/src/cmd/9g/peep.go
@@ -40,19 +40,15 @@ import "cmd/internal/gc"
 var gactive uint32
 
 func peep(firstp *obj.Prog) {
-	var g *gc.Graph
-	var r *gc.Flow
-	var r1 *gc.Flow
-	var p *obj.Prog
-	var p1 *obj.Prog
-	var t int
-
-	g = gc.Flowstart(firstp, nil)
+	g := gc.Flowstart(firstp, nil)
 	if g == nil {
 		return
 	}
 	gactive = 0
 
+	var p *obj.Prog
+	var r *gc.Flow
+	var t int
 loop1:
 	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 		gc.Dumpit("loop1", g.Start, 0)
@@ -109,7 +105,9 @@ loop1:
 	/*
 	 * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
 	 */
-	for r = g.Start; r != nil; r = r.Link {
+	var p1 *obj.Prog
+	var r1 *gc.Flow
+	for r := g.Start; r != nil; r = r.Link {
 		p = r.Prog
 		switch p.As {
 		default:
@@ -151,7 +149,7 @@ loop1:
 	 * look for OP x,y,R; CMP R, $0 -> OPCC x,y,R
 	 * when OP can set condition codes correctly
 	 */
-	for r = g.Start; r != nil; r = r.Link {
+	for r := g.Start; r != nil; r = r.Link {
 		p = r.Prog
 		switch p.As {
 		case ppc64.ACMP,
@@ -350,9 +348,7 @@ ret:
 }
 
 func excise(r *gc.Flow) {
-	var p *obj.Prog
-
-	p = r.Prog
+	p := r.Prog
 	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 		fmt.Printf("%v ===delete===\n", p)
 	}
@@ -402,22 +398,17 @@ func regtyp(a *obj.Addr) bool {
  * above sequences.  This returns 1 if it modified any instructions.
  */
 func subprop(r0 *gc.Flow) bool {
-	var p *obj.Prog
-	var v1 *obj.Addr
-	var v2 *obj.Addr
-	var r *gc.Flow
-	var t int
-	var info gc.ProgInfo
-
-	p = r0.Prog
-	v1 = &p.From
+	p := r0.Prog
+	v1 := &p.From
 	if !regtyp(v1) {
 		return false
 	}
-	v2 = &p.To
+	v2 := &p.To
 	if !regtyp(v2) {
 		return false
 	}
+	var r *gc.Flow
+	var info gc.ProgInfo
 	for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
 		if gc.Uniqs(r) == nil {
 			break
@@ -469,7 +460,7 @@ gotit:
 		}
 	}
 
-	t = int(v1.Reg)
+	t := int(v1.Reg)
 	v1.Reg = v2.Reg
 	v2.Reg = int16(t)
 	if gc.Debug['P'] != 0 {
@@ -491,13 +482,9 @@ gotit:
  *	set v2	return success (caller can remove v1->v2 move)
  */
 func copyprop(r0 *gc.Flow) bool {
-	var p *obj.Prog
-	var v1 *obj.Addr
-	var v2 *obj.Addr
-
-	p = r0.Prog
-	v1 = &p.From
-	v2 = &p.To
+	p := r0.Prog
+	v1 := &p.From
+	v2 := &p.To
 	if copyas(v1, v2) {
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("eliminating self-move\n", r0.Prog)
@@ -515,9 +502,6 @@ func copyprop(r0 *gc.Flow) bool {
 // copy1 replaces uses of v2 with v1 starting at r and returns 1 if
 // all uses were rewritten.
 func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
-	var t int
-	var p *obj.Prog
-
 	if uint32(r.Active) == gactive {
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("act set; return 1\n")
@@ -529,6 +513,8 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
 	if gc.Debug['P'] != 0 {
 		fmt.Printf("copy1 replace %v with %v f=%d\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
 	}
+	var t int
+	var p *obj.Prog
 	for ; r != nil; r = r.S1 {
 		p = r.Prog
 		if gc.Debug['P'] != 0 {
diff --git a/src/cmd/9g/prog.go b/src/cmd/9g/prog.go
index e188f0dc6535a3b5e179e9d8336255afe601ba84..b97125604a86dcad182ae8cedd9d2d8fddabafd0 100644
--- a/src/cmd/9g/prog.go
+++ b/src/cmd/9g/prog.go
@@ -108,10 +108,6 @@ var initproginfo_initialized int
 
 func initproginfo() {
 	var addvariant = []int{V_CC, V_V, V_CC | V_V}
-	var as int
-	var as2 int
-	var i int
-	var variant int
 
 	if initproginfo_initialized != 0 {
 		return
@@ -120,7 +116,10 @@ func initproginfo() {
 
 	// Perform one-time expansion of instructions in progtable to
 	// their CC, V, and VCC variants
-	for as = 0; as < len(progtable); as++ {
+	var as2 int
+	var i int
+	var variant int
+	for as := 0; as < len(progtable); as++ {
 		if progtable[as].Flags == 0 {
 			continue
 		}
@@ -272,15 +271,13 @@ var varianttable = [ppc64.ALAST][4]int{
 var initvariants_initialized int
 
 func initvariants() {
-	var i int
-	var j int
-
 	if initvariants_initialized != 0 {
 		return
 	}
 	initvariants_initialized = 1
 
-	for i = 0; i < len(varianttable); i++ {
+	var j int
+	for i := 0; i < len(varianttable); i++ {
 		if varianttable[i][0] == 0 {
 			// Instruction has no variants
 			varianttable[i][0] = i
@@ -299,9 +296,8 @@ func initvariants() {
 
 // as2variant returns the variant (V_*) flags of instruction as.
 func as2variant(as int) int {
-	var i int
 	initvariants()
-	for i = 0; i < len(varianttable[as]); i++ {
+	for i := 0; i < len(varianttable[as]); i++ {
 		if varianttable[as][i] == as {
 			return i
 		}
diff --git a/src/cmd/9g/reg.go b/src/cmd/9g/reg.go
index faed60d0ee061fe9a3ab572e8fd9025e11f0b56b..b1b681a6fbd5a4c12fc3622a517cb814ed870d3b 100644
--- a/src/cmd/9g/reg.go
+++ b/src/cmd/9g/reg.go
@@ -110,10 +110,8 @@ func regnames(n *int) []string {
 }
 
 func excludedregs() uint64 {
-	var regbits uint64
-
 	// Exclude registers with fixed functions
-	regbits = 1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS)
+	regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS))
 
 	// Also exclude floating point registers with fixed constants
 	regbits |= RtoB(ppc64.REG_F27) | RtoB(ppc64.REG_F28) | RtoB(ppc64.REG_F29) | RtoB(ppc64.REG_F30) | RtoB(ppc64.REG_F31)
diff --git a/src/cmd/internal/gc/align.go b/src/cmd/internal/gc/align.go
index 994b7a250862ed26089f43fdb80ce7e8a023ba3a..a588ca3d7ea1db6b1e3f70ccab98c5b35849b103 100644
--- a/src/cmd/internal/gc/align.go
+++ b/src/cmd/internal/gc/align.go
@@ -22,11 +22,8 @@ func Rnd(o int64, r int64) int64 {
 }
 
 func offmod(t *Type) {
-	var f *Type
-	var o int32
-
-	o = 0
-	for f = t.Type; f != nil; f = f.Down {
+	o := int32(0)
+	for f := t.Type; f != nil; f = f.Down {
 		if f.Etype != TFIELD {
 			Fatal("offmod: not TFIELD: %v", Tconv(f, obj.FmtLong))
 		}
@@ -40,19 +37,14 @@ func offmod(t *Type) {
 }
 
 func widstruct(errtype *Type, t *Type, o int64, flag int) int64 {
-	var f *Type
-	var w int64
-	var maxalign int32
-	var starto int64
-	var lastzero int64
-
-	starto = o
-	maxalign = int32(flag)
+	starto := o
+	maxalign := int32(flag)
 	if maxalign < 1 {
 		maxalign = 1
 	}
-	lastzero = 0
-	for f = t.Type; f != nil; f = f.Down {
+	lastzero := int64(0)
+	var w int64
+	for f := t.Type; f != nil; f = f.Down {
 		if f.Etype != TFIELD {
 			Fatal("widstruct: not TFIELD: %v", Tconv(f, obj.FmtLong))
 		}
@@ -118,11 +110,6 @@ func widstruct(errtype *Type, t *Type, o int64, flag int) int64 {
 }
 
 func dowidth(t *Type) {
-	var et int32
-	var w int64
-	var lno int
-	var t1 *Type
-
 	if Widthptr == 0 {
 		Fatal("dowidth without betypeinit")
 	}
@@ -136,7 +123,7 @@ func dowidth(t *Type) {
 	}
 
 	if t.Width == -2 {
-		lno = int(lineno)
+		lno := int(lineno)
 		lineno = int32(t.Lineno)
 		if t.Broke == 0 {
 			t.Broke = 1
@@ -157,12 +144,12 @@ func dowidth(t *Type) {
 	// defer checkwidth calls until after we're done
 	defercalc++
 
-	lno = int(lineno)
+	lno := int(lineno)
 	lineno = int32(t.Lineno)
 	t.Width = -2
 	t.Align = 0
 
-	et = int32(t.Etype)
+	et := int32(t.Etype)
 	switch et {
 	case TFUNC,
 		TCHAN,
@@ -177,7 +164,7 @@ func dowidth(t *Type) {
 		}
 	}
 
-	w = 0
+	w := int64(0)
 	switch et {
 	default:
 		Fatal("dowidth: unknown type: %v", Tconv(t, 0))
@@ -233,13 +220,13 @@ func dowidth(t *Type) {
 
 		// make fake type to check later to
 		// trigger channel argument check.
-		t1 = typ(TCHANARGS)
+		t1 := typ(TCHANARGS)
 
 		t1.Type = t
 		checkwidth(t1)
 
 	case TCHANARGS:
-		t1 = t.Type
+		t1 := t.Type
 		dowidth(t.Type) // just in case
 		if t1.Type.Width >= 1<<16 {
 			Yyerror("channel element type too large (>64kB)")
@@ -277,11 +264,9 @@ func dowidth(t *Type) {
 			break
 		}
 		if t.Bound >= 0 {
-			var cap uint64
-
 			dowidth(t.Type)
 			if t.Type.Width != 0 {
-				cap = (uint64(Thearch.MAXWIDTH) - 1) / uint64(t.Type.Width)
+				cap := (uint64(Thearch.MAXWIDTH) - 1) / uint64(t.Type.Width)
 				if uint64(t.Bound) > cap {
 					Yyerror("type %v larger than address space", Tconv(t, obj.FmtLong))
 				}
@@ -311,7 +296,7 @@ func dowidth(t *Type) {
 		// make fake type to check later to
 	// trigger function argument computation.
 	case TFUNC:
-		t1 = typ(TFUNCARGS)
+		t1 := typ(TFUNCARGS)
 
 		t1.Type = t
 		checkwidth(t1)
@@ -322,7 +307,7 @@ func dowidth(t *Type) {
 		// function is 3 concatenated structures;
 	// compute their widths as side-effect.
 	case TFUNCARGS:
-		t1 = t.Type
+		t1 := t.Type
 
 		w = widstruct(t.Type, *getthis(t1), 0, 0)
 		w = widstruct(t.Type, *getinarg(t1), w, Widthreg)
@@ -382,8 +367,6 @@ var tlfree *TypeList
 var tlq *TypeList
 
 func checkwidth(t *Type) {
-	var l *TypeList
-
 	if t == nil {
 		return
 	}
@@ -404,7 +387,7 @@ func checkwidth(t *Type) {
 	}
 	t.Deferwidth = 1
 
-	l = tlfree
+	l := tlfree
 	if l != nil {
 		tlfree = l.next
 	} else {
@@ -425,12 +408,10 @@ func defercheckwidth() {
 }
 
 func resumecheckwidth() {
-	var l *TypeList
-
 	if defercalc == 0 {
 		Fatal("resumecheckwidth")
 	}
-	for l = tlq; l != nil; l = tlq {
+	for l := tlq; l != nil; l = tlq {
 		l.t.Deferwidth = 0
 		tlq = l.next
 		dowidth(l.t)
@@ -442,18 +423,11 @@ func resumecheckwidth() {
 }
 
 func typeinit() {
-	var i int
-	var etype int
-	var sameas int
-	var t *Type
-	var s *Sym
-	var s1 *Sym
-
 	if Widthptr == 0 {
 		Fatal("typeinit before betypeinit")
 	}
 
-	for i = 0; i < NTYPE; i++ {
+	for i := 0; i < NTYPE; i++ {
 		Simtype[i] = uint8(i)
 	}
 
@@ -463,7 +437,7 @@ func typeinit() {
 	Types[TPTR64] = typ(TPTR64)
 	dowidth(Types[TPTR64])
 
-	t = typ(TUNSAFEPTR)
+	t := typ(TUNSAFEPTR)
 	Types[TUNSAFEPTR] = t
 	t.Sym = Pkglookup("Pointer", unsafepkg)
 	t.Sym.Def = typenod(t)
@@ -475,7 +449,7 @@ func typeinit() {
 		Tptr = TPTR64
 	}
 
-	for i = TINT8; i <= TUINT64; i++ {
+	for i := TINT8; i <= TUINT64; i++ {
 		Isint[i] = 1
 	}
 	Isint[TINT] = 1
@@ -502,7 +476,7 @@ func typeinit() {
 	/*
 	 * initialize okfor
 	 */
-	for i = 0; i < NTYPE; i++ {
+	for i := 0; i < NTYPE; i++ {
 		if Isint[i] != 0 || i == TIDEAL {
 			okforeq[i] = 1
 			okforcmp[i] = 1
@@ -566,6 +540,7 @@ func typeinit() {
 
 	okforcmp[TSTRING] = 1
 
+	var i int
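+	// i is reused by the remaining loops in this function (including the Typedefs loop below), so it stays a plain var rather than a per-loop :=.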
 	for i = 0; i < len(okfor); i++ {
 		okfor[i] = okfornone[:]
 	}
@@ -655,6 +630,10 @@ func typeinit() {
 	Simtype[TUNSAFEPTR] = uint8(Tptr)
 
 	/* pick up the backend thearch.typedefs */
+	var s1 *Sym
+	var etype int
+	var sameas int
+	var s *Sym
 	for i = range Thearch.Typedefs {
 		s = Lookup(Thearch.Typedefs[i].Name)
 		s1 = Pkglookup(Thearch.Typedefs[i].Name, builtinpkg)
@@ -703,13 +682,11 @@ func typeinit() {
  */
 func Argsize(t *Type) int {
 	var save Iter
-	var fp *Type
-	var w int64
 	var x int64
 
-	w = 0
+	w := int64(0)
 
-	fp = Structfirst(&save, Getoutarg(t))
+	fp := Structfirst(&save, Getoutarg(t))
 	for fp != nil {
 		x = fp.Width + fp.Type.Width
 		if x > w {
diff --git a/src/cmd/internal/gc/bits.go b/src/cmd/internal/gc/bits.go
index 23da356efecbeeb336744fd8dfad215b1a152310..95421e59f05b203c8ea15b31f10699355816cffd 100644
--- a/src/cmd/internal/gc/bits.go
+++ b/src/cmd/internal/gc/bits.go
@@ -67,9 +67,7 @@ bnot(Bits a)
 }
 */
 func bany(a *Bits) bool {
-	var i int
-
-	for i = 0; i < BITS; i++ {
+	for i := 0; i < BITS; i++ {
 		if a.b[i] != 0 {
 			return true
 		}
@@ -90,10 +88,9 @@ beq(Bits a, Bits b)
 }
 */
 func bnum(a Bits) int {
-	var i int
 	var b uint64
 
-	for i = 0; i < BITS; i++ {
+	for i := 0; i < BITS; i++ {
 		b = a.b[i]
 		if b != 0 {
 			return 64*i + Bitno(b)
@@ -105,9 +102,7 @@ func bnum(a Bits) int {
 }
 
 func blsh(n uint) Bits {
-	var c Bits
-
-	c = zbits
+	c := zbits
 	c.b[n/64] = 1 << (n % 64)
 	return c
 }
@@ -125,9 +120,7 @@ func biclr(a *Bits, n uint) {
 }
 
 func Bitno(b uint64) int {
-	var i int
-
-	for i = 0; i < 64; i++ {
+	for i := 0; i < 64; i++ {
 		if b&(1<<uint(i)) != 0 {
 			return i
 		}
@@ -140,9 +133,8 @@ func Qconv(bits Bits, flag int) string {
 	var fp string
 
 	var i int
-	var first int
 
-	first = 1
+	first := 1
 
 	for bany(&bits) {
 		i = bnum(bits)
diff --git a/src/cmd/internal/gc/bv.go b/src/cmd/internal/gc/bv.go
index 002b5a4135a6f456310639fded6f7d532d9dfc22..e7fdd70b71487cefc48f233a221abcb38282ba84 100644
--- a/src/cmd/internal/gc/bv.go
+++ b/src/cmd/internal/gc/bv.go
@@ -63,16 +63,13 @@ func bvcopy(dst *Bvec, src *Bvec) {
 }
 
 func bvconcat(src1 *Bvec, src2 *Bvec) *Bvec {
-	var dst *Bvec
-	var i int32
-
-	dst = bvalloc(src1.n + src2.n)
-	for i = 0; i < src1.n; i++ {
+	dst := bvalloc(src1.n + src2.n)
+	for i := int32(0); i < src1.n; i++ {
 		if bvget(src1, i) != 0 {
 			bvset(dst, i)
 		}
 	}
-	for i = 0; i < src2.n; i++ {
+	for i := int32(0); i < src2.n; i++ {
 		if bvget(src2, i) != 0 {
 			bvset(dst, i+src1.n)
 		}
@@ -90,8 +87,6 @@ func bvget(bv *Bvec, i int32) int {
 // bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
 // If there is no such index, bvnext returns -1.
 func bvnext(bv *Bvec, i int32) int {
-	var w uint32
-
 	if i >= bv.n {
 		return -1
 	}
@@ -110,7 +105,7 @@ func bvnext(bv *Bvec, i int32) int {
 	}
 
 	// Find 1 bit.
-	w = bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK)
+	w := bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK)
 
 	for w&1 == 0 {
 		w >>= 1
@@ -121,9 +116,7 @@ func bvnext(bv *Bvec, i int32) int {
 }
 
 func bvisempty(bv *Bvec) bool {
-	var i int32
-
-	for i = 0; i < bv.n; i += WORDBITS {
+	for i := int32(0); i < bv.n; i += WORDBITS {
 		if bv.b[i>>WORDSHIFT] != 0 {
 			return false
 		}
@@ -173,21 +166,17 @@ func bvand(dst *Bvec, src1 *Bvec, src2 *Bvec) {
 }
 
 func bvprint(bv *Bvec) {
-	var i int32
-
 	fmt.Printf("#*")
-	for i = 0; i < bv.n; i++ {
+	for i := int32(0); i < bv.n; i++ {
 		fmt.Printf("%d", bvget(bv, i))
 	}
 }
 
 func bvreset(bv *Bvec, i int32) {
-	var mask uint32
-
 	if i < 0 || i >= bv.n {
 		Fatal("bvreset: index %d is out of bounds with length %d\n", i, bv.n)
 	}
-	mask = ^(1 << uint(i%WORDBITS))
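+	// Inside the uint32 conversion the untyped constant 1 assumes type uint32, so the shift and complement are computed in 32 bits.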
+	mask := uint32(^(1 << uint(i%WORDBITS)))
 	bv.b[i/WORDBITS] &= mask
 }
 
@@ -198,11 +187,9 @@ func bvresetall(bv *Bvec) {
 }
 
 func bvset(bv *Bvec, i int32) {
-	var mask uint32
-
 	if i < 0 || i >= bv.n {
 		Fatal("bvset: index %d is out of bounds with length %d\n", i, bv.n)
 	}
-	mask = 1 << uint(i%WORDBITS)
+	mask := uint32(1 << uint(i%WORDBITS))
 	bv.b[i/WORDBITS] |= mask
 }
diff --git a/src/cmd/internal/gc/closure.go b/src/cmd/internal/gc/closure.go
index c2c802eb2d9bbc63a81d256f61293ff5d931cf90..20a0349f0a36bedadc88670119a828ae72f4d516 100644
--- a/src/cmd/internal/gc/closure.go
+++ b/src/cmd/internal/gc/closure.go
@@ -13,12 +13,10 @@ import (
  * function literals aka closures
  */
 func closurehdr(ntype *Node) {
-	var n *Node
 	var name *Node
 	var a *Node
-	var l *NodeList
 
-	n = Nod(OCLOSURE, nil, nil)
+	n := Nod(OCLOSURE, nil, nil)
 	n.Ntype = ntype
 	n.Funcdepth = Funcdepth
 	n.Outerfunc = Curfn
@@ -35,7 +33,7 @@ func closurehdr(ntype *Node) {
 	n.Rlist = ntype.Rlist
 	ntype.List = nil
 	ntype.Rlist = nil
-	for l = n.List; l != nil; l = l.Next {
+	for l := n.List; l != nil; l = l.Next {
 		name = l.N.Left
 		if name != nil {
 			name = newname(name.Sym)
@@ -48,7 +46,7 @@ func closurehdr(ntype *Node) {
 		ntype.List = list(ntype.List, a)
 	}
 
-	for l = n.Rlist; l != nil; l = l.Next {
+	for l := n.Rlist; l != nil; l = l.Next {
 		name = l.N.Left
 		if name != nil {
 			name = newname(name.Sym)
@@ -58,15 +56,11 @@ func closurehdr(ntype *Node) {
 }
 
 func closurebody(body *NodeList) *Node {
-	var func_ *Node
-	var v *Node
-	var l *NodeList
-
 	if body == nil {
 		body = list1(Nod(OEMPTY, nil, nil))
 	}
 
-	func_ = Curfn
+	func_ := Curfn
 	func_.Nbody = body
 	func_.Endlineno = lineno
 	funcbody(func_)
@@ -75,7 +69,8 @@ func closurebody(body *NodeList) *Node {
 	// ordinary ones in the symbol table; see oldname.
 	// unhook them.
 	// make the list of pointers for the closure call.
-	for l = func_.Cvars; l != nil; l = l.Next {
+	var v *Node
+	for l := func_.Cvars; l != nil; l = l.Next {
 		v = l.N
 		v.Closure.Closure = v.Outer
 		v.Outerexpr = oldname(v.Sym)
@@ -85,12 +80,9 @@ func closurebody(body *NodeList) *Node {
 }
 
 func typecheckclosure(func_ *Node, top int) {
-	var oldfn *Node
 	var n *Node
-	var l *NodeList
-	var olddd int
 
-	for l = func_.Cvars; l != nil; l = l.Next {
+	for l := func_.Cvars; l != nil; l = l.Next {
 		n = l.N.Closure
 		if n.Captured == 0 {
 			n.Captured = 1
@@ -106,13 +98,13 @@ func typecheckclosure(func_ *Node, top int) {
 		}
 	}
 
-	for l = func_.Dcl; l != nil; l = l.Next {
+	for l := func_.Dcl; l != nil; l = l.Next {
 		if l.N.Op == ONAME && (l.N.Class == PPARAM || l.N.Class == PPARAMOUT) {
 			l.N.Decldepth = 1
 		}
 	}
 
-	oldfn = Curfn
+	oldfn := Curfn
 	typecheck(&func_.Ntype, Etype)
 	func_.Type = func_.Ntype.Type
 	func_.Top = top
@@ -123,7 +115,7 @@ func typecheckclosure(func_ *Node, top int) {
 	// underlying closure function we create is added to xtop.
 	if Curfn != nil && func_.Type != nil {
 		Curfn = func_
-		olddd = decldepth
+		olddd := decldepth
 		decldepth = 1
 		typechecklist(func_.Nbody, Etop)
 		decldepth = olddd
@@ -143,16 +135,12 @@ func typecheckclosure(func_ *Node, top int) {
 var closurename_closgen int
 
 func closurename(n *Node) *Sym {
-	var outer string
-	var prefix string
-	var gen int
-
 	if n.Sym != nil {
 		return n.Sym
 	}
-	gen = 0
-	outer = ""
-	prefix = ""
+	gen := 0
+	outer := ""
+	prefix := ""
 	if n.Outerfunc == nil {
 		// Global closure.
 		outer = "glob"
@@ -192,20 +180,17 @@ func closurename(n *Node) *Sym {
 }
 
 func makeclosure(func_ *Node) *Node {
-	var xtype *Node
-	var xfunc *Node
-
 	/*
 	 * wrap body in external function
 	 * that begins by reading closure parameters.
 	 */
-	xtype = Nod(OTFUNC, nil, nil)
+	xtype := Nod(OTFUNC, nil, nil)
 
 	xtype.List = func_.List
 	xtype.Rlist = func_.Rlist
 
 	// create the function
-	xfunc = Nod(ODCLFUNC, nil, nil)
+	xfunc := Nod(ODCLFUNC, nil, nil)
 
 	xfunc.Nname = newname(closurename(func_))
 	xfunc.Nname.Sym.Flags |= SymExported // disable export
@@ -239,18 +224,15 @@ func makeclosure(func_ *Node) *Node {
 // We use value capturing for values <= 128 bytes that are never reassigned
 // after capturing (effectively constant).
 func capturevars(xfunc *Node) {
-	var func_ *Node
 	var v *Node
 	var outer *Node
-	var l *NodeList
-	var lno int
 
-	lno = int(lineno)
+	lno := int(lineno)
 	lineno = xfunc.Lineno
 
-	func_ = xfunc.Closure
+	func_ := xfunc.Closure
 	func_.Enter = nil
-	for l = func_.Cvars; l != nil; l = l.Next {
+	for l := func_.Cvars; l != nil; l = l.Next {
 		v = l.N
 		if v.Type == nil {
 			// if v->type is nil, it means v looked like it was
@@ -280,13 +262,11 @@ func capturevars(xfunc *Node) {
 		}
 
 		if Debug['m'] > 1 {
-			var name *Sym
-			var how string
-			name = nil
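+			// (*Sym)(nil) supplies the typed nil that := requires; a bare nil carries no type.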
+			name := (*Sym)(nil)
 			if v.Curfn != nil && v.Curfn.Nname != nil {
 				name = v.Curfn.Nname.Sym
 			}
-			how = "ref"
+			how := "ref"
 			if v.Byval != 0 {
 				how = "value"
 			}
@@ -303,22 +283,9 @@ func capturevars(xfunc *Node) {
 // transformclosure is called in a separate phase after escape analysis.
 // It transforms closure bodies to properly reference captured variables.
 func transformclosure(xfunc *Node) {
-	var func_ *Node
-	var cv *Node
-	var addr *Node
-	var v *Node
-	var f *Node
-	var l *NodeList
-	var body *NodeList
-	var param **Type
-	var fld *Type
-	var offset int64
-	var lno int
-	var nvar int
-
-	lno = int(lineno)
+	lno := int(lineno)
 	lineno = xfunc.Lineno
-	func_ = xfunc.Closure
+	func_ := xfunc.Closure
 
 	if func_.Top&Ecall != 0 {
 		// If the closure is directly called, we transform it to a plain function call
@@ -337,15 +304,18 @@ func transformclosure(xfunc *Node) {
 		//	}(42, byval, &byref)
 
 		// f is ONAME of the actual function.
-		f = xfunc.Nname
+		f := xfunc.Nname
 
 		// Get pointer to input arguments and rewind to the end.
 		// We are going to append captured variables to input args.
-		param = &getinargx(f.Type).Type
+		param := &getinargx(f.Type).Type
 
 		for ; *param != nil; param = &(*param).Down {
 		}
-		for l = func_.Cvars; l != nil; l = l.Next {
+		var v *Node
+		var addr *Node
+		var fld *Type
+		for l := func_.Cvars; l != nil; l = l.Next {
 			v = l.N
 			if v.Op == OXXX {
 				continue
@@ -390,11 +360,14 @@ func transformclosure(xfunc *Node) {
 		xfunc.Type = f.Type // update type of ODCLFUNC
 	} else {
 		// The closure is not called, so it is going to stay as closure.
-		nvar = 0
-
-		body = nil
-		offset = int64(Widthptr)
-		for l = func_.Cvars; l != nil; l = l.Next {
+		nvar := 0
+
+		body := (*NodeList)(nil)
+		offset := int64(Widthptr)
+		var addr *Node
+		var v *Node
+		var cv *Node
+		for l := func_.Cvars; l != nil; l = l.Next {
 			v = l.N
 			if v.Op == OXXX {
 				continue
@@ -450,12 +423,6 @@ func transformclosure(xfunc *Node) {
 }
 
 func walkclosure(func_ *Node, init **NodeList) *Node {
-	var clos *Node
-	var typ *Node
-	var typ1 *Node
-	var v *Node
-	var l *NodeList
-
 	// If no closure vars, don't bother wrapping.
 	if func_.Cvars == nil {
 		return func_.Closure.Nname
@@ -475,10 +442,12 @@ func walkclosure(func_ *Node, init **NodeList) *Node {
 	// the struct is unnamed so that closures in multiple packages with the
 	// same struct type can share the descriptor.
 
-	typ = Nod(OTSTRUCT, nil, nil)
+	typ := Nod(OTSTRUCT, nil, nil)
 
 	typ.List = list1(Nod(ODCLFIELD, newname(Lookup(".F")), typenod(Types[TUINTPTR])))
-	for l = func_.Cvars; l != nil; l = l.Next {
+	var typ1 *Node
+	var v *Node
+	for l := func_.Cvars; l != nil; l = l.Next {
 		v = l.N
 		if v.Op == OXXX {
 			continue
@@ -490,7 +459,7 @@ func walkclosure(func_ *Node, init **NodeList) *Node {
 		typ.List = list(typ.List, Nod(ODCLFIELD, newname(v.Sym), typ1))
 	}
 
-	clos = Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
+	clos := Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
 	clos.Esc = func_.Esc
 	clos.Right.Implicit = 1
 	clos.List = concat(list1(Nod(OCFUNC, func_.Closure.Nname, nil)), func_.Enter)
@@ -541,34 +510,15 @@ func typecheckpartialcall(fn *Node, sym *Node) {
 var makepartialcall_gopkg *Pkg
 
 func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
-	var ptr *Node
-	var n *Node
-	var fld *Node
-	var call *Node
-	var xtype *Node
-	var xfunc *Node
-	var cv *Node
-	var savecurfn *Node
-	var rcvrtype *Type
-	var basetype *Type
-	var t *Type
-	var body *NodeList
-	var l *NodeList
-	var callargs *NodeList
-	var retargs *NodeList
 	var p string
-	var sym *Sym
-	var spkg *Pkg
-	var i int
-	var ddd int
 
-	rcvrtype = fn.Left.Type
+	rcvrtype := fn.Left.Type
 	if exportname(meth.Sym.Name) {
 		p = fmt.Sprintf("(%v).%s-fm", Tconv(rcvrtype, obj.FmtLeft|obj.FmtShort), meth.Sym.Name)
 	} else {
 		p = fmt.Sprintf("(%v).(%v)-fm", Tconv(rcvrtype, obj.FmtLeft|obj.FmtShort), Sconv(meth.Sym, obj.FmtLeft))
 	}
-	basetype = rcvrtype
+	basetype := rcvrtype
 	if Isptr[rcvrtype.Etype] != 0 {
 		basetype = basetype.Type
 	}
@@ -576,7 +526,7 @@ func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
 		Fatal("missing base type for %v", Tconv(rcvrtype, 0))
 	}
 
-	spkg = nil
+	spkg := (*Pkg)(nil)
 	if basetype.Sym != nil {
 		spkg = basetype.Sym.Pkg
 	}
@@ -587,24 +537,26 @@ func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
 		spkg = makepartialcall_gopkg
 	}
 
-	sym = Pkglookup(p, spkg)
+	sym := Pkglookup(p, spkg)
 
 	if sym.Flags&SymUniq != 0 {
 		return sym.Def
 	}
 	sym.Flags |= SymUniq
 
-	savecurfn = Curfn
+	savecurfn := Curfn
 	Curfn = nil
 
-	xtype = Nod(OTFUNC, nil, nil)
-	i = 0
-	l = nil
-	callargs = nil
-	ddd = 0
-	xfunc = Nod(ODCLFUNC, nil, nil)
+	xtype := Nod(OTFUNC, nil, nil)
+	i := 0
+	l := (*NodeList)(nil)
+	callargs := (*NodeList)(nil)
+	ddd := 0
+	xfunc := Nod(ODCLFUNC, nil, nil)
 	Curfn = xfunc
-	for t = getinargx(t0).Type; t != nil; t = t.Down {
+	var fld *Node
+	var n *Node
+	for t := getinargx(t0).Type; t != nil; t = t.Down {
 		namebuf = fmt.Sprintf("a%d", i)
 		i++
 		n = newname(Lookup(namebuf))
@@ -623,8 +575,8 @@ func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
 	xtype.List = l
 	i = 0
 	l = nil
-	retargs = nil
-	for t = getoutargx(t0).Type; t != nil; t = t.Down {
+	retargs := (*NodeList)(nil)
+	for t := getoutargx(t0).Type; t != nil; t = t.Down {
 		namebuf = fmt.Sprintf("r%d", i)
 		i++
 		n = newname(Lookup(namebuf))
@@ -644,16 +596,16 @@ func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
 	declare(xfunc.Nname, PFUNC)
 
 	// Declare and initialize variable holding receiver.
-	body = nil
+	body := (*NodeList)(nil)
 
 	xfunc.Needctxt = true
-	cv = Nod(OCLOSUREVAR, nil, nil)
+	cv := Nod(OCLOSUREVAR, nil, nil)
 	cv.Xoffset = int64(Widthptr)
 	cv.Type = rcvrtype
 	if int(cv.Type.Align) > Widthptr {
 		cv.Xoffset = int64(cv.Type.Align)
 	}
-	ptr = Nod(ONAME, nil, nil)
+	ptr := Nod(ONAME, nil, nil)
 	ptr.Sym = Lookup("rcvr")
 	ptr.Class = PAUTO
 	ptr.Addable = 1
@@ -669,13 +621,13 @@ func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
 		body = list(body, Nod(OAS, ptr, Nod(OADDR, cv, nil)))
 	}
 
-	call = Nod(OCALL, Nod(OXDOT, ptr, meth), nil)
+	call := Nod(OCALL, Nod(OXDOT, ptr, meth), nil)
 	call.List = callargs
 	call.Isddd = uint8(ddd)
 	if t0.Outtuple == 0 {
 		body = list(body, call)
 	} else {
-		n = Nod(OAS2, nil, nil)
+		n := Nod(OAS2, nil, nil)
 		n.List = retargs
 		n.Rlist = list1(call)
 		body = list(body, n)
@@ -694,9 +646,6 @@ func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
 }
 
 func walkpartialcall(n *Node, init **NodeList) *Node {
-	var clos *Node
-	var typ *Node
-
 	// Create closure in the form of a composite literal.
 	// For x.M with receiver (x) type T, the generated code looks like:
 	//
@@ -712,11 +661,11 @@ func walkpartialcall(n *Node, init **NodeList) *Node {
 		checknil(n.Left, init)
 	}
 
-	typ = Nod(OTSTRUCT, nil, nil)
+	typ := Nod(OTSTRUCT, nil, nil)
 	typ.List = list1(Nod(ODCLFIELD, newname(Lookup("F")), typenod(Types[TUINTPTR])))
 	typ.List = list(typ.List, Nod(ODCLFIELD, newname(Lookup("R")), typenod(n.Left.Type)))
 
-	clos = Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
+	clos := Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
 	clos.Esc = n.Esc
 	clos.Right.Implicit = 1
 	clos.List = list1(Nod(OCFUNC, n.Nname.Nname, nil))
diff --git a/src/cmd/internal/gc/const.go b/src/cmd/internal/gc/const.go
index 49d3bcc56874bf563dd4fd14d28789af39511667..43c8809fecfc1ed43344a65257eac0ad37be24c9 100644
--- a/src/cmd/internal/gc/const.go
+++ b/src/cmd/internal/gc/const.go
@@ -11,31 +11,27 @@ import "cmd/internal/obj"
  * according to type; return truncated value.
  */
 func truncfltlit(oldv *Mpflt, t *Type) *Mpflt {
-	var d float64
-	var fv *Mpflt
-	var v Val
-
 	if t == nil {
 		return oldv
 	}
 
-	v = Val{}
+	v := Val{}
 	v.Ctype = CTFLT
 	v.U.Fval = oldv
 	overflow(v, t)
 
-	fv = new(Mpflt)
+	fv := new(Mpflt)
 	*fv = *oldv
 
 	// convert large precision literal floating
 	// into limited precision (float64 or float32)
 	switch t.Etype {
 	case TFLOAT64:
-		d = mpgetflt(fv)
+		d := mpgetflt(fv)
 		Mpmovecflt(fv, d)
 
 	case TFLOAT32:
-		d = mpgetflt32(fv)
+		d := mpgetflt32(fv)
 		Mpmovecflt(fv, d)
 	}
 
@@ -56,12 +52,7 @@ func Convlit(np **Node, t *Type) {
  * (if n is a named constant, can't edit n->type directly).
  */
 func convlit1(np **Node, t *Type, explicit bool) {
-	var ct int
-	var et int
-	var n *Node
-	var nn *Node
-
-	n = *np
+	n := *np
 	if n == nil || t == nil || n.Type == nil || isideal(t) || n.Type == t {
 		return
 	}
@@ -70,7 +61,7 @@ func convlit1(np **Node, t *Type, explicit bool) {
 	}
 
 	if n.Op == OLITERAL {
-		nn = Nod(OXXX, nil, nil)
+		nn := Nod(OXXX, nil, nil)
 		*nn = *n
 		n = nn
 		*np = n
@@ -148,7 +139,8 @@ func convlit1(np **Node, t *Type, explicit bool) {
 		return
 	}
 
-	ct = consttype(n)
+	ct := consttype(n)
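+	// et must be declared before the goto below: Go forbids a goto that jumps over a variable declaration.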
+	var et int
 	if ct < 0 {
 		goto bad
 	}
@@ -214,7 +206,7 @@ func convlit1(np **Node, t *Type, explicit bool) {
 		CTRUNE,
 		CTFLT,
 		CTCPLX:
-		ct = int(n.Val.Ctype)
+		ct := int(n.Val.Ctype)
 		if Isint[et] != 0 {
 			switch ct {
 			default:
@@ -285,24 +277,20 @@ bad:
 }
 
 func copyval(v Val) Val {
-	var i *Mpint
-	var f *Mpflt
-	var c *Mpcplx
-
 	switch v.Ctype {
 	case CTINT,
 		CTRUNE:
-		i = new(Mpint)
+		i := new(Mpint)
 		mpmovefixfix(i, v.U.Xval)
 		v.U.Xval = i
 
 	case CTFLT:
-		f = new(Mpflt)
+		f := new(Mpflt)
 		mpmovefltflt(f, v.U.Fval)
 		v.U.Fval = f
 
 	case CTCPLX:
-		c = new(Mpcplx)
+		c := new(Mpcplx)
 		mpmovefltflt(&c.Real, &v.U.Cval.Real)
 		mpmovefltflt(&c.Imag, &v.U.Cval.Imag)
 		v.U.Cval = c
@@ -312,19 +300,17 @@ func copyval(v Val) Val {
 }
 
 func tocplx(v Val) Val {
-	var c *Mpcplx
-
 	switch v.Ctype {
 	case CTINT,
 		CTRUNE:
-		c = new(Mpcplx)
+		c := new(Mpcplx)
 		Mpmovefixflt(&c.Real, v.U.Xval)
 		Mpmovecflt(&c.Imag, 0.0)
 		v.Ctype = CTCPLX
 		v.U.Cval = c
 
 	case CTFLT:
-		c = new(Mpcplx)
+		c := new(Mpcplx)
 		mpmovefltflt(&c.Real, v.U.Fval)
 		Mpmovecflt(&c.Imag, 0.0)
 		v.Ctype = CTCPLX
@@ -335,18 +321,16 @@ func tocplx(v Val) Val {
 }
 
 func toflt(v Val) Val {
-	var f *Mpflt
-
 	switch v.Ctype {
 	case CTINT,
 		CTRUNE:
-		f = new(Mpflt)
+		f := new(Mpflt)
 		Mpmovefixflt(f, v.U.Xval)
 		v.Ctype = CTFLT
 		v.U.Fval = f
 
 	case CTCPLX:
-		f = new(Mpflt)
+		f := new(Mpflt)
 		mpmovefltflt(f, &v.U.Cval.Real)
 		if mpcmpfltc(&v.U.Cval.Imag, 0) != 0 {
 			Yyerror("constant %v%vi truncated to real", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp|obj.FmtSign))
@@ -359,14 +343,12 @@ func toflt(v Val) Val {
 }
 
 func toint(v Val) Val {
-	var i *Mpint
-
 	switch v.Ctype {
 	case CTRUNE:
 		v.Ctype = CTINT
 
 	case CTFLT:
-		i = new(Mpint)
+		i := new(Mpint)
 		if mpmovefltfix(i, v.U.Fval) < 0 {
 			Yyerror("constant %v truncated to integer", Fconv(v.U.Fval, obj.FmtSharp))
 		}
@@ -374,7 +356,7 @@ func toint(v Val) Val {
 		v.U.Xval = i
 
 	case CTCPLX:
-		i = new(Mpint)
+		i := new(Mpint)
 		if mpmovefltfix(i, &v.U.Cval.Real) < 0 {
 			Yyerror("constant %v%vi truncated to integer", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp|obj.FmtSign))
 		}
@@ -444,17 +426,14 @@ func overflow(v Val, t *Type) {
 }
 
 func tostr(v Val) Val {
-	var rune_ uint
-	var s *Strlit
-
 	switch v.Ctype {
 	case CTINT,
 		CTRUNE:
 		if Mpcmpfixfix(v.U.Xval, Minintval[TINT]) < 0 || Mpcmpfixfix(v.U.Xval, Maxintval[TINT]) > 0 {
 			Yyerror("overflow in int -> string")
 		}
-		rune_ = uint(Mpgetfix(v.U.Xval))
-		s = &Strlit{S: string(rune_)}
+		rune_ := uint(Mpgetfix(v.U.Xval))
+		s := &Strlit{S: string(rune_)}
 		v = Val{}
 		v.Ctype = CTSTR
 		v.U.Sval = s
@@ -480,9 +459,7 @@ func consttype(n *Node) int {
 }
 
 func Isconst(n *Node, ct int) bool {
-	var t int
-
-	t = consttype(n)
+	t := consttype(n)
 
 	// If the caller is asking for CTINT, allow CTRUNE too.
 	// Makes life easier for back ends.
@@ -490,11 +467,9 @@ func Isconst(n *Node, ct int) bool {
 }
 
 func saveorig(n *Node) *Node {
-	var n1 *Node
-
 	if n == n.Orig {
 		// duplicate node for n->orig.
-		n1 = Nod(OLITERAL, nil, nil)
+		n1 := Nod(OLITERAL, nil, nil)
 
 		n.Orig = n1
 		*n1 = *n
@@ -507,20 +482,6 @@ func saveorig(n *Node) *Node {
  * if n is constant, rewrite as OLITERAL node.
  */
 func evconst(n *Node) {
-	var nl *Node
-	var nr *Node
-	var norig *Node
-	var str *Strlit
-	var wl int
-	var wr int
-	var lno int
-	var et int
-	var v Val
-	var rv Val
-	var b Mpint
-	var l1 *NodeList
-	var l2 *NodeList
-
 	// pick off just the opcodes that can be
 	// constant evaluated.
 	switch n.Op {
@@ -563,7 +524,11 @@ func evconst(n *Node) {
 
 		// merge adjacent constants in the argument list.
 	case OADDSTR:
-		for l1 = n.List; l1 != nil; l1 = l1.Next {
+		var nr *Node
+		var nl *Node
+		var str *Strlit
+		var l2 *NodeList
+		for l1 := n.List; l1 != nil; l1 = l1.Next {
 			if Isconst(l1.N, CTSTR) && l1.Next != nil && Isconst(l1.Next.N, CTSTR) {
 				// merge from l1 up to but not including l2
 				str = new(Strlit)
@@ -585,7 +550,7 @@ func evconst(n *Node) {
 		}
 
 		// fix list end pointer.
-		for l2 = n.List; l2 != nil; l2 = l2.Next {
+		for l2 := n.List; l2 != nil; l2 = l2.Next {
 			n.List.End = l2
 		}
 
@@ -598,19 +563,24 @@ func evconst(n *Node) {
 		return
 	}
 
-	nl = n.Left
+	nl := n.Left
 	if nl == nil || nl.Type == nil {
 		return
 	}
 	if consttype(nl) < 0 {
 		return
 	}
-	wl = int(nl.Type.Etype)
+	wl := int(nl.Type.Etype)
 	if Isint[wl] != 0 || Isfloat[wl] != 0 || Iscomplex[wl] != 0 {
 		wl = TIDEAL
 	}
 
-	nr = n.Right
+	nr := n.Right
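+	// These stay as plain var declarations placed before the gotos below, since a goto may not jump over a declaration.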
+	var rv Val
+	var lno int
+	var wr int
+	var v Val
+	var norig *Node
 	if nr == nil {
 		goto unary
 	}
@@ -1020,13 +990,14 @@ unary:
 
 	case OCOM<<16 | CTINT,
 		OCOM<<16 | CTRUNE:
-		et = Txxx
+		et := Txxx
 		if nl.Type != nil {
 			et = int(nl.Type.Etype)
 		}
 
 		// calculate the mask in b
 		// result will be (a ^ mask)
+		var b Mpint
 		switch et {
 		// signed guys change sign
 		default:
@@ -1107,9 +1078,7 @@ illegal:
 }
 
 func nodlit(v Val) *Node {
-	var n *Node
-
-	n = Nod(OLITERAL, nil, nil)
+	n := Nod(OLITERAL, nil, nil)
 	n.Val = v
 	switch v.Ctype {
 	default:
@@ -1135,14 +1104,11 @@ func nodlit(v Val) *Node {
 }
 
 func nodcplxlit(r Val, i Val) *Node {
-	var n *Node
-	var c *Mpcplx
-
 	r = toflt(r)
 	i = toflt(i)
 
-	c = new(Mpcplx)
-	n = Nod(OLITERAL, nil, nil)
+	c := new(Mpcplx)
+	n := Nod(OLITERAL, nil, nil)
 	n.Type = Types[TIDEAL]
 	n.Val.U.Cval = c
 	n.Val.Ctype = CTCPLX
@@ -1159,9 +1125,6 @@ func nodcplxlit(r Val, i Val) *Node {
 // idealkind returns a constant kind like consttype
 // but for an arbitrary "ideal" (untyped constant) expression.
 func idealkind(n *Node) int {
-	var k1 int
-	var k2 int
-
 	if n == nil || !isideal(n.Type) {
 		return CTxxx
 	}
@@ -1186,9 +1149,9 @@ func idealkind(n *Node) int {
 		OXOR,
 		OOR,
 		OPLUS:
-		k1 = idealkind(n.Left)
+		k1 := idealkind(n.Left)
 
-		k2 = idealkind(n.Right)
+		k2 := idealkind(n.Right)
 		if k1 > k2 {
 			return k1
 		} else {
@@ -1227,26 +1190,21 @@ func idealkind(n *Node) int {
 }
 
 func defaultlit(np **Node, t *Type) {
-	var lno int
-	var ctype int
-	var n *Node
-	var nn *Node
-	var t1 *Type
-
-	n = *np
+	n := *np
 	if n == nil || !isideal(n.Type) {
 		return
 	}
 
 	if n.Op == OLITERAL {
-		nn = Nod(OXXX, nil, nil)
+		nn := Nod(OXXX, nil, nil)
 		*nn = *n
 		n = nn
 		*np = n
 	}
 
-	lno = int(setlineno(n))
-	ctype = idealkind(n)
+	lno := int(setlineno(n))
+	ctype := idealkind(n)
+	var t1 *Type
 	switch ctype {
 	default:
 		if t != nil {
@@ -1266,7 +1224,7 @@ func defaultlit(np **Node, t *Type) {
 		}
 
 		if n.Val.Ctype == CTSTR {
-			t1 = Types[TSTRING]
+			t1 := Types[TSTRING]
 			Convlit(np, t1)
 			break
 		}
@@ -1277,7 +1235,7 @@ func defaultlit(np **Node, t *Type) {
 		Fatal("defaultlit: idealkind is CTxxx: %v", Nconv(n, obj.FmtSign))
 
 	case CTBOOL:
-		t1 = Types[TBOOL]
+		t1 := Types[TBOOL]
 		if t != nil && t.Etype == TBOOL {
 			t1 = t
 		}
@@ -1330,13 +1288,8 @@ num:
  * force means must assign concrete (non-ideal) type.
  */
 func defaultlit2(lp **Node, rp **Node, force int) {
-	var l *Node
-	var r *Node
-	var lkind int
-	var rkind int
-
-	l = *lp
-	r = *rp
+	l := *lp
+	r := *rp
 	if l.Type == nil || r.Type == nil {
 		return
 	}
@@ -1358,8 +1311,8 @@ func defaultlit2(lp **Node, rp **Node, force int) {
 		Convlit(rp, Types[TBOOL])
 	}
 
-	lkind = idealkind(l)
-	rkind = idealkind(r)
+	lkind := idealkind(l)
+	rkind := idealkind(r)
 	if lkind == CTCPLX || rkind == CTCPLX {
 		Convlit(lp, Types[TCOMPLEX128])
 		Convlit(rp, Types[TCOMPLEX128])
@@ -1473,10 +1426,7 @@ func iconv(x int64, et int) int64 {
  * for back end.
  */
 func Convconst(con *Node, t *Type, val *Val) {
-	var i int64
-	var tt int
-
-	tt = Simsimtype(t)
+	tt := Simsimtype(t)
 
 	// copy the constant for conversion
 	Nodconst(con, Types[TINT8], 0)
@@ -1487,6 +1437,7 @@ func Convconst(con *Node, t *Type, val *Val) {
 	if Isint[tt] != 0 {
 		con.Val.Ctype = CTINT
 		con.Val.U.Xval = new(Mpint)
+		var i int64
 		switch val.Ctype {
 		default:
 			Fatal("convconst ctype=%d %v", val.Ctype, Tconv(t, obj.FmtLong))
@@ -1614,9 +1565,6 @@ func cmplxdiv(v *Mpcplx, rv *Mpcplx) {
 // Only called for expressions known to evaluate to compile-time
 // constants.
 func isgoconst(n *Node) bool {
-	var l *Node
-	var t *Type
-
 	if n.Orig != nil {
 		n = n.Orig
 	}
@@ -1661,7 +1609,7 @@ func isgoconst(n *Node) bool {
 
 	case OLEN,
 		OCAP:
-		l = n.Left
+		l := n.Left
 		if isgoconst(l) {
 			return true
 		}
@@ -1669,7 +1617,7 @@ func isgoconst(n *Node) bool {
 		// Special case: len/cap is constant when applied to array or
 		// pointer to array when the expression does not contain
 		// function calls or channel receive operations.
-		t = l.Type
+		t := l.Type
 
 		if t != nil && Isptr[t.Etype] != 0 {
 			t = t.Type
@@ -1684,7 +1632,7 @@ func isgoconst(n *Node) bool {
 		}
 
 	case ONAME:
-		l = n.Sym.Def
+		l := n.Sym.Def
 		if l != nil && l.Op == OLITERAL && n.Val.Ctype != CTNIL {
 			return true
 		}
@@ -1696,7 +1644,7 @@ func isgoconst(n *Node) bool {
 
 		// Only constant calls are unsafe.Alignof, Offsetof, and Sizeof.
 	case OCALL:
-		l = n.Left
+		l := n.Left
 
 		for l.Op == OPAREN {
 			l = l.Left
@@ -1714,8 +1662,6 @@ func isgoconst(n *Node) bool {
 }
 
 func hascallchan(n *Node) bool {
-	var l *NodeList
-
 	if n == nil {
 		return false
 	}
@@ -1747,12 +1693,12 @@ func hascallchan(n *Node) bool {
 		return true
 	}
 
-	for l = n.List; l != nil; l = l.Next {
+	for l := n.List; l != nil; l = l.Next {
 		if hascallchan(l.N) {
 			return true
 		}
 	}
-	for l = n.Rlist; l != nil; l = l.Next {
+	for l := n.Rlist; l != nil; l = l.Next {
 		if hascallchan(l.N) {
 			return true
 		}
diff --git a/src/cmd/internal/gc/cplx.go b/src/cmd/internal/gc/cplx.go
index 34decd1421392e064190b4331550ea0615f640e0..c07ba346335cf5cfa9d19b52a75684b1e33232e9 100644
--- a/src/cmd/internal/gc/cplx.go
+++ b/src/cmd/internal/gc/cplx.go
@@ -20,14 +20,6 @@ func overlap_cplx(f *Node, t *Node) bool {
 
 func Complexbool(op int, nl *Node, nr *Node, true_ bool, likely int, to *obj.Prog) {
 	var tnl Node
-	var tnr Node
-	var n1 Node
-	var n2 Node
-	var n3 Node
-	var n4 Node
-	var na Node
-	var nb Node
-	var nc Node
 
 	// make both sides addable in ullman order
 	if nr != nil {
@@ -38,6 +30,7 @@ func Complexbool(op int, nl *Node, nr *Node, true_ bool, likely int, to *obj.Pro
 		}
 
 		if nr.Addable == 0 {
+			var tnr Node
 			Tempname(&tnr, nr.Type)
 			Thearch.Cgen(nr, &tnr)
 			nr = &tnr
@@ -53,13 +46,19 @@ func Complexbool(op int, nl *Node, nr *Node, true_ bool, likely int, to *obj.Pro
 	// build tree
 	// real(l) == real(r) && imag(l) == imag(r)
 
+	var n2 Node
+	var n1 Node
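+	// subnode splits an addable complex value into its real and imaginary parts through these out parameters.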
 	subnode(&n1, &n2, nl)
 
+	var n3 Node
+	var n4 Node
 	subnode(&n3, &n4, nr)
 
-	na = Node{}
+	na := Node{}
 	na.Op = OANDAND
+	var nb Node
 	na.Left = &nb
+	var nc Node
 	na.Right = &nc
 	na.Type = Types[TBOOL]
 
@@ -84,16 +83,13 @@ func Complexbool(op int, nl *Node, nr *Node, true_ bool, likely int, to *obj.Pro
 
 // break addable nc-complex into nr-real and ni-imaginary
 func subnode(nr *Node, ni *Node, nc *Node) {
-	var tc int
-	var t *Type
-
 	if nc.Addable == 0 {
 		Fatal("subnode not addable")
 	}
 
-	tc = Simsimtype(nc.Type)
+	tc := Simsimtype(nc.Type)
 	tc = cplxsubtype(tc)
-	t = Types[tc]
+	t := Types[tc]
 
 	if nc.Op == OLITERAL {
 		nodfconst(nr, t, &nc.Val.U.Cval.Real)
@@ -111,9 +107,7 @@ func subnode(nr *Node, ni *Node, nc *Node) {
 
 // generate code res = -nl
 func minus(nl *Node, res *Node) {
-	var ra Node
-
-	ra = Node{}
+	ra := Node{}
 	ra.Op = OMINUS
 	ra.Left = nl
 	ra.Type = nl.Type
@@ -146,13 +140,12 @@ func complexadd(op int, nl *Node, nr *Node, res *Node) {
 	var n4 Node
 	var n5 Node
 	var n6 Node
-	var ra Node
 
 	subnode(&n1, &n2, nl)
 	subnode(&n3, &n4, nr)
 	subnode(&n5, &n6, res)
 
-	ra = Node{}
+	ra := Node{}
 	ra.Op = uint8(op)
 	ra.Left = &n1
 	ra.Right = &n3
@@ -178,9 +171,6 @@ func complexmul(nl *Node, nr *Node, res *Node) {
 	var n4 Node
 	var n5 Node
 	var n6 Node
-	var rm1 Node
-	var rm2 Node
-	var ra Node
 	var tmp Node
 
 	subnode(&n1, &n2, nl)
@@ -189,20 +179,20 @@ func complexmul(nl *Node, nr *Node, res *Node) {
 	Tempname(&tmp, n5.Type)
 
 	// real part -> tmp
-	rm1 = Node{}
+	rm1 := Node{}
 
 	rm1.Op = OMUL
 	rm1.Left = &n1
 	rm1.Right = &n3
 	rm1.Type = n1.Type
 
-	rm2 = Node{}
+	rm2 := Node{}
 	rm2.Op = OMUL
 	rm2.Left = &n2
 	rm2.Right = &n4
 	rm2.Type = n2.Type
 
-	ra = Node{}
+	ra := Node{}
 	ra.Op = OSUB
 	ra.Left = &rm1
 	ra.Right = &rm2
@@ -300,14 +290,6 @@ yes:
 }
 
 func Complexmove(f *Node, t *Node) {
-	var ft int
-	var tt int
-	var n1 Node
-	var n2 Node
-	var n3 Node
-	var n4 Node
-	var tmp Node
-
 	if Debug['g'] != 0 {
 		Dump("\ncomplexmove-f", f)
 		Dump("complexmove-t", t)
@@ -317,8 +299,8 @@ func Complexmove(f *Node, t *Node) {
 		Fatal("complexmove: to not addable")
 	}
 
-	ft = Simsimtype(f.Type)
-	tt = Simsimtype(t.Type)
+	ft := Simsimtype(f.Type)
+	tt := Simsimtype(t.Type)
 	switch uint32(ft)<<16 | uint32(tt) {
 	default:
 		Fatal("complexmove: unknown conversion: %v -> %v\n", Tconv(f.Type, 0), Tconv(t.Type, 0))
@@ -331,12 +313,17 @@ func Complexmove(f *Node, t *Node) {
 		TCOMPLEX128<<16 | TCOMPLEX64,
 		TCOMPLEX128<<16 | TCOMPLEX128:
 		if f.Addable == 0 || overlap_cplx(f, t) {
+			var tmp Node
 			Tempname(&tmp, f.Type)
 			Complexmove(f, &tmp)
 			f = &tmp
 		}
 
+		var n1 Node
+		var n2 Node
 		subnode(&n1, &n2, f)
+		var n4 Node
+		var n3 Node
 		subnode(&n3, &n4, t)
 
 		Thearch.Cgen(&n1, &n3)
@@ -345,16 +332,6 @@ func Complexmove(f *Node, t *Node) {
 }
 
 func Complexgen(n *Node, res *Node) {
-	var nl *Node
-	var nr *Node
-	var tnl Node
-	var tnr Node
-	var n1 Node
-	var n2 Node
-	var tmp Node
-	var tl int
-	var tr int
-
 	if Debug['g'] != 0 {
 		Dump("\ncomplexgen-n", n)
 		Dump("complexgen-res", res)
@@ -368,7 +345,10 @@ func Complexgen(n *Node, res *Node) {
 	switch n.Op {
 	case OCOMPLEX:
 		if res.Addable != 0 {
+			var n1 Node
+			var n2 Node
 			subnode(&n1, &n2, res)
+			var tmp Node
 			Tempname(&tmp, n1.Type)
 			Thearch.Cgen(n.Left, &tmp)
 			Thearch.Cgen(n.Right, &n2)
@@ -378,13 +358,16 @@ func Complexgen(n *Node, res *Node) {
 
 	case OREAL,
 		OIMAG:
-		nl = n.Left
+		nl := n.Left
 		if nl.Addable == 0 {
+			var tmp Node
 			Tempname(&tmp, nl.Type)
 			Complexgen(nl, &tmp)
 			nl = &tmp
 		}
 
+		var n1 Node
+		var n2 Node
 		subnode(&n1, &n2, nl)
 		if n.Op == OREAL {
 			Thearch.Cgen(&n1, res)
@@ -396,13 +379,14 @@ func Complexgen(n *Node, res *Node) {
 	}
 
 	// perform conversion from n to res
-	tl = Simsimtype(res.Type)
+	tl := Simsimtype(res.Type)
 
 	tl = cplxsubtype(tl)
-	tr = Simsimtype(n.Type)
+	tr := Simsimtype(n.Type)
 	tr = cplxsubtype(tr)
 	if tl != tr {
 		if n.Addable == 0 {
+			var n1 Node
 			Tempname(&n1, n.Type)
 			Complexmove(n, &n1)
 			n = &n1
@@ -413,6 +397,7 @@ func Complexgen(n *Node, res *Node) {
 	}
 
 	if res.Addable == 0 {
+		var n1 Node
 		Thearch.Igen(res, &n1, nil)
 		Thearch.Cgen(n, &n1)
 		Thearch.Regfree(&n1)
@@ -437,6 +422,7 @@ func Complexgen(n *Node, res *Node) {
 		OCALLFUNC,
 		OCALLMETH,
 		OCALLINTER:
+		var n1 Node
 		Thearch.Igen(n, &n1, res)
 
 		Complexmove(&n1, res)
@@ -454,13 +440,14 @@ func Complexgen(n *Node, res *Node) {
 		break
 	}
 
-	nl = n.Left
+	nl := n.Left
 	if nl == nil {
 		return
 	}
-	nr = n.Right
+	nr := n.Right
 
 	// make both sides addable in ullman order
+	var tnl Node
 	if nr != nil {
 		if nl.Ullman > nr.Ullman && nl.Addable == 0 {
 			Tempname(&tnl, nl.Type)
@@ -469,6 +456,7 @@ func Complexgen(n *Node, res *Node) {
 		}
 
 		if nr.Addable == 0 {
+			var tnr Node
 			Tempname(&tnr, nr.Type)
 			Thearch.Cgen(nr, &tnr)
 			nr = &tnr
diff --git a/src/cmd/internal/gc/dcl.go b/src/cmd/internal/gc/dcl.go
index 577f7ec791e7243a1bf61aced4aed812e5d32003..ceececd01fbbab813dc620efe8ee60d812f9ed59 100644
--- a/src/cmd/internal/gc/dcl.go
+++ b/src/cmd/internal/gc/dcl.go
@@ -35,9 +35,7 @@ func dcopy(a *Sym, b *Sym) {
 }
 
 func push() *Sym {
-	var d *Sym
-
-	d = new(Sym)
+	d := new(Sym)
 	d.Lastlineno = lineno
 	d.Link = dclstack
 	dclstack = d
@@ -45,9 +43,7 @@ func push() *Sym {
 }
 
 func pushdcl(s *Sym) *Sym {
-	var d *Sym
-
-	d = push()
+	d := push()
 	dcopy(d, s)
 	if dflag() {
 		fmt.Printf("\t%v push %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), s.Def)
@@ -94,9 +90,7 @@ func poptodcl() {
 }
 
 func markdcl() {
-	var d *Sym
-
-	d = push()
+	d := push()
 	d.Name = "" // used as a mark in fifo
 	d.Block = block
 
@@ -108,11 +102,9 @@ func markdcl() {
 //		print("markdcl\n");
 func dumpdcl(st string) {
 	var s *Sym
-	var d *Sym
-	var i int
 
-	i = 0
-	for d = dclstack; d != nil; d = d.Link {
+	i := 0
+	for d := dclstack; d != nil; d = d.Link {
 		i++
 		fmt.Printf("    %.2d %p", i, d)
 		if d.Name == "" {
@@ -127,9 +119,7 @@ func dumpdcl(st string) {
 }
 
 func testdclstack() {
-	var d *Sym
-
-	for d = dclstack; d != nil; d = d.Link {
+	for d := dclstack; d != nil; d = d.Link {
 		if d.Name == "" {
 			if nerrors != 0 {
 				errorexit()
@@ -141,10 +131,6 @@ func testdclstack() {
 }
 
 func redeclare(s *Sym, where string) {
-	var pkgstr *Strlit
-	var line1 int
-	var line2 int
-
 	if s.Lastlineno == 0 {
 		var tmp *Strlit
 		if s.Origpkg != nil {
@@ -152,11 +138,11 @@ func redeclare(s *Sym, where string) {
 		} else {
 			tmp = s.Pkg.Path
 		}
-		pkgstr = tmp
+		pkgstr := tmp
 		Yyerror("%v redeclared %s\n"+"\tprevious declaration during import \"%v\"", Sconv(s, 0), where, Zconv(pkgstr, 0))
 	} else {
-		line1 = parserline()
-		line2 = int(s.Lastlineno)
+		line1 := parserline()
+		line2 := int(s.Lastlineno)
 
 		// When an import and a declaration collide in separate files,
 		// present the import as the "redeclared", because the declaration
@@ -180,9 +166,6 @@ var vargen int
 var declare_typegen int
 
 func declare(n *Node, ctxt int) {
-	var s *Sym
-	var gen int
-
 	if ctxt == PDISCARD {
 		return
 	}
@@ -192,7 +175,7 @@ func declare(n *Node, ctxt int) {
 	}
 
 	n.Lineno = int32(parserline())
-	s = n.Sym
+	s := n.Sym
 
 	// kludgy: typecheckok means we're past parsing.  Eg genwrapper may declare out of package names later.
 	if importpkg == nil && typecheckok == 0 && s.Pkg != localpkg {
@@ -203,7 +186,7 @@ func declare(n *Node, ctxt int) {
 		Yyerror("cannot declare init - must be func", s)
 	}
 
-	gen = 0
+	gen := 0
 	if ctxt == PEXTERN {
 		externdcl = list(externdcl, n)
 		if dflag() {
@@ -264,20 +247,15 @@ func addvar(n *Node, t *Type, ctxt int) {
  * new_name_list (type | [type] = expr_list)
  */
 func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
-	var doexpr bool
-	var v *Node
-	var e *Node
-	var as2 *Node
-	var init *NodeList
-
-	init = nil
-	doexpr = el != nil
+	init := (*NodeList)(nil)
+	doexpr := el != nil
 
 	if count(el) == 1 && count(vl) > 1 {
-		e = el.N
-		as2 = Nod(OAS2, nil, nil)
+		e := el.N
+		as2 := Nod(OAS2, nil, nil)
 		as2.List = vl
 		as2.Rlist = list1(e)
+		var v *Node
 		for ; vl != nil; vl = vl.Next {
 			v = vl.N
 			v.Op = ONAME
@@ -292,6 +270,8 @@ func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
 		return list(init, as2)
 	}
 
+	var v *Node
+	var e *Node
 	for ; vl != nil; vl = vl.Next {
 		if doexpr {
 			if el == nil {
@@ -333,11 +313,7 @@ func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
  * new_name_list [[type] = expr_list]
  */
 func constiter(vl *NodeList, t *Node, cl *NodeList) *NodeList {
-	var v *Node
-	var c *Node
-	var vv *NodeList
-
-	vv = nil
+	vv := (*NodeList)(nil)
 	if cl == nil {
 		if t != nil {
 			Yyerror("const declaration cannot have type without expression")
@@ -351,6 +327,8 @@ func constiter(vl *NodeList, t *Node, cl *NodeList) *NodeList {
 
 	cl = listtreecopy(cl)
 
+	var v *Node
+	var c *Node
 	for ; vl != nil; vl = vl.Next {
 		if cl == nil {
 			Yyerror("missing value in const declaration")
@@ -382,13 +360,11 @@ func constiter(vl *NodeList, t *Node, cl *NodeList) *NodeList {
  * typically for labels or other one-off names.
  */
 func newname(s *Sym) *Node {
-	var n *Node
-
 	if s == nil {
 		Fatal("newname nil")
 	}
 
-	n = Nod(ONAME, nil, nil)
+	n := Nod(ONAME, nil, nil)
 	n.Sym = s
 	n.Type = nil
 	n.Addable = 1
@@ -402,9 +378,7 @@ func newname(s *Sym) *Node {
  * being declared.
  */
 func dclname(s *Sym) *Node {
-	var n *Node
-
-	n = newname(s)
+	n := newname(s)
 	n.Op = ONONAME // caller will correct it
 	return n
 }
@@ -429,10 +403,7 @@ func typenod(t *Type) *Node {
  * generated if no name has been defined.
  */
 func oldname(s *Sym) *Node {
-	var n *Node
-	var c *Node
-
-	n = s.Def
+	n := s.Def
 	if n == nil {
 		// maybe a top-level name will come along
 		// to give this a definition later.
@@ -453,7 +424,7 @@ func oldname(s *Sym) *Node {
 		// make x a closure variable unnecessarily.
 		if n.Closure == nil || n.Closure.Funcdepth != Funcdepth {
 			// create new closure var.
-			c = Nod(ONAME, nil, nil)
+			c := Nod(ONAME, nil, nil)
 
 			c.Sym = s
 			c.Class = PPARAMREF
@@ -493,20 +464,16 @@ func colasname(n *Node) bool {
 }
 
 func colasdefn(left *NodeList, defn *Node) {
-	var nnew int
-	var nerr int
-	var l *NodeList
-	var n *Node
-
-	for l = left; l != nil; l = l.Next {
+	for l := left; l != nil; l = l.Next {
 		if l.N.Sym != nil {
 			l.N.Sym.Flags |= SymUniq
 		}
 	}
 
-	nnew = 0
-	nerr = 0
-	for l = left; l != nil; l = l.Next {
+	nnew := 0
+	nerr := 0
+	var n *Node
+	for l := left; l != nil; l = l.Next {
 		n = l.N
 		if isblank(n) {
 			continue
@@ -543,9 +510,7 @@ func colasdefn(left *NodeList, defn *Node) {
 }
 
 func colas(left *NodeList, right *NodeList, lno int32) *Node {
-	var as *Node
-
-	as = Nod(OAS2, nil, nil)
+	as := Nod(OAS2, nil, nil)
 	as.List = left
 	as.Rlist = right
 	as.Colas = 1
@@ -622,11 +587,6 @@ func funchdr(n *Node) {
 }
 
 func funcargs(nt *Node) {
-	var n *Node
-	var nn *Node
-	var l *NodeList
-	var gen int
-
 	if nt.Op != OTFUNC {
 		Fatal("funcargs %v", Oconv(int(nt.Op), 0))
 	}
@@ -640,7 +600,7 @@ func funcargs(nt *Node) {
 	// no n->defn because type checking of func header
 	// will not fill in the types until later
 	if nt.Left != nil {
-		n = nt.Left
+		n := nt.Left
 		if n.Op != ODCLFIELD {
 			Fatal("funcargs receiver %v", Oconv(int(n.Op), 0))
 		}
@@ -655,7 +615,8 @@ func funcargs(nt *Node) {
 		}
 	}
 
-	for l = nt.List; l != nil; l = l.Next {
+	var n *Node
+	for l := nt.List; l != nil; l = l.Next {
 		n = l.N
 		if n.Op != ODCLFIELD {
 			Fatal("funcargs in %v", Oconv(int(n.Op), 0))
@@ -672,9 +633,10 @@ func funcargs(nt *Node) {
 	}
 
 	// declare the out arguments.
-	gen = count(nt.List)
+	gen := count(nt.List)
 	var i int = 0
-	for l = nt.Rlist; l != nil; l = l.Next {
+	var nn *Node
+	for l := nt.Rlist; l != nil; l = l.Next {
 		n = l.N
 
 		if n.Op != ODCLFIELD {
@@ -726,15 +688,13 @@ func funcargs(nt *Node) {
  * used functype directly to parse the function's type.
  */
 func funcargs2(t *Type) {
-	var ft *Type
-	var n *Node
-
 	if t.Etype != TFUNC {
 		Fatal("funcargs2 %v", Tconv(t, 0))
 	}
 
 	if t.Thistuple != 0 {
-		for ft = getthisx(t).Type; ft != nil; ft = ft.Down {
+		var n *Node
+		for ft := getthisx(t).Type; ft != nil; ft = ft.Down {
 			if ft.Nname == nil || ft.Nname.Sym == nil {
 				continue
 			}
@@ -745,7 +705,8 @@ func funcargs2(t *Type) {
 	}
 
 	if t.Intuple != 0 {
-		for ft = getinargx(t).Type; ft != nil; ft = ft.Down {
+		var n *Node
+		for ft := getinargx(t).Type; ft != nil; ft = ft.Down {
 			if ft.Nname == nil || ft.Nname.Sym == nil {
 				continue
 			}
@@ -756,7 +717,8 @@ func funcargs2(t *Type) {
 	}
 
 	if t.Outtuple != 0 {
-		for ft = getoutargx(t).Type; ft != nil; ft = ft.Down {
+		var n *Node
+		for ft := getoutargx(t).Type; ft != nil; ft = ft.Down {
 			if ft.Nname == nil || ft.Nname.Sym == nil {
 				continue
 			}
@@ -790,9 +752,7 @@ func funcbody(n *Node) {
  * new type being defined with name s.
  */
 func typedcl0(s *Sym) *Node {
-	var n *Node
-
-	n = newname(s)
+	n := newname(s)
 	n.Op = OTYPE
 	declare(n, dclcontext)
 	return n
@@ -833,17 +793,14 @@ func checkembeddedtype(t *Type) {
 }
 
 func structfield(n *Node) *Type {
-	var f *Type
-	var lno int
-
-	lno = int(lineno)
+	lno := int(lineno)
 	lineno = n.Lineno
 
 	if n.Op != ODCLFIELD {
 		Fatal("structfield: oops %v\n", Nconv(n, 0))
 	}
 
-	f = typ(TFIELD)
+	f := typ(TFIELD)
 	f.Isddd = n.Isddd
 
 	if n.Right != nil {
@@ -890,9 +847,7 @@ func structfield(n *Node) *Type {
 var uniqgen uint32
 
 func checkdupfields(t *Type, what string) {
-	var lno int
-
-	lno = int(lineno)
+	lno := int(lineno)
 
 	for ; t != nil; t = t.Down {
 		if t.Sym != nil && t.Nname != nil && !isblank(t.Nname) {
@@ -913,19 +868,17 @@ func checkdupfields(t *Type, what string) {
  * a type for struct/interface/arglist
  */
 func tostruct(l *NodeList) *Type {
-	var t *Type
 	var f *Type
-	var tp **Type
-	t = typ(TSTRUCT)
+	t := typ(TSTRUCT)
 
-	for tp = &t.Type; l != nil; l = l.Next {
+	for tp := &t.Type; l != nil; l = l.Next {
 		f = structfield(l.N)
 
 		*tp = f
 		tp = &f.Down
 	}
 
-	for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
+	for f := t.Type; f != nil && t.Broke == 0; f = f.Down {
 		if f.Broke != 0 {
 			t.Broke = 1
 		}
@@ -942,14 +895,12 @@ func tostruct(l *NodeList) *Type {
 }
 
 func tofunargs(l *NodeList) *Type {
-	var t *Type
 	var f *Type
-	var tp **Type
 
-	t = typ(TSTRUCT)
+	t := typ(TSTRUCT)
 	t.Funarg = 1
 
-	for tp = &t.Type; l != nil; l = l.Next {
+	for tp := &t.Type; l != nil; l = l.Next {
 		f = structfield(l.N)
 		f.Funarg = 1
 
@@ -962,7 +913,7 @@ func tofunargs(l *NodeList) *Type {
 		tp = &f.Down
 	}
 
-	for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
+	for f := t.Type; f != nil && t.Broke == 0; f = f.Down {
 		if f.Broke != 0 {
 			t.Broke = 1
 		}
@@ -972,10 +923,7 @@ func tofunargs(l *NodeList) *Type {
 }
 
 func interfacefield(n *Node) *Type {
-	var f *Type
-	var lno int
-
-	lno = int(lineno)
+	lno := int(lineno)
 	lineno = n.Lineno
 
 	if n.Op != ODCLFIELD {
@@ -986,7 +934,7 @@ func interfacefield(n *Node) *Type {
 		Yyerror("interface method cannot have annotation")
 	}
 
-	f = typ(TFIELD)
+	f := typ(TFIELD)
 	f.Isddd = n.Isddd
 
 	if n.Right != nil {
@@ -1042,14 +990,12 @@ func interfacefield(n *Node) *Type {
 }
 
 func tointerface(l *NodeList) *Type {
-	var t *Type
 	var f *Type
-	var tp **Type
 	var t1 *Type
 
-	t = typ(TINTER)
+	t := typ(TINTER)
 
-	tp = &t.Type
+	tp := &t.Type
 	for ; l != nil; l = l.Next {
 		f = interfacefield(l.N)
 
@@ -1072,7 +1018,7 @@ func tointerface(l *NodeList) *Type {
 		}
 	}
 
-	for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
+	for f := t.Type; f != nil && t.Broke == 0; f = f.Down {
 		if f.Broke != 0 {
 			t.Broke = 1
 		}
@@ -1087,20 +1033,19 @@ func tointerface(l *NodeList) *Type {
 }
 
 func embedded(s *Sym, pkg *Pkg) *Node {
-	var n *Node
-	var name string
 	const (
 		CenterDot = 0xB7
 	)
 	// Names sometimes have disambiguation junk
 	// appended after a center dot.  Discard it when
 	// making the name for the embedded struct field.
-	name = s.Name
+	name := s.Name
 
 	if i := strings.Index(s.Name, string(CenterDot)); i >= 0 {
 		name = s.Name[:i]
 	}
 
+	var n *Node
 	if exportname(name) {
 		n = newname(Lookup(name))
 	} else if s.Pkg == builtinpkg {
@@ -1127,14 +1072,8 @@ func findtype(l *NodeList) *Node {
 }
 
 func checkarglist(all *NodeList, input int) *NodeList {
-	var named int
-	var n *Node
-	var t *Node
-	var nextt *Node
-	var l *NodeList
-
-	named = 0
-	for l = all; l != nil; l = l.Next {
+	named := 0
+	for l := all; l != nil; l = l.Next {
 		if l.N.Op == OKEY {
 			named = 1
 			break
@@ -1142,7 +1081,8 @@ func checkarglist(all *NodeList, input int) *NodeList {
 	}
 
 	if named != 0 {
-		n = nil
+		n := (*Node)(nil)
+		var l *NodeList
 		for l = all; l != nil; l = l.Next {
 			n = l.N
 			if n.Op != OKEY && n.Sym == nil {
@@ -1156,8 +1096,10 @@ func checkarglist(all *NodeList, input int) *NodeList {
 		}
 	}
 
-	nextt = nil
-	for l = all; l != nil; l = l.Next {
+	nextt := (*Node)(nil)
+	var t *Node
+	var n *Node
+	for l := all; l != nil; l = l.Next {
 		// can cache result from findtype to avoid
 		// quadratic behavior here, but unlikely to matter.
 		n = l.N
@@ -1220,9 +1162,7 @@ func checkarglist(all *NodeList, input int) *NodeList {
 }
 
 func fakethis() *Node {
-	var n *Node
-
-	n = Nod(ODCLFIELD, nil, typenod(Ptrto(typ(TSTRUCT))))
+	n := Nod(ODCLFIELD, nil, typenod(Ptrto(typ(TSTRUCT))))
 	return n
 }
 
@@ -1233,14 +1173,11 @@ func fakethis() *Node {
  * (See fakethis above.)
  */
 func isifacemethod(f *Type) bool {
-	var rcvr *Type
-	var t *Type
-
-	rcvr = getthisx(f).Type
+	rcvr := getthisx(f).Type
 	if rcvr.Sym != nil {
 		return false
 	}
-	t = rcvr.Type
+	t := rcvr.Type
 	if Isptr[t.Etype] == 0 {
 		return false
 	}
@@ -1256,13 +1193,9 @@ func isifacemethod(f *Type) bool {
  * into a type
  */
 func functype(this *Node, in *NodeList, out *NodeList) *Type {
-	var t *Type
-	var rcvr *NodeList
-	var s *Sym
+	t := typ(TFUNC)
 
-	t = typ(TFUNC)
-
-	rcvr = nil
+	rcvr := (*NodeList)(nil)
 	if this != nil {
 		rcvr = list1(this)
 	}
@@ -1286,7 +1219,7 @@ func functype(this *Node, in *NodeList, out *NodeList) *Type {
 	t.Intuple = count(in)
 	t.Outnamed = 0
 	if t.Outtuple > 0 && out.N.Left != nil && out.N.Left.Orig != nil {
-		s = out.N.Left.Orig.Sym
+		s := out.N.Left.Orig.Sym
 		if s != nil && (s.Name[0] != '~' || s.Name[1] != 'r') { // ~r%d is the name invented for an unnamed result
 			t.Outnamed = 1
 		}
@@ -1300,11 +1233,10 @@ var methodsym_toppkg *Pkg
 func methodsym(nsym *Sym, t0 *Type, iface int) *Sym {
 	var s *Sym
 	var p string
-	var t *Type
 	var suffix string
 	var spkg *Pkg
 
-	t = t0
+	t := t0
 	if t == nil {
 		goto bad
 	}
@@ -1367,9 +1299,7 @@ bad:
 }
 
 func methodname(n *Node, t *Type) *Node {
-	var s *Sym
-
-	s = methodsym(n.Sym, t, 0)
+	s := methodsym(n.Sym, t, 0)
 	if s == nil {
 		return n
 	}
@@ -1377,10 +1307,7 @@ func methodname(n *Node, t *Type) *Node {
 }
 
 func methodname1(n *Node, t *Node) *Node {
-	var star string
-	var p string
-
-	star = ""
+	star := ""
 	if t.Op == OIND {
 		star = "*"
 		t = t.Left
@@ -1390,6 +1317,7 @@ func methodname1(n *Node, t *Node) *Node {
 		return newname(n.Sym)
 	}
 
+	var p string
 	if star != "" {
 		p = fmt.Sprintf("(%s%v).%v", star, Sconv(t.Sym, 0), Sconv(n.Sym, 0))
 	} else {
@@ -1410,25 +1338,20 @@ func methodname1(n *Node, t *Node) *Node {
  * n is fieldname, pa is base type, t is function type
  */
 func addmethod(sf *Sym, t *Type, local bool, nointerface bool) {
-	var f *Type
-	var d *Type
-	var pa *Type
-	var n *Node
-
 	// get field sym
 	if sf == nil {
 		Fatal("no method symbol")
 	}
 
 	// get parent type sym
-	pa = getthisx(t).Type // ptr to this structure
+	pa := getthisx(t).Type // ptr to this structure
 	if pa == nil {
 		Yyerror("missing receiver")
 		return
 	}
 
 	pa = pa.Type
-	f = methtype(pa, 1)
+	f := methtype(pa, 1)
 	if f == nil {
 		t = pa
 		if t == nil { // rely on typecheck having complained before
@@ -1472,7 +1395,7 @@ func addmethod(sf *Sym, t *Type, local bool, nointerface bool) {
 
 	pa = f
 	if pa.Etype == TSTRUCT {
-		for f = pa.Type; f != nil; f = f.Down {
+		for f := pa.Type; f != nil; f = f.Down {
 			if f.Sym == sf {
 				Yyerror("type %v has both field and method named %v", Tconv(pa, 0), Sconv(sf, 0))
 				return
@@ -1487,11 +1410,11 @@ func addmethod(sf *Sym, t *Type, local bool, nointerface bool) {
 		return
 	}
 
-	n = Nod(ODCLFIELD, newname(sf), nil)
+	n := Nod(ODCLFIELD, newname(sf), nil)
 	n.Type = t
 
-	d = nil // last found
-	for f = pa.Method; f != nil; f = f.Down {
+	d := (*Type)(nil) // last found
+	for f := pa.Method; f != nil; f = f.Down {
 		d = f
 		if f.Etype != TFIELD {
 			Fatal("addmethod: not TFIELD: %v", Tconv(f, obj.FmtLong))
@@ -1549,11 +1472,8 @@ func funccompile(n *Node) {
 }
 
 func funcsym(s *Sym) *Sym {
-	var p string
-	var s1 *Sym
-
-	p = fmt.Sprintf("%s·f", s.Name)
-	s1 = Pkglookup(p, s.Pkg)
+	p := fmt.Sprintf("%s·f", s.Name)
+	s1 := Pkglookup(p, s.Pkg)
 
 	if s1.Def == nil {
 		s1.Def = newname(s1)
diff --git a/src/cmd/internal/gc/esc.go b/src/cmd/internal/gc/esc.go
index f4d5b436a4f4dc0b05f6e75c3f6256db71568313..697ca6770b3813e00498251c577aadf75ec46f8e 100644
--- a/src/cmd/internal/gc/esc.go
+++ b/src/cmd/internal/gc/esc.go
@@ -49,30 +49,23 @@ const (
 )
 
 func escapes(all *NodeList) {
-	var l *NodeList
-
-	for l = all; l != nil; l = l.Next {
+	for l := all; l != nil; l = l.Next {
 		l.N.Walkgen = 0
 	}
 
 	visitgen = 0
-	for l = all; l != nil; l = l.Next {
+	for l := all; l != nil; l = l.Next {
 		if l.N.Op == ODCLFUNC && l.N.Curfn == nil {
 			visit(l.N)
 		}
 	}
 
-	for l = all; l != nil; l = l.Next {
+	for l := all; l != nil; l = l.Next {
 		l.N.Walkgen = 0
 	}
 }
 
 func visit(n *Node) uint32 {
-	var min uint32
-	var recursive bool
-	var l *NodeList
-	var block *NodeList
-
 	if n.Walkgen > 0 {
 		// already visited
 		return n.Walkgen
@@ -81,9 +74,9 @@ func visit(n *Node) uint32 {
 	visitgen++
 	n.Walkgen = visitgen
 	visitgen++
-	min = visitgen
+	min := visitgen
 
-	l = new(NodeList)
+	l := new(NodeList)
 	l.Next = stack
 	l.N = n
 	stack = l
@@ -95,13 +88,14 @@ func visit(n *Node) uint32 {
 		// If visitcodelist found its way back to n->walkgen, then this
 		// block is a set of mutually recursive functions.
 		// Otherwise it's just a lone function that does not recurse.
-		recursive = min == n.Walkgen
+		recursive := min == n.Walkgen
 
 		// Remove connected component from stack.
 		// Mark walkgen so that future visits return a large number
 		// so as not to affect the caller's min.
-		block = stack
+		block := stack
 
+		var l *NodeList
 		for l = stack; l.N != n; l = l.Next {
 			l.N.Walkgen = ^uint32(0)
 		}
@@ -124,9 +118,6 @@ func visitcodelist(l *NodeList, min uint32) uint32 {
 }
 
 func visitcode(n *Node, min uint32) uint32 {
-	var fn *Node
-	var m uint32
-
 	if n == nil {
 		return min
 	}
@@ -142,12 +133,12 @@ func visitcode(n *Node, min uint32) uint32 {
 	min = visitcodelist(n.Rlist, min)
 
 	if n.Op == OCALLFUNC || n.Op == OCALLMETH {
-		fn = n.Left
+		fn := n.Left
 		if n.Op == OCALLMETH {
 			fn = n.Left.Right.Sym.Def
 		}
 		if fn != nil && fn.Op == ONAME && fn.Class == PFUNC && fn.Defn != nil {
-			m = visit(fn.Defn)
+			m := visit(fn.Defn)
 			if m < min {
 				min = m
 			}
@@ -155,7 +146,7 @@ func visitcode(n *Node, min uint32) uint32 {
 	}
 
 	if n.Op == OCLOSURE {
-		m = visit(n.Closure)
+		m := visit(n.Closure)
 		if m < min {
 			min = m
 		}
@@ -205,9 +196,6 @@ type EscState struct {
 var tags [16]*Strlit
 
 func mktag(mask int) *Strlit {
-	var s *Strlit
-	var buf string
-
 	switch mask & EscMask {
 	case EscNone,
 		EscReturn:
@@ -223,8 +211,8 @@ func mktag(mask int) *Strlit {
 		return tags[mask]
 	}
 
-	buf = fmt.Sprintf("esc:0x%x", mask)
-	s = newstrlit(buf)
+	buf := fmt.Sprintf("esc:0x%x", mask)
+	s := newstrlit(buf)
 	if mask < len(tags) {
 		tags[mask] = s
 	}
@@ -232,15 +220,13 @@ func mktag(mask int) *Strlit {
 }
 
 func parsetag(note *Strlit) int {
-	var em int
-
 	if note == nil {
 		return EscUnknown
 	}
 	if !strings.HasPrefix(note.S, "esc:") {
 		return EscUnknown
 	}
-	em = atoi(note.S[4:])
+	em := atoi(note.S[4:])
 	if em == 0 {
 		return EscNone
 	}
@@ -248,12 +234,8 @@ func parsetag(note *Strlit) int {
 }
 
 func analyze(all *NodeList, recursive bool) {
-	var l *NodeList
-	var es EscState
-	var e *EscState
-
-	es = EscState{}
-	e = &es
+	es := EscState{}
+	e := &es
 	e.theSink.Op = ONAME
 	e.theSink.Orig = &e.theSink
 	e.theSink.Class = PEXTERN
@@ -267,14 +249,14 @@ func analyze(all *NodeList, recursive bool) {
 	e.funcParam.Sym = Lookup(".param")
 	e.funcParam.Escloopdepth = 10000000
 
-	for l = all; l != nil; l = l.Next {
+	for l := all; l != nil; l = l.Next {
 		if l.N.Op == ODCLFUNC {
 			l.N.Esc = EscFuncPlanned
 		}
 	}
 
 	// flow-analyze functions
-	for l = all; l != nil; l = l.Next {
+	for l := all; l != nil; l = l.Next {
 		if l.N.Op == ODCLFUNC {
 			escfunc(e, l.N)
 		}
@@ -284,19 +266,19 @@ func analyze(all *NodeList, recursive bool) {
 
 	// visit the upstream of each dst, mark address nodes with
 	// addrescapes, mark parameters unsafe
-	for l = e.dsts; l != nil; l = l.Next {
+	for l := e.dsts; l != nil; l = l.Next {
 		escflood(e, l.N)
 	}
 
 	// for all top level functions, tag the typenodes corresponding to the param nodes
-	for l = all; l != nil; l = l.Next {
+	for l := all; l != nil; l = l.Next {
 		if l.N.Op == ODCLFUNC {
 			esctag(e, l.N)
 		}
 	}
 
 	if Debug['m'] != 0 {
-		for l = e.noesc; l != nil; l = l.Next {
+		for l := e.noesc; l != nil; l = l.Next {
 			if l.N.Esc == EscNone {
 				var tmp *Sym
 				if l.N.Curfn != nil && l.N.Curfn.Nname != nil {
@@ -311,10 +293,6 @@ func analyze(all *NodeList, recursive bool) {
 }
 
 func escfunc(e *EscState, func_ *Node) {
-	var savefn *Node
-	var ll *NodeList
-	var saveld int
-
 	//	print("escfunc %N %s\n", func->nname, e->recursive?"(recursive)":"");
 
 	if func_.Esc != 1 {
@@ -322,12 +300,12 @@ func escfunc(e *EscState, func_ *Node) {
 	}
 	func_.Esc = EscFuncStarted
 
-	saveld = e.loopdepth
+	saveld := e.loopdepth
 	e.loopdepth = 1
-	savefn = Curfn
+	savefn := Curfn
 	Curfn = func_
 
-	for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+	for ll := Curfn.Dcl; ll != nil; ll = ll.Next {
 		if ll.N.Op != ONAME {
 			continue
 		}
@@ -352,7 +330,7 @@ func escfunc(e *EscState, func_ *Node) {
 
 	// in a mutually recursive group we lose track of the return values
 	if e.recursive {
-		for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+		for ll := Curfn.Dcl; ll != nil; ll = ll.Next {
 			if ll.N.Op == ONAME && ll.N.Class == PPARAMOUT {
 				escflows(e, &e.theSink, ll.N)
 			}
@@ -426,17 +404,14 @@ func esclist(e *EscState, l *NodeList, up *Node) {
 }
 
 func esc(e *EscState, n *Node, up *Node) {
-	var lno int
 	var ll *NodeList
 	var lr *NodeList
-	var a *Node
-	var v *Node
 
 	if n == nil {
 		return
 	}
 
-	lno = int(setlineno(n))
+	lno := int(setlineno(n))
 
 	// ninit logically runs at a different loopdepth than the rest of the for loop.
 	esclist(e, n.Ninit, n)
@@ -702,6 +677,8 @@ func esc(e *EscState, n *Node, up *Node) {
 
 		// Link addresses of captured variables to closure.
 	case OCLOSURE:
+		var a *Node
+		var v *Node
 		for ll = n.Cvars; ll != nil; ll = ll.Next {
 			v = ll.N
 			if v.Op == OXXX { // unnamed out argument; see dcl.c:/^funcargs
@@ -780,9 +757,6 @@ func esc(e *EscState, n *Node, up *Node) {
 // evaluated in curfn.	For expr==nil, dst must still be examined for
 // evaluations inside it (e.g. *f(x) = y)
 func escassign(e *EscState, dst *Node, src *Node) {
-	var lno int
-	var ll *NodeList
-
 	if isblank(dst) || dst == nil || src == nil || src.Op == ONONAME || src.Op == OXXX {
 		return
 	}
@@ -846,7 +820,7 @@ func escassign(e *EscState, dst *Node, src *Node) {
 		dst = &e.theSink
 	}
 
-	lno = int(setlineno(src))
+	lno := int(setlineno(src))
 	e.pdepth++
 
 	switch src.Op {
@@ -879,7 +853,7 @@ func escassign(e *EscState, dst *Node, src *Node) {
 	case OCALLMETH,
 		OCALLFUNC,
 		OCALLINTER:
-		for ll = src.Escretval; ll != nil; ll = ll.Next {
+		for ll := src.Escretval; ll != nil; ll = ll.Next {
 			escflows(e, dst, ll.N)
 		}
 
@@ -945,7 +919,6 @@ func escassign(e *EscState, dst *Node, src *Node) {
 
 func escassignfromtag(e *EscState, note *Strlit, dsts *NodeList, src *Node) int {
 	var em int
-	var em0 int
 
 	em = parsetag(note)
 
@@ -964,7 +937,7 @@ func escassignfromtag(e *EscState, note *Strlit, dsts *NodeList, src *Node) int
 		escassign(e, &e.funcParam, src)
 	}
 
-	em0 = em
+	em0 := em
 	for em >>= EscReturnBits; em != 0 && dsts != nil; (func() { em >>= 1; dsts = dsts.Next })() {
 		if em&1 != 0 {
 			escassign(e, dsts.N, src)
@@ -986,15 +959,9 @@ func escassignfromtag(e *EscState, note *Strlit, dsts *NodeList, src *Node) int
 func esccall(e *EscState, n *Node, up *Node) {
 	var ll *NodeList
 	var lr *NodeList
-	var a *Node
-	var fn *Node
-	var src *Node
-	var t *Type
 	var fntype *Type
-	var buf string
-	var i int
 
-	fn = nil
+	fn := (*Node)(nil)
 	switch n.Op {
 	default:
 		Fatal("esccall")
@@ -1017,7 +984,7 @@ func esccall(e *EscState, n *Node, up *Node) {
 
 	ll = n.List
 	if n.List != nil && n.List.Next == nil {
-		a = n.List.N
+		a := n.List.N
 		if a.Type.Etype == TSTRUCT && a.Type.Funarg != 0 { // f(g()).
 			ll = a.Escretval
 		}
@@ -1040,6 +1007,7 @@ func esccall(e *EscState, n *Node, up *Node) {
 			escassign(e, fn.Ntype.Left.Left, n.Left.Left)
 		}
 
+		var src *Node
 		for lr = fn.Ntype.List; ll != nil && lr != nil; (func() { ll = ll.Next; lr = lr.Next })() {
 			src = ll.N
 			if lr.N.Isddd != 0 && n.Isddd == 0 {
@@ -1079,9 +1047,11 @@ func esccall(e *EscState, n *Node, up *Node) {
 	}
 
 	// set up out list on this call node with dummy auto ONAMES in the current (calling) function.
-	i = 0
+	i := 0
 
-	for t = getoutargx(fntype).Type; t != nil; t = t.Down {
+	var src *Node
+	var buf string
+	for t := getoutargx(fntype).Type; t != nil; t = t.Down {
 		src = Nod(ONAME, nil, nil)
 		buf = fmt.Sprintf(".dum%d", i)
 		i++
@@ -1099,14 +1069,15 @@ func esccall(e *EscState, n *Node, up *Node) {
 
 	// Receiver.
 	if n.Op != OCALLFUNC {
-		t = getthisx(fntype).Type
-		src = n.Left.Left
+		t := getthisx(fntype).Type
+		src := n.Left.Left
 		if haspointers(t.Type) {
 			escassignfromtag(e, t.Note, n.Escretval, src)
 		}
 	}
 
-	for t = getinargx(fntype).Type; ll != nil; ll = ll.Next {
+	var a *Node
+	for t := getinargx(fntype).Type; ll != nil; ll = ll.Next {
 		src = ll.N
 		if t.Isddd != 0 && n.Isddd == 0 {
 			// Introduce ODDDARG node to represent ... allocation.
@@ -1197,8 +1168,6 @@ func escflows(e *EscState, dst *Node, src *Node) {
 // Once an object has been moved to the heap, all of its upstream should be considered
 // escaping to the global scope.
 func escflood(e *EscState, dst *Node) {
-	var l *NodeList
-
 	switch dst.Op {
 	case ONAME,
 		OCLOSURE:
@@ -1218,7 +1187,7 @@ func escflood(e *EscState, dst *Node) {
 		fmt.Printf("\nescflood:%d: dst %v scope:%v[%d]\n", walkgen, Nconv(dst, obj.FmtShort), Sconv(tmp, 0), dst.Escloopdepth)
 	}
 
-	for l = dst.Escflowsrc; l != nil; l = l.Next {
+	for l := dst.Escflowsrc; l != nil; l = l.Next {
 		walkgen++
 		escwalk(e, 0, dst, l.N)
 	}
@@ -1240,10 +1209,6 @@ const (
 )
 
 func escwalk(e *EscState, level int, dst *Node, src *Node) {
-	var ll *NodeList
-	var leaks bool
-	var newlevel int
-
 	if src.Walkgen == walkgen && src.Esclevel <= int32(level) {
 		return
 	}
@@ -1263,6 +1228,7 @@ func escwalk(e *EscState, level int, dst *Node, src *Node) {
 	e.pdepth++
 
 	// Input parameter flowing to output parameter?
+	var leaks bool
 	if dst.Op == ONAME && dst.Class == PPARAMOUT && dst.Vargen <= 20 {
 		if src.Op == ONAME && src.Class == PPARAM && src.Curfn == dst.Curfn && src.Esc != EscScope && src.Esc != EscHeap {
 			if level == 0 {
@@ -1320,7 +1286,7 @@ func escwalk(e *EscState, level int, dst *Node, src *Node) {
 			}
 		}
 
-		newlevel = level
+		newlevel := level
 		if level > MinLevel {
 			newlevel--
 		}
@@ -1373,7 +1339,7 @@ func escwalk(e *EscState, level int, dst *Node, src *Node) {
 	case ODOTPTR,
 		OINDEXMAP,
 		OIND:
-		newlevel = level
+		newlevel := level
 
 		if level > MinLevel {
 			newlevel++
@@ -1382,7 +1348,7 @@ func escwalk(e *EscState, level int, dst *Node, src *Node) {
 	}
 
 recurse:
-	for ll = src.Escflowsrc; ll != nil; ll = ll.Next {
+	for ll := src.Escflowsrc; ll != nil; ll = ll.Next {
 		escwalk(e, level, dst, ll.N)
 	}
 
@@ -1390,17 +1356,13 @@ recurse:
 }
 
 func esctag(e *EscState, func_ *Node) {
-	var savefn *Node
-	var ll *NodeList
-	var t *Type
-
 	func_.Esc = EscFuncTagged
 
 	// External functions are assumed unsafe,
 	// unless //go:noescape is given before the declaration.
 	if func_.Nbody == nil {
 		if func_.Noescape {
-			for t = getinargx(func_.Type).Type; t != nil; t = t.Down {
+			for t := getinargx(func_.Type).Type; t != nil; t = t.Down {
 				if haspointers(t.Type) {
 					t.Note = mktag(EscNone)
 				}
@@ -1410,10 +1372,10 @@ func esctag(e *EscState, func_ *Node) {
 		return
 	}
 
-	savefn = Curfn
+	savefn := Curfn
 	Curfn = func_
 
-	for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+	for ll := Curfn.Dcl; ll != nil; ll = ll.Next {
 		if ll.N.Op != ONAME || ll.N.Class != PPARAM {
 			continue
 		}
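
Two conversion idioms recur throughout esc.go. First, `fn := (*Node)(nil)` gives a short declaration an explicit pointer type, since a bare `nil` carries no type to infer. Second, C's comma-operator post statements become immediately invoked closures, as in `(func() { em >>= 1; dsts = dsts.Next })()`. A small self-contained illustration of both, using a hypothetical `node` type:

```go
package main

import "fmt"

type node struct {
	name string
	next *node
}

func main() {
	// `fn := nil` does not compile: nil alone has no type, so the
	// conversion gives the short declaration something to infer from.
	fn := (*node)(nil)
	fmt.Println(fn == nil) // true

	// C's comma operator in a for-loop post statement has no direct Go
	// equivalent; the converter wraps both updates in a closure call.
	// Hand-written Go would put the updates in the loop body instead.
	a := &node{"a", &node{"b", &node{"c", nil}}}
	i := 0
	for l := a; l != nil; (func() { l = l.next; i++ })() {
		fmt.Println(i, l.name)
	}
}
```
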
diff --git a/src/cmd/internal/gc/export.go b/src/cmd/internal/gc/export.go
index b0c593168e43cfbd6d006c9711643691b9f833e1..24670867c6ad9ebca2f914f8f77842a015c5ec66 100644
--- a/src/cmd/internal/gc/export.go
+++ b/src/cmd/internal/gc/export.go
@@ -79,13 +79,11 @@ func autoexport(n *Node, ctxt int) {
 }
 
 func dumppkg(p *Pkg) {
-	var suffix string
-
 	if p == nil || p == localpkg || p.Exported != 0 || p == builtinpkg {
 		return
 	}
 	p.Exported = 1
-	suffix = ""
+	suffix := ""
 	if p.Direct == 0 {
 		suffix = " // indirect"
 	}
@@ -100,8 +98,6 @@ func reexportdeplist(ll *NodeList) {
 }
 
 func reexportdep(n *Node) {
-	var t *Type
-
 	if n == nil {
 		return
 	}
@@ -135,7 +131,7 @@ func reexportdep(n *Node) {
 
 		// Local variables in the bodies need their type.
 	case ODCL:
-		t = n.Left.Type
+		t := n.Left.Type
 
 		if t != Types[t.Etype] && t != idealbool && t != idealstring {
 			if Isptr[t.Etype] != 0 {
@@ -150,7 +146,7 @@ func reexportdep(n *Node) {
 		}
 
 	case OLITERAL:
-		t = n.Type
+		t := n.Type
 		if t != Types[n.Type.Etype] && t != idealbool && t != idealstring {
 			if Isptr[t.Etype] != 0 {
 				t = t.Type
@@ -190,7 +186,7 @@ func reexportdep(n *Node) {
 		OMAKEMAP,
 		OMAKESLICE,
 		OMAKECHAN:
-		t = n.Type
+		t := n.Type
 
 		if t.Sym == nil && t.Type != nil {
 			t = t.Type
@@ -215,16 +211,13 @@ func reexportdep(n *Node) {
 }
 
 func dumpexportconst(s *Sym) {
-	var n *Node
-	var t *Type
-
-	n = s.Def
+	n := s.Def
 	typecheck(&n, Erv)
 	if n == nil || n.Op != OLITERAL {
 		Fatal("dumpexportconst: oconst nil: %v", Sconv(s, 0))
 	}
 
-	t = n.Type // may or may not be specified
+	t := n.Type // may or may not be specified
 	dumpexporttype(t)
 
 	if t != nil && !isideal(t) {
@@ -235,17 +228,14 @@ func dumpexportconst(s *Sym) {
 }
 
 func dumpexportvar(s *Sym) {
-	var n *Node
-	var t *Type
-
-	n = s.Def
+	n := s.Def
 	typecheck(&n, Erv|Ecall)
 	if n == nil || n.Type == nil {
 		Yyerror("variable exported but not defined: %v", Sconv(s, 0))
 		return
 	}
 
-	t = n.Type
+	t := n.Type
 	dumpexporttype(t)
 
 	if t.Etype == TFUNC && n.Class == PFUNC {
@@ -279,20 +269,12 @@ func (x methodbyname) Swap(i, j int) {
 }
 
 func (x methodbyname) Less(i, j int) bool {
-	var a *Type
-	var b *Type
-
-	a = x[i]
-	b = x[j]
+	a := x[i]
+	b := x[j]
 	return stringsCompare(a.Sym.Name, b.Sym.Name) < 0
 }
 
 func dumpexporttype(t *Type) {
-	var f *Type
-	var m []*Type
-	var i int
-	var n int
-
 	if t == nil {
 		return
 	}
@@ -312,22 +294,23 @@ func dumpexporttype(t *Type) {
 		return
 	}
 
-	n = 0
-	for f = t.Method; f != nil; f = f.Down {
+	n := 0
+	for f := t.Method; f != nil; f = f.Down {
 		dumpexporttype(f)
 		n++
 	}
 
-	m = make([]*Type, n)
-	i = 0
-	for f = t.Method; f != nil; f = f.Down {
+	m := make([]*Type, n)
+	i := 0
+	for f := t.Method; f != nil; f = f.Down {
 		m[i] = f
 		i++
 	}
 	sort.Sort(methodbyname(m[:n]))
 
 	fmt.Fprintf(bout, "\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
-	for i = 0; i < n; i++ {
+	var f *Type
+	for i := 0; i < n; i++ {
 		f = m[i]
 		if f.Nointerface {
 			fmt.Fprintf(bout, "\t//go:nointerface\n")
@@ -381,12 +364,7 @@ func dumpsym(s *Sym) {
 }
 
 func dumpexport() {
-	var l *NodeList
-	var i int32
-	var lno int32
-	var p *Pkg
-
-	lno = lineno
+	lno := lineno
 
 	fmt.Fprintf(bout, "\n$$\npackage %s", localpkg.Name)
 	if safemode != 0 {
@@ -394,7 +372,8 @@ func dumpexport() {
 	}
 	fmt.Fprintf(bout, "\n")
 
-	for i = 0; i < int32(len(phash)); i++ {
+	var p *Pkg
+	for i := int32(0); i < int32(len(phash)); i++ {
 		for p = phash[i]; p != nil; p = p.Link {
 			if p.Direct != 0 {
 				dumppkg(p)
@@ -402,7 +381,7 @@ func dumpexport() {
 		}
 	}
 
-	for l = exportlist; l != nil; l = l.Next {
+	for l := exportlist; l != nil; l = l.Next {
 		lineno = l.N.Lineno
 		dumpsym(l.N.Sym)
 	}
@@ -419,10 +398,8 @@ func dumpexport() {
  * return the sym for ss, which should match lexical
  */
 func importsym(s *Sym, op int) *Sym {
-	var pkgstr string
-
 	if s.Def != nil && int(s.Def.Op) != op {
-		pkgstr = fmt.Sprintf("during import \"%v\"", Zconv(importpkg.Path, 0))
+		pkgstr := fmt.Sprintf("during import \"%v\"", Zconv(importpkg.Path, 0))
 		redeclare(s, pkgstr)
 	}
 
@@ -442,11 +419,9 @@ func importsym(s *Sym, op int) *Sym {
  * return the type pkg.name, forward declaring if needed
  */
 func pkgtype(s *Sym) *Type {
-	var t *Type
-
 	importsym(s, OTYPE)
 	if s.Def == nil || s.Def.Op != OTYPE {
-		t = typ(TFORW)
+		t := typ(TFORW)
 		t.Sym = s
 		s.Def = typenod(t)
 	}
@@ -461,12 +436,11 @@ func importimport(s *Sym, z *Strlit) {
 	// Informational: record package name
 	// associated with import path, for use in
 	// human-readable messages.
-	var p *Pkg
 
 	if isbadimport(z) {
 		errorexit()
 	}
-	p = mkpkg(z)
+	p := mkpkg(z)
 	if p.Name == "" {
 		p.Name = s.Name
 		Pkglookup(s.Name, nil).Npkg++
@@ -481,8 +455,6 @@ func importimport(s *Sym, z *Strlit) {
 }
 
 func importconst(s *Sym, t *Type, n *Node) {
-	var n1 *Node
-
 	importsym(s, OLITERAL)
 	Convlit(&n, t)
 
@@ -496,7 +468,7 @@ func importconst(s *Sym, t *Type, n *Node) {
 	}
 
 	if n.Sym != nil {
-		n1 = Nod(OXXX, nil, nil)
+		n1 := Nod(OXXX, nil, nil)
 		*n1 = *n
 		n = n1
 	}
@@ -511,8 +483,6 @@ func importconst(s *Sym, t *Type, n *Node) {
 }
 
 func importvar(s *Sym, t *Type) {
-	var n *Node
-
 	importsym(s, ONAME)
 	if s.Def != nil && s.Def.Op == ONAME {
 		if Eqtype(t, s.Def.Type) {
@@ -521,7 +491,7 @@ func importvar(s *Sym, t *Type) {
 		Yyerror("inconsistent definition for var %v during import\n\t%v (in \"%v\")\n\t%v (in \"%v\")", Sconv(s, 0), Tconv(s.Def.Type, 0), Zconv(s.Importdef.Path, 0), Tconv(t, 0), Zconv(importpkg.Path, 0))
 	}
 
-	n = newname(s)
+	n := newname(s)
 	s.Importdef = importpkg
 	n.Type = t
 	declare(n, PEXTERN)
@@ -532,8 +502,6 @@ func importvar(s *Sym, t *Type) {
 }
 
 func importtype(pt *Type, t *Type) {
-	var n *Node
-
 	// override declaration in unsafe.go for Pointer.
 	// there is no way in Go code to define unsafe.Pointer
 	// so we have to supply it.
@@ -542,7 +510,7 @@ func importtype(pt *Type, t *Type) {
 	}
 
 	if pt.Etype == TFORW {
-		n = pt.Nod
+		n := pt.Nod
 		copytype(pt.Nod, t)
 		pt.Nod = n // unzero nod
 		pt.Sym.Importdef = importpkg
@@ -560,16 +528,15 @@ func importtype(pt *Type, t *Type) {
 
 func dumpasmhdr() {
 	var b *obj.Biobuf
-	var l *NodeList
-	var n *Node
-	var t *Type
 
 	b, err := obj.Bopenw(asmhdr)
 	if err != nil {
 		Fatal("%v", err)
 	}
 	fmt.Fprintf(b, "// generated by %cg -asmhdr from package %s\n\n", Thearch.Thechar, localpkg.Name)
-	for l = asmlist; l != nil; l = l.Next {
+	var n *Node
+	var t *Type
+	for l := asmlist; l != nil; l = l.Next {
 		n = l.N
 		if isblanksym(n.Sym) {
 			continue
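
The methodbyname hunks above sort a type's methods by symbol name via sort.Sort; `stringsCompare(a.Sym.Name, b.Sym.Name) < 0` is equivalent to `<` on strings. A minimal sketch of the same shape with a hypothetical `sym` type — using append in place of the count-then-fill pass the export code runs over the linked method list:

```go
package main

import (
	"fmt"
	"sort"
)

type sym struct{ Name string }

// Mirrors methodbyname: a named slice type whose Less compares
// symbol names, sorted with sort.Sort.
type bysymname []*sym

func (x bysymname) Len() int           { return len(x) }
func (x bysymname) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
func (x bysymname) Less(i, j int) bool { return x[i].Name < x[j].Name }

func main() {
	// dumpexporttype first counts the linked method list to size the
	// slice exactly; with an ordinary Go slice, append suffices.
	m := bysymname{{"Write"}, {"Close"}, {"Read"}}
	sort.Sort(m)
	for _, s := range m {
		fmt.Println(s.Name) // Close, Read, Write
	}
}
```
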
diff --git a/src/cmd/internal/gc/fmt.go b/src/cmd/internal/gc/fmt.go
index b155f7857459a658a56adf7994983adbcada9a31..ce7367665585a8ba437b39caa3bbfb6240292398 100644
--- a/src/cmd/internal/gc/fmt.go
+++ b/src/cmd/internal/gc/fmt.go
@@ -88,9 +88,7 @@ var fmtpkgpfx int // %uT stickyness
 //
 
 func setfmode(flags *int) int {
-	var fm int
-
-	fm = fmtmode
+	fm := fmtmode
 	if *flags&obj.FmtSign != 0 {
 		fmtmode = FDbg
 	} else if *flags&obj.FmtSharp != 0 {
@@ -170,20 +168,21 @@ var goopnames = []string{
 
 // Fmt "%O":  Node opcodes
 func Oconv(o int, flag int) string {
-	var fp string
-
 	if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode != FDbg {
 		if o >= 0 && o < len(goopnames) && goopnames[o] != "" {
+			var fp string
 			fp += goopnames[o]
 			return fp
 		}
 	}
 
 	if o >= 0 && o < len(opnames) && opnames[o] != "" {
+		var fp string
 		fp += opnames[o]
 		return fp
 	}
 
+	var fp string
 	fp += fmt.Sprintf("O-%d", o)
 	return fp
 }
@@ -202,10 +201,7 @@ var classnames = []string{
 func Jconv(n *Node, flag int) string {
 	var fp string
 
-	var s string
-	var c int
-
-	c = flag & obj.FmtShort
+	c := flag & obj.FmtShort
 
 	if c == 0 && n.Ullman != 0 {
 		fp += fmt.Sprintf(" u(%d)", n.Ullman)
@@ -228,7 +224,7 @@ func Jconv(n *Node, flag int) string {
 	}
 
 	if n.Class != 0 {
-		s = ""
+		s := ""
 		if n.Class&PHEAP != 0 {
 			s = ",heap"
 		}
@@ -309,98 +305,111 @@ func Jconv(n *Node, flag int) string {
 
 // Fmt "%V": Values
 func Vconv(v *Val, flag int) string {
-	var fp string
-
-	var x int64
-
 	switch v.Ctype {
 	case CTINT:
 		if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode == FExp {
+			var fp string
 			fp += fmt.Sprintf("%v", Bconv(v.U.Xval, obj.FmtSharp))
 			return fp
 		}
+		var fp string
 		fp += fmt.Sprintf("%v", Bconv(v.U.Xval, 0))
 		return fp
 
 	case CTRUNE:
-		x = Mpgetfix(v.U.Xval)
+		x := Mpgetfix(v.U.Xval)
 		if ' ' <= x && x < 0x80 && x != '\\' && x != '\'' {
+			var fp string
 			fp += fmt.Sprintf("'%c'", int(x))
 			return fp
 		}
 		if 0 <= x && x < 1<<16 {
+			var fp string
 			fp += fmt.Sprintf("'\\u%04x'", uint(int(x)))
 			return fp
 		}
 		if 0 <= x && x <= utf8.MaxRune {
+			var fp string
 			fp += fmt.Sprintf("'\\U%08x'", uint64(x))
 			return fp
 		}
+		var fp string
 		fp += fmt.Sprintf("('\\x00' + %v)", Bconv(v.U.Xval, 0))
 		return fp
 
 	case CTFLT:
 		if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode == FExp {
+			var fp string
 			fp += fmt.Sprintf("%v", Fconv(v.U.Fval, 0))
 			return fp
 		}
+		var fp string
 		fp += fmt.Sprintf("%v", Fconv(v.U.Fval, obj.FmtSharp))
 		return fp
 
 	case CTCPLX:
 		if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode == FExp {
+			var fp string
 			fp += fmt.Sprintf("(%v+%vi)", Fconv(&v.U.Cval.Real, 0), Fconv(&v.U.Cval.Imag, 0))
 			return fp
 		}
 		if mpcmpfltc(&v.U.Cval.Real, 0) == 0 {
+			var fp string
 			fp += fmt.Sprintf("%vi", Fconv(&v.U.Cval.Imag, obj.FmtSharp))
 			return fp
 		}
 		if mpcmpfltc(&v.U.Cval.Imag, 0) == 0 {
+			var fp string
 			fp += fmt.Sprintf("%v", Fconv(&v.U.Cval.Real, obj.FmtSharp))
 			return fp
 		}
 		if mpcmpfltc(&v.U.Cval.Imag, 0) < 0 {
+			var fp string
 			fp += fmt.Sprintf("(%v%vi)", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp))
 			return fp
 		}
+		var fp string
 		fp += fmt.Sprintf("(%v+%vi)", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp))
 		return fp
 
 	case CTSTR:
+		var fp string
 		fp += fmt.Sprintf("\"%v\"", Zconv(v.U.Sval, 0))
 		return fp
 
 	case CTBOOL:
 		if v.U.Bval != 0 {
+			var fp string
 			fp += "true"
 			return fp
 		}
+		var fp string
 		fp += "false"
 		return fp
 
 	case CTNIL:
+		var fp string
 		fp += "nil"
 		return fp
 	}
 
+	var fp string
 	fp += fmt.Sprintf("<ctype=%d>", v.Ctype)
 	return fp
 }
 
 // Fmt "%Z": escaped string literals
 func Zconv(sp *Strlit, flag int) string {
-	var fp string
-	var s string
-	var n int
-
 	if sp == nil {
+		var fp string
 		fp += "<nil>"
 		return fp
 	}
 
 	// NOTE: Keep in sync with ../ld/go.c:/^Zconv.
-	s = sp.S
+	s := sp.S
+	var n int
+	var fp string
 	for i := 0; i < len(s); i += n {
 		var r rune
 		r, n = utf8.DecodeRuneInString(s[i:])
@@ -481,47 +490,49 @@ var etnames = []string{
 
 // Fmt "%E": etype
 func Econv(et int, flag int) string {
-	var fp string
-
 	if et >= 0 && et < len(etnames) && etnames[et] != "" {
+		var fp string
 		fp += etnames[et]
 		return fp
 	}
+	var fp string
 	fp += fmt.Sprintf("E-%d", et)
 	return fp
 }
 
 // Fmt "%S": syms
 func symfmt(s *Sym, flag int) string {
-	var fp string
-
-	var p string
-
 	if s.Pkg != nil && flag&obj.FmtShort == 0 /*untyped*/ {
 		switch fmtmode {
 		case FErr: // This is for the user
 			if s.Pkg == localpkg {
+				var fp string
 				fp += s.Name
 				return fp
 			}
 
 			// If the name was used by multiple packages, display the full path.
 			if s.Pkg.Name != "" && Pkglookup(s.Pkg.Name, nil).Npkg > 1 {
+				var fp string
 				fp += fmt.Sprintf("\"%v\".%s", Zconv(s.Pkg.Path, 0), s.Name)
 				return fp
 			}
+			var fp string
 			fp += fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name)
 			return fp
 
 		case FDbg:
+			var fp string
 			fp += fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name)
 			return fp
 
 		case FTypeId:
 			if flag&obj.FmtUnsigned != 0 /*untyped*/ {
+				var fp string
 				fp += fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name)
 				return fp // dcommontype, typehash
 			}
+			var fp string
 			fp += fmt.Sprintf("%s.%s", s.Pkg.Prefix, s.Name)
 			return fp // (methodsym), typesym, weaksym
 
@@ -530,6 +541,7 @@ func symfmt(s *Sym, flag int) string {
 				Fatal("exporting synthetic symbol %s", s.Name)
 			}
 			if s.Pkg != builtinpkg {
+				var fp string
 				fp += fmt.Sprintf("@\"%v\".%s", Zconv(s.Pkg.Path, 0), s.Name)
 				return fp
 			}
@@ -539,21 +551,24 @@ func symfmt(s *Sym, flag int) string {
 	if flag&obj.FmtByte != 0 /*untyped*/ { // FmtByte (hh) implies FmtShort (h)
 
 		// skip leading "type." in method name
-		p = s.Name
+		p := s.Name
 		if i := strings.LastIndex(s.Name, "."); i >= 0 {
 			p = s.Name[i+1:]
 		}
 
 		// exportname needs to see the name without the prefix too.
 		if (fmtmode == FExp && !exportname(p)) || fmtmode == FDbg {
+			var fp string
 			fp += fmt.Sprintf("@\"%v\".%s", Zconv(s.Pkg.Path, 0), p)
 			return fp
 		}
 
+		var fp string
 		fp += p
 		return fp
 	}
 
+	var fp string
 	fp += s.Name
 	return fp
 }
@@ -583,12 +598,8 @@ var basicnames = []string{
 }
 
 func typefmt(t *Type, flag int) string {
-	var fp string
-
-	var t1 *Type
-	var s *Sym
-
 	if t == nil {
+		var fp string
 		fp += "<T>"
 		return fp
 	}
@@ -596,6 +607,7 @@ func typefmt(t *Type, flag int) string {
 	if t == bytetype || t == runetype {
 		// in %-T mode collapse rune and byte with their originals.
 		if fmtmode != FTypeId {
+			var fp string
 			fp += fmt.Sprintf("%v", Sconv(t.Sym, obj.FmtShort))
 			return fp
 		}
@@ -603,6 +615,7 @@ func typefmt(t *Type, flag int) string {
 	}
 
 	if t == errortype {
+		var fp string
 		fp += "error"
 		return fp
 	}
@@ -613,14 +626,17 @@ func typefmt(t *Type, flag int) string {
 		case FTypeId:
 			if flag&obj.FmtShort != 0 /*untyped*/ {
 				if t.Vargen != 0 {
+					var fp string
 					fp += fmt.Sprintf("%v·%d", Sconv(t.Sym, obj.FmtShort), t.Vargen)
 					return fp
 				}
+				var fp string
 				fp += fmt.Sprintf("%v", Sconv(t.Sym, obj.FmtShort))
 				return fp
 			}
 
 			if flag&obj.FmtUnsigned != 0 /*untyped*/ {
+				var fp string
 				fp += fmt.Sprintf("%v", Sconv(t.Sym, obj.FmtUnsigned))
 				return fp
 			}
@@ -629,15 +645,18 @@ func typefmt(t *Type, flag int) string {
 			// fallthrough
 		case FExp:
 			if t.Sym.Pkg == localpkg && t.Vargen != 0 {
+				var fp string
 				fp += fmt.Sprintf("%v·%d", Sconv(t.Sym, 0), t.Vargen)
 				return fp
 			}
 		}
 
+		var fp string
 		fp += fmt.Sprintf("%v", Sconv(t.Sym, 0))
 		return fp
 	}
 
+	var fp string
 	if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
 		if fmtmode == FErr && (t == idealbool || t == idealstring) {
 			fp += "untyped "
@@ -696,7 +715,7 @@ func typefmt(t *Type, flag int) string {
 
 	case TINTER:
 		fp += "interface {"
-		for t1 = t.Type; t1 != nil; t1 = t1.Down {
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
 			if exportname(t1.Sym.Name) {
 				if t1.Down != nil {
 					fp += fmt.Sprintf(" %v%v;", Sconv(t1.Sym, obj.FmtShort), Tconv(t1.Type, obj.FmtShort))
@@ -769,7 +788,7 @@ func typefmt(t *Type, flag int) string {
 		if t.Funarg != 0 {
 			fp += "("
 			if fmtmode == FTypeId || fmtmode == FErr { // no argument names on function signature, and no "noescape"/"nosplit" tags
-				for t1 = t.Type; t1 != nil; t1 = t1.Down {
+				for t1 := t.Type; t1 != nil; t1 = t1.Down {
 					if t1.Down != nil {
 						fp += fmt.Sprintf("%v, ", Tconv(t1, obj.FmtShort))
 					} else {
@@ -777,7 +796,7 @@ func typefmt(t *Type, flag int) string {
 					}
 				}
 			} else {
-				for t1 = t.Type; t1 != nil; t1 = t1.Down {
+				for t1 := t.Type; t1 != nil; t1 = t1.Down {
 					if t1.Down != nil {
 						fp += fmt.Sprintf("%v, ", Tconv(t1, 0))
 					} else {
@@ -789,7 +808,7 @@ func typefmt(t *Type, flag int) string {
 			fp += ")"
 		} else {
 			fp += "struct {"
-			for t1 = t.Type; t1 != nil; t1 = t1.Down {
+			for t1 := t.Type; t1 != nil; t1 = t1.Down {
 				if t1.Down != nil {
 					fp += fmt.Sprintf(" %v;", Tconv(t1, obj.FmtLong))
 				} else {
@@ -803,7 +822,7 @@ func typefmt(t *Type, flag int) string {
 
 	case TFIELD:
 		if flag&obj.FmtShort == 0 /*untyped*/ {
-			s = t.Sym
+			s := t.Sym
 
 			// Take the name from the original, lest we have substituted it with ~r%d or ~b%d.
 			// ~r%d is a (formerly) unnamed result.
@@ -896,23 +915,19 @@ func stmtwithinit(op int) bool {
 func stmtfmt(n *Node) string {
 	var f string
 
-	var complexinit bool
-	var simpleinit bool
-	var extrablock bool
-
 	// some statements allow for an init, but at most one,
 	// but we may have an arbitrary number added, e.g. by typecheck
 	// and inlining.  If it doesn't fit the syntax, emit an enclosing
 	// block starting with the init statements.
 
 	// if we can just say "for" n->ninit; ... then do so
-	simpleinit = n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(int(n.Op))
+	simpleinit := n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(int(n.Op))
 
 	// otherwise, print the inits as separate statements
-	complexinit = n.Ninit != nil && !simpleinit && (fmtmode != FErr)
+	complexinit := n.Ninit != nil && !simpleinit && (fmtmode != FErr)
 
 	// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
-	extrablock = complexinit && stmtwithinit(int(n.Op))
+	extrablock := complexinit && stmtwithinit(int(n.Op))
 
 	if extrablock {
 		f += "{"
@@ -1208,50 +1223,51 @@ var opprec = []int{
 }
 
 func exprfmt(n *Node, prec int) string {
-	var f string
-
-	var nprec int
-	var ptrlit bool
-	var l *NodeList
-
 	for n != nil && n.Implicit != 0 && (n.Op == OIND || n.Op == OADDR) {
 		n = n.Left
 	}
 
 	if n == nil {
+		var f string
 		f += "<N>"
 		return f
 	}
 
-	nprec = opprec[n.Op]
+	nprec := opprec[n.Op]
 	if n.Op == OTYPE && n.Sym != nil {
 		nprec = 8
 	}
 
 	if prec > nprec {
+		var f string
 		f += fmt.Sprintf("(%v)", Nconv(n, 0))
 		return f
 	}
 
 	switch n.Op {
 	case OPAREN:
+		var f string
 		f += fmt.Sprintf("(%v)", Nconv(n.Left, 0))
 		return f
 
 	case ODDDARG:
+		var f string
 		f += fmt.Sprintf("... argument")
 		return f
 
 	case OREGISTER:
+		var f string
 		f += fmt.Sprintf("%v", Ctxt.Rconv(int(n.Val.U.Reg)))
 		return f
 
 	case OLITERAL: // this is a bit of a mess
 		if fmtmode == FErr && n.Sym != nil {
+			var f string
 			f += fmt.Sprintf("%v", Sconv(n.Sym, 0))
 			return f
 		}
 		if n.Val.Ctype == CTNIL && n.Orig != nil && n.Orig != n {
+			var f string
 			f += exprfmt(n.Orig, prec)
 			return f
 		}
@@ -1259,14 +1275,17 @@ func exprfmt(n *Node, prec int) string {
 			// Need parens when type begins with what might
 			// be misinterpreted as a unary operator: * or <-.
 			if Isptr[n.Type.Etype] != 0 || (n.Type.Etype == TCHAN && n.Type.Chan == Crecv) {
+				var f string
 				f += fmt.Sprintf("(%v)(%v)", Tconv(n.Type, 0), Vconv(&n.Val, 0))
 				return f
 			} else {
+				var f string
 				f += fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Vconv(&n.Val, 0))
 				return f
 			}
 		}
 
+		var f string
 		f += fmt.Sprintf("%v", Vconv(&n.Val, 0))
 		return f
 
@@ -1274,10 +1293,12 @@ func exprfmt(n *Node, prec int) string {
 	// _ becomes ~b%d internally; print as _ for export
 	case ONAME:
 		if fmtmode == FExp && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
+			var f string
 			f += fmt.Sprintf("_")
 			return f
 		}
 		if fmtmode == FExp && n.Sym != nil && !isblank(n) && n.Vargen > 0 {
+			var f string
 			f += fmt.Sprintf("%v·%d", Sconv(n.Sym, 0), n.Vargen)
 			return f
 		}
@@ -1287,9 +1308,11 @@ func exprfmt(n *Node, prec int) string {
 		// These nodes have the special property that they are names with a left OTYPE and a right ONAME.
 		if fmtmode == FExp && n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME {
 			if Isptr[n.Left.Type.Etype] != 0 {
+				var f string
 				f += fmt.Sprintf("(%v).%v", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
 				return f
 			} else {
+				var f string
 				f += fmt.Sprintf("%v.%v", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
 				return f
 			}
@@ -1299,44 +1322,54 @@ func exprfmt(n *Node, prec int) string {
 		//fallthrough
 	case OPACK,
 		ONONAME:
+		var f string
 		f += fmt.Sprintf("%v", Sconv(n.Sym, 0))
 		return f
 
 	case OTYPE:
 		if n.Type == nil && n.Sym != nil {
+			var f string
 			f += fmt.Sprintf("%v", Sconv(n.Sym, 0))
 			return f
 		}
+		var f string
 		f += fmt.Sprintf("%v", Tconv(n.Type, 0))
 		return f
 
 	case OTARRAY:
 		if n.Left != nil {
+			var f string
 			f += fmt.Sprintf("[]%v", Nconv(n.Left, 0))
 			return f
 		}
+		var f string
 		f += fmt.Sprintf("[]%v", Nconv(n.Right, 0))
 		return f // happens before typecheck
 
 	case OTMAP:
+		var f string
 		f += fmt.Sprintf("map[%v]%v", Nconv(n.Left, 0), Nconv(n.Right, 0))
 		return f
 
 	case OTCHAN:
 		switch n.Etype {
 		case Crecv:
+			var f string
 			f += fmt.Sprintf("<-chan %v", Nconv(n.Left, 0))
 			return f
 
 		case Csend:
+			var f string
 			f += fmt.Sprintf("chan<- %v", Nconv(n.Left, 0))
 			return f
 
 		default:
 			if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && n.Left.Etype == Crecv {
+				var f string
 				f += fmt.Sprintf("chan (%v)", Nconv(n.Left, 0))
 				return f
 			} else {
+				var f string
 				f += fmt.Sprintf("chan %v", Nconv(n.Left, 0))
 				return f
 			}
@@ -1344,71 +1377,85 @@ func exprfmt(n *Node, prec int) string {
 		fallthrough
 
 	case OTSTRUCT:
+		var f string
 		f += fmt.Sprintf("<struct>")
 		return f
 
 	case OTINTER:
+		var f string
 		f += fmt.Sprintf("<inter>")
 		return f
 
 	case OTFUNC:
+		var f string
 		f += fmt.Sprintf("<func>")
 		return f
 
 	case OCLOSURE:
 		if fmtmode == FErr {
+			var f string
 			f += "func literal"
 			return f
 		}
 		if n.Nbody != nil {
+			var f string
 			f += fmt.Sprintf("%v { %v }", Tconv(n.Type, 0), Hconv(n.Nbody, 0))
 			return f
 		}
+		var f string
 		f += fmt.Sprintf("%v { %v }", Tconv(n.Type, 0), Hconv(n.Closure.Nbody, 0))
 		return f
 
 	case OCOMPLIT:
-		ptrlit = n.Right != nil && n.Right.Implicit != 0 && n.Right.Type != nil && Isptr[n.Right.Type.Etype] != 0
+		ptrlit := n.Right != nil && n.Right.Implicit != 0 && n.Right.Type != nil && Isptr[n.Right.Type.Etype] != 0
 		if fmtmode == FErr {
 			if n.Right != nil && n.Right.Type != nil && n.Implicit == 0 {
 				if ptrlit {
+					var f string
 					f += fmt.Sprintf("&%v literal", Tconv(n.Right.Type.Type, 0))
 					return f
 				} else {
+					var f string
 					f += fmt.Sprintf("%v literal", Tconv(n.Right.Type, 0))
 					return f
 				}
 			}
 
+			var f string
 			f += "composite literal"
 			return f
 		}
 
 		if fmtmode == FExp && ptrlit {
 			// typecheck has overwritten OIND by OTYPE with pointer type.
+			var f string
 			f += fmt.Sprintf("(&%v{ %v })", Tconv(n.Right.Type.Type, 0), Hconv(n.List, obj.FmtComma))
 			return f
 		}
 
+		var f string
 		f += fmt.Sprintf("(%v{ %v })", Nconv(n.Right, 0), Hconv(n.List, obj.FmtComma))
 		return f
 
 	case OPTRLIT:
 		if fmtmode == FExp && n.Left.Implicit != 0 {
+			var f string
 			f += fmt.Sprintf("%v", Nconv(n.Left, 0))
 			return f
 		}
+		var f string
 		f += fmt.Sprintf("&%v", Nconv(n.Left, 0))
 		return f
 
 	case OSTRUCTLIT:
 		if fmtmode == FExp { // requires special handling of field names
+			var f string
 			if n.Implicit != 0 {
 				f += "{"
 			} else {
 				f += fmt.Sprintf("(%v{", Tconv(n.Type, 0))
 			}
-			for l = n.List; l != nil; l = l.Next {
+			for l := n.List; l != nil; l = l.Next {
 				f += fmt.Sprintf(" %v:%v", Sconv(l.N.Left.Sym, obj.FmtShort|obj.FmtByte), Nconv(l.N.Right, 0))
 
 				if l.Next != nil {
@@ -1432,13 +1479,16 @@ func exprfmt(n *Node, prec int) string {
 	case OARRAYLIT,
 		OMAPLIT:
 		if fmtmode == FErr {
+			var f string
 			f += fmt.Sprintf("%v literal", Tconv(n.Type, 0))
 			return f
 		}
 		if fmtmode == FExp && n.Implicit != 0 {
+			var f string
 			f += fmt.Sprintf("{ %v }", Hconv(n.List, obj.FmtComma))
 			return f
 		}
+		var f string
 		f += fmt.Sprintf("(%v{ %v })", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))
 		return f
 
@@ -1446,22 +1496,27 @@ func exprfmt(n *Node, prec int) string {
 		if n.Left != nil && n.Right != nil {
 			if fmtmode == FExp && n.Left.Type != nil && n.Left.Type.Etype == TFIELD {
 				// requires special handling of field names
+				var f string
 				f += fmt.Sprintf("%v:%v", Sconv(n.Left.Sym, obj.FmtShort|obj.FmtByte), Nconv(n.Right, 0))
 				return f
 			} else {
+				var f string
 				f += fmt.Sprintf("%v:%v", Nconv(n.Left, 0), Nconv(n.Right, 0))
 				return f
 			}
 		}
 
 		if n.Left == nil && n.Right != nil {
+			var f string
 			f += fmt.Sprintf(":%v", Nconv(n.Right, 0))
 			return f
 		}
 		if n.Left != nil && n.Right == nil {
+			var f string
 			f += fmt.Sprintf("%v:", Nconv(n.Left, 0))
 			return f
 		}
+		var f string
 		f += ":"
 		return f
 
@@ -1471,6 +1526,7 @@ func exprfmt(n *Node, prec int) string {
 		ODOTINTER,
 		ODOTMETH,
 		OCALLPART:
+		var f string
 		f += exprfmt(n.Left, nprec)
 		if n.Right == nil || n.Right.Sym == nil {
 			f += ".<nil>"
@@ -1481,6 +1537,7 @@ func exprfmt(n *Node, prec int) string {
 
 	case ODOTTYPE,
 		ODOTTYPE2:
+		var f string
 		f += exprfmt(n.Left, nprec)
 		if n.Right != nil {
 			f += fmt.Sprintf(".(%v)", Nconv(n.Right, 0))
@@ -1496,12 +1553,14 @@ func exprfmt(n *Node, prec int) string {
 		OSLICEARR,
 		OSLICE3,
 		OSLICE3ARR:
+		var f string
 		f += exprfmt(n.Left, nprec)
 		f += fmt.Sprintf("[%v]", Nconv(n.Right, 0))
 		return f
 
 	case OCOPY,
 		OCOMPLEX:
+		var f string
 		f += fmt.Sprintf("%v(%v, %v)", Oconv(int(n.Op), obj.FmtSharp), Nconv(n.Left, 0), Nconv(n.Right, 0))
 		return f
 
@@ -1514,13 +1573,16 @@ func exprfmt(n *Node, prec int) string {
 		OSTRARRAYRUNE,
 		ORUNESTR:
 		if n.Type == nil || n.Type.Sym == nil {
+			var f string
 			f += fmt.Sprintf("(%v)(%v)", Tconv(n.Type, 0), Nconv(n.Left, 0))
 			return f
 		}
 		if n.Left != nil {
+			var f string
 			f += fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Nconv(n.Left, 0))
 			return f
 		}
+		var f string
 		f += fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))
 		return f
 
@@ -1538,13 +1600,16 @@ func exprfmt(n *Node, prec int) string {
 		OPRINT,
 		OPRINTN:
 		if n.Left != nil {
+			var f string
 			f += fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), Nconv(n.Left, 0))
 			return f
 		}
 		if n.Isddd != 0 {
+			var f string
 			f += fmt.Sprintf("%v(%v...)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
 			return f
 		}
+		var f string
 		f += fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
 		return f
 
@@ -1552,6 +1617,7 @@ func exprfmt(n *Node, prec int) string {
 		OCALLFUNC,
 		OCALLINTER,
 		OCALLMETH:
+		var f string
 		f += exprfmt(n.Left, nprec)
 		if n.Isddd != 0 {
 			f += fmt.Sprintf("(%v...)", Hconv(n.List, obj.FmtComma))
@@ -1564,17 +1630,21 @@ func exprfmt(n *Node, prec int) string {
 		OMAKECHAN,
 		OMAKESLICE:
 		if n.List != nil { // pre-typecheck
+			var f string
 			f += fmt.Sprintf("make(%v, %v)", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))
 			return f
 		}
 		if n.Right != nil {
+			var f string
 			f += fmt.Sprintf("make(%v, %v, %v)", Tconv(n.Type, 0), Nconv(n.Left, 0), Nconv(n.Right, 0))
 			return f
 		}
 		if n.Left != nil {
+			var f string
 			f += fmt.Sprintf("make(%v, %v)", Tconv(n.Type, 0), Nconv(n.Left, 0))
 			return f
 		}
+		var f string
 		f += fmt.Sprintf("make(%v)", Tconv(n.Type, 0))
 		return f
 
@@ -1586,6 +1656,7 @@ func exprfmt(n *Node, prec int) string {
 		OIND,
 		ONOT,
 		ORECV:
+		var f string
 		if n.Left.Op == n.Op {
 			f += fmt.Sprintf("%v ", Oconv(int(n.Op), obj.FmtSharp))
 		} else {
@@ -1615,6 +1686,7 @@ func exprfmt(n *Node, prec int) string {
 		OSEND,
 		OSUB,
 		OXOR:
+		var f string
 		f += exprfmt(n.Left, nprec)
 
 		f += fmt.Sprintf(" %v ", Oconv(int(n.Op), obj.FmtSharp))
@@ -1622,7 +1694,8 @@ func exprfmt(n *Node, prec int) string {
 		return f
 
 	case OADDSTR:
-		for l = n.List; l != nil; l = l.Next {
+		var f string
+		for l := n.List; l != nil; l = l.Next {
 			if l != n.List {
 				f += fmt.Sprintf(" + ")
 			}
@@ -1633,22 +1706,20 @@ func exprfmt(n *Node, prec int) string {
 
 	case OCMPSTR,
 		OCMPIFACE:
+		var f string
 		f += exprfmt(n.Left, nprec)
 		f += fmt.Sprintf(" %v ", Oconv(int(n.Etype), obj.FmtSharp))
 		f += exprfmt(n.Right, nprec+1)
 		return f
 	}
 
+	var f string
 	f += fmt.Sprintf("<node %v>", Oconv(int(n.Op), 0))
 	return f
 }
 
 func nodefmt(n *Node, flag int) string {
-	var f string
-
-	var t *Type
-
-	t = n.Type
+	t := n.Type
 
 	// we almost always want the original, except in export mode for literals
 	// this saves the importer some work, and avoids us having to redo some
@@ -1659,9 +1730,11 @@ func nodefmt(n *Node, flag int) string {
 
 	if flag&obj.FmtLong != 0 /*untyped*/ && t != nil {
 		if t.Etype == TNIL {
+			var f string
 			f += fmt.Sprintf("nil")
 			return f
 		} else {
+			var f string
 			f += fmt.Sprintf("%v (type %v)", Nconv(n, 0), Tconv(t, 0))
 			return f
 		}
@@ -1673,6 +1746,7 @@ func nodefmt(n *Node, flag int) string {
 		return stmtfmt(n)
 	}
 
+	var f string
 	f += exprfmt(n, 0)
 	return f
 }
@@ -1684,16 +1758,14 @@ func indent(s string) string {
 }
 
 func nodedump(n *Node, flag int) string {
-	var fp string
-
-	var recur bool
-
 	if n == nil {
+		var fp string
 		return fp
 	}
 
-	recur = flag&obj.FmtShort == 0 /*untyped*/
+	recur := flag&obj.FmtShort == 0 /*untyped*/
 
+	var fp string
 	if recur {
 		fp = indent(fp)
 		if dumpdepth > 10 {
@@ -1795,28 +1867,25 @@ func nodedump(n *Node, flag int) string {
 // Fmt "%S": syms
 // Flags:  "%hS" suppresses qualifying with package
 func Sconv(s *Sym, flag int) string {
-	var fp string
-
-	var r int
-	var sm int
-	var sf int
-
 	if flag&obj.FmtLong != 0 /*untyped*/ {
 		panic("linksymfmt")
 	}
 
 	if s == nil {
+		var fp string
 		fp += "<S>"
 		return fp
 	}
 
 	if s.Name == "_" {
+		var fp string
 		fp += "_"
 		return fp
 	}
 
-	sf = flag
-	sm = setfmode(&flag)
+	sf := flag
+	sm := setfmode(&flag)
+	var r int
 	_ = r
 	str := symfmt(s, flag)
 	flag = sf
@@ -1829,25 +1898,21 @@ func Sconv(s *Sym, flag int) string {
 //	  'h' omit 'func' and receiver from function types, short type names
 //	  'u' package name, not prefix (FTypeId mode, sticky)
 func Tconv(t *Type, flag int) string {
-	var fp string
-
-	var r int
-	var sm int
-	var sf int
-
 	if t == nil {
+		var fp string
 		fp += "<T>"
 		return fp
 	}
 
 	if t.Trecur > 4 {
+		var fp string
 		fp += "<...>"
 		return fp
 	}
 
 	t.Trecur++
-	sf = flag
-	sm = setfmode(&flag)
+	sf := flag
+	sm := setfmode(&flag)
 
 	if fmtmode == FTypeId && (sf&obj.FmtUnsigned != 0) {
 		fmtpkgpfx++
@@ -1856,6 +1921,7 @@ func Tconv(t *Type, flag int) string {
 		flag |= obj.FmtUnsigned
 	}
 
+	var r int
 	_ = r
 	str := typefmt(t, flag)
 
@@ -1873,19 +1939,15 @@ func Tconv(t *Type, flag int) string {
 // Flags: 'l' suffix with "(type %T)" where possible
 //	  '+h' in debug mode, don't recurse, no multiline output
 func Nconv(n *Node, flag int) string {
-	var fp string
-
-	var r int
-	var sm int
-	var sf int
-
 	if n == nil {
+		var fp string
 		fp += "<N>"
 		return fp
 	}
-	sf = flag
-	sm = setfmode(&flag)
+	sf := flag
+	sm := setfmode(&flag)
 
+	var r int
 	_ = r
 	var str string
 	switch fmtmode {
@@ -1910,28 +1972,24 @@ func Nconv(n *Node, flag int) string {
 // Fmt '%H': NodeList.
 // Flags: all those of %N plus ',': separate with commas instead of semicolons.
 func Hconv(l *NodeList, flag int) string {
-	var fp string
-
-	var r int
-	var sm int
-	var sf int
-	var sep string
-
 	if l == nil && fmtmode == FDbg {
+		var fp string
 		fp += "<nil>"
 		return fp
 	}
 
-	sf = flag
-	sm = setfmode(&flag)
+	sf := flag
+	sm := setfmode(&flag)
+	var r int
 	_ = r
-	sep = "; "
+	sep := "; "
 	if fmtmode == FDbg {
 		sep = "\n"
 	} else if flag&obj.FmtComma != 0 /*untyped*/ {
 		sep = ", "
 	}
 
+	var fp string
 	for ; l != nil; l = l.Next {
 		fp += fmt.Sprintf("%v", Nconv(l.N, 0))
 		if l.Next != nil {
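
A pattern worth noting across Oconv, Vconv, Econv, symfmt, typefmt and exprfmt above: the converter re-declares the `fp` (or `f`) accumulator inside every branch that returns, because the original C declared a single buffer at the top of the function. Behavior is unchanged; hand-written Go would return the formatted string directly. A small contrast, modeled on Econv:

```go
package main

import "fmt"

// Converter-style: a fresh accumulator in every early-return branch.
func econvGenerated(et int, names []string) string {
	if et >= 0 && et < len(names) && names[et] != "" {
		var fp string
		fp += names[et]
		return fp
	}
	var fp string
	fp += fmt.Sprintf("E-%d", et)
	return fp
}

// Hand-written equivalent: return the value directly.
func econvIdiomatic(et int, names []string) string {
	if et >= 0 && et < len(names) && names[et] != "" {
		return names[et]
	}
	return fmt.Sprintf("E-%d", et)
}

func main() {
	names := []string{"INT8", "", "BOOL"}
	fmt.Println(econvGenerated(2, names), econvIdiomatic(-1, names))
}
```
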
diff --git a/src/cmd/internal/gc/gen.go b/src/cmd/internal/gc/gen.go
index 9d41b6ff20eff350226756870a5a57a67490375c..079158a38497d9d579075ea003ae86195d5cad25 100644
--- a/src/cmd/internal/gc/gen.go
+++ b/src/cmd/internal/gc/gen.go
@@ -18,9 +18,7 @@ var labellist *Label
 var lastlabel *Label
 
 func Sysfunc(name string) *Node {
-	var n *Node
-
-	n = newname(Pkglookup(name, Runtimepkg))
+	n := newname(Pkglookup(name, Runtimepkg))
 	n.Class = PFUNC
 	return n
 }
@@ -31,9 +29,6 @@ func Sysfunc(name string) *Node {
  * as needing to move to the heap.
  */
 func addrescapes(n *Node) {
-	var buf string
-	var oldfn *Node
-
 	switch n.Op {
 	// probably a type error already.
 	// dump("addrescapes", n);
@@ -84,11 +79,11 @@ func addrescapes(n *Node) {
 			n.Xoffset = 0
 
 			// create stack variable to hold pointer to heap
-			oldfn = Curfn
+			oldfn := Curfn
 
 			Curfn = n.Curfn
 			n.Heapaddr = temp(Ptrto(n.Type))
-			buf = fmt.Sprintf("&%v", Sconv(n.Sym, 0))
+			buf := fmt.Sprintf("&%v", Sconv(n.Sym, 0))
 			n.Heapaddr.Sym = Lookup(buf)
 			n.Heapaddr.Orig.Sym = n.Heapaddr.Sym
 			n.Esc = EscHeap
@@ -116,9 +111,7 @@ func addrescapes(n *Node) {
 }
 
 func clearlabels() {
-	var l *Label
-
-	for l = labellist; l != nil; l = l.Link {
+	for l := labellist; l != nil; l = l.Link {
 		l.Sym.Label = nil
 	}
 
@@ -127,11 +120,8 @@ func clearlabels() {
 }
 
 func newlab(n *Node) *Label {
-	var s *Sym
-	var lab *Label
-
-	s = n.Left.Sym
-	lab = s.Label
+	s := n.Left.Sym
+	lab := s.Label
 	if lab == nil {
 		lab = new(Label)
 		if lastlabel == nil {
@@ -158,41 +148,33 @@ func newlab(n *Node) *Label {
 }
 
 func checkgoto(from *Node, to *Node) {
-	var nf int
-	var nt int
-	var block *Sym
-	var dcl *Sym
-	var fs *Sym
-	var ts *Sym
-	var lno int
-
 	if from.Sym == to.Sym {
 		return
 	}
 
-	nf = 0
-	for fs = from.Sym; fs != nil; fs = fs.Link {
+	nf := 0
+	for fs := from.Sym; fs != nil; fs = fs.Link {
 		nf++
 	}
-	nt = 0
-	for fs = to.Sym; fs != nil; fs = fs.Link {
+	nt := 0
+	for fs := to.Sym; fs != nil; fs = fs.Link {
 		nt++
 	}
-	fs = from.Sym
+	fs := from.Sym
 	for ; nf > nt; nf-- {
 		fs = fs.Link
 	}
 	if fs != to.Sym {
-		lno = int(lineno)
+		lno := int(lineno)
 		setlineno(from)
 
 		// decide what to complain about.
 		// prefer to complain about 'into block' over declarations,
 		// so scan backward to find most recent block or else dcl.
-		block = nil
+		block := (*Sym)(nil)
 
-		dcl = nil
-		ts = to.Sym
+		dcl := (*Sym)(nil)
+		ts := to.Sym
 		for ; nt > nf; nt-- {
 			if ts.Pkg == nil {
 				block = ts
@@ -222,10 +204,8 @@ func checkgoto(from *Node, to *Node) {
 }
 
 func stmtlabel(n *Node) *Label {
-	var lab *Label
-
 	if n.Sym != nil {
-		lab = n.Sym.Label
+		lab := n.Sym.Label
 		if lab != nil {
 			if lab.Def != nil {
 				if lab.Def.Defn == n {
@@ -295,8 +275,6 @@ func cgen_dcl(n *Node) {
  * generate discard of value
  */
 func cgen_discard(nr *Node) {
-	var tmp Node
-
 	if nr == nil {
 		return
 	}
@@ -342,6 +320,7 @@ func cgen_discard(nr *Node) {
 
 		// special enough to just evaluate
 	default:
+		var tmp Node
 		Tempname(&tmp, nr.Type)
 
 		Cgen_as(&tmp, nr)
@@ -353,10 +332,7 @@ func cgen_discard(nr *Node) {
  * clearslim generates code to zero a slim node.
  */
 func Clearslim(n *Node) {
-	var z Node
-	var zero Mpflt
-
-	z = Node{}
+	z := Node{}
 	z.Op = OLITERAL
 	z.Type = n.Type
 	z.Addable = 1
@@ -370,6 +346,7 @@ func Clearslim(n *Node) {
 
 	case TFLOAT32,
 		TFLOAT64:
+		var zero Mpflt
 		Mpmovecflt(&zero, 0.0)
 		z.Val.Ctype = CTFLT
 		z.Val.U.Fval = &zero
@@ -410,20 +387,17 @@ func Clearslim(n *Node) {
  * n->right is data
  */
 func Cgen_eface(n *Node, res *Node) {
-	var dst Node
 	/*
 	 * the right node of an eface may contain function calls that use res as an argument,
 	 * so it's important that it is evaluated first
 	 */
 
-	var tmp *Node
-
-	tmp = temp(Types[Tptr])
+	tmp := temp(Types[Tptr])
 	Thearch.Cgen(n.Right, tmp)
 
 	Gvardef(res)
 
-	dst = *res
+	dst := *res
 	dst.Type = Types[Tptr]
 	dst.Xoffset += int64(Widthptr)
 	Thearch.Cgen(tmp, &dst)
@@ -442,23 +416,9 @@ func Cgen_eface(n *Node, res *Node) {
  * called for OSLICE, OSLICE3, OSLICEARR, OSLICE3ARR, OSLICESTR.
  */
 func Cgen_slice(n *Node, res *Node) {
-	var src Node
-	var dst Node
-	var cap *Node
-	var len *Node
-	var offs *Node
-	var add *Node
-	var base *Node
-	var tmpcap *Node
-	var tmplen *Node
-	var cmp *Node
-	var con Node
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-
-	cap = n.List.N
-	len = n.List.Next.N
-	offs = nil
+	cap := n.List.N
+	len := n.List.Next.N
+	offs := (*Node)(nil)
 	if n.List.Next.Next != nil {
 		offs = n.List.Next.Next.N
 	}
@@ -470,15 +430,17 @@ func Cgen_slice(n *Node, res *Node) {
 	// might cause preemption or garbage collection.
 	// this makes the whole slice update atomic as far as the
 	// garbage collector can see.
-	base = temp(Types[TUINTPTR])
+	base := temp(Types[TUINTPTR])
 
-	tmplen = temp(Types[TINT])
+	tmplen := temp(Types[TINT])
+	var tmpcap *Node
 	if n.Op != OSLICESTR {
 		tmpcap = temp(Types[TINT])
 	} else {
 		tmpcap = tmplen
 	}
 
+	var src Node
 	if isnil(n.Left) {
 		Tempname(&src, n.Left.Type)
 		Thearch.Cgen(n.Left, &src)
@@ -519,16 +481,17 @@ func Cgen_slice(n *Node, res *Node) {
 	// In essence we are replacing x[i:j:k] where i == j == k
 	// or x[i:j] where i == j == cap(x) with x[0:0:0].
 	if offs != nil {
-		p1 = gjmp(nil)
-		p2 = gjmp(nil)
+		p1 := gjmp(nil)
+		p2 := gjmp(nil)
 		Patch(p1, Pc)
 
+		var con Node
 		Nodconst(&con, tmpcap.Type, 0)
-		cmp = Nod(OEQ, tmpcap, &con)
+		cmp := Nod(OEQ, tmpcap, &con)
 		typecheck(&cmp, Erv)
 		Thearch.Bgen(cmp, true, -1, p2)
 
-		add = Nod(OADD, base, offs)
+		add := Nod(OADD, base, offs)
 		typecheck(&add, Erv)
 		Thearch.Cgen(add, base)
 
@@ -536,7 +499,7 @@ func Cgen_slice(n *Node, res *Node) {
 	}
 
 	// dst.array = src.array  [ + lo *width ]
-	dst = *res
+	dst := *res
 
 	dst.Xoffset += int64(Array_array)
 	dst.Type = Types[Tptr]
@@ -616,9 +579,6 @@ func Dotoffset(n *Node, oary []int64, nn **Node) int {
  * make a new off the books
  */
 func Tempname(nn *Node, t *Type) {
-	var n *Node
-	var s *Sym
-
 	if Curfn == nil {
 		Fatal("no curfn for tempname")
 	}
@@ -633,8 +593,8 @@ func Tempname(nn *Node, t *Type) {
 	namebuf = fmt.Sprintf("autotmp_%.4d", statuniqgen)
 
 	statuniqgen++
-	s = Lookup(namebuf)
-	n = Nod(ONAME, nil, nil)
+	s := Lookup(namebuf)
+	n := Nod(ONAME, nil, nil)
 	n.Sym = s
 	s.Def = n
 	n.Type = t
@@ -651,26 +611,16 @@ func Tempname(nn *Node, t *Type) {
 }
 
 func temp(t *Type) *Node {
-	var n *Node
-
-	n = Nod(OXXX, nil, nil)
+	n := Nod(OXXX, nil, nil)
 	Tempname(n, t)
 	n.Sym.Def.Used = 1
 	return n.Orig
 }
 
 func gen(n *Node) {
-	var lno int32
-	var scontin *obj.Prog
-	var sbreak *obj.Prog
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var p3 *obj.Prog
-	var lab *Label
-
 	//dump("gen", n);
 
-	lno = setlineno(n)
+	lno := setlineno(n)
 
 	wasregalloc := Thearch.Anyregalloc()
 
@@ -708,10 +658,11 @@ func gen(n *Node) {
 			break
 		}
 
-		lab = newlab(n)
+		lab := newlab(n)
 
 		// if there are pending gotos, resolve them all to the current pc.
-		for p1 = lab.Gotopc; p1 != nil; p1 = p2 {
+		var p2 *obj.Prog
+		for p1 := lab.Gotopc; p1 != nil; p1 = p2 {
 			p2 = unpatch(p1)
 			Patch(p1, Pc)
 		}
@@ -739,7 +690,7 @@ func gen(n *Node) {
 	// to the same label.  we'll unwind it when we learn the pc
 	// of the label in the OLABEL case above.)
 	case OGOTO:
-		lab = newlab(n)
+		lab := newlab(n)
 
 		if lab.Labelpc != nil {
 			gjmp(lab.Labelpc)
@@ -749,7 +700,7 @@ func gen(n *Node) {
 
 	case OBREAK:
 		if n.Left != nil {
-			lab = n.Left.Sym.Label
+			lab := n.Left.Sym.Label
 			if lab == nil {
 				Yyerror("break label not defined: %v", Sconv(n.Left.Sym, 0))
 				break
@@ -774,7 +725,7 @@ func gen(n *Node) {
 
 	case OCONTINUE:
 		if n.Left != nil {
-			lab = n.Left.Sym.Label
+			lab := n.Left.Sym.Label
 			if lab == nil {
 				Yyerror("continue label not defined: %v", Sconv(n.Left.Sym, 0))
 				break
@@ -798,14 +749,14 @@ func gen(n *Node) {
 		gjmp(continpc)
 
 	case OFOR:
-		sbreak = breakpc
-		p1 = gjmp(nil)      //		goto test
+		sbreak := breakpc
+		p1 := gjmp(nil)     //		goto test
 		breakpc = gjmp(nil) // break:	goto done
-		scontin = continpc
+		scontin := continpc
 		continpc = Pc
 
 		// define break and continue labels
-		lab = stmtlabel(n)
+		lab := stmtlabel(n)
 		if lab != nil {
 			lab.Breakpc = breakpc
 			lab.Continpc = continpc
@@ -825,23 +776,23 @@ func gen(n *Node) {
 		}
 
 	case OIF:
-		p1 = gjmp(nil)                                   //		goto test
-		p2 = gjmp(nil)                                   // p2:		goto else
+		p1 := gjmp(nil)                                  //		goto test
+		p2 := gjmp(nil)                                  // p2:		goto else
 		Patch(p1, Pc)                                    // test:
 		Thearch.Bgen(n.Ntest, false, int(-n.Likely), p2) //		if(!test) goto p2
 		Genlist(n.Nbody)                                 //		then
-		p3 = gjmp(nil)                                   //		goto done
+		p3 := gjmp(nil)                                  //		goto done
 		Patch(p2, Pc)                                    // else:
 		Genlist(n.Nelse)                                 //		else
 		Patch(p3, Pc)                                    // done:
 
 	case OSWITCH:
-		sbreak = breakpc
-		p1 = gjmp(nil)      //		goto test
+		sbreak := breakpc
+		p1 := gjmp(nil)     //		goto test
 		breakpc = gjmp(nil) // break:	goto done
 
 		// define break label
-		lab = stmtlabel(n)
+		lab := stmtlabel(n)
 		if lab != nil {
 			lab.Breakpc = breakpc
 		}
@@ -855,12 +806,12 @@ func gen(n *Node) {
 		}
 
 	case OSELECT:
-		sbreak = breakpc
-		p1 = gjmp(nil)      //		goto test
+		sbreak := breakpc
+		p1 := gjmp(nil)     //		goto test
 		breakpc = gjmp(nil) // break:	goto done
 
 		// define break label
-		lab = stmtlabel(n)
+		lab := stmtlabel(n)
 		if lab != nil {
 			lab.Breakpc = breakpc
 		}
@@ -918,8 +869,6 @@ ret:
 }
 
 func Cgen_as(nl *Node, nr *Node) {
-	var tl *Type
-
 	if Debug['g'] != 0 {
 		Dump("cgen_as", nl)
 		Dump("cgen_as = ", nr)
@@ -940,7 +889,7 @@ func Cgen_as(nl *Node, nr *Node) {
 			return
 		}
 
-		tl = nl.Type
+		tl := nl.Type
 		if tl == nil {
 			return
 		}
@@ -956,7 +905,7 @@ func Cgen_as(nl *Node, nr *Node) {
 		return
 	}
 
-	tl = nl.Type
+	tl := nl.Type
 	if tl == nil {
 		return
 	}
@@ -965,19 +914,16 @@ func Cgen_as(nl *Node, nr *Node) {
 }
 
 func Cgen_callmeth(n *Node, proc int) {
-	var n2 Node
-	var l *Node
-
 	// generate a rewrite in n2 for the method call
 	// (p.f)(...) goes to (f)(p,...)
 
-	l = n.Left
+	l := n.Left
 
 	if l.Op != ODOTMETH {
 		Fatal("cgen_callmeth: not dotmethod: %v")
 	}
 
-	n2 = *n
+	n2 := *n
 	n2.Op = OCALLFUNC
 	n2.Left = l.Right
 	n2.Left.Type = l.Type
@@ -989,10 +935,9 @@ func Cgen_callmeth(n *Node, proc int) {
 }
 
 func checklabels() {
-	var lab *Label
 	var l *NodeList
 
-	for lab = labellist; lab != nil; lab = lab.Link {
+	for lab := labellist; lab != nil; lab = lab.Link {
 		if lab.Def == nil {
 			for l = lab.Use; l != nil; l = l.Next {
 				yyerrorl(int(l.N.Lineno), "label %v not defined", Sconv(lab.Sym, 0))
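
The OGOTO/OLABEL cases above resolve forward gotos by backpatching: a goto to a not-yet-seen label is threaded onto the label's Gotopc chain through its own branch field, and once the label's pc is known the chain is unwound with unpatch and each entry patched. A minimal sketch of that mechanism, with hypothetical `prog`/`patch`/`unpatch` stand-ins for the obj types:

```go
package main

import "fmt"

// The branch field serves double duty: a real branch target once
// patched, and the "next pending goto" link while unresolved.
type prog struct {
	pc     int
	branch *prog
}

func patch(p, to *prog)      { p.branch = to }
func unpatch(p *prog) *prog { q := p.branch; p.branch = nil; return q }

func main() {
	label := &prog{pc: 42}

	// Three forward gotos, threaded into a pending chain.
	var pending *prog
	for i := 0; i < 3; i++ {
		g := &prog{pc: i}
		g.branch = pending // remember the previous pending goto
		pending = g
	}

	// Label's pc becomes known: unwind the chain, as in the OLABEL case.
	var next *prog
	for p := pending; p != nil; p = next {
		next = unpatch(p)
		patch(p, label)
		fmt.Printf("goto at pc=%d now targets pc=%d\n", p.pc, p.branch.pc)
	}
}
```
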
diff --git a/src/cmd/internal/gc/gsubr.go b/src/cmd/internal/gc/gsubr.go
index 6fd6057fee68d5dfe3ccd831af2ade3da4c178a5..ad5e494f4fc13c2896ff55f927cc427fa46d0412 100644
--- a/src/cmd/internal/gc/gsubr.go
+++ b/src/cmd/internal/gc/gsubr.go
@@ -80,9 +80,7 @@ func Samereg(a *Node, b *Node) bool {
  * gsubr.c
  */
 func Gbranch(as int, t *Type, likely int) *obj.Prog {
-	var p *obj.Prog
-
-	p = Prog(as)
+	p := Prog(as)
 	p.To.Type = obj.TYPE_BRANCH
 	p.To.U.Branch = nil
 	if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' {
@@ -170,9 +168,7 @@ func dumpdata() {
 }
 
 func fixautoused(p *obj.Prog) {
-	var lp **obj.Prog
-
-	for lp = &p; ; {
+	for lp := &p; ; {
 		p = *lp
 		if p == nil {
 			break
@@ -205,9 +201,7 @@ func fixautoused(p *obj.Prog) {
 }
 
 func ggloblnod(nam *Node) {
-	var p *obj.Prog
-
-	p = Thearch.Gins(obj.AGLOBL, nam, nil)
+	p := Thearch.Gins(obj.AGLOBL, nam, nil)
 	p.Lineno = nam.Lineno
 	p.From.Sym.Gotype = Linksym(ngotype(nam))
 	p.To.Sym = nil
@@ -222,9 +216,7 @@ func ggloblnod(nam *Node) {
 }
 
 func ggloblsym(s *Sym, width int32, flags int8) {
-	var p *obj.Prog
-
-	p = Thearch.Gins(obj.AGLOBL, nil, nil)
+	p := Thearch.Gins(obj.AGLOBL, nil, nil)
 	p.From.Type = obj.TYPE_MEM
 	p.From.Name = obj.NAME_EXTERN
 	p.From.Sym = Linksym(s)
@@ -234,9 +226,7 @@ func ggloblsym(s *Sym, width int32, flags int8) {
 }
 
 func gjmp(to *obj.Prog) *obj.Prog {
-	var p *obj.Prog
-
-	p = Gbranch(obj.AJMP, nil, 0)
+	p := Gbranch(obj.AJMP, nil, 0)
 	if to != nil {
 		Patch(p, to)
 	}
@@ -244,9 +234,7 @@ func gjmp(to *obj.Prog) *obj.Prog {
 }
 
 func gtrack(s *Sym) {
-	var p *obj.Prog
-
-	p = Thearch.Gins(obj.AUSEFIELD, nil, nil)
+	p := Thearch.Gins(obj.AUSEFIELD, nil, nil)
 	p.From.Type = obj.TYPE_MEM
 	p.From.Name = obj.NAME_EXTERN
 	p.From.Sym = Linksym(s)
@@ -287,8 +275,6 @@ func markautoused(p *obj.Prog) {
 }
 
 func Naddr(n *Node, a *obj.Addr, canemitcode int) {
-	var s *Sym
-
 	*a = obj.Addr{}
 	if n == nil {
 		return
@@ -361,7 +347,7 @@ func Naddr(n *Node, a *obj.Addr, canemitcode int) {
 			a.Etype = Simtype[n.Type.Etype]
 		}
 		a.Offset = n.Xoffset
-		s = n.Sym
+		s := n.Sym
 		a.Node = n.Orig
 
 		//if(a->node >= (Node*)&n)
@@ -502,9 +488,7 @@ func Naddr(n *Node, a *obj.Addr, canemitcode int) {
 }
 
 func newplist() *obj.Plist {
-	var pl *obj.Plist
-
-	pl = obj.Linknewplist(Ctxt)
+	pl := obj.Linknewplist(Ctxt)
 
 	Pc = Ctxt.NewProg()
 	Clearp(Pc)
@@ -515,16 +499,14 @@ func newplist() *obj.Plist {
 
 func nodarg(t *Type, fp int) *Node {
 	var n *Node
-	var l *NodeList
-	var first *Type
-	var savet Iter
 
 	// entire argument struct, not just one arg
 	if t.Etype == TSTRUCT && t.Funarg != 0 {
 		n = Nod(ONAME, nil, nil)
 		n.Sym = Lookup(".args")
 		n.Type = t
-		first = Structfirst(&savet, &t)
+		var savet Iter
+		first := Structfirst(&savet, &t)
 		if first == nil {
 			Fatal("nodarg: bad struct")
 		}
@@ -541,7 +523,8 @@ func nodarg(t *Type, fp int) *Node {
 	}
 
 	if fp == 1 {
-		for l = Curfn.Dcl; l != nil; l = l.Next {
+		var n *Node
+		for l := Curfn.Dcl; l != nil; l = l.Next {
 			n = l.N
 			if (n.Class == PPARAM || n.Class == PPARAMOUT) && !isblanksym(t.Sym) && n.Sym == t.Sym {
 				return n
@@ -604,12 +587,10 @@ func Patch(p *obj.Prog, to *obj.Prog) {
 }
 
 func unpatch(p *obj.Prog) *obj.Prog {
-	var q *obj.Prog
-
 	if p.To.Type != obj.TYPE_BRANCH {
 		Fatal("unpatch: not a branch")
 	}
-	q = p.To.U.Branch
+	q := p.To.U.Branch
 	p.To.U.Branch = nil
 	p.To.Offset = 0
 	return q
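
fixautoused above edits the instruction list in place with a pointer-to-pointer cursor (`for lp := &p; ...`): lp always addresses the link that reaches the current node, so removing a node is a single assignment and the list head needs no special case. A self-contained sketch of the same walk over a hypothetical instruction list:

```go
package main

import "fmt"

type instr struct {
	op   string
	next *instr
}

// In-place deletion from a singly linked list via a pointer-to-pointer,
// the same shape as fixautoused's walk.
func removeUnused(head **instr, unused map[string]bool) {
	for lp := head; *lp != nil; {
		if unused[(*lp).op] {
			*lp = (*lp).next // splice the node out
			continue
		}
		lp = &(*lp).next
	}
}

func main() {
	p := &instr{"VARDEF", &instr{"MOV", &instr{"VARKILL", &instr{"RET", nil}}}}
	removeUnused(&p, map[string]bool{"VARDEF": true, "VARKILL": true})
	for ; p != nil; p = p.next {
		fmt.Println(p.op) // MOV, RET
	}
}
```
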
diff --git a/src/cmd/internal/gc/init.go b/src/cmd/internal/gc/init.go
index 9202ac5c758f6c255456157371b593bcbadca8af..a7d4fbd5f4f5d7fd04953bec2b8e982fdd3a608f 100644
--- a/src/cmd/internal/gc/init.go
+++ b/src/cmd/internal/gc/init.go
@@ -54,12 +54,8 @@ func renameinit() *Sym {
  *	}
  */
 func anyinit(n *NodeList) bool {
-	var h uint32
-	var s *Sym
-	var l *NodeList
-
 	// are there any interesting init statements
-	for l = n; l != nil; l = l.Next {
+	for l := n; l != nil; l = l.Next {
 		switch l.N.Op {
 		case ODCLFUNC,
 			ODCLCONST,
@@ -85,14 +81,14 @@ func anyinit(n *NodeList) bool {
 	}
 
 	// is there an explicit init function
-	s = Lookup("init.1")
+	s := Lookup("init.1")
 
 	if s.Def != nil {
 		return true
 	}
 
 	// are there any imported init functions
-	for h = 0; h < NHASH; h++ {
+	for h := uint32(0); h < NHASH; h++ {
 		for s = hash[h]; s != nil; s = s.Link {
 			if s.Name[0] != 'i' || s.Name != "init" {
 				continue
@@ -109,16 +105,6 @@ func anyinit(n *NodeList) bool {
 }
 
 func fninit(n *NodeList) {
-	var i int
-	var gatevar *Node
-	var a *Node
-	var b *Node
-	var fn *Node
-	var r *NodeList
-	var h uint32
-	var s *Sym
-	var initsym *Sym
-
 	if Debug['A'] != 0 {
 		// sys.go or unsafe.go during compiler build
 		return
@@ -129,12 +115,12 @@ func fninit(n *NodeList) {
 		return
 	}
 
-	r = nil
+	r := (*NodeList)(nil)
 
 	// (1)
 	namebuf = fmt.Sprintf("initdone·")
 
-	gatevar = newname(Lookup(namebuf))
+	gatevar := newname(Lookup(namebuf))
 	addvar(gatevar, Types[TUINT8], PEXTERN)
 
 	// (2)
@@ -142,8 +128,8 @@ func fninit(n *NodeList) {
 
 	namebuf = fmt.Sprintf("init")
 
-	fn = Nod(ODCLFUNC, nil, nil)
-	initsym = Lookup(namebuf)
+	fn := Nod(ODCLFUNC, nil, nil)
+	initsym := Lookup(namebuf)
 	fn.Nname = newname(initsym)
 	fn.Nname.Defn = fn
 	fn.Nname.Ntype = Nod(OTFUNC, nil, nil)
@@ -151,13 +137,13 @@ func fninit(n *NodeList) {
 	funchdr(fn)
 
 	// (3)
-	a = Nod(OIF, nil, nil)
+	a := Nod(OIF, nil, nil)
 
 	a.Ntest = Nod(ONE, gatevar, Nodintconst(0))
 	r = list(r, a)
 
 	// (4)
-	b = Nod(OIF, nil, nil)
+	b := Nod(OIF, nil, nil)
 
 	b.Ntest = Nod(OEQ, gatevar, Nodintconst(2))
 	b.Nbody = list1(Nod(ORETURN, nil, nil))
@@ -175,7 +161,8 @@ func fninit(n *NodeList) {
 	r = list(r, a)
 
 	// (7)
-	for h = 0; h < NHASH; h++ {
+	var s *Sym
+	for h := uint32(0); h < NHASH; h++ {
 		for s = hash[h]; s != nil; s = s.Link {
 			if s.Name[0] != 'i' || s.Name != "init" {
 				continue
@@ -199,7 +186,7 @@ func fninit(n *NodeList) {
 
 	// (9)
 	// could check that it is fn of no args/returns
-	for i = 1; ; i++ {
+	for i := 1; ; i++ {
 		namebuf = fmt.Sprintf("init.%d", i)
 		s = Lookup(namebuf)
 		if s.Def == nil {
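
fninit assembles, as an AST, a guarded init function: the package-level gate `initdone·` distinguishes not-started, in-progress, and finished states, so a second call returns immediately and a re-entrant call during initialization is a detectable cycle. Roughly equivalent plain Go, assuming the usual 0/1/2 gate values (the exact assignments between steps (3) and (9) are not all visible in this hunk):

```go
package main

import "fmt"

// 0 = not started, 1 = running, 2 = finished.
var initdone uint8

func doInit() {
	if initdone != 0 {
		if initdone == 2 {
			return // already initialized
		}
		panic("recursive call during initialization")
	}
	initdone = 1
	// ... imported packages' init functions, then init.1, init.2, ...
	fmt.Println("running package initializers")
	initdone = 2
}

func main() {
	doInit()
	doInit() // second call returns immediately
}
```
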
diff --git a/src/cmd/internal/gc/inl.go b/src/cmd/internal/gc/inl.go
index 73d6481ab976e56a0b4d91b213e8005b4ff8543b..8b088a7f7f45ebe123010465f1738ac2fe094770 100644
--- a/src/cmd/internal/gc/inl.go
+++ b/src/cmd/internal/gc/inl.go
@@ -47,11 +47,9 @@ var inlretvars *NodeList // temp out variables
 // Get the function's package.  For ordinary functions it's on the ->sym, but for imported methods
 // the ->sym can be re-used in the local package, so peel it off the receiver's type.
 func fnpkg(fn *Node) *Pkg {
-	var rcvr *Type
-
 	if fn.Type.Thistuple != 0 {
 		// method
-		rcvr = getthisx(fn.Type).Type.Type
+		rcvr := getthisx(fn.Type).Type.Type
 
 		if Isptr[rcvr.Etype] != 0 {
 			rcvr = rcvr.Type
@@ -69,18 +67,13 @@ func fnpkg(fn *Node) *Pkg {
 // Lazy typechecking of imported bodies.  For local functions, caninl will set ->typecheck
 // because they're a copy of an already checked body.
 func typecheckinl(fn *Node) {
-	var savefn *Node
-	var pkg *Pkg
-	var save_safemode int
-	var lno int
-
-	lno = int(setlineno(fn))
+	lno := int(setlineno(fn))
 
 	// typecheckinl is only for imported functions;
 	// their bodies may refer to unsafe as long as the package
 	// was marked safe during import (which was checked then).
 	// the ->inl of a local function has been typechecked before caninl copied it.
-	pkg = fnpkg(fn)
+	pkg := fnpkg(fn)
 
 	if pkg == localpkg || pkg == nil {
 		return // typecheckinl on local function
@@ -90,10 +83,10 @@ func typecheckinl(fn *Node) {
 		fmt.Printf("typecheck import [%v] %v { %v }\n", Sconv(fn.Sym, 0), Nconv(fn, obj.FmtLong), Hconv(fn.Inl, obj.FmtSharp))
 	}
 
-	save_safemode = safemode
+	save_safemode := safemode
 	safemode = 0
 
-	savefn = Curfn
+	savefn := Curfn
 	Curfn = fn
 	typechecklist(fn.Inl, Etop)
 	Curfn = savefn
@@ -107,10 +100,6 @@ func typecheckinl(fn *Node) {
 // If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
 // fn and ->nbody will already have been typechecked.
 func caninl(fn *Node) {
-	var savefn *Node
-	var t *Type
-	var budget int
-
 	if fn.Op != ODCLFUNC {
 		Fatal("caninl %v", Nconv(fn, 0))
 	}
@@ -129,19 +118,19 @@ func caninl(fn *Node) {
 
 	// can't handle ... args yet
 	if Debug['l'] < 3 {
-		for t = fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
+		for t := fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
 			if t.Isddd != 0 {
 				return
 			}
 		}
 	}
 
-	budget = 40 // allowed hairyness
+	budget := 40 // allowed hairiness
 	if ishairylist(fn.Nbody, &budget) {
 		return
 	}
 
-	savefn = Curfn
+	savefn := Curfn
 	Curfn = fn
 
 	fn.Nname.Inl = fn.Nbody
@@ -211,9 +200,7 @@ func ishairy(n *Node, budget *int) bool {
 // Any name-like node of non-local class is marked for re-export by adding it to
 // the exportlist.
 func inlcopylist(ll *NodeList) *NodeList {
-	var l *NodeList
-
-	l = nil
+	l := (*NodeList)(nil)
 	for ; ll != nil; ll = ll.Next {
 		l = list(l, inlcopy(ll.N))
 	}
@@ -221,8 +208,6 @@ func inlcopylist(ll *NodeList) *NodeList {
 }
 
 func inlcopy(n *Node) *Node {
-	var m *Node
-
 	if n == nil {
 		return nil
 	}
@@ -234,7 +219,7 @@ func inlcopy(n *Node) *Node {
 		return n
 	}
 
-	m = Nod(OXXX, nil, nil)
+	m := Nod(OXXX, nil, nil)
 	*m = *n
 	m.Inl = nil
 	m.Left = inlcopy(n.Left)
@@ -253,9 +238,7 @@ func inlcopy(n *Node) *Node {
 // Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
 // calls made to inlineable functions.  This is the external entry point.
 func inlcalls(fn *Node) {
-	var savefn *Node
-
-	savefn = Curfn
+	savefn := Curfn
 	Curfn = fn
 	inlnode(&fn)
 	if fn != Curfn {
@@ -277,10 +260,8 @@ func inlconv2stmt(n *Node) {
 
 // Turn an OINLCALL into a single valued expression.
 func inlconv2expr(np **Node) {
-	var n *Node
-	var r *Node
-	n = *np
-	r = n.Rlist.N
+	n := *np
+	r := n.Rlist.N
 	addinit(&r, concat(n.Ninit, n.Nbody))
 	*np = r
 }
@@ -291,13 +272,11 @@ func inlconv2expr(np **Node) {
 // order will be preserved. Used in return, oas2func and call
 // statements.
 func inlconv2list(n *Node) *NodeList {
-	var l *NodeList
-
 	if n.Op != OINLCALL || n.Rlist == nil {
 		Fatal("inlconv2list %v\n", Nconv(n, obj.FmtSign))
 	}
 
-	l = n.Rlist
+	l := n.Rlist
 	addinit(&l.N, concat(n.Ninit, n.Nbody))
 	return l
 }
@@ -320,15 +299,11 @@ func inlnodelist(l *NodeList) {
 // but then you may as well do it here.  so this is cleaner and
 // shorter and less complicated.
 func inlnode(np **Node) {
-	var n *Node
-	var l *NodeList
-	var lno int
-
 	if *np == nil {
 		return
 	}
 
-	n = *np
+	n := *np
 
 	switch n.Op {
 	// inhibit inlining of their argument
@@ -347,10 +322,10 @@ func inlnode(np **Node) {
 		return
 	}
 
-	lno = int(setlineno(n))
+	lno := int(setlineno(n))
 
 	inlnodelist(n.Ninit)
-	for l = n.Ninit; l != nil; l = l.Next {
+	for l := n.Ninit; l != nil; l = l.Next {
 		if l.N.Op == OINLCALL {
 			inlconv2stmt(l.N)
 		}
@@ -369,7 +344,7 @@ func inlnode(np **Node) {
 	inlnodelist(n.List)
 	switch n.Op {
 	case OBLOCK:
-		for l = n.List; l != nil; l = l.Next {
+		for l := n.List; l != nil; l = l.Next {
 			if l.N.Op == OINLCALL {
 				inlconv2stmt(l.N)
 			}
@@ -391,7 +366,7 @@ func inlnode(np **Node) {
 
 		// fallthrough
 	default:
-		for l = n.List; l != nil; l = l.Next {
+		for l := n.List; l != nil; l = l.Next {
 			if l.N.Op == OINLCALL {
 				inlconv2expr(&l.N)
 			}
@@ -412,7 +387,7 @@ func inlnode(np **Node) {
 
 		// fallthrough
 	default:
-		for l = n.Rlist; l != nil; l = l.Next {
+		for l := n.Rlist; l != nil; l = l.Next {
 			if l.N.Op == OINLCALL {
 				inlconv2expr(&l.N)
 			}
@@ -430,14 +405,14 @@ func inlnode(np **Node) {
 	}
 
 	inlnodelist(n.Nbody)
-	for l = n.Nbody; l != nil; l = l.Next {
+	for l := n.Nbody; l != nil; l = l.Next {
 		if l.N.Op == OINLCALL {
 			inlconv2stmt(l.N)
 		}
 	}
 
 	inlnodelist(n.Nelse)
-	for l = n.Nelse; l != nil; l = l.Next {
+	for l := n.Nelse; l != nil; l = l.Next {
 		if l.N.Op == OINLCALL {
 			inlconv2stmt(l.N)
 		}
@@ -488,14 +463,11 @@ func inlnode(np **Node) {
 }
 
 func mkinlcall(np **Node, fn *Node, isddd int) {
-	var save_safemode int
-	var pkg *Pkg
-
-	save_safemode = safemode
+	save_safemode := safemode
 
 	// imported functions may refer to unsafe as long as the
 	// package was marked safe during import (already checked).
-	pkg = fnpkg(fn)
+	pkg := fnpkg(fn)
 
 	if pkg != localpkg && pkg != nil {
 		safemode = 0
@@ -523,26 +495,6 @@ var inlgen int
 // inlined function body; list and rlist contain the input and output
 // parameters.
 func mkinlcall1(np **Node, fn *Node, isddd int) {
-	var i int
-	var chkargcount bool
-	var n *Node
-	var call *Node
-	var saveinlfn *Node
-	var as *Node
-	var m *Node
-	var dcl *NodeList
-	var ll *NodeList
-	var ninit *NodeList
-	var body *NodeList
-	var t *Type
-	var variadic bool
-	var varargcount int
-	var multiret int
-	var vararg *Node
-	var varargs *NodeList
-	var varargtype *Type
-	var vararrtype *Type
-
 	// For variadic fn.
 	if fn.Inl == nil {
 		return
@@ -556,7 +508,7 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 		typecheckinl(fn)
 	}
 
-	n = *np
+	n := *np
 
 	// Bingo, we have a function node, and it has an inlineable body
 	if Debug['m'] > 1 {
@@ -569,13 +521,14 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 		fmt.Printf("%v: Before inlining: %v\n", n.Line(), Nconv(n, obj.FmtSign))
 	}
 
-	saveinlfn = inlfn
+	saveinlfn := inlfn
 	inlfn = fn
 
-	ninit = n.Ninit
+	ninit := n.Ninit
 
 	//dumplist("ninit pre", ninit);
 
+	var dcl *NodeList
 	if fn.Defn != nil { // local function
 		dcl = fn.Inldcl // imported function
 	} else {
@@ -583,10 +536,10 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 	}
 
 	inlretvars = nil
-	i = 0
+	i := 0
 
 	// Make temp names to use instead of the originals
-	for ll = dcl; ll != nil; ll = ll.Next {
+	for ll := dcl; ll != nil; ll = ll.Next {
 		if ll.N.Class == PPARAMOUT { // return values handled below.
 			continue
 		}
@@ -603,7 +556,8 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 	}
 
 	// temporaries for return values.
-	for t = getoutargx(fn.Type).Type; t != nil; t = t.Down {
+	var m *Node
+	for t := getoutargx(fn.Type).Type; t != nil; t = t.Down {
 		if t != nil && t.Nname != nil && !isblank(t.Nname) {
 			m = inlvar(t.Nname)
 			typecheck(&m, Erv)
@@ -619,9 +573,10 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 	}
 
 	// assign receiver.
+	var as *Node
 	if fn.Type.Thistuple != 0 && n.Left.Op == ODOTMETH {
 		// method call with a receiver.
-		t = getthisx(fn.Type).Type
+		t := getthisx(fn.Type).Type
 
 		if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Inlvar == nil {
 			Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
@@ -640,11 +595,11 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 	}
 
 	// check if inlined function is variadic.
-	variadic = false
+	variadic := false
 
-	varargtype = nil
-	varargcount = 0
-	for t = fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
+	varargtype := (*Type)(nil)
+	varargcount := 0
+	for t := fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
 		if t.Isddd != 0 {
 			variadic = true
 			varargtype = t.Type
@@ -657,7 +612,7 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 	}
 
 	// check if argument is actually a returned tuple from call.
-	multiret = 0
+	multiret := 0
 
 	if n.List != nil && n.List.Next == nil {
 		switch n.List.N.Op {
@@ -683,7 +638,7 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 	as = Nod(OAS2, nil, nil)
 
 	as.Rlist = n.List
-	ll = n.List
+	ll := n.List
 
 	// TODO: if len(nlist) == 1 but multiple args, check that n->list->n is a call?
 	if fn.Type.Thistuple != 0 && n.Left.Op != ODOTMETH {
@@ -693,7 +648,7 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 		}
 
 		// append receiver inlvar to LHS.
-		t = getthisx(fn.Type).Type
+		t := getthisx(fn.Type).Type
 
 		if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Inlvar == nil {
 			Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
@@ -706,13 +661,14 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 	}
 
 	// append ordinary arguments to LHS.
-	chkargcount = n.List != nil && n.List.Next != nil
+	chkargcount := n.List != nil && n.List.Next != nil
 
-	vararg = nil  // the slice argument to a variadic call
-	varargs = nil // the list of LHS names to put in vararg.
+	vararg := (*Node)(nil)      // the slice argument to a variadic call
+	varargs := (*NodeList)(nil) // the list of LHS names to put in vararg.
 	if !chkargcount {
 		// 0 or 1 expression on RHS.
-		for t = getinargx(fn.Type).Type; t != nil; t = t.Down {
+		var i int
+		for t := getinargx(fn.Type).Type; t != nil; t = t.Down {
 			if variadic && t.Isddd != 0 {
 				vararg = tinlvar(t)
 				for i = 0; i < varargcount && ll != nil; i++ {
@@ -728,6 +684,7 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 		}
 	} else {
 		// match arguments except final variadic (unless the call is dotted itself)
+		var t *Type
 		for t = getinargx(fn.Type).Type; t != nil; {
 			if ll == nil {
 				break
@@ -743,6 +700,7 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 		// match varargcount arguments with variadic parameters.
 		if variadic && t != nil && t.Isddd != 0 {
 			vararg = tinlvar(t)
+			var i int
 			for i = 0; i < varargcount && ll != nil; i++ {
 				m = argvar(varargtype, i)
 				varargs = list(varargs, m)
@@ -772,7 +730,7 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 			as.Right = nodnil()
 			as.Right.Type = varargtype
 		} else {
-			vararrtype = typ(TARRAY)
+			vararrtype := typ(TARRAY)
 			vararrtype.Type = varargtype.Type
 			vararrtype.Bound = int64(varargcount)
 
@@ -786,7 +744,7 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 	}
 
 	// zero the outparams
-	for ll = inlretvars; ll != nil; ll = ll.Next {
+	for ll := inlretvars; ll != nil; ll = ll.Next {
 		as = Nod(OAS, ll.N, nil)
 		typecheck(&as, Etop)
 		ninit = list(ninit, as)
@@ -794,7 +752,7 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 
 	inlretlabel = newlabel_inl()
 	inlgen++
-	body = inlsubstlist(fn.Inl)
+	body := inlsubstlist(fn.Inl)
 
 	body = list(body, Nod(OGOTO, inlretlabel, nil)) // avoid 'not used' when function doesn't have return
 	body = list(body, Nod(OLABEL, inlretlabel, nil))
@@ -803,7 +761,7 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 
 	//dumplist("ninit post", ninit);
 
-	call = Nod(OINLCALL, nil, nil)
+	call := Nod(OINLCALL, nil, nil)
 
 	call.Ninit = ninit
 	call.Nbody = body
@@ -824,10 +782,10 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 	// either supporting exporting statements with complex ninits
 	// or saving inl and making inlinl
 	if Debug['l'] >= 5 {
-		body = fn.Inl
+		body := fn.Inl
 		fn.Inl = nil // prevent infinite recursion
 		inlnodelist(call.Nbody)
-		for ll = call.Nbody; ll != nil; ll = ll.Next {
+		for ll := call.Nbody; ll != nil; ll = ll.Next {
 			if ll.N.Op == OINLCALL {
 				inlconv2stmt(ll.N)
 			}
@@ -844,13 +802,11 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
 // PAUTO's in the calling functions, and link them off of the
 // PPARAM's, PAUTOS and PPARAMOUTs of the called function.
 func inlvar(var_ *Node) *Node {
-	var n *Node
-
 	if Debug['m'] > 3 {
 		fmt.Printf("inlvar %v\n", Nconv(var_, obj.FmtSign))
 	}
 
-	n = newname(var_.Sym)
+	n := newname(var_.Sym)
 	n.Type = var_.Type
 	n.Class = PAUTO
 	n.Used = 1
@@ -872,10 +828,8 @@ func inlvar(var_ *Node) *Node {
 
 // Synthesize a variable to store the inlined function's results in.
 func retvar(t *Type, i int) *Node {
-	var n *Node
-
 	namebuf = fmt.Sprintf("~r%d", i)
-	n = newname(Lookup(namebuf))
+	n := newname(Lookup(namebuf))
 	n.Type = t.Type
 	n.Class = PAUTO
 	n.Used = 1
@@ -887,10 +841,8 @@ func retvar(t *Type, i int) *Node {
 // Synthesize a variable to store the inlined function's arguments
 // when they come from a multiple return call.
 func argvar(t *Type, i int) *Node {
-	var n *Node
-
 	namebuf = fmt.Sprintf("~arg%d", i)
-	n = newname(Lookup(namebuf))
+	n := newname(Lookup(namebuf))
 	n.Type = t.Type
 	n.Class = PAUTO
 	n.Used = 1
@@ -902,11 +854,9 @@ func argvar(t *Type, i int) *Node {
 var newlabel_inl_label int
 
 func newlabel_inl() *Node {
-	var n *Node
-
 	newlabel_inl_label++
 	namebuf = fmt.Sprintf(".inlret%.6d", newlabel_inl_label)
-	n = newname(Lookup(namebuf))
+	n := newname(Lookup(namebuf))
 	n.Etype = 1 // flag 'safe' for escape analysis (no backjumps)
 	return n
 }
@@ -916,9 +866,7 @@ func newlabel_inl() *Node {
 // to input/output parameters with ones to the tmpnames, and
 // substituting returns with assignments to the output.
 func inlsubstlist(ll *NodeList) *NodeList {
-	var l *NodeList
-
-	l = nil
+	l := (*NodeList)(nil)
 	for ; ll != nil; ll = ll.Next {
 		l = list(l, inlsubst(ll.N))
 	}
@@ -926,11 +874,6 @@ func inlsubstlist(ll *NodeList) *NodeList {
 }
 
 func inlsubst(n *Node) *Node {
-	var p string
-	var m *Node
-	var as *Node
-	var ll *NodeList
-
 	if n == nil {
 		return nil
 	}
@@ -957,15 +900,15 @@ func inlsubst(n *Node) *Node {
 
 	//		dump("Return before substitution", n);
 	case ORETURN:
-		m = Nod(OGOTO, inlretlabel, nil)
+		m := Nod(OGOTO, inlretlabel, nil)
 
 		m.Ninit = inlsubstlist(n.Ninit)
 
 		if inlretvars != nil && n.List != nil {
-			as = Nod(OAS2, nil, nil)
+			as := Nod(OAS2, nil, nil)
 
 			// shallow copy, or OINLCALL->rlist will be the same list, and later walk and typecheck may clobber that.
-			for ll = inlretvars; ll != nil; ll = ll.Next {
+			for ll := inlretvars; ll != nil; ll = ll.Next {
 				as.List = list(as.List, ll.N)
 			}
 			as.Rlist = inlsubstlist(n.List)
@@ -981,16 +924,16 @@ func inlsubst(n *Node) *Node {
 
 	case OGOTO,
 		OLABEL:
-		m = Nod(OXXX, nil, nil)
+		m := Nod(OXXX, nil, nil)
 		*m = *n
 		m.Ninit = nil
-		p = fmt.Sprintf("%s·%d", n.Left.Sym.Name, inlgen)
+		p := fmt.Sprintf("%s·%d", n.Left.Sym.Name, inlgen)
 		m.Left = newname(Lookup(p))
 
 		return m
 	}
 
-	m = Nod(OXXX, nil, nil)
+	m := Nod(OXXX, nil, nil)
 	*m = *n
 	m.Ninit = nil
 
diff --git a/src/cmd/internal/gc/lex.go b/src/cmd/internal/gc/lex.go
index 26f2eff13a073051803b89f0293c19f7e3b4e570..96273370777693de547f918a688331850dbe930b 100644
--- a/src/cmd/internal/gc/lex.go
+++ b/src/cmd/internal/gc/lex.go
@@ -88,14 +88,11 @@ func hidePanic() {
 }
 
 func doversion() {
-	var p string
-	var sep string
-
-	p = obj.Expstring()
+	p := obj.Expstring()
 	if p == "X:none" {
 		p = ""
 	}
-	sep = ""
+	sep := ""
 	if p != "" {
 		sep = " "
 	}
@@ -105,12 +102,10 @@ func doversion() {
 
 func Main() {
 	defer hidePanic()
-	var l *NodeList
-	var p string
 
 	// Allow GOARCH=thearch.thestring or GOARCH=thearch.thestringsuffix,
 	// but not other values.
-	p = obj.Getgoarch()
+	p := obj.Getgoarch()
 
 	if !strings.HasPrefix(p, Thearch.Thestring) {
 		log.Fatalf("cannot use %cg with GOARCH=%s", Thearch.Thechar, p)
@@ -276,7 +271,7 @@ func Main() {
 	}
 
 	if Thearch.Thechar == '8' {
-		p = obj.Getgo386()
+		p := obj.Getgo386()
 		if p == "387" {
 			Use_sse = 0
 		} else if p == "sse2" {
@@ -355,7 +350,7 @@ func Main() {
 	//   and methods but doesn't depend on any of it.
 	defercheckwidth()
 
-	for l = xtop; l != nil; l = l.Next {
+	for l := xtop; l != nil; l = l.Next {
 		if l.N.Op != ODCL && l.N.Op != OAS {
 			typecheck(&l.N, Etop)
 		}
@@ -363,7 +358,7 @@ func Main() {
 
 	// Phase 2: Variable assignments.
 	//   To check interface assignments, depends on phase 1.
-	for l = xtop; l != nil; l = l.Next {
+	for l := xtop; l != nil; l = l.Next {
 		if l.N.Op == ODCL || l.N.Op == OAS {
 			typecheck(&l.N, Etop)
 		}
@@ -371,7 +366,7 @@ func Main() {
 	resumecheckwidth()
 
 	// Phase 3: Type check function bodies.
-	for l = xtop; l != nil; l = l.Next {
+	for l := xtop; l != nil; l = l.Next {
 		if l.N.Op == ODCLFUNC || l.N.Op == OCLOSURE {
 			Curfn = l.N
 			decldepth = 1
@@ -387,7 +382,7 @@ func Main() {
 	// Phase 4: Decide how to capture closed variables.
 	// This needs to run before escape analysis,
 	// because variables captured by value do not escape.
-	for l = xtop; l != nil; l = l.Next {
+	for l := xtop; l != nil; l = l.Next {
 		if l.N.Op == ODCLFUNC && l.N.Closure != nil {
 			Curfn = l.N
 			capturevars(l.N)
@@ -404,7 +399,7 @@ func Main() {
 	if Debug['l'] > 1 {
 		// Typecheck imported function bodies if debug['l'] > 1,
 		// otherwise lazily when used or re-exported.
-		for l = importlist; l != nil; l = l.Next {
+		for l := importlist; l != nil; l = l.Next {
 			if l.N.Inl != nil {
 				saveerrors()
 				typecheckinl(l.N)
@@ -418,14 +413,14 @@ func Main() {
 
 	if Debug['l'] != 0 {
 		// Find functions that can be inlined and clone them before walk expands them.
-		for l = xtop; l != nil; l = l.Next {
+		for l := xtop; l != nil; l = l.Next {
 			if l.N.Op == ODCLFUNC {
 				caninl(l.N)
 			}
 		}
 
 		// Expand inlineable calls in all functions
-		for l = xtop; l != nil; l = l.Next {
+		for l := xtop; l != nil; l = l.Next {
 			if l.N.Op == ODCLFUNC {
 				inlcalls(l.N)
 			}
@@ -447,7 +442,7 @@ func Main() {
 	// Phase 7: Transform closure bodies to properly reference captured variables.
 	// This needs to happen before walk, because closures must be transformed
 	// before walk reaches a call of a closure.
-	for l = xtop; l != nil; l = l.Next {
+	for l := xtop; l != nil; l = l.Next {
 		if l.N.Op == ODCLFUNC && l.N.Closure != nil {
 			Curfn = l.N
 			transformclosure(l.N)
@@ -457,7 +452,7 @@ func Main() {
 	Curfn = nil
 
 	// Phase 8: Compile top level functions.
-	for l = xtop; l != nil; l = l.Next {
+	for l := xtop; l != nil; l = l.Next {
 		if l.N.Op == ODCLFUNC {
 			funccompile(l.N)
 		}
@@ -468,7 +463,7 @@ func Main() {
 	}
 
 	// Phase 9: Check external declarations.
-	for l = externdcl; l != nil; l = l.Next {
+	for l := externdcl; l != nil; l = l.Next {
 		if l.N.Op == ONAME {
 			typecheck(&l.N, Erv)
 		}
@@ -511,11 +506,8 @@ func arsize(b *obj.Biobuf, name string) int {
 }
 
 func skiptopkgdef(b *obj.Biobuf) bool {
-	var p string
-	var sz int
-
 	/* archive header */
-	p = obj.Brdline(b, '\n')
+	p := obj.Brdline(b, '\n')
 	if p == "" {
 		return false
 	}
@@ -527,7 +519,7 @@ func skiptopkgdef(b *obj.Biobuf) bool {
 	}
 
 	/* symbol table may be first; skip it */
-	sz = arsize(b, "__.GOSYMDEF")
+	sz := arsize(b, "__.GOSYMDEF")
 
 	if sz >= 0 {
 		obj.Bseek(b, int64(sz), 1)
@@ -545,12 +537,11 @@ func skiptopkgdef(b *obj.Biobuf) bool {
 }
 
 func addidir(dir string) {
-	var pp **Idir
-
 	if dir == "" {
 		return
 	}
 
+	var pp **Idir
 	for pp = &idirs; *pp != nil; pp = &(*pp).link {
 	}
 	*pp = new(Idir)
@@ -567,11 +558,6 @@ func islocalname(name *Strlit) bool {
 }
 
 func findpkg(name *Strlit) bool {
-	var p *Idir
-	var q string
-	var suffix string
-	var suffixsep string
-
 	if islocalname(name) {
 		if safemode != 0 || nolocalimports != 0 {
 			return false
@@ -595,13 +581,14 @@ func findpkg(name *Strlit) bool {
 	// local imports should be canonicalized already.
 	// don't want to see "encoding/../encoding/base64"
 	// as different from "encoding/base64".
+	var q string
 	_ = q
 	if path.Clean(name.S) != name.S {
 		Yyerror("non-canonical import path %v (should be %s)", Zconv(name, 0), q)
 		return false
 	}
 
-	for p = idirs; p != nil; p = p.link {
+	for p := idirs; p != nil; p = p.link {
 		namebuf = fmt.Sprintf("%s/%v.a", p.dir, Zconv(name, 0))
 		if obj.Access(namebuf, 0) >= 0 {
 			return true
@@ -613,8 +600,8 @@ func findpkg(name *Strlit) bool {
 	}
 
 	if goroot != "" {
-		suffix = ""
-		suffixsep = ""
+		suffix := ""
+		suffixsep := ""
 		if flag_installsuffix != "" {
 			suffixsep = "_"
 			suffix = flag_installsuffix
@@ -642,17 +629,6 @@ func fakeimport() {
 }
 
 func importfile(f *Val, line int) {
-	var imp *obj.Biobuf
-	var file string
-	var p string
-	var q string
-	var tag string
-	var c int32
-	var n int
-	var path_ *Strlit
-	var cleanbuf string
-	var prefix string
-
 	if f.Ctype != CTSTR {
 		Yyerror("import statement not a string")
 		fakeimport()
@@ -696,7 +672,7 @@ func importfile(f *Val, line int) {
 		return
 	}
 
-	path_ = f.U.Sval
+	path_ := f.U.Sval
 	if islocalname(path_) {
 		if path_.S[0] == '/' {
 			Yyerror("import path cannot be absolute path")
@@ -704,11 +680,11 @@ func importfile(f *Val, line int) {
 			return
 		}
 
-		prefix = Ctxt.Pathname
+		prefix := Ctxt.Pathname
 		if localimport != "" {
 			prefix = localimport
 		}
-		cleanbuf = prefix
+		cleanbuf := prefix
 		cleanbuf += "/"
 		cleanbuf += path_.S
 		cleanbuf = path.Clean(cleanbuf)
@@ -730,13 +706,13 @@ func importfile(f *Val, line int) {
 	// If we already saw that package, feed a dummy statement
 	// to the lexer to avoid parsing export data twice.
 	if importpkg.Imported != 0 {
-		file = namebuf
-		tag = ""
+		file := namebuf
+		tag := ""
 		if importpkg.Safe {
 			tag = "safe"
 		}
 
-		p = fmt.Sprintf("package %s %s\n$$\n", importpkg.Name, tag)
+		p := fmt.Sprintf("package %s %s\n$$\n", importpkg.Name, tag)
 		cannedimports(file, p)
 		return
 	}
@@ -744,15 +720,16 @@ func importfile(f *Val, line int) {
 	importpkg.Imported = 1
 
 	var err error
+	var imp *obj.Biobuf
 	imp, err = obj.Bopenr(namebuf)
 	if err != nil {
 		Yyerror("can't open import: \"%v\": %v", Zconv(f.U.Sval, 0), err)
 		errorexit()
 	}
 
-	file = namebuf
+	file := namebuf
 
-	n = len(namebuf)
+	n := len(namebuf)
 	if n > 2 && namebuf[n-2] == '.' && namebuf[n-1] == 'a' {
 		if !skiptopkgdef(imp) {
 			Yyerror("import %s: not a package file", file)
@@ -761,7 +738,7 @@ func importfile(f *Val, line int) {
 	}
 
 	// check object header
-	p = obj.Brdstr(imp, '\n', 1)
+	p := obj.Brdstr(imp, '\n', 1)
 
 	if p != "empty archive" {
 		if !strings.HasPrefix(p, "go object ") {
@@ -769,7 +746,7 @@ func importfile(f *Val, line int) {
 			errorexit()
 		}
 
-		q = fmt.Sprintf("%s %s %s %s", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
+		q := fmt.Sprintf("%s %s %s %s", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
 		if p[10:] != q {
 			Yyerror("import %s: object is [%s] expected [%s]", file, p[10:], q)
 			errorexit()
@@ -793,6 +770,7 @@ func importfile(f *Val, line int) {
 	curio.nlsemi = 0
 	typecheckok = 1
 
+	var c int32
 	for {
 		c = int32(getc())
 		if c == EOF {
@@ -951,7 +929,6 @@ l0:
 		cp.Reset()
 
 		for {
-
 			if escchar('"', &escflag, &v) {
 				break
 			}
@@ -974,7 +951,6 @@ l0:
 		cp.Reset()
 
 		for {
-
 			c = int(getr())
 			if c == '\r' {
 				continue
@@ -1014,9 +990,7 @@ l0:
 	case '/':
 		c1 = getc()
 		if c1 == '*' {
-			var nl int
-
-			nl = 0
+			nl := 0
 			for {
 				c = int(getr())
 				if c == '\n' {
@@ -1297,7 +1271,6 @@ asop:
 	 */
 talph:
 	for {
-
 		if c >= utf8.RuneSelf {
 			ungetc(c)
 			rune_ = uint(getr())
@@ -1339,7 +1312,6 @@ tnum:
 	cp.Reset()
 	if c != '0' {
 		for {
-
 			cp.WriteByte(byte(c))
 			c = getc()
 			if yy_isdigit(c) {
@@ -1353,7 +1325,6 @@ tnum:
 	c = getc()
 	if c == 'x' || c == 'X' {
 		for {
-
 			cp.WriteByte(byte(c))
 			c = getc()
 			if yy_isdigit(c) {
@@ -1381,7 +1352,6 @@ tnum:
 
 	c1 = 0
 	for {
-
 		if !yy_isdigit(c) {
 			break
 		}
@@ -1436,7 +1406,6 @@ ncu:
 
 casedot:
 	for {
-
 		cp.WriteByte(byte(c))
 		c = getc()
 		if !yy_isdigit(c) {
@@ -1463,7 +1432,6 @@ caseep:
 		Yyerror("malformed fp constant exponent")
 	}
 	for yy_isdigit(c) {
-
 		cp.WriteByte(byte(c))
 		c = getc()
 	}
@@ -1533,21 +1501,18 @@ func more(pp *string) bool {
  */
 func getlinepragma() int {
 	var cmd, verb, name string
-	var i int
-	var c int
 	var n int
 	var cp *bytes.Buffer
 	var linep int
-	var h *obj.Hist
 
-	c = int(getr())
+	c := int(getr())
 	if c == 'g' {
 		goto go_
 	}
 	if c != 'l' {
 		goto out
 	}
-	for i = 1; i < 5; i++ {
+	for i := 1; i < 5; i++ {
 		c = int(getr())
 		if c != int("line "[i]) {
 			goto out
@@ -1597,7 +1562,7 @@ func getlinepragma() int {
 
 	// try to avoid allocating file name over and over
 	name = lexbuf.String()[:linep-1]
-	for h = Ctxt.Hist; h != nil; h = h.Link {
+	for h := Ctxt.Hist; h != nil; h = h.Link {
 		if h.Name != "" && h.Name == name {
 			linehist(h.Name, int32(n), 0)
 			goto out
@@ -1708,20 +1673,17 @@ func getquoted(pp *string) (string, bool) {
 // Copied nearly verbatim from the C compiler's #pragma parser.
 // TODO: Rewrite more cleanly once the compiler is written in Go.
 func pragcgo(text string) {
-	var local string
-	var remote string
-	var p string
 	var q string
-	var verb string
 
 	if i := strings.Index(text, " "); i >= 0 {
 		text, q = text[:i], text[i:]
 	}
 
-	verb = text[3:] // skip "go:"
+	verb := text[3:] // skip "go:"
 
 	if verb == "cgo_dynamic_linker" || verb == "dynlinker" {
 		var ok bool
+		var p string
 		p, ok = getquoted(&q)
 		if !ok {
 			goto err1
@@ -1738,7 +1700,8 @@ func pragcgo(text string) {
 		verb = "cgo_export_dynamic"
 	}
 	if verb == "cgo_export_static" || verb == "cgo_export_dynamic" {
-		local = getimpsym(&q)
+		local := getimpsym(&q)
+		var remote string
 		if local == "" {
 			goto err2
 		}
@@ -1761,7 +1724,9 @@ func pragcgo(text string) {
 
 	if verb == "cgo_import_dynamic" || verb == "dynimport" {
 		var ok bool
-		local = getimpsym(&q)
+		local := getimpsym(&q)
+		var p string
+		var remote string
 		if local == "" {
 			goto err3
 		}
@@ -1792,7 +1757,7 @@ func pragcgo(text string) {
 	}
 
 	if verb == "cgo_import_static" {
-		local = getimpsym(&q)
+		local := getimpsym(&q)
 		if local == "" || more(&q) {
 			goto err4
 		}
@@ -1806,6 +1771,7 @@ func pragcgo(text string) {
 
 	if verb == "cgo_ldflag" {
 		var ok bool
+		var p string
 		p, ok = getquoted(&q)
 		if !ok {
 			goto err5
@@ -1847,9 +1813,7 @@ func yyparse() {
 }
 
 func yylex(yylval *yySymType) int32 {
-	var lx int
-
-	lx = int(_yylex(yylval))
+	lx := int(_yylex(yylval))
 
 	if curio.nlsemi != 0 && lx == EOF {
 		// Treat EOF as "end of line" for the purposes
@@ -1883,11 +1847,7 @@ func yylex(yylval *yySymType) int32 {
 }
 
 func getc() int {
-	var c int
-	var c1 int
-	var c2 int
-
-	c = curio.peekc
+	c := curio.peekc
 	if c != 0 {
 		curio.peekc = curio.peekc1
 		curio.peekc1 = 0
@@ -1902,6 +1862,8 @@ func getc() int {
 			curio.cp = curio.cp[1:]
 		}
 	} else {
+		var c1 int
+		var c2 int
 	loop:
 		c = obj.Bgetc(curio.bin)
 		if c == 0xef {
@@ -1974,14 +1936,9 @@ func getr() int32 {
 }
 
 func escchar(e int, escflg *int, val *int64) bool {
-	var i int
-	var u int
-	var c int
-	var l int64
-
 	*escflg = 0
 
-	c = int(getr())
+	c := int(getr())
 	switch c {
 	case EOF:
 		Yyerror("eof in string")
@@ -2002,8 +1959,10 @@ func escchar(e int, escflg *int, val *int64) bool {
 		return false
 	}
 
-	u = 0
+	u := 0
 	c = int(getr())
+	var l int64
+	var i int
 	switch c {
 	case 'x':
 		*escflg = 1 // it's a byte
@@ -2091,7 +2050,7 @@ hex:
 
 oct:
 	l = int64(c) - '0'
-	for i = 2; i > 0; i-- {
+	for i := 2; i > 0; i-- {
 		c = getc()
 		if c >= '0' && c <= '7' {
 			l = l*8 + int64(c) - '0'
@@ -2482,19 +2441,17 @@ var syms = []struct {
 }
 
 func lexinit() {
-	var i int
 	var lex int
 	var s *Sym
 	var s1 *Sym
 	var t *Type
 	var etype int
-	var v Val
 
 	/*
 	 * initialize basic types array
 	 * initialize known symbols
 	 */
-	for i = 0; i < len(syms); i++ {
+	for i := 0; i < len(syms); i++ {
 		lex = syms[i].lexical
 		s = Lookup(syms[i].name)
 		s.Lexical = uint16(lex)
@@ -2568,33 +2525,26 @@ func lexinit() {
 
 	Types[TNIL] = typ(TNIL)
 	s = Pkglookup("nil", builtinpkg)
+	var v Val
 	v.Ctype = CTNIL
 	s.Def = nodlit(v)
 	s.Def.Sym = s
 }
 
 func lexinit1() {
-	var s *Sym
-	var s1 *Sym
-	var t *Type
-	var f *Type
-	var rcvr *Type
-	var in *Type
-	var out *Type
-
 	// t = interface { Error() string }
-	rcvr = typ(TSTRUCT)
+	rcvr := typ(TSTRUCT)
 
 	rcvr.Type = typ(TFIELD)
 	rcvr.Type.Type = Ptrto(typ(TSTRUCT))
 	rcvr.Funarg = 1
-	in = typ(TSTRUCT)
+	in := typ(TSTRUCT)
 	in.Funarg = 1
-	out = typ(TSTRUCT)
+	out := typ(TSTRUCT)
 	out.Type = typ(TFIELD)
 	out.Type.Type = Types[TSTRING]
 	out.Funarg = 1
-	f = typ(TFUNC)
+	f := typ(TFUNC)
 	*getthis(f) = rcvr
 	*Getoutarg(f) = out
 	*getinarg(f) = in
@@ -2602,16 +2552,16 @@ func lexinit1() {
 	f.Intuple = 0
 	f.Outnamed = 0
 	f.Outtuple = 1
-	t = typ(TINTER)
+	t := typ(TINTER)
 	t.Type = typ(TFIELD)
 	t.Type.Sym = Lookup("Error")
 	t.Type.Type = f
 
 	// error type
-	s = Lookup("error")
+	s := Lookup("error")
 
 	s.Lexical = LNAME
-	s1 = Pkglookup("error", builtinpkg)
+	s1 := Pkglookup("error", builtinpkg)
 	errortype = t
 	errortype.Sym = s1
 	s1.Lexical = LNAME
@@ -2643,7 +2593,6 @@ func lexfini() {
 	var lex int
 	var etype int
 	var i int
-	var v Val
 
 	for i = 0; i < len(syms); i++ {
 		lex = syms[i].lexical
@@ -2701,6 +2650,7 @@ func lexfini() {
 
 	s = Lookup("nil")
 	if s.Def == nil {
+		var v Val
 		v.Ctype = CTNIL
 		s.Def = nodlit(v)
 		s.Def.Sym = s
@@ -2920,9 +2870,7 @@ var lexn = []struct {
 var lexname_buf string
 
 func lexname(lex int) string {
-	var i int
-
-	for i = 0; i < len(lexn); i++ {
+	for i := 0; i < len(lexn); i++ {
 		if lexn[i].lex == lex {
 			return lexn[i].name
 		}
@@ -3131,15 +3079,13 @@ var yytfix = []struct {
 }
 
 func pkgnotused(lineno int, path_ *Strlit, name string) {
-	var elem string
-
 	// If the package was imported with a name other than the final
 	// import path element, show it explicitly in the error message.
 	// Note that this handles both renamed imports and imports of
 	// packages containing unconventional package declarations.
 	// Note that this uses / always, even on Windows, because Go import
 	// paths always use forward slashes.
-	elem = path_.S
+	elem := path_.S
 	if i := strings.LastIndex(elem, "/"); i >= 0 {
 		elem = elem[i+1:]
 	}
@@ -3151,10 +3097,6 @@ func pkgnotused(lineno int, path_ *Strlit, name string) {
 }
 
 func mkpackage(pkgname string) {
-	var s *Sym
-	var h int32
-	var p string
-
 	if localpkg.Name == "" {
 		if pkgname == "_" {
 			Yyerror("invalid package name _")
@@ -3164,7 +3106,8 @@ func mkpackage(pkgname string) {
 		if pkgname != localpkg.Name {
 			Yyerror("package %s; expected %s", pkgname, localpkg.Name)
 		}
-		for h = 0; h < NHASH; h++ {
+		var s *Sym
+		for h := int32(0); h < NHASH; h++ {
 			for s = hash[h]; s != nil; s = s.Link {
 				if s.Def == nil || s.Pkg != localpkg {
 					continue
@@ -3198,7 +3141,7 @@ func mkpackage(pkgname string) {
 	}
 
 	if outfile == "" {
-		p = infile
+		p := infile
 		if i := strings.LastIndex(p, "/"); i >= 0 {
 			p = p[i+1:]
 		}
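pragcgo and escchar above likewise keep plain `var p string` and `var l int64` declarations beside the new short ones because those functions branch to shared error labels, and Go rejects a `goto` that jumps over a variable declaration. A minimal standalone sketch of the rule (`parse` is a hypothetical function, not compiler code):

package main

import "fmt"

func parse(ok bool) string {
	var msg string // moving this declaration below the goto would not compile
	if !ok {
		goto fail
	}
	msg = "parsed"
	return msg

fail:
	msg = "syntax error"
	return msg
}

func main() {
	fmt.Println(parse(true), parse(false))
}
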
diff --git a/src/cmd/internal/gc/md5.go b/src/cmd/internal/gc/md5.go
index 862fdd553e7f6081a5ce08fbcf11724c566d6447..3b5190013ea2614f0fff40fafb3a19cf23a618e9 100644
--- a/src/cmd/internal/gc/md5.go
+++ b/src/cmd/internal/gc/md5.go
@@ -39,16 +39,13 @@ func md5reset(d *MD5) {
 }
 
 func md5write(d *MD5, p []byte, nn int) {
-	var i int
-	var n int
-
 	d.len += uint64(nn)
 	if d.nx > 0 {
-		n = nn
+		n := nn
 		if n > _Chunk-d.nx {
 			n = _Chunk - d.nx
 		}
-		for i = 0; i < n; i++ {
+		for i := 0; i < n; i++ {
 			d.x[d.nx+i] = uint8(p[i])
 		}
 		d.nx += n
@@ -61,11 +58,11 @@ func md5write(d *MD5, p []byte, nn int) {
 		nn -= n
 	}
 
-	n = md5block(d, p, nn)
+	n := md5block(d, p, nn)
 	p = p[n:]
 	nn -= n
 	if nn > 0 {
-		for i = 0; i < nn; i++ {
+		for i := 0; i < nn; i++ {
 			d.x[i] = uint8(p[i])
 		}
 		d.nx = nn
@@ -73,14 +70,10 @@ func md5write(d *MD5, p []byte, nn int) {
 }
 
 func md5sum(d *MD5, hi *uint64) uint64 {
-	var tmp [64]uint8
-	var i int
-	var len uint64
-
 	// Padding.  Add a 1 bit and 0 bits until 56 bytes mod 64.
-	len = d.len
+	len := d.len
 
-	tmp = [64]uint8{}
+	tmp := [64]uint8{}
 	tmp[0] = 0x80
 	if len%64 < 56 {
 		md5write(d, tmp[:], int(56-len%64))
@@ -91,7 +84,7 @@ func md5sum(d *MD5, hi *uint64) uint64 {
 	// Length in bits.
 	len <<= 3
 
-	for i = 0; i < 8; i++ {
+	for i := 0; i < 8; i++ {
 		tmp[i] = uint8(len >> uint(8*i))
 	}
 	md5write(d, tmp[:], 8)
@@ -194,24 +187,19 @@ var shift3 = []uint32{4, 11, 16, 23}
 var shift4 = []uint32{6, 10, 15, 21}
 
 func md5block(dig *MD5, p []byte, nn int) int {
-	var a uint32
-	var b uint32
-	var c uint32
-	var d uint32
 	var aa uint32
 	var bb uint32
 	var cc uint32
 	var dd uint32
 	var i int
 	var j int
-	var n int
 	var X [16]uint32
 
-	a = dig.s[0]
-	b = dig.s[1]
-	c = dig.s[2]
-	d = dig.s[3]
-	n = 0
+	a := dig.s[0]
+	b := dig.s[1]
+	c := dig.s[2]
+	d := dig.s[3]
+	n := 0
 
 	for nn >= _Chunk {
 		aa = a
@@ -226,14 +214,10 @@ func md5block(dig *MD5, p []byte, nn int) int {
 
 		// Round 1.
 		for i = 0; i < 16; i++ {
-			var x uint32
-			var t uint32
-			var s uint32
-			var f uint32
-			x = uint32(i)
-			t = uint32(i)
-			s = shift1[i%4]
-			f = ((c ^ d) & b) ^ d
+			x := uint32(i)
+			t := uint32(i)
+			s := shift1[i%4]
+			f := ((c ^ d) & b) ^ d
 			a += f + X[x] + table[t]
 			a = a<<s | a>>(32-s)
 			a += b
@@ -247,15 +231,10 @@ func md5block(dig *MD5, p []byte, nn int) int {
 
 		// Round 2.
 		for i = 0; i < 16; i++ {
-			var x uint32
-			var t uint32
-			var s uint32
-			var g uint32
-
-			x = (1 + 5*uint32(i)) % 16
-			t = 16 + uint32(i)
-			s = shift2[i%4]
-			g = ((b ^ c) & d) ^ c
+			x := (1 + 5*uint32(i)) % 16
+			t := 16 + uint32(i)
+			s := shift2[i%4]
+			g := ((b ^ c) & d) ^ c
 			a += g + X[x] + table[t]
 			a = a<<s | a>>(32-s)
 			a += b
@@ -269,15 +248,10 @@ func md5block(dig *MD5, p []byte, nn int) int {
 
 		// Round 3.
 		for i = 0; i < 16; i++ {
-			var x uint32
-			var t uint32
-			var s uint32
-			var h uint32
-
-			x = (5 + 3*uint32(i)) % 16
-			t = 32 + uint32(i)
-			s = shift3[i%4]
-			h = b ^ c ^ d
+			x := (5 + 3*uint32(i)) % 16
+			t := 32 + uint32(i)
+			s := shift3[i%4]
+			h := b ^ c ^ d
 			a += h + X[x] + table[t]
 			a = a<<s | a>>(32-s)
 			a += b
@@ -291,15 +265,10 @@ func md5block(dig *MD5, p []byte, nn int) int {
 
 		// Round 4.
 		for i = 0; i < 16; i++ {
-			var x uint32
-			var s uint32
-			var t uint32
-			var ii uint32
-
-			x = (7 * uint32(i)) % 16
-			s = shift4[i%4]
-			t = 48 + uint32(i)
-			ii = c ^ (b | ^d)
+			x := (7 * uint32(i)) % 16
+			s := shift4[i%4]
+			t := 48 + uint32(i)
+			ii := c ^ (b | ^d)
 			a += ii + X[x] + table[t]
 			a = a<<s | a>>(32-s)
 			a += b
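Round 1 above computes its selector as ((c ^ d) & b) ^ d rather than the textbook MD5 F(b,c,d) = (b & c) | (^b & d); the two are bitwise equal, and the rewritten form saves one operation. A standalone single-bit check of the identity (sufficient because the expression operates bit by bit):

package main

import "fmt"

func main() {
	for b := uint32(0); b < 2; b++ {
		for c := uint32(0); c < 2; c++ {
			for d := uint32(0); d < 2; d++ {
				f1 := ((c ^ d) & b) ^ d
				f2 := (b & c) | (^b & d)
				if f1 != f2 {
					fmt.Println("mismatch at", b, c, d)
					return
				}
			}
		}
	}
	fmt.Println("((c^d)&b)^d == (b&c)|(^b&d) for every bit pattern")
}
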
diff --git a/src/cmd/internal/gc/mparith1.go b/src/cmd/internal/gc/mparith1.go
index 14b8620ad490a3daca45039111179f52c46c1c35..454b688f9745e1c1e680b6f6480fad3a04f0beab 100644
--- a/src/cmd/internal/gc/mparith1.go
+++ b/src/cmd/internal/gc/mparith1.go
@@ -13,19 +13,17 @@ import (
 /// uses arithmetic
 
 func mpcmpfixflt(a *Mpint, b *Mpflt) int {
-	var buf string
 	var c Mpflt
 
-	buf = fmt.Sprintf("%v", Bconv(a, 0))
+	buf := fmt.Sprintf("%v", Bconv(a, 0))
 	mpatoflt(&c, buf)
 	return mpcmpfltflt(&c, b)
 }
 
 func mpcmpfltfix(a *Mpflt, b *Mpint) int {
-	var buf string
 	var c Mpflt
 
-	buf = fmt.Sprintf("%v", Bconv(b, 0))
+	buf := fmt.Sprintf("%v", Bconv(b, 0))
 	mpatoflt(&c, buf)
 	return mpcmpfltflt(a, &c)
 }
@@ -133,11 +131,10 @@ func Mpmovefixflt(a *Mpflt, b *Mpint) {
 // convert (truncate) b to a.
 // return -1 (but still convert) if b was non-integer.
 func mpexactfltfix(a *Mpint, b *Mpflt) int {
-	var f Mpflt
-
 	*a = b.Val
 	Mpshiftfix(a, int(b.Exp))
 	if b.Exp < 0 {
+		var f Mpflt
 		f.Val = *a
 		f.Exp = 0
 		mpnorm(&f)
@@ -150,15 +147,12 @@ func mpexactfltfix(a *Mpint, b *Mpflt) int {
 }
 
 func mpmovefltfix(a *Mpint, b *Mpflt) int {
-	var f Mpflt
-	var i int
-
 	if mpexactfltfix(a, b) == 0 {
 		return 0
 	}
 
 	// try rounding down a little
-	f = *b
+	f := *b
 
 	f.Val.A[0] = 0
 	if mpexactfltfix(a, &f) == 0 {
@@ -166,7 +160,7 @@ func mpmovefltfix(a *Mpint, b *Mpflt) int {
 	}
 
 	// try rounding up a little
-	for i = 1; i < Mpprec; i++ {
+	for i := 1; i < Mpprec; i++ {
 		f.Val.A[i]++
 		if f.Val.A[i] != Mpbase {
 			break
@@ -209,12 +203,6 @@ func mppow10flt(a *Mpflt, p int) {
 }
 
 func mphextofix(a *Mpint, s string) {
-	var c int8
-	var d int
-	var bit int
-	var hexdigitp int
-	var end int
-
 	for s != "" && s[0] == '0' {
 		s = s[1:]
 	}
@@ -225,8 +213,11 @@ func mphextofix(a *Mpint, s string) {
 		return
 	}
 
-	end = len(s) - 1
-	for hexdigitp = end; hexdigitp >= 0; hexdigitp-- {
+	end := len(s) - 1
+	var c int8
+	var d int
+	var bit int
+	for hexdigitp := end; hexdigitp >= 0; hexdigitp-- {
 		c = int8(s[hexdigitp])
 		if c >= '0' && c <= '9' {
 			d = int(c) - '0'
@@ -252,25 +243,14 @@ func mphextofix(a *Mpint, s string) {
 // required syntax is [+-]d*[.]d*[e[+-]d*] or [+-]0xH*[e[+-]d*]
 //
 func mpatoflt(a *Mpflt, as string) {
-	var b Mpflt
-	var dp int
-	var c int
-	var f int
-	var ef int
-	var ex int
-	var eb int
-	var base int
-	var s string
-	var start string
-
 	for as[0] == ' ' || as[0] == '\t' {
 		as = as[1:]
 	}
 
 	/* determine base */
-	s = as
+	s := as
 
-	base = -1
+	base := -1
 	for base == -1 {
 		if s == "" {
 			base = 10
@@ -296,14 +276,17 @@ func mpatoflt(a *Mpflt, as string) {
 	}
 
 	s = as
-	dp = 0 /* digits after decimal point */
-	f = 0  /* sign */
-	ex = 0 /* exponent */
-	eb = 0 /* binary point */
+	dp := 0 /* digits after decimal point */
+	f := 0  /* sign */
+	ex := 0 /* exponent */
+	eb := 0 /* binary point */
 
 	Mpmovecflt(a, 0.0)
+	var ef int
+	var c int
 	if base == 16 {
-		start = ""
+		start := ""
+		var c int
 		for {
 			c, _ = intstarstringplusplus(s)
 			if c == '-' {
@@ -437,6 +420,7 @@ func mpatoflt(a *Mpflt, as string) {
 	}
 	if mpcmpfltc(a, 0.0) != 0 {
 		if ex >= dp {
+			var b Mpflt
 			mppow10flt(&b, ex-dp)
 			mpmulfltflt(a, &b)
 		} else {
@@ -444,6 +428,7 @@ func mpatoflt(a *Mpflt, as string) {
 			if dp-ex >= 1<<(32-3) || int(int16(4*(dp-ex))) != 4*(dp-ex) {
 				Mpmovecflt(a, 0.0)
 			} else {
+				var b Mpflt
 				mppow10flt(&b, dp-ex)
 				mpdivfltflt(a, &b)
 			}
@@ -466,12 +451,10 @@ bad:
 //
 func mpatofix(a *Mpint, as string) {
 	var c int
-	var f int
-	var s string
 	var s0 string
 
-	s = as
-	f = 0
+	s := as
+	f := 0
 	Mpmovecfix(a, 0)
 
 	c, s = intstarstringplusplus(s)
@@ -555,29 +538,24 @@ bad:
 }
 
 func Bconv(xval *Mpint, flag int) string {
-	var buf [500]byte
-	var p int
-	var fp string
-
 	var q Mpint
-	var r Mpint
-	var ten Mpint
-	var sixteen Mpint
-	var f int
-	var digit int
 
 	mpmovefixfix(&q, xval)
-	f = 0
+	f := 0
 	if mptestfix(&q) < 0 {
 		f = 1
 		mpnegfix(&q)
 	}
 
-	p = len(buf)
+	var buf [500]byte
+	p := len(buf)
+	var r Mpint
 	if flag&obj.FmtSharp != 0 /*untyped*/ {
 		// Hexadecimal
+		var sixteen Mpint
 		Mpmovecfix(&sixteen, 16)
 
+		var digit int
 		for {
 			mpdivmodfixfix(&q, &r, &q, &sixteen)
 			digit = int(Mpgetfix(&r))
@@ -599,6 +577,7 @@ func Bconv(xval *Mpint, flag int) string {
 		buf[p] = '0'
 	} else {
 		// Decimal
+		var ten Mpint
 		Mpmovecfix(&ten, 10)
 
 		for {
@@ -615,26 +594,20 @@ func Bconv(xval *Mpint, flag int) string {
 		p--
 		buf[p] = '-'
 	}
+	var fp string
 	fp += string(buf[p:])
 	return fp
 }
 
 func Fconv(fvp *Mpflt, flag int) string {
-	var buf string
-	var fp string
-
-	var fv Mpflt
-	var d float64
-	var dexp float64
-	var exp int
-
 	if flag&obj.FmtSharp != 0 /*untyped*/ {
 		// alternate form - decimal for error messages.
 		// for values well in range, convert to double and use print's %g
-		exp = int(fvp.Exp) + sigfig(fvp)*Mpscale
+		exp := int(fvp.Exp) + sigfig(fvp)*Mpscale
 
+		var fp string
 		if -900 < exp && exp < 900 {
-			d = mpgetflt(fvp)
+			d := mpgetflt(fvp)
 			if d >= 0 && (flag&obj.FmtSign != 0 /*untyped*/) {
 				fp += fmt.Sprintf("+")
 			}
@@ -644,15 +617,15 @@ func Fconv(fvp *Mpflt, flag int) string {
 
 		// very out of range. compute decimal approximation by hand.
 		// decimal exponent
-		dexp = float64(fvp.Exp) * 0.301029995663981195 // log_10(2)
+		dexp := float64(fvp.Exp) * 0.301029995663981195 // log_10(2)
 		exp = int(dexp)
 
 		// decimal mantissa
-		fv = *fvp
+		fv := *fvp
 
 		fv.Val.Neg = 0
 		fv.Exp = 0
-		d = mpgetflt(&fv)
+		d := mpgetflt(&fv)
 		d *= math.Pow(10, dexp-float64(exp))
 		for d >= 9.99995 {
 			d /= 10
@@ -668,6 +641,8 @@ func Fconv(fvp *Mpflt, flag int) string {
 		return fp
 	}
 
+	var fv Mpflt
+	var buf string
 	if sigfig(fvp) == 0 {
 		buf = fmt.Sprintf("0p+0")
 		goto out
@@ -693,6 +668,7 @@ func Fconv(fvp *Mpflt, flag int) string {
 	buf = fmt.Sprintf("%vp-%d", Bconv(&fv.Val, obj.FmtSharp), -fv.Exp)
 
 out:
+	var fp string
 	fp += buf
 	return fp
 }
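Bconv above is the classic convert-by-division routine: digits come out least significant first from repeated div/mod, are written backward into a fixed buffer, and the result is the buffer's tail. A minimal standalone sketch of the same pattern for a plain uint64 (the itoa helper is hypothetical):

package main

import "fmt"

func itoa(v uint64) string {
	var buf [20]byte // 20 digits fit any uint64
	p := len(buf)
	for {
		p--
		buf[p] = byte('0' + v%10)
		v /= 10
		if v == 0 {
			break
		}
	}
	return string(buf[p:])
}

func main() {
	fmt.Println(itoa(0), itoa(4095)) // 0 4095
}
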
diff --git a/src/cmd/internal/gc/mparith2.go b/src/cmd/internal/gc/mparith2.go
index 057585c65c4aa82ab05d5b81b15a9cab5a891650..c9c9230bd5bf095c351770f9ce917360409cc32c 100644
--- a/src/cmd/internal/gc/mparith2.go
+++ b/src/cmd/internal/gc/mparith2.go
@@ -9,11 +9,8 @@ package gc
 // words of the argument
 //
 func mplen(a *Mpint) int {
-	var i int
-	var n int
-
-	n = -1
-	for i = 0; i < Mpprec; i++ {
+	n := -1
+	for i := 0; i < Mpprec; i++ {
 		if a.A[i] != 0 {
 			n = i
 		}
@@ -28,11 +25,9 @@ func mplen(a *Mpint) int {
 //
 func mplsh(a *Mpint, quiet int) {
 	var x int
-	var i int
-	var c int
 
-	c = 0
-	for i = 0; i < Mpprec; i++ {
+	c := 0
+	for i := 0; i < Mpprec; i++ {
 		x = (a.A[i] << 1) + c
 		c = 0
 		if x >= Mpbase {
@@ -54,9 +49,7 @@ func mplsh(a *Mpint, quiet int) {
 // ignores sign
 //
 func mplshw(a *Mpint, quiet int) {
-	var i int
-
-	i = Mpprec - 1
+	i := Mpprec - 1
 	if a.A[i] != 0 {
 		a.Ovf = 1
 		if quiet == 0 {
@@ -76,13 +69,10 @@ func mplshw(a *Mpint, quiet int) {
 //
 func mprsh(a *Mpint) {
 	var x int
-	var lo int
-	var i int
-	var c int
 
-	c = 0
-	lo = a.A[0] & 1
-	for i = Mpprec - 1; i >= 0; i-- {
+	c := 0
+	lo := a.A[0] & 1
+	for i := Mpprec - 1; i >= 0; i-- {
 		x = a.A[i]
 		a.A[i] = (x + c) >> 1
 		c = 0
@@ -101,10 +91,9 @@ func mprsh(a *Mpint) {
 // ignores sign and overflow
 //
 func mprshw(a *Mpint) {
-	var lo int
 	var i int
 
-	lo = a.A[0]
+	lo := a.A[0]
 	for i = 0; i < Mpprec-1; i++ {
 		a.A[i] = a.A[i+1]
 	}
@@ -119,9 +108,6 @@ func mprshw(a *Mpint) {
 // return the sign of (abs(a)-abs(b))
 //
 func mpcmp(a *Mpint, b *Mpint) int {
-	var x int
-	var i int
-
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in cmp")
@@ -129,7 +115,8 @@ func mpcmp(a *Mpint, b *Mpint) int {
 		return 0
 	}
 
-	for i = Mpprec - 1; i >= 0; i-- {
+	var x int
+	for i := Mpprec - 1; i >= 0; i-- {
 		x = a.A[i] - b.A[i]
 		if x > 0 {
 			return +1
@@ -148,11 +135,9 @@ func mpcmp(a *Mpint, b *Mpint) int {
 //
 func mpneg(a *Mpint) {
 	var x int
-	var i int
-	var c int
 
-	c = 0
-	for i = 0; i < Mpprec; i++ {
+	c := 0
+	for i := 0; i < Mpprec; i++ {
 		x = -a.A[i] - c
 		c = 0
 		if x < 0 {
@@ -193,10 +178,6 @@ func Mpshiftfix(a *Mpint, s int) {
 /// implements fix arithmetic
 
 func mpaddfixfix(a *Mpint, b *Mpint, quiet int) {
-	var i int
-	var c int
-	var x int
-
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mpaddxx")
@@ -205,13 +186,14 @@ func mpaddfixfix(a *Mpint, b *Mpint, quiet int) {
 		return
 	}
 
-	c = 0
+	c := 0
+	var x int
 	if a.Neg != b.Neg {
 		goto sub
 	}
 
 	// perform a+b
-	for i = 0; i < Mpprec; i++ {
+	for i := 0; i < Mpprec; i++ {
 		x = a.A[i] + b.A[i] + c
 		c = 0
 		if x >= Mpbase {
@@ -236,7 +218,8 @@ sub:
 		Mpmovecfix(a, 0)
 
 	case 1:
-		for i = 0; i < Mpprec; i++ {
+		var x int
+		for i := 0; i < Mpprec; i++ {
 			x = a.A[i] - b.A[i] - c
 			c = 0
 			if x < 0 {
@@ -249,7 +232,8 @@ sub:
 
 	case -1:
 		a.Neg ^= 1
-		for i = 0; i < Mpprec; i++ {
+		var x int
+		for i := 0; i < Mpprec; i++ {
 			x = b.A[i] - a.A[i] - c
 			c = 0
 			if x < 0 {
@@ -263,15 +247,6 @@ sub:
 }
 
 func mpmulfixfix(a *Mpint, b *Mpint) {
-	var i int
-	var j int
-	var na int
-	var nb int
-	var x int
-	var s Mpint
-	var q Mpint
-	var c *Mpint
-
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mpmulfixfix")
@@ -282,9 +257,11 @@ func mpmulfixfix(a *Mpint, b *Mpint) {
 
 	// pick the smaller
 	// to test for bits
-	na = mplen(a)
+	na := mplen(a)
 
-	nb = mplen(b)
+	nb := mplen(b)
+	var s Mpint
+	var c *Mpint
 	if na > nb {
 		mpmovefixfix(&s, a)
 		c = b
@@ -296,8 +273,11 @@ func mpmulfixfix(a *Mpint, b *Mpint) {
 
 	s.Neg = 0
 
+	var q Mpint
 	Mpmovecfix(&q, 0)
-	for i = 0; i < na; i++ {
+	var j int
+	var x int
+	for i := 0; i < na; i++ {
 		x = c.A[i]
 		for j = 0; j < Mpscale; j++ {
 			if x&1 != 0 {
@@ -326,12 +306,6 @@ out:
 }
 
 func mpmulfract(a *Mpint, b *Mpint) {
-	var i int
-	var j int
-	var x int
-	var s Mpint
-	var q Mpint
-
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mpmulflt")
@@ -340,16 +314,19 @@ func mpmulfract(a *Mpint, b *Mpint) {
 		return
 	}
 
+	var s Mpint
 	mpmovefixfix(&s, b)
 	s.Neg = 0
+	var q Mpint
 	Mpmovecfix(&q, 0)
 
-	i = Mpprec - 1
-	x = a.A[i]
+	i := Mpprec - 1
+	x := a.A[i]
 	if x != 0 {
 		Yyerror("mpmulfract not normal")
 	}
 
+	var j int
 	for i--; i >= 0; i-- {
 		x = a.A[i]
 		if x == 0 {
@@ -374,10 +351,7 @@ func mpmulfract(a *Mpint, b *Mpint) {
 }
 
 func mporfixfix(a *Mpint, b *Mpint) {
-	var i int
-	var x int
-
-	x = 0
+	x := 0
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mporfixfix")
@@ -396,7 +370,7 @@ func mporfixfix(a *Mpint, b *Mpint) {
 		mpneg(b)
 	}
 
-	for i = 0; i < Mpprec; i++ {
+	for i := 0; i < Mpprec; i++ {
 		x = a.A[i] | b.A[i]
 		a.A[i] = x
 	}
@@ -411,10 +385,7 @@ func mporfixfix(a *Mpint, b *Mpint) {
 }
 
 func mpandfixfix(a *Mpint, b *Mpint) {
-	var i int
-	var x int
-
-	x = 0
+	x := 0
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mpandfixfix")
@@ -433,7 +404,7 @@ func mpandfixfix(a *Mpint, b *Mpint) {
 		mpneg(b)
 	}
 
-	for i = 0; i < Mpprec; i++ {
+	for i := 0; i < Mpprec; i++ {
 		x = a.A[i] & b.A[i]
 		a.A[i] = x
 	}
@@ -448,10 +419,7 @@ func mpandfixfix(a *Mpint, b *Mpint) {
 }
 
 func mpandnotfixfix(a *Mpint, b *Mpint) {
-	var i int
-	var x int
-
-	x = 0
+	x := 0
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mpandnotfixfix")
@@ -470,7 +438,7 @@ func mpandnotfixfix(a *Mpint, b *Mpint) {
 		mpneg(b)
 	}
 
-	for i = 0; i < Mpprec; i++ {
+	for i := 0; i < Mpprec; i++ {
 		x = a.A[i] &^ b.A[i]
 		a.A[i] = x
 	}
@@ -485,10 +453,7 @@ func mpandnotfixfix(a *Mpint, b *Mpint) {
 }
 
 func mpxorfixfix(a *Mpint, b *Mpint) {
-	var i int
-	var x int
-
-	x = 0
+	x := 0
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mporfixfix")
@@ -507,7 +472,7 @@ func mpxorfixfix(a *Mpint, b *Mpint) {
 		mpneg(b)
 	}
 
-	for i = 0; i < Mpprec; i++ {
+	for i := 0; i < Mpprec; i++ {
 		x = a.A[i] ^ b.A[i]
 		a.A[i] = x
 	}
@@ -522,8 +487,6 @@ func mpxorfixfix(a *Mpint, b *Mpint) {
 }
 
 func mplshfixfix(a *Mpint, b *Mpint) {
-	var s int64
-
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mporfixfix")
@@ -533,7 +496,7 @@ func mplshfixfix(a *Mpint, b *Mpint) {
 		return
 	}
 
-	s = Mpgetfix(b)
+	s := Mpgetfix(b)
 	if s < 0 || s >= Mpprec*Mpscale {
 		Yyerror("stupid shift: %d", s)
 		Mpmovecfix(a, 0)
@@ -544,8 +507,6 @@ func mplshfixfix(a *Mpint, b *Mpint) {
 }
 
 func mprshfixfix(a *Mpint, b *Mpint) {
-	var s int64
-
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mprshfixfix")
@@ -555,7 +516,7 @@ func mprshfixfix(a *Mpint, b *Mpint) {
 		return
 	}
 
-	s = Mpgetfix(b)
+	s := Mpgetfix(b)
 	if s < 0 || s >= Mpprec*Mpscale {
 		Yyerror("stupid shift: %d", s)
 		if a.Neg != 0 {
@@ -574,8 +535,6 @@ func mpnegfix(a *Mpint) {
 }
 
 func Mpgetfix(a *Mpint) int64 {
-	var v int64
-
 	if a.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("constant overflow")
@@ -583,7 +542,7 @@ func Mpgetfix(a *Mpint) int64 {
 		return 0
 	}
 
-	v = int64(uint64(a.A[0]))
+	v := int64(uint64(a.A[0]))
 	v |= int64(uint64(a.A[1]) << Mpscale)
 	v |= int64(uint64(a.A[2]) << (Mpscale + Mpscale))
 	if a.Neg != 0 {
@@ -593,19 +552,16 @@ func Mpgetfix(a *Mpint) int64 {
 }
 
 func Mpmovecfix(a *Mpint, c int64) {
-	var i int
-	var x int64
-
 	a.Neg = 0
 	a.Ovf = 0
 
-	x = c
+	x := c
 	if x < 0 {
 		a.Neg = 1
 		x = int64(-uint64(x))
 	}
 
-	for i = 0; i < Mpprec; i++ {
+	for i := 0; i < Mpprec; i++ {
 		a.A[i] = int(x & Mpmask)
 		x >>= Mpscale
 	}
@@ -613,11 +569,9 @@ func Mpmovecfix(a *Mpint, c int64) {
 
 func mpdivmodfixfix(q *Mpint, r *Mpint, n *Mpint, d *Mpint) {
 	var i int
-	var ns int
-	var ds int
 
-	ns = int(n.Neg)
-	ds = int(d.Neg)
+	ns := int(n.Neg)
+	ds := int(d.Neg)
 	n.Neg = 0
 	d.Neg = 0
 
@@ -664,9 +618,7 @@ func mpdivmodfixfix(q *Mpint, r *Mpint, n *Mpint, d *Mpint) {
 }
 
 func mpiszero(a *Mpint) bool {
-	var i int
-
-	for i = Mpprec - 1; i >= 0; i-- {
+	for i := Mpprec - 1; i >= 0; i-- {
 		if a.A[i] != 0 {
 			return false
 		}
@@ -677,19 +629,17 @@ func mpiszero(a *Mpint) bool {
 func mpdivfract(a *Mpint, b *Mpint) {
 	var n Mpint
 	var d Mpint
-	var i int
 	var j int
-	var neg int
 	var x int
 
 	mpmovefixfix(&n, a) // numerator
 	mpmovefixfix(&d, b) // denominator
 
-	neg = int(n.Neg) ^ int(d.Neg)
+	neg := int(n.Neg) ^ int(d.Neg)
 
 	n.Neg = 0
 	d.Neg = 0
-	for i = Mpprec - 1; i >= 0; i-- {
+	for i := Mpprec - 1; i >= 0; i-- {
 		x = 0
 		for j = 0; j < Mpscale; j++ {
 			x <<= 1
@@ -711,10 +661,9 @@ func mpdivfract(a *Mpint, b *Mpint) {
 
 func mptestfix(a *Mpint) int {
 	var b Mpint
-	var r int
 
 	Mpmovecfix(&b, 0)
-	r = mpcmp(a, &b)
+	r := mpcmp(a, &b)
 	if a.Neg != 0 {
 		if r > 0 {
 			return -1
diff --git a/src/cmd/internal/gc/mparith3.go b/src/cmd/internal/gc/mparith3.go
index 61bf9e9aadc425efdf2e99da4f31033853c14915..103c53dac4f269844e93c038010031af694f6336 100644
--- a/src/cmd/internal/gc/mparith3.go
+++ b/src/cmd/internal/gc/mparith3.go
@@ -49,11 +49,7 @@ func mpsetexp(a *Mpflt, exp int) {
  * word of the number to Mpnorm
  */
 func mpnorm(a *Mpflt) {
-	var s int
-	var os int
-	var x int
-
-	os = sigfig(a)
+	os := sigfig(a)
 	if os == 0 {
 		// zero
 		a.Exp = 0
@@ -63,9 +59,9 @@ func mpnorm(a *Mpflt) {
 	}
 
 	// this will normalize to the nearest word
-	x = a.Val.A[os-1]
+	x := a.Val.A[os-1]
 
-	s = (Mpnorm - os) * Mpscale
+	s := (Mpnorm - os) * Mpscale
 
 	// further normalize to the nearest bit
 	for {
@@ -91,16 +87,13 @@ func mpnorm(a *Mpflt) {
 /// implements float arithmetic
 
 func mpaddfltflt(a *Mpflt, b *Mpflt) {
-	var sa int
-	var sb int
-	var s int
-	var c Mpflt
-
 	if Mpdebug != 0 /*TypeKind(100016)*/ {
 		fmt.Printf("\n%v + %v", Fconv(a, 0), Fconv(b, 0))
 	}
 
-	sa = sigfig(a)
+	sa := sigfig(a)
+	var s int
+	var sb int
 	if sa == 0 {
 		mpmovefltflt(a, b)
 		goto out
@@ -114,6 +107,7 @@ func mpaddfltflt(a *Mpflt, b *Mpflt) {
 	s = int(a.Exp) - int(b.Exp)
 	if s > 0 {
 		// a is larger, shift b right
+		var c Mpflt
 		mpmovefltflt(&c, b)
 
 		Mpshiftfix(&c.Val, -s)
@@ -140,14 +134,11 @@ out:
 }
 
 func mpmulfltflt(a *Mpflt, b *Mpflt) {
-	var sa int
-	var sb int
-
 	if Mpdebug != 0 /*TypeKind(100016)*/ {
 		fmt.Printf("%v\n * %v\n", Fconv(a, 0), Fconv(b, 0))
 	}
 
-	sa = sigfig(a)
+	sa := sigfig(a)
 	if sa == 0 {
 		// zero
 		a.Exp = 0
@@ -156,7 +147,7 @@ func mpmulfltflt(a *Mpflt, b *Mpflt) {
 		return
 	}
 
-	sb = sigfig(b)
+	sb := sigfig(b)
 	if sb == 0 {
 		// zero
 		mpmovefltflt(a, b)
@@ -174,15 +165,11 @@ func mpmulfltflt(a *Mpflt, b *Mpflt) {
 }
 
 func mpdivfltflt(a *Mpflt, b *Mpflt) {
-	var sa int
-	var sb int
-	var c Mpflt
-
 	if Mpdebug != 0 /*TypeKind(100016)*/ {
 		fmt.Printf("%v\n / %v\n", Fconv(a, 0), Fconv(b, 0))
 	}
 
-	sb = sigfig(b)
+	sb := sigfig(b)
 	if sb == 0 {
 		// zero and ovfl
 		a.Exp = 0
@@ -193,7 +180,7 @@ func mpdivfltflt(a *Mpflt, b *Mpflt) {
 		return
 	}
 
-	sa = sigfig(a)
+	sa := sigfig(a)
 	if sa == 0 {
 		// zero
 		a.Exp = 0
@@ -203,6 +190,7 @@ func mpdivfltflt(a *Mpflt, b *Mpflt) {
 	}
 
 	// adjust b to top
+	var c Mpflt
 	mpmovefltflt(&c, b)
 
 	Mpshiftfix(&c.Val, Mpscale)
@@ -219,18 +207,11 @@ func mpdivfltflt(a *Mpflt, b *Mpflt) {
 }
 
 func mpgetfltN(a *Mpflt, prec int, bias int) float64 {
-	var s int
-	var i int
-	var e int
-	var minexp int
-	var v uint64
-	var f float64
-
 	if a.Val.Ovf != 0 && nsavederrors+nerrors == 0 {
 		Yyerror("mpgetflt ovf")
 	}
 
-	s = sigfig(a)
+	s := sigfig(a)
 	if s == 0 {
 		return 0
 	}
@@ -252,7 +233,8 @@ func mpgetfltN(a *Mpflt, prec int, bias int) float64 {
 	// pick up the mantissa, a rounding bit, and a tie-breaking bit in a uvlong
 	s = prec + 2
 
-	v = 0
+	v := uint64(0)
+	var i int
 	for i = Mpnorm - 1; s >= Mpscale; i-- {
 		v = v<<Mpscale | uint64(a.Val.A[i])
 		s -= Mpscale
@@ -273,11 +255,11 @@ func mpgetfltN(a *Mpflt, prec int, bias int) float64 {
 	}
 
 	// gradual underflow
-	e = Mpnorm*Mpscale + int(a.Exp) - prec
+	e := Mpnorm*Mpscale + int(a.Exp) - prec
 
-	minexp = bias + 1 - prec + 1
+	minexp := bias + 1 - prec + 1
 	if e < minexp {
-		s = minexp - e
+		s := minexp - e
 		if s > prec+1 {
 			s = prec + 1
 		}
@@ -294,7 +276,7 @@ func mpgetfltN(a *Mpflt, prec int, bias int) float64 {
 	v += v & 1
 	v >>= 2
 
-	f = float64(v)
+	f := float64(v)
 	f = math.Ldexp(f, e)
 
 	if a.Val.Neg != 0 {
@@ -313,15 +295,14 @@ func mpgetflt32(a *Mpflt) float64 {
 }
 
 func Mpmovecflt(a *Mpflt, c float64) {
-	var i int
-	var f float64
-	var l int
-
 	if Mpdebug != 0 /*TypeKind(100016)*/ {
 		fmt.Printf("\nconst %g", c)
 	}
 	Mpmovecfix(&a.Val, 0)
 	a.Exp = 0
+	var f float64
+	var l int
+	var i int
 	if c == 0 {
 		goto out
 	}
@@ -333,7 +314,7 @@ func Mpmovecflt(a *Mpflt, c float64) {
 	f, i = math.Frexp(c)
 	a.Exp = int16(i)
 
-	for i = 0; i < 10; i++ {
+	for i := 0; i < 10; i++ {
 		f = f * Mpbase
 		l = int(math.Floor(f))
 		f = f - float64(l)
@@ -357,12 +338,10 @@ func mpnegflt(a *Mpflt) {
 }
 
 func mptestflt(a *Mpflt) int {
-	var s int
-
 	if Mpdebug != 0 /*TypeKind(100016)*/ {
 		fmt.Printf("\n%v?", Fconv(a, 0))
 	}
-	s = sigfig(a)
+	s := sigfig(a)
 	if s != 0 {
 		s = +1
 		if a.Val.Neg != 0 {
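Mpmovecflt and mpgetfltN above are built around the frexp/ldexp primitives: math.Frexp splits a float into a fraction in [0.5, 1) and a binary exponent, and math.Ldexp reassembles it exactly. A standalone round trip:

package main

import (
	"fmt"
	"math"
)

func main() {
	c := 6.25
	f, e := math.Frexp(c)              // 6.25 = 0.78125 * 2^3
	fmt.Println(f, e)                  // 0.78125 3
	fmt.Println(math.Ldexp(f, e) == c) // true
}
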
diff --git a/src/cmd/internal/gc/obj.go b/src/cmd/internal/gc/obj.go
index afaf87c4f5f95c4a654bd3a956245af340f026c6..27a18118324e8a2bd3c803877e718a0afb5760d8 100644
--- a/src/cmd/internal/gc/obj.go
+++ b/src/cmd/internal/gc/obj.go
@@ -21,13 +21,6 @@ func formathdr(arhdr []byte, name string, size int64) {
 }
 
 func dumpobj() {
-	var externs *NodeList
-	var tmp *NodeList
-	var arhdr [ArhdrSize]byte
-	var startobj int64
-	var size int64
-	var zero *Sym
-
 	var err error
 	bout, err = obj.Bopenw(outfile)
 	if err != nil {
@@ -36,7 +29,8 @@ func dumpobj() {
 		errorexit()
 	}
 
-	startobj = 0
+	startobj := int64(0)
+	var arhdr [ArhdrSize]byte
 	if writearchive != 0 {
 		obj.Bwritestring(bout, "!<arch>\n")
 		arhdr = [ArhdrSize]byte{}
@@ -49,7 +43,7 @@ func dumpobj() {
 
 	if writearchive != 0 {
 		obj.Bflush(bout)
-		size = obj.Boffset(bout) - startobj
+		size := obj.Boffset(bout) - startobj
 		if size&1 != 0 {
 			obj.Bputc(bout, 0)
 		}
@@ -77,7 +71,7 @@ func dumpobj() {
 
 	fmt.Fprintf(bout, "\n!\n")
 
-	externs = nil
+	externs := (*NodeList)(nil)
 	if externdcl != nil {
 		externs = externdcl.End
 	}
@@ -86,7 +80,7 @@ func dumpobj() {
 	dumptypestructs()
 
 	// Dump extra globals.
-	tmp = externdcl
+	tmp := externdcl
 
 	if externs != nil {
 		externdcl = externs.Next
@@ -94,7 +88,7 @@ func dumpobj() {
 	dumpglobls()
 	externdcl = tmp
 
-	zero = Pkglookup("zerovalue", Runtimepkg)
+	zero := Pkglookup("zerovalue", Runtimepkg)
 	ggloblsym(zero, int32(zerosize), obj.DUPOK|obj.RODATA)
 
 	dumpdata()
@@ -102,7 +96,7 @@ func dumpobj() {
 
 	if writearchive != 0 {
 		obj.Bflush(bout)
-		size = obj.Boffset(bout) - startobj
+		size := obj.Boffset(bout) - startobj
 		if size&1 != 0 {
 			obj.Bputc(bout, 0)
 		}
@@ -117,10 +111,9 @@ func dumpobj() {
 
 func dumpglobls() {
 	var n *Node
-	var l *NodeList
 
 	// add globals
-	for l = externdcl; l != nil; l = l.Next {
+	for l := externdcl; l != nil; l = l.Next {
 		n = l.N
 		if n.Op != ONAME {
 			continue
@@ -140,7 +133,7 @@ func dumpglobls() {
 		ggloblnod(n)
 	}
 
-	for l = funcsyms; l != nil; l = l.Next {
+	for l := funcsyms; l != nil; l = l.Next {
 		n = l.N
 		dsymptr(n.Sym, 0, n.Sym.Def.Shortname.Sym, 0)
 		ggloblsym(n.Sym, int32(Widthptr), obj.DUPOK|obj.RODATA)
@@ -156,8 +149,6 @@ func Bputname(b *obj.Biobuf, s *obj.LSym) {
 }
 
 func Linksym(s *Sym) *obj.LSym {
-	var p string
-
 	if s == nil {
 		return nil
 	}
@@ -169,7 +160,7 @@ func Linksym(s *Sym) *obj.LSym {
 	} else if s.Linkname != "" {
 		s.Lsym = obj.Linklookup(Ctxt, s.Linkname, 0)
 	} else {
-		p = fmt.Sprintf("%s.%s", s.Pkg.Prefix, s.Name)
+		p := fmt.Sprintf("%s.%s", s.Pkg.Prefix, s.Name)
 		s.Lsym = obj.Linklookup(Ctxt, p, 0)
 	}
 
@@ -208,10 +199,6 @@ func duintptr(s *Sym, off int, v uint64) int {
 var stringsym_gen int
 
 func stringsym(s string) *Sym {
-	var sym *Sym
-	var off int
-	var n int
-	var m int
 	var tmp struct {
 		lit Strlit
 		buf string
@@ -233,7 +220,7 @@ func stringsym(s string) *Sym {
 		pkg = gostringpkg
 	}
 
-	sym = Pkglookup(namebuf, pkg)
+	sym := Pkglookup(namebuf, pkg)
 
 	// SymUniq flag indicates that data is generated already
 	if sym.Flags&SymUniq != 0 {
@@ -242,14 +229,15 @@ func stringsym(s string) *Sym {
 	sym.Flags |= SymUniq
 	sym.Def = newname(sym)
 
-	off = 0
+	off := 0
 
 	// string header
 	off = dsymptr(sym, off, sym, Widthptr+Widthint)
 	off = duintxx(sym, off, uint64(len(s)), Widthint)
 
 	// string data
-	for n = 0; n < len(s); n += m {
+	var m int
+	for n := 0; n < len(s); n += m {
 		m = 8
 		if m > len(s)-n {
 			m = len(s) - n
@@ -267,18 +255,15 @@ func stringsym(s string) *Sym {
 var slicebytes_gen int
 
 func slicebytes(nam *Node, s string, len int) {
-	var off int
-	var n int
 	var m int
-	var sym *Sym
 
 	slicebytes_gen++
 	namebuf = fmt.Sprintf(".gobytes.%d", slicebytes_gen)
-	sym = Pkglookup(namebuf, localpkg)
+	sym := Pkglookup(namebuf, localpkg)
 	sym.Def = newname(sym)
 
-	off = 0
-	for n = 0; n < len; n += m {
+	off := 0
+	for n := 0; n < len; n += m {
 		m = 8
 		if m > len-n {
 			m = len - n
@@ -298,10 +283,8 @@ func slicebytes(nam *Node, s string, len int) {
 }
 
 func dstringptr(s *Sym, off int, str string) int {
-	var p *obj.Prog
-
 	off = int(Rnd(int64(off), int64(Widthptr)))
-	p = Thearch.Gins(obj.ADATA, nil, nil)
+	p := Thearch.Gins(obj.ADATA, nil, nil)
 	p.From.Type = obj.TYPE_MEM
 	p.From.Name = obj.NAME_EXTERN
 	p.From.Sym = Linksym(s)
@@ -321,9 +304,7 @@ func dstringptr(s *Sym, off int, str string) int {
  * gobj.c
  */
 func Datastring(s string, a *obj.Addr) {
-	var sym *Sym
-
-	sym = stringsym(s)
+	sym := stringsym(s)
 	a.Type = obj.TYPE_MEM
 	a.Name = obj.NAME_EXTERN
 	a.Sym = Linksym(sym)
@@ -333,9 +314,7 @@ func Datastring(s string, a *obj.Addr) {
 }
 
 func datagostring(sval *Strlit, a *obj.Addr) {
-	var sym *Sym
-
-	sym = stringsym(sval.S)
+	sym := stringsym(sval.S)
 	a.Type = obj.TYPE_MEM
 	a.Name = obj.NAME_EXTERN
 	a.Sym = Linksym(sym)
@@ -345,29 +324,24 @@ func datagostring(sval *Strlit, a *obj.Addr) {
 }
 
 func dgostringptr(s *Sym, off int, str string) int {
-	var n int
-	var lit *Strlit
-
 	if str == "" {
 		return duintptr(s, off, 0)
 	}
 
-	n = len(str)
-	lit = new(Strlit)
+	n := len(str)
+	lit := new(Strlit)
 	lit.S = str
 	lit.S = lit.S[:n]
 	return dgostrlitptr(s, off, lit)
 }
 
 func dgostrlitptr(s *Sym, off int, lit *Strlit) int {
-	var p *obj.Prog
-
 	if lit == nil {
 		return duintptr(s, off, 0)
 	}
 
 	off = int(Rnd(int64(off), int64(Widthptr)))
-	p = Thearch.Gins(obj.ADATA, nil, nil)
+	p := Thearch.Gins(obj.ADATA, nil, nil)
 	p.From.Type = obj.TYPE_MEM
 	p.From.Name = obj.NAME_EXTERN
 	p.From.Sym = Linksym(s)
@@ -383,9 +357,7 @@ func dgostrlitptr(s *Sym, off int, lit *Strlit) int {
 }
 
 func dsname(s *Sym, off int, t string) int {
-	var p *obj.Prog
-
-	p = Thearch.Gins(obj.ADATA, nil, nil)
+	p := Thearch.Gins(obj.ADATA, nil, nil)
 	p.From.Type = obj.TYPE_MEM
 	p.From.Name = obj.NAME_EXTERN
 	p.From.Offset = int64(off)
@@ -399,11 +371,9 @@ func dsname(s *Sym, off int, t string) int {
 }
 
 func dsymptr(s *Sym, off int, x *Sym, xoff int) int {
-	var p *obj.Prog
-
 	off = int(Rnd(int64(off), int64(Widthptr)))
 
-	p = Thearch.Gins(obj.ADATA, nil, nil)
+	p := Thearch.Gins(obj.ADATA, nil, nil)
 	p.From.Type = obj.TYPE_MEM
 	p.From.Name = obj.NAME_EXTERN
 	p.From.Sym = Linksym(s)
@@ -420,8 +390,6 @@ func dsymptr(s *Sym, off int, x *Sym, xoff int) int {
 }
 
 func gdata(nam *Node, nr *Node, wid int) {
-	var p *obj.Prog
-
 	if nr.Op == OLITERAL {
 		switch nr.Val.Ctype {
 		case CTCPLX:
@@ -434,19 +402,16 @@ func gdata(nam *Node, nr *Node, wid int) {
 		}
 	}
 
-	p = Thearch.Gins(obj.ADATA, nam, nr)
+	p := Thearch.Gins(obj.ADATA, nam, nr)
 	p.From3.Type = obj.TYPE_CONST
 	p.From3.Offset = int64(wid)
 }
 
 func gdatacomplex(nam *Node, cval *Mpcplx) {
-	var p *obj.Prog
-	var w int
-
-	w = cplxsubtype(int(nam.Type.Etype))
+	w := cplxsubtype(int(nam.Type.Etype))
 	w = int(Types[w].Width)
 
-	p = Thearch.Gins(obj.ADATA, nam, nil)
+	p := Thearch.Gins(obj.ADATA, nam, nil)
 	p.From3.Type = obj.TYPE_CONST
 	p.From3.Offset = int64(w)
 	p.To.Type = obj.TYPE_FCONST
@@ -461,10 +426,9 @@ func gdatacomplex(nam *Node, cval *Mpcplx) {
 }
 
 func gdatastring(nam *Node, sval *Strlit) {
-	var p *obj.Prog
 	var nod1 Node
 
-	p = Thearch.Gins(obj.ADATA, nam, nil)
+	p := Thearch.Gins(obj.ADATA, nam, nil)
 	Datastring(sval.S, &p.To)
 	p.From3.Type = obj.TYPE_CONST
 	p.From3.Offset = Types[Tptr].Width
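
In dumpobj above, initializations to zero values become "startobj := int64(0)" and "externs := (*NodeList)(nil)" rather than bare literals: a short declaration infers its type from the right-hand side, an untyped nil has no type to infer, and a plain 0 would make startobj an int instead of the int64 that the later Boffset arithmetic works with. A small sketch with a stand-in NodeList type:

package main

import "fmt"

type NodeList struct{ Next *NodeList }

func main() {
	startobj := int64(0)        // plain 0 would infer int, not int64
	externs := (*NodeList)(nil) // externs := nil would not compile
	var alt *NodeList           // the equivalent, more idiomatic form
	fmt.Println(startobj, externs == alt)
}
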
diff --git a/src/cmd/internal/gc/order.go b/src/cmd/internal/gc/order.go
index 743ca80cb84e2f7dcce2d226382d275bd1f80a6f..e8744d7d0b48f5db1b1fe55e0452cb9c72be9175 100644
--- a/src/cmd/internal/gc/order.go
+++ b/src/cmd/internal/gc/order.go
@@ -49,10 +49,8 @@ type Order struct {
 // Order rewrites fn->nbody to apply the ordering constraints
 // described in the comment at the top of the file.
 func order(fn *Node) {
-	var s string
-
 	if Debug['W'] > 1 {
-		s = fmt.Sprintf("\nbefore order %v", Sconv(fn.Nname.Sym, 0))
+		s := fmt.Sprintf("\nbefore order %v", Sconv(fn.Nname.Sym, 0))
 		dumplist(s, fn.Nbody)
 	}
 
@@ -63,18 +61,14 @@ func order(fn *Node) {
 // pushes it onto the temp stack, and returns it.
 // If clear is true, ordertemp emits code to zero the temporary.
 func ordertemp(t *Type, order *Order, clear bool) *Node {
-	var var_ *Node
-	var a *Node
-	var l *NodeList
-
-	var_ = temp(t)
+	var_ := temp(t)
 	if clear {
-		a = Nod(OAS, var_, nil)
+		a := Nod(OAS, var_, nil)
 		typecheck(&a, Etop)
 		order.out = list(order.out, a)
 	}
 
-	l = order.free
+	l := order.free
 	if l == nil {
 		l = new(NodeList)
 	}
@@ -98,11 +92,8 @@ func ordertemp(t *Type, order *Order, clear bool) *Node {
 // returns a pointer to the result data instead of taking a pointer
 // to be filled in.)
 func ordercopyexpr(n *Node, t *Type, order *Order, clear int) *Node {
-	var a *Node
-	var var_ *Node
-
-	var_ = ordertemp(t, order, clear != 0)
-	a = Nod(OAS, var_, n)
+	var_ := ordertemp(t, order, clear != 0)
+	a := Nod(OAS, var_, n)
 	typecheck(&a, Etop)
 	order.out = list(order.out, a)
 	return var_
@@ -130,21 +121,17 @@ func ordercheapexpr(n *Node, order *Order) *Node {
 //
 // The intended use is to apply to x when rewriting x += y into x = x + y.
 func ordersafeexpr(n *Node, order *Order) *Node {
-	var l *Node
-	var r *Node
-	var a *Node
-
 	switch n.Op {
 	case ONAME,
 		OLITERAL:
 		return n
 
 	case ODOT:
-		l = ordersafeexpr(n.Left, order)
+		l := ordersafeexpr(n.Left, order)
 		if l == n.Left {
 			return n
 		}
-		a = Nod(OXXX, nil, nil)
+		a := Nod(OXXX, nil, nil)
 		*a = *n
 		a.Orig = a
 		a.Left = l
@@ -153,11 +140,11 @@ func ordersafeexpr(n *Node, order *Order) *Node {
 
 	case ODOTPTR,
 		OIND:
-		l = ordercheapexpr(n.Left, order)
+		l := ordercheapexpr(n.Left, order)
 		if l == n.Left {
 			return n
 		}
-		a = Nod(OXXX, nil, nil)
+		a := Nod(OXXX, nil, nil)
 		*a = *n
 		a.Orig = a
 		a.Left = l
@@ -166,16 +153,17 @@ func ordersafeexpr(n *Node, order *Order) *Node {
 
 	case OINDEX,
 		OINDEXMAP:
+		var l *Node
 		if Isfixedarray(n.Left.Type) {
 			l = ordersafeexpr(n.Left, order)
 		} else {
 			l = ordercheapexpr(n.Left, order)
 		}
-		r = ordercheapexpr(n.Right, order)
+		r := ordercheapexpr(n.Right, order)
 		if l == n.Left && r == n.Right {
 			return n
 		}
-		a = Nod(OXXX, nil, nil)
+		a := Nod(OXXX, nil, nil)
 		*a = *n
 		a.Orig = a
 		a.Left = l
@@ -210,9 +198,7 @@ func isaddrokay(n *Node) bool {
 // If the original argument *np is not okay, orderaddrtemp creates a tmp, emits
 // tmp = *np, and then sets *np to the tmp variable.
 func orderaddrtemp(np **Node, order *Order) {
-	var n *Node
-
-	n = *np
+	n := *np
 	if isaddrokay(n) {
 		return
 	}
@@ -244,10 +230,9 @@ func poptemp(mark *NodeList, order *Order) {
 // above the mark on the temporary stack, but it does not pop them
 // from the stack.
 func cleantempnopop(mark *NodeList, order *Order, out **NodeList) {
-	var l *NodeList
 	var kill *Node
 
-	for l = order.temp; l != mark; l = l.Next {
+	for l := order.temp; l != mark; l = l.Next {
 		kill = Nod(OVARKILL, l.N, nil)
 		typecheck(&kill, Etop)
 		*out = list(*out, kill)
@@ -271,11 +256,8 @@ func orderstmtlist(l *NodeList, order *Order) {
 // Orderblock orders the block of statements *l onto a new list,
 // and then replaces *l with that list.
 func orderblock(l **NodeList) {
-	var order Order
-	var mark *NodeList
-
-	order = Order{}
-	mark = marktemp(&order)
+	order := Order{}
+	mark := marktemp(&order)
 	orderstmtlist(*l, &order)
 	cleantemp(mark, &order)
 	*l = order.out
@@ -284,18 +266,14 @@ func orderblock(l **NodeList) {
 // Orderexprinplace orders the side effects in *np and
 // leaves them as the init list of the final *np.
 func orderexprinplace(np **Node, outer *Order) {
-	var n *Node
-	var lp **NodeList
-	var order Order
-
-	n = *np
-	order = Order{}
+	n := *np
+	order := Order{}
 	orderexpr(&n, &order)
 	addinit(&n, order.out)
 
 	// insert new temporaries from order
 	// at head of outer list.
-	lp = &order.temp
+	lp := &order.temp
 
 	for *lp != nil {
 		lp = &(*lp).Next
@@ -309,13 +287,9 @@ func orderexprinplace(np **Node, outer *Order) {
 // Orderstmtinplace orders the side effects of the single statement *np
 // and replaces it with the resulting statement list.
 func orderstmtinplace(np **Node) {
-	var n *Node
-	var order Order
-	var mark *NodeList
-
-	n = *np
-	order = Order{}
-	mark = marktemp(&order)
+	n := *np
+	order := Order{}
+	mark := marktemp(&order)
 	orderstmt(n, &order)
 	cleantemp(mark, &order)
 	*np = liststmt(order.out)
@@ -330,13 +304,11 @@ func orderinit(n *Node, order *Order) {
 // Ismulticall reports whether the list l is f() for a multi-value function.
 // Such an f() could appear as the lone argument to a multi-arg function.
 func ismulticall(l *NodeList) bool {
-	var n *Node
-
 	// one arg only
 	if l == nil || l.Next != nil {
 		return false
 	}
-	n = l.N
+	n := l.N
 
 	// must be call
 	switch n.Op {
@@ -356,26 +328,21 @@ func ismulticall(l *NodeList) bool {
 // Copyret emits t1, t2, ... = n, where n is a function call,
 // and then returns the list t1, t2, ....
 func copyret(n *Node, order *Order) *NodeList {
-	var t *Type
-	var tmp *Node
-	var as *Node
-	var l1 *NodeList
-	var l2 *NodeList
-	var tl Iter
-
 	if n.Type.Etype != TSTRUCT || n.Type.Funarg == 0 {
 		Fatal("copyret %v %d", Tconv(n.Type, 0), n.Left.Type.Outtuple)
 	}
 
-	l1 = nil
-	l2 = nil
-	for t = Structfirst(&tl, &n.Type); t != nil; t = structnext(&tl) {
+	l1 := (*NodeList)(nil)
+	l2 := (*NodeList)(nil)
+	var tl Iter
+	var tmp *Node
+	for t := Structfirst(&tl, &n.Type); t != nil; t = structnext(&tl) {
 		tmp = temp(t.Type)
 		l1 = list(l1, tmp)
 		l2 = list(l2, tmp)
 	}
 
-	as = Nod(OAS2, nil, nil)
+	as := Nod(OAS2, nil, nil)
 	as.List = l1
 	as.Rlist = list1(n)
 	typecheck(&as, Etop)
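
The copyret hunk keeps "var tl Iter" and "var tmp *Node" above the loop even though the loop variable itself becomes "t := Structfirst(&tl, &n.Type)". The iterator state cannot move inside the for statement: both the init clause and the post clause refer to &tl, and an init clause admits only a single simple statement. A self-contained sketch of the same shape — iter, first, and next are stand-ins, not compiler APIs:

package main

import "fmt"

type iter struct{ i int }

func first(it *iter, xs []int) (int, bool) { it.i = 0; return next(it, xs) }

func next(it *iter, xs []int) (int, bool) {
	if it.i >= len(xs) {
		return 0, false
	}
	v := xs[it.i]
	it.i++
	return v, true
}

func main() {
	xs := []int{2, 3, 5}
	var it iter // shared by the init and post clauses below
	for v, ok := first(&it, xs); ok; v, ok = next(&it, xs) {
		fmt.Println(v)
	}
}
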
@@ -426,11 +393,6 @@ func ordercall(n *Node, order *Order) {
 // Ordermapassign also inserts these temporaries if needed for
 // calling writebarrierfat with a pointer to n->right.
 func ordermapassign(n *Node, order *Order) {
-	var m *Node
-	var a *Node
-	var l *NodeList
-	var post *NodeList
-
 	switch n.Op {
 	default:
 		Fatal("ordermapassign %v", Oconv(int(n.Op), 0))
@@ -440,9 +402,9 @@ func ordermapassign(n *Node, order *Order) {
 
 		// We call writebarrierfat only for values > 4 pointers long. See walk.c.
 		if (n.Left.Op == OINDEXMAP || (needwritebarrier(n.Left, n.Right) && n.Left.Type.Width > int64(4*Widthptr))) && !isaddrokay(n.Right) {
-			m = n.Left
+			m := n.Left
 			n.Left = ordertemp(m.Type, order, false)
-			a = Nod(OAS, m, n.Left)
+			a := Nod(OAS, m, n.Left)
 			typecheck(&a, Etop)
 			order.out = list(order.out, a)
 		}
@@ -451,8 +413,10 @@ func ordermapassign(n *Node, order *Order) {
 		OAS2DOTTYPE,
 		OAS2MAPR,
 		OAS2FUNC:
-		post = nil
-		for l = n.List; l != nil; l = l.Next {
+		post := (*NodeList)(nil)
+		var m *Node
+		var a *Node
+		for l := n.List; l != nil; l = l.Next {
 			if l.N.Op == OINDEXMAP {
 				m = l.N
 				if !istemp(m.Left) {
@@ -477,22 +441,11 @@ func ordermapassign(n *Node, order *Order) {
 // Temporaries created during the statement are cleaned
 // up using VARKILL instructions where possible.
 func orderstmt(n *Node, order *Order) {
-	var lno int
-	var l *NodeList
-	var t *NodeList
-	var t1 *NodeList
-	var r *Node
-	var tmp1 *Node
-	var tmp2 *Node
-	var np **Node
-	var ch *Type
-	var typ *Type
-
 	if n == nil {
 		return
 	}
 
-	lno = int(setlineno(n))
+	lno := int(setlineno(n))
 
 	orderinit(n, order)
 
@@ -511,7 +464,7 @@ func orderstmt(n *Node, order *Order) {
 		OPRINTN,
 		ORECOVER,
 		ORECV:
-		t = marktemp(order)
+		t := marktemp(order)
 		orderexpr(&n.Left, order)
 		orderexpr(&n.Right, order)
 		orderexprlist(n.List, order)
@@ -534,11 +487,11 @@ func orderstmt(n *Node, order *Order) {
 	// out map read from map write when l is
 	// a map index expression.
 	case OASOP:
-		t = marktemp(order)
+		t := marktemp(order)
 
 		orderexpr(&n.Left, order)
 		n.Left = ordersafeexpr(n.Left, order)
-		tmp1 = treecopy(n.Left)
+		tmp1 := treecopy(n.Left)
 		if tmp1.Op == OINDEXMAP {
 			tmp1.Etype = 0 // now an rvalue not an lvalue
 		}
@@ -554,10 +507,10 @@ func orderstmt(n *Node, order *Order) {
 		// Special: make sure key is addressable,
 	// and make sure OINDEXMAP is not copied out.
 	case OAS2MAPR:
-		t = marktemp(order)
+		t := marktemp(order)
 
 		orderexprlist(n.List, order)
-		r = n.Rlist.N
+		r := n.Rlist.N
 		orderexpr(&r.Left, order)
 		orderexpr(&r.Right, order)
 
@@ -571,7 +524,7 @@ func orderstmt(n *Node, order *Order) {
 
 		// Special: avoid copy of func call n->rlist->n.
 	case OAS2FUNC:
-		t = marktemp(order)
+		t := marktemp(order)
 
 		orderexprlist(n.List, order)
 		ordercall(n.Rlist.N, order)
@@ -582,17 +535,17 @@ func orderstmt(n *Node, order *Order) {
 	// so that assertI2Tetc can take address of temporary.
 	// No temporary for blank assignment.
 	case OAS2DOTTYPE:
-		t = marktemp(order)
+		t := marktemp(order)
 
 		orderexprlist(n.List, order)
 		orderexpr(&n.Rlist.N.Left, order) // i in i.(T)
 		if isblank(n.List.N) {
 			order.out = list(order.out, n)
 		} else {
-			typ = n.Rlist.N.Type
-			tmp1 = ordertemp(typ, order, haspointers(typ))
+			typ := n.Rlist.N.Type
+			tmp1 := ordertemp(typ, order, haspointers(typ))
 			order.out = list(order.out, n)
-			r = Nod(OAS, n.List.N, tmp1)
+			r := Nod(OAS, n.List.N, tmp1)
 			typecheck(&r, Etop)
 			ordermapassign(r, order)
 			n.List = list(list1(tmp1), n.List.Next.N)
@@ -603,19 +556,20 @@ func orderstmt(n *Node, order *Order) {
 		// Special: use temporary variables to hold result,
 	// so that chanrecv can take address of temporary.
 	case OAS2RECV:
-		t = marktemp(order)
+		t := marktemp(order)
 
 		orderexprlist(n.List, order)
 		orderexpr(&n.Rlist.N.Left, order) // arg to recv
-		ch = n.Rlist.N.Left.Type
-		tmp1 = ordertemp(ch.Type, order, haspointers(ch.Type))
+		ch := n.Rlist.N.Left.Type
+		tmp1 := ordertemp(ch.Type, order, haspointers(ch.Type))
+		var tmp2 *Node
 		if !isblank(n.List.Next.N) {
 			tmp2 = ordertemp(n.List.Next.N.Type, order, false)
 		} else {
 			tmp2 = ordertemp(Types[TBOOL], order, false)
 		}
 		order.out = list(order.out, n)
-		r = Nod(OAS, n.List.N, tmp1)
+		r := Nod(OAS, n.List.N, tmp1)
 		typecheck(&r, Etop)
 		ordermapassign(r, order)
 		r = Nod(OAS, n.List.Next.N, tmp2)
@@ -646,7 +600,7 @@ func orderstmt(n *Node, order *Order) {
 	case OCALLFUNC,
 		OCALLINTER,
 		OCALLMETH:
-		t = marktemp(order)
+		t := marktemp(order)
 
 		ordercall(n, order)
 		order.out = list(order.out, n)
@@ -655,7 +609,7 @@ func orderstmt(n *Node, order *Order) {
 		// Special: order arguments to inner call but not call itself.
 	case ODEFER,
 		OPROC:
-		t = marktemp(order)
+		t := marktemp(order)
 
 		switch n.Left.Op {
 		// Delete will take the address of the key.
@@ -664,8 +618,8 @@ func orderstmt(n *Node, order *Order) {
 		case ODELETE:
 			orderexprlist(n.Left.List, order)
 
-			t1 = marktemp(order)
-			np = &n.Left.List.Next.N // map key
+			t1 := marktemp(order)
+			np := &n.Left.List.Next.N // map key
 			*np = ordercopyexpr(*np, (*np).Type, order, 0)
 			poptemp(t1, order)
 
@@ -677,7 +631,7 @@ func orderstmt(n *Node, order *Order) {
 		cleantemp(t, order)
 
 	case ODELETE:
-		t = marktemp(order)
+		t := marktemp(order)
 		orderexpr(&n.List.N, order)
 		orderexpr(&n.List.Next.N, order)
 		orderaddrtemp(&n.List.Next.N, order) // map key
@@ -687,10 +641,10 @@ func orderstmt(n *Node, order *Order) {
 		// Clean temporaries from condition evaluation at
 	// beginning of loop body and after for statement.
 	case OFOR:
-		t = marktemp(order)
+		t := marktemp(order)
 
 		orderexprinplace(&n.Ntest, order)
-		l = nil
+		l := (*NodeList)(nil)
 		cleantempnopop(t, order, &l)
 		n.Nbody = concat(l, n.Nbody)
 		orderblock(&n.Nbody)
@@ -701,10 +655,10 @@ func orderstmt(n *Node, order *Order) {
 		// Clean temporaries from condition at
 	// beginning of both branches.
 	case OIF:
-		t = marktemp(order)
+		t := marktemp(order)
 
 		orderexprinplace(&n.Ntest, order)
-		l = nil
+		l := (*NodeList)(nil)
 		cleantempnopop(t, order, &l)
 		n.Nbody = concat(l, n.Nbody)
 		l = nil
@@ -718,7 +672,7 @@ func orderstmt(n *Node, order *Order) {
 		// Special: argument will be converted to interface using convT2E
 	// so make sure it is an addressable temporary.
 	case OPANIC:
-		t = marktemp(order)
+		t := marktemp(order)
 
 		orderexpr(&n.Left, order)
 		if !Isinter(n.Left.Type) {
@@ -736,7 +690,7 @@ func orderstmt(n *Node, order *Order) {
 	// which must make a copy to avoid seeing updates made during
 	// the range body. Ranging over an array value is uncommon though.
 	case ORANGE:
-		t = marktemp(order)
+		t := marktemp(order)
 
 		orderexpr(&n.Right, order)
 		switch n.Type.Etype {
@@ -761,7 +715,7 @@ func orderstmt(n *Node, order *Order) {
 		// fall through
 		case TCHAN,
 			TSTRING:
-			r = n.Right
+			r := n.Right
 
 			if r.Type.Etype == TSTRING && r.Type != Types[TSTRING] {
 				r = Nod(OCONV, r, nil)
@@ -775,7 +729,7 @@ func orderstmt(n *Node, order *Order) {
 		// TODO(rsc): Make tmp = literal expressions reuse tmp.
 		// For maps tmp is just one word so it hardly matters.
 		case TMAP:
-			r = n.Right
+			r := n.Right
 
 			n.Right = ordercopyexpr(r, r.Type, order, 0)
 
@@ -783,7 +737,7 @@ func orderstmt(n *Node, order *Order) {
 			n.Alloc = ordertemp(Types[TUINT8], order, true)
 		}
 
-		for l = n.List; l != nil; l = l.Next {
+		for l := n.List; l != nil; l = l.Next {
 			orderexprinplace(&l.N, order)
 		}
 		orderblock(&n.Nbody)
@@ -804,9 +758,12 @@ func orderstmt(n *Node, order *Order) {
 	// case (if p were nil, then the timing of the fault would
 	// give this away).
 	case OSELECT:
-		t = marktemp(order)
+		t := marktemp(order)
 
-		for l = n.List; l != nil; l = l.Next {
+		var tmp1 *Node
+		var tmp2 *Node
+		var r *Node
+		for l := n.List; l != nil; l = l.Next {
 			if l.N.Op != OXCASE {
 				Fatal("order select case %v", Oconv(int(l.N.Op), 0))
 			}
@@ -931,7 +888,7 @@ func orderstmt(n *Node, order *Order) {
 		// Now that we have accumulated all the temporaries, clean them.
 		// Also insert any ninit queued during the previous loop.
 		// (The temporary cleaning must follow that ninit work.)
-		for l = n.List; l != nil; l = l.Next {
+		for l := n.List; l != nil; l = l.Next {
 			cleantempnopop(t, order, &l.N.Ninit)
 			l.N.Nbody = concat(l.N.Ninit, l.N.Nbody)
 			l.N.Ninit = nil
@@ -942,7 +899,7 @@ func orderstmt(n *Node, order *Order) {
 
 		// Special: value being sent is passed as a pointer; make it addressable.
 	case OSEND:
-		t = marktemp(order)
+		t := marktemp(order)
 
 		orderexpr(&n.Left, order)
 		orderexpr(&n.Right, order)
@@ -958,10 +915,10 @@ func orderstmt(n *Node, order *Order) {
 	// For now just clean all the temporaries at the end.
 	// In practice that's fine.
 	case OSWITCH:
-		t = marktemp(order)
+		t := marktemp(order)
 
 		orderexpr(&n.Ntest, order)
-		for l = n.List; l != nil; l = l.Next {
+		for l := n.List; l != nil; l = l.Next {
 			if l.N.Op != OXCASE {
 				Fatal("order switch case %v", Oconv(int(l.N.Op), 0))
 			}
@@ -994,20 +951,12 @@ func orderexprlistinplace(l *NodeList, order *Order) {
 // Orderexpr orders a single expression, appending side
 // effects to order->out as needed.
 func orderexpr(np **Node, order *Order) {
-	var n *Node
-	var mark *NodeList
-	var l *NodeList
-	var t *Type
-	var lno int
-	var haslit bool
-	var hasbyte bool
-
-	n = *np
+	n := *np
 	if n == nil {
 		return
 	}
 
-	lno = int(setlineno(n))
+	lno := int(setlineno(n))
 	orderinit(n, order)
 
 	switch n.Op {
@@ -1024,7 +973,7 @@ func orderexpr(np **Node, order *Order) {
 		orderexprlist(n.List, order)
 
 		if count(n.List) > 5 {
-			t = typ(TARRAY)
+			t := typ(TARRAY)
 			t.Bound = int64(count(n.List))
 			t.Type = Types[TSTRING]
 			n.Alloc = ordertemp(t, order, false)
@@ -1037,16 +986,16 @@ func orderexpr(np **Node, order *Order) {
 		// Otherwise if all other arguments are empty strings,
 		// concatstrings will return the reference to the temp string
 		// to the caller.
-		hasbyte = false
+		hasbyte := false
 
-		haslit = false
-		for l = n.List; l != nil; l = l.Next {
+		haslit := false
+		for l := n.List; l != nil; l = l.Next {
 			hasbyte = hasbyte || l.N.Op == OARRAYBYTESTR
 			haslit = haslit || l.N.Op == OLITERAL && len(l.N.Val.U.Sval.S) != 0
 		}
 
 		if haslit && hasbyte {
-			for l = n.List; l != nil; l = l.Next {
+			for l := n.List; l != nil; l = l.Next {
 				if l.N.Op == OARRAYBYTESTR {
 					l.N.Op = OARRAYBYTESTRTMP
 				}
@@ -1105,13 +1054,13 @@ func orderexpr(np **Node, order *Order) {
 
 	case OANDAND,
 		OOROR:
-		mark = marktemp(order)
+		mark := marktemp(order)
 		orderexpr(&n.Left, order)
 
 		// Clean temporaries from first branch at beginning of second.
 		// Leave them on the stack so that they can be killed in the outer
 		// context in case the short circuit is taken.
-		l = nil
+		l := (*NodeList)(nil)
 
 		cleantempnopop(mark, order, &l)
 		n.Right.Ninit = concat(l, n.Right.Ninit)
@@ -1168,7 +1117,7 @@ func orderexpr(np **Node, order *Order) {
 		ONE:
 		orderexpr(&n.Left, order)
 		orderexpr(&n.Right, order)
-		t = n.Left.Type
+		t := n.Left.Type
 		if t.Etype == TSTRUCT || Isfixedarray(t) {
 			// for complex comparisons, we need both args to be
 			// addressable so we can pass them to the runtime.
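
A pattern running through the whole orderstmt rewrite above: nearly every case now opens with its own "t := marktemp(order)". That works because each case clause in a Go switch is an implicit block, so identical short-declared names in different clauses are distinct variables. A minimal illustration:

package main

import "fmt"

func describe(op int) string {
	switch op {
	case 1:
		t := "statement" // scoped to this case clause only
		return t
	case 2:
		t := "expression" // a distinct variable with the same name
		return t
	}
	return "unknown"
}

func main() { fmt.Println(describe(1), describe(2)) }
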
diff --git a/src/cmd/internal/gc/pgen.go b/src/cmd/internal/gc/pgen.go
index c4c0dd4936cf5b5d27ae2c41987642cdb756eb11..cb6cb5b76f106ed8560d200ad673246c0002ba2c 100644
--- a/src/cmd/internal/gc/pgen.go
+++ b/src/cmd/internal/gc/pgen.go
@@ -20,13 +20,11 @@ var makefuncdatasym_nsym int32
 
 func makefuncdatasym(namefmt string, funcdatakind int64) *Sym {
 	var nod Node
-	var pnod *Node
-	var sym *Sym
 
 	namebuf = fmt.Sprintf(namefmt, makefuncdatasym_nsym)
 	makefuncdatasym_nsym++
-	sym = Lookup(namebuf)
-	pnod = newname(sym)
+	sym := Lookup(namebuf)
+	pnod := newname(sym)
 	pnod.Class = PEXTERN
 	Nodconst(&nod, Types[TINT32], funcdatakind)
 	Thearch.Gins(obj.AFUNCDATA, &nod, pnod)
@@ -115,9 +113,7 @@ func gvarkill(n *Node) {
 }
 
 func removevardef(firstp *obj.Prog) {
-	var p *obj.Prog
-
-	for p = firstp; p != nil; p = p.Link {
+	for p := firstp; p != nil; p = p.Link {
 		for p.Link != nil && (p.Link.As == obj.AVARDEF || p.Link.As == obj.AVARKILL) {
 			p.Link = p.Link.Link
 		}
@@ -130,41 +126,31 @@ func removevardef(firstp *obj.Prog) {
 }
 
 func gcsymdup(s *Sym) {
-	var ls *obj.LSym
-	var lo uint64
-	var hi uint64
-
-	ls = Linksym(s)
+	ls := Linksym(s)
 	if len(ls.R) > 0 {
 		Fatal("cannot rosymdup %s with relocations", ls.Name)
 	}
 	var d MD5
 	md5reset(&d)
 	md5write(&d, ls.P, len(ls.P))
-	lo = md5sum(&d, &hi)
+	var hi uint64
+	lo := md5sum(&d, &hi)
 	ls.Name = fmt.Sprintf("gclocals·%016x%016x", lo, hi)
 	ls.Dupok = 1
 }
 
 func emitptrargsmap() {
-	var nptr int
-	var nbitmap int
-	var j int
-	var off int
-	var xoffset int64
-	var bv *Bvec
-	var sym *Sym
-
-	sym = Lookup(fmt.Sprintf("%s.args_stackmap", Curfn.Nname.Sym.Name))
+	sym := Lookup(fmt.Sprintf("%s.args_stackmap", Curfn.Nname.Sym.Name))
 
-	nptr = int(Curfn.Type.Argwid / int64(Widthptr))
-	bv = bvalloc(int32(nptr) * 2)
-	nbitmap = 1
+	nptr := int(Curfn.Type.Argwid / int64(Widthptr))
+	bv := bvalloc(int32(nptr) * 2)
+	nbitmap := 1
 	if Curfn.Type.Outtuple > 0 {
 		nbitmap = 2
 	}
-	off = duint32(sym, 0, uint32(nbitmap))
+	off := duint32(sym, 0, uint32(nbitmap))
 	off = duint32(sym, off, uint32(bv.n))
+	var xoffset int64
 	if Curfn.Type.Thistuple > 0 {
 		xoffset = 0
 		twobitwalktype1(getthisx(Curfn.Type), &xoffset, bv)
@@ -175,13 +161,13 @@ func emitptrargsmap() {
 		twobitwalktype1(getinargx(Curfn.Type), &xoffset, bv)
 	}
 
-	for j = 0; int32(j) < bv.n; j += 32 {
+	for j := 0; int32(j) < bv.n; j += 32 {
 		off = duint32(sym, off, bv.b[j/32])
 	}
 	if Curfn.Type.Outtuple > 0 {
 		xoffset = 0
 		twobitwalktype1(getoutargx(Curfn.Type), &xoffset, bv)
-		for j = 0; int32(j) < bv.n; j += 32 {
+		for j := 0; int32(j) < bv.n; j += 32 {
 			off = duint32(sym, off, bv.b[j/32])
 		}
 	}
@@ -198,9 +184,6 @@ func emitptrargsmap() {
 // the top of the stack and increasing in size.
 // Non-autos sort on offset.
 func cmpstackvar(a *Node, b *Node) int {
-	var ap int
-	var bp int
-
 	if a.Class != b.Class {
 		if a.Class == PAUTO {
 			return +1
@@ -222,8 +205,8 @@ func cmpstackvar(a *Node, b *Node) int {
 		return int(b.Used) - int(a.Used)
 	}
 
-	ap = bool2int(haspointers(a.Type))
-	bp = bool2int(haspointers(b.Type))
+	ap := bool2int(haspointers(a.Type))
+	bp := bool2int(haspointers(b.Type))
 	if ap != bp {
 		return bp - ap
 	}
@@ -246,10 +229,6 @@ func cmpstackvar(a *Node, b *Node) int {
 
 // TODO(lvd) find out where the PAUTO/OLITERAL nodes come from.
 func allocauto(ptxt *obj.Prog) {
-	var ll *NodeList
-	var n *Node
-	var w int64
-
 	Stksize = 0
 	stkptrsize = 0
 
@@ -258,7 +237,7 @@ func allocauto(ptxt *obj.Prog) {
 	}
 
 	// Mark the PAUTO's unused.
-	for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+	for ll := Curfn.Dcl; ll != nil; ll = ll.Next {
 		if ll.N.Class == PAUTO {
 			ll.N.Used = 0
 		}
@@ -269,9 +248,9 @@ func allocauto(ptxt *obj.Prog) {
 	listsort(&Curfn.Dcl, cmpstackvar)
 
 	// Unused autos are at the end, chop 'em off.
-	ll = Curfn.Dcl
+	ll := Curfn.Dcl
 
-	n = ll.N
+	n := ll.N
 	if n.Class == PAUTO && n.Op == ONAME && n.Used == 0 {
 		// No locals used at all
 		Curfn.Dcl = nil
@@ -280,7 +259,7 @@ func allocauto(ptxt *obj.Prog) {
 		return
 	}
 
-	for ll = Curfn.Dcl; ll.Next != nil; ll = ll.Next {
+	for ll := Curfn.Dcl; ll.Next != nil; ll = ll.Next {
 		n = ll.Next.N
 		if n.Class == PAUTO && n.Op == ONAME && n.Used == 0 {
 			ll.Next = nil
@@ -290,7 +269,8 @@ func allocauto(ptxt *obj.Prog) {
 	}
 
 	// Reassign stack offsets of the locals that are still there.
-	for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+	var w int64
+	for ll := Curfn.Dcl; ll != nil; ll = ll.Next {
 		n = ll.N
 		if n.Class != PAUTO || n.Op != ONAME {
 			continue
@@ -323,7 +303,7 @@ func allocauto(ptxt *obj.Prog) {
 	fixautoused(ptxt)
 
 	// The debug information needs accurate offsets on the symbols.
-	for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+	for ll := Curfn.Dcl; ll != nil; ll = ll.Next {
 		if ll.N.Class != PAUTO || ll.N.Op != ONAME {
 			continue
 		}
@@ -341,10 +321,9 @@ func movelarge(l *NodeList) {
 }
 
 func movelargefn(fn *Node) {
-	var l *NodeList
 	var n *Node
 
-	for l = fn.Dcl; l != nil; l = l.Next {
+	for l := fn.Dcl; l != nil; l = l.Next {
 		n = l.N
 		if n.Class == PAUTO && n.Type != nil && n.Type.Width > MaxStackVarSize {
 			addrescapes(n)
@@ -353,8 +332,6 @@ func movelargefn(fn *Node) {
 }
 
 func Cgen_checknil(n *Node) {
-	var reg Node
-
 	if Disable_checknil != 0 {
 		return
 	}
@@ -366,6 +343,7 @@ func Cgen_checknil(n *Node) {
 	}
 
 	if ((Thearch.Thechar == '5' || Thearch.Thechar == '9') && n.Op != OREGISTER) || n.Addable == 0 || n.Op == OLITERAL {
+		var reg Node
 		Thearch.Regalloc(&reg, Types[Tptr], n)
 		Thearch.Cgen(n, &reg)
 		Thearch.Gins(obj.ACHECKNIL, &reg, nil)
@@ -380,20 +358,6 @@ func Cgen_checknil(n *Node) {
  * ggen.c
  */
 func compile(fn *Node) {
-	var pl *obj.Plist
-	var nod1 Node
-	var n *Node
-	var ptxt *obj.Prog
-	var p *obj.Prog
-	var lno int32
-	var t *Type
-	var save Iter
-	var oldstksize int64
-	var l *NodeList
-	var nam *Node
-	var gcargs *Sym
-	var gclocals *Sym
-
 	if Newproc == nil {
 		Newproc = Sysfunc("newproc")
 		Deferproc = Sysfunc("deferproc")
@@ -403,11 +367,20 @@ func compile(fn *Node) {
 		throwreturn = Sysfunc("throwreturn")
 	}
 
-	lno = setlineno(fn)
+	lno := setlineno(fn)
 
 	Curfn = fn
 	dowidth(Curfn.Type)
 
+	var oldstksize int64
+	var nod1 Node
+	var ptxt *obj.Prog
+	var pl *obj.Plist
+	var p *obj.Prog
+	var n *Node
+	var nam *Node
+	var gcargs *Sym
+	var gclocals *Sym
 	if fn.Nbody == nil {
 		if pure_go != 0 || strings.HasPrefix(fn.Nname.Sym.Name, "init.") {
 			Yyerror("missing function body", fn)
@@ -428,7 +401,8 @@ func compile(fn *Node) {
 
 	if Curfn.Type.Outnamed != 0 {
 		// add clearing of the output parameters
-		t = Structfirst(&save, Getoutarg(Curfn.Type))
+		var save Iter
+		t := Structfirst(&save, Getoutarg(Curfn.Type))
 
 		for t != nil {
 			if t.Nname != nil {
@@ -501,11 +475,11 @@ func compile(fn *Node) {
 	gcargs = makefuncdatasym("gcargs·%d", obj.FUNCDATA_ArgsPointerMaps)
 	gclocals = makefuncdatasym("gclocals·%d", obj.FUNCDATA_LocalsPointerMaps)
 
-	for t = Curfn.Paramfld; t != nil; t = t.Down {
+	for t := Curfn.Paramfld; t != nil; t = t.Down {
 		gtrack(tracksym(t.Type))
 	}
 
-	for l = fn.Dcl; l != nil; l = l.Next {
+	for l := fn.Dcl; l != nil; l = l.Next {
 		n = l.N
 		if n.Op != ONAME { // might be OTYPE or OLITERAL
 			continue
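
In emitptrargsmap above, xoffset keeps its hoisted var because it is reset inside three separate if blocks; the same goes for w in allocauto. A := inside any one block would declare a fresh block-local variable, and although that would happen to be legal here, the mechanical rewrite appears to avoid splitting one variable into several. A sketch of the two spellings, with illustrative values:

package main

import "fmt"

func main() {
	hasThis, hasIn := true, true
	var xoffset int64 // one variable shared by both branches
	if hasThis {
		xoffset = 0
		fmt.Println("receiver at", xoffset)
	}
	if hasIn {
		xoffset = 8 // a := here would declare a new, block-local variable
		fmt.Println("args at", xoffset)
	}
}
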
diff --git a/src/cmd/internal/gc/plive.go b/src/cmd/internal/gc/plive.go
index 73f6086c9447a62e23a23cf184d9ef9ce1fe761c..99654c5079259cc260dc54fcbee0421d6e1173a3 100644
--- a/src/cmd/internal/gc/plive.go
+++ b/src/cmd/internal/gc/plive.go
@@ -61,9 +61,7 @@ type Liveness struct {
 }
 
 func xmalloc(size uint32) interface{} {
-	var result interface{}
-
-	result = make([]byte, size)
+	result := (interface{})(make([]byte, size))
 	if result == nil {
 		Fatal("malloc failed")
 	}
@@ -72,12 +70,10 @@ func xmalloc(size uint32) interface{} {
 
 // Constructs a new basic block containing a single instruction.
 func newblock(prog *obj.Prog) *BasicBlock {
-	var result *BasicBlock
-
 	if prog == nil {
 		Fatal("newblock: prog cannot be nil")
 	}
-	result = new(BasicBlock)
+	result := new(BasicBlock)
 	result.rpo = -1
 	result.mark = UNVISITED
 	result.first = prog
@@ -111,9 +107,6 @@ func addedge(from *BasicBlock, to *BasicBlock) {
 // stream.  Any control flow, such as a branch or fall-through, that targets
 // the existing instruction is adjusted to target the new instruction.
 func splicebefore(lv *Liveness, bb *BasicBlock, prev *obj.Prog, curr *obj.Prog) {
-	var next *obj.Prog
-	var tmp obj.Prog
-
 	// There may be other instructions pointing at curr,
 	// and we want them to now point at prev. Instead of
 	// trying to find all such instructions, swap the contents
@@ -121,14 +114,14 @@ func splicebefore(lv *Liveness, bb *BasicBlock, prev *obj.Prog, curr *obj.Prog)
 	// The "opt" field is the backward link in the linked list.
 
 	// Overwrite curr's data with prev, but keep the list links.
-	tmp = *curr
+	tmp := *curr
 
 	*curr = *prev
 	curr.Opt = tmp.Opt
 	curr.Link = tmp.Link
 
 	// Overwrite prev (now next) with curr's old data.
-	next = prev
+	next := prev
 
 	*next = tmp
 	next.Opt = nil
@@ -151,27 +144,25 @@ func splicebefore(lv *Liveness, bb *BasicBlock, prev *obj.Prog, curr *obj.Prog)
 // A pretty printer for basic blocks.
 func printblock(bb *BasicBlock) {
 	var pred *BasicBlock
-	var succ *BasicBlock
-	var prog *obj.Prog
-	var i int
 
 	fmt.Printf("basic block %d\n", bb.rpo)
 	fmt.Printf("\tpred:")
-	for i = 0; i < len(bb.pred); i++ {
+	for i := 0; i < len(bb.pred); i++ {
 		pred = bb.pred[i]
 		fmt.Printf(" %d", pred.rpo)
 	}
 
 	fmt.Printf("\n")
 	fmt.Printf("\tsucc:")
-	for i = 0; i < len(bb.succ); i++ {
+	var succ *BasicBlock
+	for i := 0; i < len(bb.succ); i++ {
 		succ = bb.succ[i]
 		fmt.Printf(" %d", succ.rpo)
 	}
 
 	fmt.Printf("\n")
 	fmt.Printf("\tprog:\n")
-	for prog = bb.first; ; prog = prog.Link {
+	for prog := bb.first; ; prog = prog.Link {
 		fmt.Printf("\t\t%v\n", prog)
 		if prog == bb.last {
 			break
@@ -195,11 +186,8 @@ func blockany(bb *BasicBlock, f func(*obj.Prog) bool) bool {
 // Collects and returns an array of Node*s for function arguments and local
 // variables.
 func getvariables(fn *Node) []*Node {
-	var result []*Node
-	var ll *NodeList
-
-	result = make([]*Node, 0, 0)
-	for ll = fn.Dcl; ll != nil; ll = ll.Next {
+	result := make([]*Node, 0, 0)
+	for ll := fn.Dcl; ll != nil; ll = ll.Next {
 		if ll.N.Op == ONAME {
 			// In order for GODEBUG=gcdead=1 to work, each bitmap needs
 			// to contain information about all variables covered by the bitmap.
@@ -244,9 +232,8 @@ func getvariables(fn *Node) []*Node {
 // A pretty printer for control flow graphs.  Takes an array of BasicBlock*s.
 func printcfg(cfg []*BasicBlock) {
 	var bb *BasicBlock
-	var i int32
 
-	for i = 0; i < int32(len(cfg)); i++ {
+	for i := int32(0); i < int32(len(cfg)); i++ {
 		bb = cfg[i]
 		printblock(bb)
 	}
@@ -256,10 +243,9 @@ func printcfg(cfg []*BasicBlock) {
 // standard algorithm.  Unconnected blocks will not be affected.
 func reversepostorder(root *BasicBlock, rpo *int32) {
 	var bb *BasicBlock
-	var i int
 
 	root.mark = VISITED
-	for i = 0; i < len(root.succ); i++ {
+	for i := 0; i < len(root.succ); i++ {
 		bb = root.succ[i]
 		if bb.mark == UNVISITED {
 			reversepostorder(bb, rpo)
@@ -299,8 +285,6 @@ func iscall(prog *obj.Prog, name *obj.LSym) bool {
 var isselectcommcasecall_names [5]*obj.LSym
 
 func isselectcommcasecall(prog *obj.Prog) bool {
-	var i int32
-
 	if isselectcommcasecall_names[0] == nil {
 		isselectcommcasecall_names[0] = Linksym(Pkglookup("selectsend", Runtimepkg))
 		isselectcommcasecall_names[1] = Linksym(Pkglookup("selectrecv", Runtimepkg))
@@ -308,7 +292,7 @@ func isselectcommcasecall(prog *obj.Prog) bool {
 		isselectcommcasecall_names[3] = Linksym(Pkglookup("selectdefault", Runtimepkg))
 	}
 
-	for i = 0; isselectcommcasecall_names[i] != nil; i++ {
+	for i := int32(0); isselectcommcasecall_names[i] != nil; i++ {
 		if iscall(prog, isselectcommcasecall_names[i]) {
 			return true
 		}
@@ -352,10 +336,9 @@ func isdeferreturn(prog *obj.Prog) bool {
 // are implicit successors of the runtime·selectgo call node.  The goal of this
 // analysis is to add these missing edges to complete the control flow graph.
 func addselectgosucc(selectgo *BasicBlock) {
-	var pred *BasicBlock
 	var succ *BasicBlock
 
-	pred = selectgo
+	pred := selectgo
 	for {
 		if len(pred.pred) == 0 {
 			Fatal("selectgo does not have a newselect")
@@ -392,9 +375,8 @@ func addselectgosucc(selectgo *BasicBlock) {
 // array of BasicBlock*s containing selectgo calls.
 func fixselectgo(selectgo []*BasicBlock) {
 	var bb *BasicBlock
-	var i int32
 
-	for i = 0; i < int32(len(selectgo)); i++ {
+	for i := int32(0); i < int32(len(selectgo)); i++ {
 		bb = selectgo[i]
 		addselectgosucc(bb)
 	}
@@ -406,33 +388,25 @@ func fixselectgo(selectgo []*BasicBlock) {
 // array of BasicBlock*s in control flow graph form (basic blocks ordered by
 // their RPO number).
 func newcfg(firstp *obj.Prog) []*BasicBlock {
-	var p *obj.Prog
-	var prev *obj.Prog
-	var bb *BasicBlock
-	var cfg []*BasicBlock
-	var selectgo []*BasicBlock
-	var i int32
-	var rpo int32
-
 	// Reset the opt field of each prog to nil.  In the first and second
 	// passes, instructions that are labels temporarily use the opt field to
 	// point to their basic block.  In the third pass, the opt field is reset
 	// to point to the predecessor of an instruction in its basic block.
-	for p = firstp; p != nil; p = p.Link {
+	for p := firstp; p != nil; p = p.Link {
 		p.Opt = nil
 	}
 
 	// Allocate an array to remember where we have seen selectgo calls.
 	// These blocks will be revisited to add successor control flow edges.
-	selectgo = make([]*BasicBlock, 0, 0)
+	selectgo := make([]*BasicBlock, 0, 0)
 
 	// Loop through all instructions identifying branch targets
 	// and fall-throughs and allocate basic blocks.
-	cfg = make([]*BasicBlock, 0, 0)
+	cfg := make([]*BasicBlock, 0, 0)
 
-	bb = newblock(firstp)
+	bb := newblock(firstp)
 	cfg = append(cfg, bb)
-	for p = firstp; p != nil; p = p.Link {
+	for p := firstp; p != nil; p = p.Link {
 		if p.To.Type == obj.TYPE_BRANCH {
 			if p.To.U.Branch == nil {
 				Fatal("prog branch to nil")
@@ -458,7 +432,8 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
 	// Loop through all basic blocks maximally growing the list of
 	// contained instructions until a label is reached.  Add edges
 	// for branches and fall-through instructions.
-	for i = 0; i < int32(len(cfg)); i++ {
+	var p *obj.Prog
+	for i := int32(0); i < int32(len(cfg)); i++ {
 		bb = cfg[i]
 		for p = bb.last; p != nil; p = p.Link {
 			if p.Opt != nil && p != bb.last {
@@ -492,7 +467,8 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
 
 	// Add back links so the instructions in a basic block can be traversed
 	// backward.  This is the final state of the instruction opt field.
-	for i = 0; i < int32(len(cfg)); i++ {
+	var prev *obj.Prog
+	for i := int32(0); i < int32(len(cfg)); i++ {
 		bb = cfg[i]
 		p = bb.first
 		prev = nil
@@ -513,13 +489,13 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
 
 	// Find a depth-first order and assign a depth-first number to
 	// all basic blocks.
-	for i = 0; i < int32(len(cfg)); i++ {
+	for i := int32(0); i < int32(len(cfg)); i++ {
 		bb = cfg[i]
 		bb.mark = UNVISITED
 	}
 
 	bb = cfg[0]
-	rpo = int32(len(cfg))
+	rpo := int32(len(cfg))
 	reversepostorder(bb, &rpo)
 
 	// Sort the basic blocks by their depth first number.  The
@@ -544,20 +520,15 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
 // Frees a control flow graph (an array of BasicBlock*s) and all of its leaf
 // data structures.
 func freecfg(cfg []*BasicBlock) {
-	var bb *BasicBlock
-	var bb0 *BasicBlock
-	var p *obj.Prog
-	var i int32
-	var n int32
-
-	n = int32(len(cfg))
+	n := int32(len(cfg))
 	if n > 0 {
-		bb0 = cfg[0]
-		for p = bb0.first; p != nil; p = p.Link {
+		bb0 := cfg[0]
+		for p := bb0.first; p != nil; p = p.Link {
 			p.Opt = nil
 		}
 
-		for i = 0; i < n; i++ {
+		var bb *BasicBlock
+		for i := int32(0); i < n; i++ {
 			bb = cfg[i]
 			freeblock(bb)
 		}
@@ -586,10 +557,6 @@ func isfunny(n *Node) bool {
 // initialization.
 func progeffects(prog *obj.Prog, vars []*Node, uevar *Bvec, varkill *Bvec, avarinit *Bvec) {
 	var info ProgInfo
-	var from *obj.Addr
-	var to *obj.Addr
-	var node *Node
-	var i int32
 
 	bvresetall(uevar)
 	bvresetall(varkill)
@@ -607,7 +574,8 @@ func progeffects(prog *obj.Prog, vars []*Node, uevar *Bvec, varkill *Bvec, avari
 		// all the parameters for correctness, and similarly it must not
 		// read the out arguments - they won't be set until the new
 		// function runs.
-		for i = 0; i < int32(len(vars)); i++ {
+		var node *Node
+		for i := int32(0); i < int32(len(vars)); i++ {
 			node = vars[i]
 			switch node.Class &^ PHEAP {
 			case PPARAM:
@@ -634,7 +602,8 @@ func progeffects(prog *obj.Prog, vars []*Node, uevar *Bvec, varkill *Bvec, avari
 	if prog.As == obj.ATEXT {
 		// A text instruction marks the entry point to a function and
 		// the definition point of all in arguments.
-		for i = 0; i < int32(len(vars)); i++ {
+		var node *Node
+		for i := int32(0); i < int32(len(vars)); i++ {
 			node = vars[i]
 			switch node.Class &^ PHEAP {
 			case PPARAM:
@@ -649,7 +618,7 @@ func progeffects(prog *obj.Prog, vars []*Node, uevar *Bvec, varkill *Bvec, avari
 	}
 
 	if info.Flags&(LeftRead|LeftWrite|LeftAddr) != 0 {
-		from = &prog.From
+		from := &prog.From
 		if from.Node != nil && from.Sym != nil && ((from.Node).(*Node)).Curfn == Curfn {
 			switch ((from.Node).(*Node)).Class &^ PHEAP {
 			case PAUTO,
@@ -680,7 +649,7 @@ func progeffects(prog *obj.Prog, vars []*Node, uevar *Bvec, varkill *Bvec, avari
 
 Next:
 	if info.Flags&(RightRead|RightWrite|RightAddr) != 0 {
-		to = &prog.To
+		to := &prog.To
 		if to.Node != nil && to.Sym != nil && ((to.Node).(*Node)).Curfn == Curfn {
 			switch ((to.Node).(*Node)).Class &^ PHEAP {
 			case PAUTO,
@@ -729,18 +698,13 @@ Next1:
 // liveness computation.  The cfg argument is an array of BasicBlock*s and the
 // vars argument is an array of Node*s.
 func newliveness(fn *Node, ptxt *obj.Prog, cfg []*BasicBlock, vars []*Node) *Liveness {
-	var result *Liveness
-	var i int32
-	var nblocks int32
-	var nvars int32
-
-	result = new(Liveness)
+	result := new(Liveness)
 	result.fn = fn
 	result.ptxt = ptxt
 	result.cfg = cfg
 	result.vars = vars
 
-	nblocks = int32(len(cfg))
+	nblocks := int32(len(cfg))
 	result.uevar = make([]*Bvec, nblocks)
 	result.varkill = make([]*Bvec, nblocks)
 	result.livein = make([]*Bvec, nblocks)
@@ -749,8 +713,8 @@ func newliveness(fn *Node, ptxt *obj.Prog, cfg []*BasicBlock, vars []*Node) *Liv
 	result.avarinitany = make([]*Bvec, nblocks)
 	result.avarinitall = make([]*Bvec, nblocks)
 
-	nvars = int32(len(vars))
-	for i = 0; i < nblocks; i++ {
+	nvars := int32(len(vars))
+	for i := int32(0); i < nblocks; i++ {
 		result.uevar[i] = bvalloc(nvars)
 		result.varkill[i] = bvalloc(nvars)
 		result.livein[i] = bvalloc(nvars)
@@ -767,19 +731,17 @@ func newliveness(fn *Node, ptxt *obj.Prog, cfg []*BasicBlock, vars []*Node) *Liv
 
 // Frees the liveness structure and all of its leaf data structures.
 func freeliveness(lv *Liveness) {
-	var i int32
-
 	if lv == nil {
 		Fatal("freeliveness: cannot free nil")
 	}
 
-	for i = 0; i < int32(len(lv.livepointers)); i++ {
+	for i := int32(0); i < int32(len(lv.livepointers)); i++ {
 	}
 
-	for i = 0; i < int32(len(lv.argslivepointers)); i++ {
+	for i := int32(0); i < int32(len(lv.argslivepointers)); i++ {
 	}
 
-	for i = 0; i < int32(len(lv.cfg)); i++ {
+	for i := int32(0); i < int32(len(lv.cfg)); i++ {
 	}
 }
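
The three loops in freeliveness above are left with empty bodies: in the C original each iteration called free(), and the machine translation kept the loop skeletons after those calls were dropped. Under Go's garbage collector the function presumably reduces to its nil check — a sketch reusing the file's own Liveness and Fatal, not a drop-in replacement:

func freeliveness(lv *Liveness) {
	if lv == nil {
		Fatal("freeliveness: cannot free nil")
	}
	// lv.livepointers, lv.argslivepointers, and lv.cfg are reclaimed
	// by the garbage collector; there is nothing to free by hand.
}
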
 
@@ -798,14 +760,11 @@ func printeffects(p *obj.Prog, uevar *Bvec, varkill *Bvec, avarinit *Bvec) {
 // addresses to avoid confusing the C-like conventions used in the node variable
 // names.
 func printnode(node *Node) {
-	var p string
-	var a string
-
-	p = ""
+	p := ""
 	if haspointers(node.Type) {
 		p = "^"
 	}
-	a = ""
+	a := ""
 	if node.Addrtaken != 0 {
 		a = "@"
 	}
@@ -814,10 +773,8 @@ func printnode(node *Node) {
 
 // Pretty print a list of variables.  The vars argument is an array of Node*s.
 func printvars(name string, bv *Bvec, vars []*Node) {
-	var i int32
-
 	fmt.Printf("%s:", name)
-	for i = 0; i < int32(len(vars)); i++ {
+	for i := int32(0); i < int32(len(vars)); i++ {
 		if bvget(bv, i) != 0 {
 			printnode(vars[i])
 		}
@@ -829,16 +786,11 @@ func printvars(name string, bv *Bvec, vars []*Node) {
 // analysis.
 func livenessprintblock(lv *Liveness, bb *BasicBlock) {
 	var pred *BasicBlock
-	var succ *BasicBlock
-	var prog *obj.Prog
-	var live *Bvec
-	var i int
-	var pos int32
 
 	fmt.Printf("basic block %d\n", bb.rpo)
 
 	fmt.Printf("\tpred:")
-	for i = 0; i < len(bb.pred); i++ {
+	for i := 0; i < len(bb.pred); i++ {
 		pred = bb.pred[i]
 		fmt.Printf(" %d", pred.rpo)
 	}
@@ -846,7 +798,8 @@ func livenessprintblock(lv *Liveness, bb *BasicBlock) {
 	fmt.Printf("\n")
 
 	fmt.Printf("\tsucc:")
-	for i = 0; i < len(bb.succ); i++ {
+	var succ *BasicBlock
+	for i := 0; i < len(bb.succ); i++ {
 		succ = bb.succ[i]
 		fmt.Printf(" %d", succ.rpo)
 	}
@@ -862,7 +815,9 @@ func livenessprintblock(lv *Liveness, bb *BasicBlock) {
 	printvars("\tavarinitall", lv.avarinitall[bb.rpo], []*Node(lv.vars))
 
 	fmt.Printf("\tprog:\n")
-	for prog = bb.first; ; prog = prog.Link {
+	var live *Bvec
+	var pos int32
+	for prog := bb.first; ; prog = prog.Link {
 		fmt.Printf("\t\t%v", prog)
 		if prog.As == obj.APCDATA && prog.From.Offset == obj.PCDATA_StackMapIndex {
 			pos = int32(prog.To.Offset)
@@ -882,18 +837,15 @@ func livenessprintblock(lv *Liveness, bb *BasicBlock) {
 // liveness analysis.
 func livenessprintcfg(lv *Liveness) {
 	var bb *BasicBlock
-	var i int32
 
-	for i = 0; i < int32(len(lv.cfg)); i++ {
+	for i := int32(0); i < int32(len(lv.cfg)); i++ {
 		bb = lv.cfg[i]
 		livenessprintblock(lv, bb)
 	}
 }
 
 func checkauto(fn *Node, p *obj.Prog, n *Node) {
-	var l *NodeList
-
-	for l = fn.Dcl; l != nil; l = l.Next {
+	for l := fn.Dcl; l != nil; l = l.Next {
 		if l.N.Op == ONAME && l.N.Class == PAUTO && l.N == n {
 			return
 		}
@@ -905,21 +857,19 @@ func checkauto(fn *Node, p *obj.Prog, n *Node) {
 	}
 
 	fmt.Printf("checkauto %v: %v (%p; class=%d) not found in %v\n", Nconv(Curfn, 0), Nconv(n, 0), n, n.Class, p)
-	for l = fn.Dcl; l != nil; l = l.Next {
+	for l := fn.Dcl; l != nil; l = l.Next {
 		fmt.Printf("\t%v (%p; class=%d)\n", Nconv(l.N, 0), l.N, l.N.Class)
 	}
 	Yyerror("checkauto: invariant lost")
 }
 
 func checkparam(fn *Node, p *obj.Prog, n *Node) {
-	var l *NodeList
-	var a *Node
-	var class int
-
 	if isfunny(n) {
 		return
 	}
-	for l = fn.Dcl; l != nil; l = l.Next {
+	var a *Node
+	var class int
+	for l := fn.Dcl; l != nil; l = l.Next {
 		a = l.N
 		class = int(a.Class) &^ PHEAP
 		if a.Op == ONAME && (class == PPARAM || class == PPARAMOUT) && a == n {
@@ -928,7 +878,7 @@ func checkparam(fn *Node, p *obj.Prog, n *Node) {
 	}
 
 	fmt.Printf("checkparam %v: %v (%p; class=%d) not found in %v\n", Nconv(Curfn, 0), Nconv(n, 0), n, n.Class, p)
-	for l = fn.Dcl; l != nil; l = l.Next {
+	for l := fn.Dcl; l != nil; l = l.Next {
 		fmt.Printf("\t%v (%p; class=%d)\n", Nconv(l.N, 0), l.N, l.N.Class)
 	}
 	Yyerror("checkparam: invariant lost")
@@ -955,13 +905,11 @@ func checkprog(fn *Node, p *obj.Prog) {
 // nodes and there are special cases to skip over that stuff.  The analysis will
 // fail if this invariant blindly changes.
 func checkptxt(fn *Node, firstp *obj.Prog) {
-	var p *obj.Prog
-
 	if debuglive == 0 {
 		return
 	}
 
-	for p = firstp; p != nil; p = p.Link {
+	for p := firstp; p != nil; p = p.Link {
 		if false {
 			fmt.Printf("analyzing '%v'\n", p)
 		}
@@ -976,11 +924,6 @@ func checkptxt(fn *Node, firstp *obj.Prog) {
 // the same type t. On https://rsc.googlecode.com/hg/testdata/slow.go, twobitwalktype1
 // accounts for 40% of the 6g execution time.
 func twobitwalktype1(t *Type, xoffset *int64, bv *Bvec) {
-	var fieldoffset int64
-	var i int64
-	var o int64
-	var t1 *Type
-
 	if t.Align > 0 && *xoffset&int64(t.Align-1) != 0 {
 		Fatal("twobitwalktype1: invalid initial alignment, %v", Tconv(t, 0))
 	}
@@ -1002,7 +945,7 @@ func twobitwalktype1(t *Type, xoffset *int64, bv *Bvec) {
 		TFLOAT64,
 		TCOMPLEX64,
 		TCOMPLEX128:
-		for i = 0; i < t.Width; i++ {
+		for i := int64(0); i < t.Width; i++ {
 			bvset(bv, int32(((*xoffset+i)/int64(Widthptr))*obj.BitsPerPointer)) // 1 = live scalar (BitsScalar)
 		}
 
@@ -1053,14 +996,15 @@ func twobitwalktype1(t *Type, xoffset *int64, bv *Bvec) {
 			bvset(bv, int32((*xoffset/int64(Widthptr))*obj.BitsPerPointer+1)) // 2 = live ptr in first slot (BitsPointer)
 			*xoffset += t.Width
 		} else {
-			for i = 0; i < t.Bound; i++ {
+			for i := int64(0); i < t.Bound; i++ {
 				twobitwalktype1(t.Type, xoffset, bv)
 			}
 		}
 
 	case TSTRUCT:
-		o = 0
-		for t1 = t.Type; t1 != nil; t1 = t1.Down {
+		o := int64(0)
+		var fieldoffset int64
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
 			fieldoffset = t1.Width
 			*xoffset += fieldoffset - o
 			twobitwalktype1(t1.Type, xoffset, bv)
@@ -1089,12 +1033,9 @@ func argswords() int32 {
 // argument is an array of Node*s.
 func twobitlivepointermap(lv *Liveness, liveout *Bvec, vars []*Node, args *Bvec, locals *Bvec) {
 	var node *Node
-	var thisargtype *Type
-	var inargtype *Type
 	var xoffset int64
-	var i int32
 
-	for i = 0; ; i++ {
+	for i := int32(0); ; i++ {
 		i = int32(bvnext(liveout, i))
 		if i < 0 {
 			break
@@ -1116,14 +1057,14 @@ func twobitlivepointermap(lv *Liveness, liveout *Bvec, vars []*Node, args *Bvec,
 	// If the receiver or arguments are unnamed, they will be omitted
 	// from the list above. Preserve those values - even though they are unused -
 	// in order to keep their addresses live for use in stack traces.
-	thisargtype = getthisx(lv.fn.Type)
+	thisargtype := getthisx(lv.fn.Type)
 
 	if thisargtype != nil {
 		xoffset = 0
 		twobitwalktype1(thisargtype, &xoffset, args)
 	}
 
-	inargtype = getinargx(lv.fn.Type)
+	inargtype := getinargx(lv.fn.Type)
 	if inargtype != nil {
 		xoffset = 0
 		twobitwalktype1(inargtype, &xoffset, args)
@@ -1132,9 +1073,7 @@ func twobitlivepointermap(lv *Liveness, liveout *Bvec, vars []*Node, args *Bvec,
 
 // Construct a disembodied instruction.
 func unlinkedprog(as int) *obj.Prog {
-	var p *obj.Prog
-
-	p = Ctxt.NewProg()
+	p := Ctxt.NewProg()
 	Clearp(p)
 	p.As = int16(as)
 	return p
@@ -1145,11 +1084,10 @@ func unlinkedprog(as int) *obj.Prog {
 func newpcdataprog(prog *obj.Prog, index int32) *obj.Prog {
 	var from Node
 	var to Node
-	var pcdata *obj.Prog
 
 	Nodconst(&from, Types[TINT32], obj.PCDATA_StackMapIndex)
 	Nodconst(&to, Types[TINT32], int64(index))
-	pcdata = unlinkedprog(obj.APCDATA)
+	pcdata := unlinkedprog(obj.APCDATA)
 	pcdata.Lineno = prog.Lineno
 	Naddr(&from, &pcdata.From, 0)
 	Naddr(&to, &pcdata.To, 0)
@@ -1167,18 +1105,13 @@ func issafepoint(prog *obj.Prog) bool {
 // block
 func livenessprologue(lv *Liveness) {
 	var bb *BasicBlock
-	var uevar *Bvec
-	var varkill *Bvec
-	var avarinit *Bvec
 	var p *obj.Prog
-	var i int32
-	var nvars int32
 
-	nvars = int32(len(lv.vars))
-	uevar = bvalloc(nvars)
-	varkill = bvalloc(nvars)
-	avarinit = bvalloc(nvars)
-	for i = 0; i < int32(len(lv.cfg)); i++ {
+	nvars := int32(len(lv.vars))
+	uevar := bvalloc(nvars)
+	varkill := bvalloc(nvars)
+	avarinit := bvalloc(nvars)
+	for i := int32(0); i < int32(len(lv.cfg)); i++ {
 		bb = lv.cfg[i]
 
 		// Walk the block instructions backward and update the block
@@ -1214,29 +1147,20 @@ func livenessprologue(lv *Liveness) {
 // Solve the liveness dataflow equations.
 func livenesssolve(lv *Liveness) {
 	var bb *BasicBlock
-	var succ *BasicBlock
-	var pred *BasicBlock
-	var newlivein *Bvec
-	var newliveout *Bvec
-	var any *Bvec
-	var all *Bvec
 	var rpo int32
-	var i int32
-	var j int32
-	var change int32
 
 	// These temporary bitvectors exist to avoid successive allocations and
 	// frees within the loop.
-	newlivein = bvalloc(int32(len(lv.vars)))
+	newlivein := bvalloc(int32(len(lv.vars)))
 
-	newliveout = bvalloc(int32(len(lv.vars)))
-	any = bvalloc(int32(len(lv.vars)))
-	all = bvalloc(int32(len(lv.vars)))
+	newliveout := bvalloc(int32(len(lv.vars)))
+	any := bvalloc(int32(len(lv.vars)))
+	all := bvalloc(int32(len(lv.vars)))
 
 	// Push avarinitall, avarinitany forward.
 	// avarinitall says the addressed var is initialized along all paths reaching the block exit.
 	// avarinitany says the addressed var is initialized along some path reaching the block exit.
-	for i = 0; i < int32(len(lv.cfg)); i++ {
+	for i := int32(0); i < int32(len(lv.cfg)); i++ {
 		bb = lv.cfg[i]
 		rpo = int32(bb.rpo)
 		if i == 0 {
@@ -1249,7 +1173,10 @@ func livenesssolve(lv *Liveness) {
 		bvcopy(lv.avarinitany[rpo], lv.avarinit[rpo])
 	}
 
-	change = 1
+	change := int32(1)
+	var j int32
+	var i int32
+	var pred *BasicBlock
 	for change != 0 {
 		change = 0
 		for i = 0; i < int32(len(lv.cfg)); i++ {
@@ -1289,6 +1216,7 @@ func livenesssolve(lv *Liveness) {
 	// so low that it hardly seems to be worth the complexity.
 	change = 1
 
+	var succ *BasicBlock
 	for change != 0 {
 		change = 0
 
@@ -1328,19 +1256,17 @@ func livenesssolve(lv *Liveness) {
 // This function is slow but it is only used for generating debug prints.
 // Check whether n is marked live in args/locals.
 func islive(n *Node, args *Bvec, locals *Bvec) bool {
-	var i int
-
 	switch n.Class {
 	case PPARAM,
 		PPARAMOUT:
-		for i = 0; int64(i) < n.Type.Width/int64(Widthptr)*obj.BitsPerPointer; i++ {
+		for i := 0; int64(i) < n.Type.Width/int64(Widthptr)*obj.BitsPerPointer; i++ {
 			if bvget(args, int32(n.Xoffset/int64(Widthptr)*obj.BitsPerPointer+int64(i))) != 0 {
 				return true
 			}
 		}
 
 	case PAUTO:
-		for i = 0; int64(i) < n.Type.Width/int64(Widthptr)*obj.BitsPerPointer; i++ {
+		for i := 0; int64(i) < n.Type.Width/int64(Widthptr)*obj.BitsPerPointer; i++ {
 			if bvget(locals, int32((n.Xoffset+stkptrsize)/int64(Widthptr)*obj.BitsPerPointer+int64(i))) != 0 {
 				return true
 			}
@@ -1355,44 +1281,28 @@ func islive(n *Node, args *Bvec, locals *Bvec) bool {
 func livenessepilogue(lv *Liveness) {
 	var bb *BasicBlock
 	var pred *BasicBlock
-	var ambig *Bvec
-	var livein *Bvec
-	var liveout *Bvec
-	var uevar *Bvec
-	var varkill *Bvec
 	var args *Bvec
 	var locals *Bvec
-	var avarinit *Bvec
-	var any *Bvec
-	var all *Bvec
 	var n *Node
 	var p *obj.Prog
-	var next *obj.Prog
-	var i int32
 	var j int32
-	var numlive int32
-	var startmsg int32
-	var nmsg int32
-	var nvars int32
 	var pos int32
 	var xoffset int64
-	var msg []string
-	var fmt_ string
 
-	nvars = int32(len(lv.vars))
-	livein = bvalloc(nvars)
-	liveout = bvalloc(nvars)
-	uevar = bvalloc(nvars)
-	varkill = bvalloc(nvars)
-	avarinit = bvalloc(nvars)
-	any = bvalloc(nvars)
-	all = bvalloc(nvars)
-	ambig = bvalloc(localswords() * obj.BitsPerPointer)
-	msg = nil
-	nmsg = 0
-	startmsg = 0
-
-	for i = 0; i < int32(len(lv.cfg)); i++ {
+	nvars := int32(len(lv.vars))
+	livein := bvalloc(nvars)
+	liveout := bvalloc(nvars)
+	uevar := bvalloc(nvars)
+	varkill := bvalloc(nvars)
+	avarinit := bvalloc(nvars)
+	any := bvalloc(nvars)
+	all := bvalloc(nvars)
+	ambig := bvalloc(localswords() * obj.BitsPerPointer)
+	msg := []string(nil)
+	nmsg := int32(0)
+	startmsg := int32(0)
+
+	for i := int32(0); i < int32(len(lv.cfg)); i++ {
 		bb = lv.cfg[i]
 
 		// Compute avarinitany and avarinitall for entry to block.
@@ -1481,7 +1391,10 @@ func livenessepilogue(lv *Liveness) {
 		bb.lastbitmapindex = len(lv.livepointers) - 1
 	}
 
-	for i = 0; i < int32(len(lv.cfg)); i++ {
+	var fmt_ string
+	var next *obj.Prog
+	var numlive int32
+	for i := int32(0); i < int32(len(lv.cfg)); i++ {
 		bb = lv.cfg[i]
 
 		if debuglive >= 1 && Curfn.Nname.Sym.Name != "init" && Curfn.Nname.Sym.Name[0] != '.' {
@@ -1627,12 +1540,10 @@ const (
 )
 
 func hashbitmap(h uint32, bv *Bvec) uint32 {
-	var i int
-	var n int
 	var w uint32
 
-	n = int((bv.n + 31) / 32)
-	for i = 0; i < n; i++ {
+	n := int((bv.n + 31) / 32)
+	for i := 0; i < n; i++ {
 		w = bv.b[i]
 		h = (h * Hp) ^ (w & 0xff)
 		h = (h * Hp) ^ ((w >> 8) & 0xff)
@@ -1658,45 +1569,37 @@ func hashbitmap(h uint32, bv *Bvec) uint32 {
 // PCDATA tables cost about 100k. So for now we keep using a single index for
 // both bitmap lists.
 func livenesscompact(lv *Liveness) {
-	var table []int
-	var remap []int
-	var i int
-	var j int
-	var n int
-	var tablesize int
-	var uniq int
-	var h uint32
-	var local *Bvec
-	var arg *Bvec
-	var jlocal *Bvec
-	var jarg *Bvec
-	var p *obj.Prog
-
 	// Linear probing hash table of bitmaps seen so far.
 	// The hash table has 4n entries to keep the linear
 	// scan short. An entry of -1 indicates an empty slot.
-	n = len(lv.livepointers)
+	n := len(lv.livepointers)
 
-	tablesize = 4 * n
-	table = make([]int, tablesize)
+	tablesize := 4 * n
+	table := make([]int, tablesize)
 	for i := range table {
 		table[i] = -1
 	}
 
 	// remap[i] = the new index of the old bit vector #i.
-	remap = make([]int, n)
+	remap := make([]int, n)
 
 	for i := range remap {
 		remap[i] = -1
 	}
-	uniq = 0 // unique tables found so far
+	uniq := 0 // unique tables found so far
 
 	// Consider bit vectors in turn.
 	// If new, assign next number using uniq,
 	// record in remap, record in lv->livepointers and lv->argslivepointers
 	// under the new index, and add entry to hash table.
 	// If already seen, record earlier index in remap and free bitmaps.
-	for i = 0; i < n; i++ {
+	var jarg *Bvec
+	var j int
+	var h uint32
+	var arg *Bvec
+	var jlocal *Bvec
+	var local *Bvec
+	for i := 0; i < n; i++ {
 		local = lv.livepointers[i]
 		arg = lv.argslivepointers[i]
 		h = hashbitmap(hashbitmap(H0, local), arg) % uint32(tablesize)
@@ -1732,13 +1635,14 @@ func livenesscompact(lv *Liveness) {
 	// we don't need anymore. Clear the pointers later in the
 	// array so that we can tell where the coalesced bitmaps stop
 	// and so that we don't double-free when cleaning up.
-	for j = uniq; j < n; j++ {
+	for j := uniq; j < n; j++ {
 		lv.livepointers[j] = nil
 		lv.argslivepointers[j] = nil
 	}
 
 	// Rewrite PCDATA instructions to use new numbering.
-	for p = lv.ptxt; p != nil; p = p.Link {
+	var i int
+	for p := lv.ptxt; p != nil; p = p.Link {
 		if p.As == obj.APCDATA && p.From.Offset == obj.PCDATA_StackMapIndex {
 			i = int(p.To.Offset)
 			if i >= 0 {
@@ -1749,12 +1653,10 @@ func livenesscompact(lv *Liveness) {
 }
 
 func printbitset(printed int, name string, vars []*Node, bits *Bvec) int {
-	var i int
-	var started int
 	var n *Node
 
-	started = 0
-	for i = 0; i < len(vars); i++ {
+	started := 0
+	for i := 0; i < len(vars); i++ {
 		if bvget(bits, int32(i)) == 0 {
 			continue
 		}
@@ -1782,27 +1684,22 @@ func printbitset(printed int, name string, vars []*Node, bits *Bvec) int {
 // This format synthesizes the information used during the multiple passes
 // into a single presentation.
 func livenessprintdebug(lv *Liveness) {
-	var i int
 	var j int
-	var pcdata int
 	var printed int
 	var bb *BasicBlock
 	var p *obj.Prog
-	var uevar *Bvec
-	var varkill *Bvec
-	var avarinit *Bvec
 	var args *Bvec
 	var locals *Bvec
 	var n *Node
 
 	fmt.Printf("liveness: %s\n", Curfn.Nname.Sym.Name)
 
-	uevar = bvalloc(int32(len(lv.vars)))
-	varkill = bvalloc(int32(len(lv.vars)))
-	avarinit = bvalloc(int32(len(lv.vars)))
+	uevar := bvalloc(int32(len(lv.vars)))
+	varkill := bvalloc(int32(len(lv.vars)))
+	avarinit := bvalloc(int32(len(lv.vars)))
 
-	pcdata = 0
-	for i = 0; i < len(lv.cfg); i++ {
+	pcdata := 0
+	for i := 0; i < len(lv.cfg); i++ {
 		if i > 0 {
 			fmt.Printf("\n")
 		}
@@ -1898,17 +1795,14 @@ func livenessprintdebug(lv *Liveness) {
 // words that follow are the raw bitmap words.  The arr argument is an
 // array of Node*s.
 func twobitwritesymbol(arr []*Bvec, sym *Sym) {
-	var bv *Bvec
-	var off int
 	var i int
 	var j int
-	var n int
 	var word uint32
 
-	n = len(arr)
-	off = 0
+	n := len(arr)
+	off := 0
 	off += 4 // number of bitmaps, to fill in later
-	bv = arr[0]
+	bv := arr[0]
 	off = duint32(sym, off, uint32(bv.n)) // number of bits in each bitmap
 	for i = 0; i < n; i++ {
 		// bitmap words
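The symbol layout twobitwritesymbol emits is simple enough to model directly: a 4-byte bitmap count back-patched at the end, the per-bitmap bit length, then the raw words of every bitmap. A hedged sketch with a byte slice standing in for the Sym:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

type bvec struct {
	n int32    // bits per bitmap
	b []uint32 // raw words
}

// duint32 writes v at byte offset off in sym, growing it as needed,
// and returns the next offset (mirroring the real duint32's shape).
func duint32(sym *[]byte, off int, v uint32) int {
	for len(*sym) < off+4 {
		*sym = append(*sym, 0)
	}
	binary.LittleEndian.PutUint32((*sym)[off:], v)
	return off + 4
}

func write(arr []bvec) []byte {
	var sym []byte
	off := 0
	off += 4                                   // number of bitmaps, to fill in later
	off = duint32(&sym, off, uint32(arr[0].n)) // number of bits in each bitmap
	for _, bv := range arr {
		for _, w := range bv.b { // the raw bitmap words
			off = duint32(&sym, off, w)
		}
	}
	duint32(&sym, 0, uint32(len(arr))) // back-patch the count
	return sym
}

func main() {
	sym := write([]bvec{{n: 32, b: []uint32{0xff}}, {n: 32, b: []uint32{0x0f}}})
	fmt.Printf("% x\n", sym) // 02 00 00 00 20 00 00 00 ff 00 00 00 0f 00 00 00
}
```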
@@ -1944,14 +1838,8 @@ func printprog(p *obj.Prog) {
 // the liveness of pointer variables in the function, and emits a runtime data
 // structure read by the garbage collector.
 func liveness(fn *Node, firstp *obj.Prog, argssym *Sym, livesym *Sym) {
-	var cfg []*BasicBlock
-	var vars []*Node
-	var lv *Liveness
-	var debugdelta int
-	var l *NodeList
-
 	// Change name to dump debugging information only for a specific function.
-	debugdelta = 0
+	debugdelta := 0
 
 	if Curfn.Nname.Sym.Name == "!" {
 		debugdelta = 2
@@ -1966,13 +1854,13 @@ func liveness(fn *Node, firstp *obj.Prog, argssym *Sym, livesym *Sym) {
 	checkptxt(fn, firstp)
 
 	// Construct the global liveness state.
-	cfg = newcfg(firstp)
+	cfg := newcfg(firstp)
 
 	if debuglive >= 3 {
 		printcfg([]*BasicBlock(cfg))
 	}
-	vars = getvariables(fn)
-	lv = newliveness(fn, firstp, cfg, vars)
+	vars := getvariables(fn)
+	lv := newliveness(fn, firstp, cfg, vars)
 
 	// Run the dataflow framework.
 	livenessprologue(lv)
@@ -2000,7 +1888,7 @@ func liveness(fn *Node, firstp *obj.Prog, argssym *Sym, livesym *Sym) {
 	twobitwritesymbol(lv.argslivepointers, argssym)
 
 	// Free everything.
-	for l = fn.Dcl; l != nil; l = l.Next {
+	for l := fn.Dcl; l != nil; l = l.Next {
 		if l.N != nil {
 			l.N.Opt = nil
 		}
diff --git a/src/cmd/internal/gc/popt.go b/src/cmd/internal/gc/popt.go
index 8a3601b8659d07826793dd116ca52179b7fad53a..756f8ebd9404f69455d57affa3aecc2b465122f8 100644
--- a/src/cmd/internal/gc/popt.go
+++ b/src/cmd/internal/gc/popt.go
@@ -180,9 +180,6 @@ void proginfo(ProgInfo*, Prog*);
 var noreturn_symlist [10]*Sym
 
 func Noreturn(p *obj.Prog) bool {
-	var s *Sym
-	var i int
-
 	if noreturn_symlist[0] == nil {
 		noreturn_symlist[0] = Pkglookup("panicindex", Runtimepkg)
 		noreturn_symlist[1] = Pkglookup("panicslice", Runtimepkg)
@@ -197,11 +194,11 @@ func Noreturn(p *obj.Prog) bool {
 	if p.To.Node == nil {
 		return false
 	}
-	s = ((p.To.Node).(*Node)).Sym
+	s := ((p.To.Node).(*Node)).Sym
 	if s == nil {
 		return false
 	}
-	for i = 0; noreturn_symlist[i] != nil; i++ {
+	for i := 0; noreturn_symlist[i] != nil; i++ {
 		if s == noreturn_symlist[i] {
 			return true
 		}
@@ -219,9 +216,7 @@ func Noreturn(p *obj.Prog) bool {
 
 /* what instruction does a JMP to p eventually land on? */
 func chasejmp(p *obj.Prog, jmploop *int) *obj.Prog {
-	var n int
-
-	n = 0
+	n := 0
 	for p != nil && p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH {
 		n++
 		if n > 10 {
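chasejmp's 10-hop cutoff is the whole loop-safety story: a chain of more than 10 JMPs is treated as a cycle, *jmploop is set, and pass 4 of fixjmp later skips the JMP elision that could otherwise spin. A self-contained sketch with a hypothetical instr type standing in for obj.Prog:

```go
package main

import "fmt"

type instr struct {
	name   string
	jmp    bool   // unconditional branch?
	target *instr // branch target
}

// chasejmp follows JMP chains, giving up after 10 hops on the
// assumption that the chain is a cycle.
func chasejmp(p *instr, jmploop *int) *instr {
	n := 0
	for p != nil && p.jmp {
		n++
		if n > 10 {
			*jmploop = 1 // cycle of JMPs; stop chasing
			break
		}
		p = p.target
	}
	return p
}

func main() {
	ret := &instr{name: "RET"}
	b := &instr{name: "JMP b->ret", jmp: true, target: ret}
	a := &instr{name: "JMP a->b", jmp: true, target: b}
	loop := 0
	fmt.Println(chasejmp(a, &loop).name, loop) // RET 0

	c := &instr{name: "JMP self", jmp: true}
	c.target = c
	fmt.Println(chasejmp(c, &loop).name, loop) // JMP self 1
}
```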
@@ -244,9 +239,7 @@ var dead interface{} = 1
 
 /* mark all code reachable from firstp as alive */
 func mark(firstp *obj.Prog) {
-	var p *obj.Prog
-
-	for p = firstp; p != nil; p = p.Link {
+	for p := firstp; p != nil; p = p.Link {
 		if p.Opt != dead {
 			break
 		}
@@ -261,18 +254,14 @@ func mark(firstp *obj.Prog) {
 }
 
 func fixjmp(firstp *obj.Prog) {
-	var jmploop int
-	var p *obj.Prog
-	var last *obj.Prog
-
 	if Debug['R'] != 0 && Debug['v'] != 0 {
 		fmt.Printf("\nfixjmp\n")
 	}
 
 	// pass 1: resolve jump to jump, mark all code as dead.
-	jmploop = 0
+	jmploop := 0
 
-	for p = firstp; p != nil; p = p.Link {
+	for p := firstp; p != nil; p = p.Link {
 		if Debug['R'] != 0 && Debug['v'] != 0 {
 			fmt.Printf("%v\n", p)
 		}
@@ -294,9 +283,9 @@ func fixjmp(firstp *obj.Prog) {
 	mark(firstp)
 
 	// pass 3: delete dead code (mostly JMPs).
-	last = nil
+	last := (*obj.Prog)(nil)
 
-	for p = firstp; p != nil; p = p.Link {
+	for p := firstp; p != nil; p = p.Link {
 		if p.Opt == dead {
 			if p.Link == nil && p.As == obj.ARET && last != nil && last.As != obj.ARET {
 				// This is the final ARET, and the code so far doesn't have one.
@@ -326,8 +315,8 @@ func fixjmp(firstp *obj.Prog) {
 	// pass 4: elide JMP to next instruction.
 	// only safe if there are no jumps to JMPs anymore.
 	if jmploop == 0 {
-		last = nil
-		for p = firstp; p != nil; p = p.Link {
+		last := (*obj.Prog)(nil)
+		for p := firstp; p != nil; p = p.Link {
 			if p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH && p.To.U.Branch == p.Link {
 				if Debug['R'] != 0 && Debug['v'] != 0 {
 					fmt.Printf("del %v\n", p)
@@ -346,7 +335,7 @@ func fixjmp(firstp *obj.Prog) {
 
 	if Debug['R'] != 0 && Debug['v'] != 0 {
 		fmt.Printf("\n")
-		for p = firstp; p != nil; p = p.Link {
+		for p := firstp; p != nil; p = p.Link {
 			fmt.Printf("%v\n", p)
 		}
 		fmt.Printf("\n")
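fixjmp's middle passes boil down to mark-reachable, then unlink the unmarked. A rough standalone model of passes 2 and 3; prog is a made-up stand-in, and the Break-flag handling of the real Proginfo is reduced to an uncond bool:

```go
package main

import "fmt"

type prog struct {
	name   string
	link   *prog // fallthrough successor
	branch *prog // branch target, if any
	uncond bool  // unconditional jump: control never falls through
	live   bool
}

// mark flags everything reachable from p (pass 2 of fixjmp).
func mark(p *prog) {
	for ; p != nil && !p.live; p = p.link {
		p.live = true
		if p.branch != nil {
			mark(p.branch)
		}
		if p.uncond {
			return
		}
	}
}

// sweep unlinks instructions that mark never reached (pass 3).
func sweep(first *prog) *prog {
	for first != nil && !first.live {
		first = first.link
	}
	for p := first; p != nil; p = p.link {
		for p.link != nil && !p.link.live {
			p.link = p.link.link
		}
	}
	return first
}

func main() {
	ret := &prog{name: "RET"}
	dead := &prog{name: "dead code", link: ret}
	jmp := &prog{name: "JMP -> RET", link: dead, branch: ret, uncond: true}
	mark(jmp)
	for p := sweep(jmp); p != nil; p = p.link {
		fmt.Println(p.name) // JMP -> RET, then RET
	}
}
```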
@@ -374,20 +363,12 @@ func fixjmp(firstp *obj.Prog) {
 // If size == 0, f->data will be nil.
 
 func Flowstart(firstp *obj.Prog, newData func() interface{}) *Graph {
-	var id int
-	var nf int
-	var f *Flow
-	var f1 *Flow
-	var start *Flow
-	var last *Flow
-	var graph *Graph
-	var p *obj.Prog
 	var info ProgInfo
 
 	// Count and mark instructions to annotate.
-	nf = 0
+	nf := 0
 
-	for p = firstp; p != nil; p = p.Link {
+	for p := firstp; p != nil; p = p.Link {
 		p.Opt = nil // should be already, but just in case
 		Thearch.Proginfo(&info, p)
 		if info.Flags&Skip != 0 {
@@ -407,11 +388,12 @@ func Flowstart(firstp *obj.Prog, newData func() interface{}) *Graph {
 	}
 
 	// Allocate annotations and assign to instructions.
-	graph = new(Graph)
+	graph := new(Graph)
 	ff := make([]Flow, nf)
-	start = &ff[0]
-	id = 0
-	for p = firstp; p != nil; p = p.Link {
+	start := &ff[0]
+	id := 0
+	var last *Flow
+	for p := firstp; p != nil; p = p.Link {
 		if p.Opt == nil {
 			continue
 		}
@@ -431,7 +413,9 @@ func Flowstart(firstp *obj.Prog, newData func() interface{}) *Graph {
 	}
 
 	// Fill in pred/succ information.
-	for f = start; f != nil; f = f.Link {
+	var f1 *Flow
+	var p *obj.Prog
+	for f := start; f != nil; f = f.Link {
 		p = f.Prog
 		Thearch.Proginfo(&info, p)
 		if info.Flags&Break == 0 {
@@ -465,9 +449,7 @@ func Flowstart(firstp *obj.Prog, newData func() interface{}) *Graph {
 }
 
 func Flowend(graph *Graph) {
-	var f *Flow
-
-	for f = graph.Start; f != nil; f = f.Link {
+	for f := graph.Start; f != nil; f = f.Link {
 		f.Prog.Opt = nil
 	}
 }
@@ -488,10 +470,8 @@ func Flowend(graph *Graph) {
  *	recursively, all preds with a greater rpo number are in the loop
  */
 func postorder(r *Flow, rpo2r []*Flow, n int32) int32 {
-	var r1 *Flow
-
 	r.Rpo = 1
-	r1 = r.S1
+	r1 := r.S1
 	if r1 != nil && r1.Rpo == 0 {
 		n = postorder(r1, rpo2r, n)
 	}
@@ -505,11 +485,10 @@ func postorder(r *Flow, rpo2r []*Flow, n int32) int32 {
 }
 
 func rpolca(idom []int32, rpo1 int32, rpo2 int32) int32 {
-	var t int32
-
 	if rpo1 == -1 {
 		return rpo2
 	}
+	var t int32
 	for rpo1 != rpo2 {
 		if rpo1 > rpo2 {
 			t = rpo2
@@ -537,9 +516,7 @@ func doms(idom []int32, r int32, s int32) bool {
 }
 
 func loophead(idom []int32, r *Flow) bool {
-	var src int32
-
-	src = r.Rpo
+	src := r.Rpo
 	if r.P1 != nil && doms(idom, src, r.P1.Rpo) {
 		return true
 	}
@@ -566,40 +543,34 @@ func loopmark(rpo2r **Flow, head int32, r *Flow) {
 }
 
 func flowrpo(g *Graph) {
-	var r1 *Flow
-	var i int32
-	var d int32
-	var me int32
-	var nr int32
-	var idom []int32
-	var rpo2r []*Flow
-
 	g.Rpo = make([]*Flow, g.Num)
-	idom = make([]int32, g.Num)
+	idom := make([]int32, g.Num)
 
-	for r1 = g.Start; r1 != nil; r1 = r1.Link {
+	for r1 := g.Start; r1 != nil; r1 = r1.Link {
 		r1.Active = 0
 	}
 
-	rpo2r = g.Rpo
-	d = postorder(g.Start, rpo2r, 0)
-	nr = int32(g.Num)
+	rpo2r := g.Rpo
+	d := postorder(g.Start, rpo2r, 0)
+	nr := int32(g.Num)
 	if d > nr {
 		Fatal("too many reg nodes %d %d", d, nr)
 	}
 	nr = d
-	for i = 0; i < nr/2; i++ {
+	var r1 *Flow
+	for i := int32(0); i < nr/2; i++ {
 		r1 = rpo2r[i]
 		rpo2r[i] = rpo2r[nr-1-i]
 		rpo2r[nr-1-i] = r1
 	}
 
-	for i = 0; i < nr; i++ {
+	for i := int32(0); i < nr; i++ {
 		rpo2r[i].Rpo = i
 	}
 
 	idom[0] = 0
-	for i = 0; i < nr; i++ {
+	var me int32
+	for i := int32(0); i < nr; i++ {
 		r1 = rpo2r[i]
 		me = r1.Rpo
 		d = -1
@@ -617,7 +588,7 @@ func flowrpo(g *Graph) {
 		idom[i] = d
 	}
 
-	for i = 0; i < nr; i++ {
+	for i := int32(0); i < nr; i++ {
 		r1 = rpo2r[i]
 		r1.Loop++
 		if r1.P2 != nil && loophead(idom, r1) {
@@ -625,15 +596,13 @@ func flowrpo(g *Graph) {
 		}
 	}
 
-	for r1 = g.Start; r1 != nil; r1 = r1.Link {
+	for r1 := g.Start; r1 != nil; r1 = r1.Link {
 		r1.Active = 0
 	}
 }
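flowrpo's dominator computation restated: number blocks in reverse postorder, then set each block's idom to the rpolca meet (walk the deeper of two rpo numbers up the idom array until they agree) of its already-numbered predecessors. A standalone sketch on a diamond CFG, using a hypothetical adjacency-list node:

```go
package main

import "fmt"

type node struct {
	succ []*node
	pred []*node
	rpo  int // -1 until visited
}

func postorder(r *node, out *[]*node) {
	r.rpo = 0 // temporary "visited" marker; real numbers assigned later
	for _, s := range r.succ {
		if s.rpo == -1 {
			postorder(s, out)
		}
	}
	*out = append(*out, r)
}

// rpolca walks the deeper of two rpo numbers up the idom array until
// the two meet: the lowest common ancestor in the dominator tree.
func rpolca(idom []int, a, b int) int {
	if a == -1 {
		return b
	}
	for a != b {
		if a > b {
			a, b = b, a
		}
		b = idom[b]
	}
	return a
}

func main() {
	// A diamond: 0 -> 1 -> 3 and 0 -> 2 -> 3.
	n := make([]*node, 4)
	for i := range n {
		n[i] = &node{rpo: -1}
	}
	edge := func(a, b int) {
		n[a].succ = append(n[a].succ, n[b])
		n[b].pred = append(n[b].pred, n[a])
	}
	edge(0, 1)
	edge(0, 2)
	edge(1, 3)
	edge(2, 3)

	var order []*node
	postorder(n[0], &order)
	for i, j := 0, len(order)-1; i < j; i, j = i+1, j-1 {
		order[i], order[j] = order[j], order[i] // reverse into RPO
	}
	for i, r := range order {
		r.rpo = i
	}

	idom := make([]int, len(order))
	for i := 1; i < len(order); i++ {
		d := -1
		for _, p := range order[i].pred {
			if p.rpo < i { // only already-numbered predecessors
				d = rpolca(idom, d, p.rpo)
			}
		}
		idom[i] = d
	}
	fmt.Println(idom) // [0 0 0 0]: the entry dominates the whole diamond
}
```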
 
 func Uniqp(r *Flow) *Flow {
-	var r1 *Flow
-
-	r1 = r.P1
+	r1 := r.P1
 	if r1 == nil {
 		r1 = r.P2
 		if r1 == nil || r1.P2link != nil {
@@ -646,9 +615,7 @@ func Uniqp(r *Flow) *Flow {
 }
 
 func Uniqs(r *Flow) *Flow {
-	var r1 *Flow
-
-	r1 = r.S1
+	r1 := r.S1
 	if r1 == nil {
 		r1 = r.S2
 		if r1 == nil {
@@ -691,11 +658,8 @@ func (x startcmp) Swap(i, j int) {
 }
 
 func (x startcmp) Less(i, j int) bool {
-	var a *TempVar
-	var b *TempVar
-
-	a = x[i]
-	b = x[j]
+	a := x[i]
+	b := x[j]
 
 	if a.start < b.start {
 		return true
@@ -723,48 +687,28 @@ func canmerge(n *Node) bool {
 }
 
 func mergetemp(firstp *obj.Prog) {
-	var i int
-	var j int
-	var nvar int
-	var ninuse int
-	var nfree int
-	var nkill int
-	var var_ []TempVar
-	var v *TempVar
-	var v1 *TempVar
-	var bystart []*TempVar
-	var inuse []*TempVar
-	var f *Flow
-	var l *NodeList
-	var lp **NodeList
-	var n *Node
-	var p *obj.Prog
-	var p1 *obj.Prog
-	var t *Type
-	var info ProgInfo
-	var info1 ProgInfo
-	var gen int32
-	var g *Graph
 	const (
 		debugmerge = 1
 	)
 
-	g = Flowstart(firstp, nil)
+	g := Flowstart(firstp, nil)
 	if g == nil {
 		return
 	}
 
 	// Build list of all mergeable variables.
-	nvar = 0
-	for l = Curfn.Dcl; l != nil; l = l.Next {
+	nvar := 0
+	for l := Curfn.Dcl; l != nil; l = l.Next {
 		if canmerge(l.N) {
 			nvar++
 		}
 	}
 
-	var_ = make([]TempVar, nvar)
+	var_ := make([]TempVar, nvar)
 	nvar = 0
-	for l = Curfn.Dcl; l != nil; l = l.Next {
+	var n *Node
+	var v *TempVar
+	for l := Curfn.Dcl; l != nil; l = l.Next {
 		n = l.N
 		if canmerge(n) {
 			v = &var_[nvar]
@@ -778,7 +722,9 @@ func mergetemp(firstp *obj.Prog) {
 	// We assume that the earliest reference to a temporary is its definition.
 	// This is not true of variables in general but our temporaries are all
 	// single-use (that's why we have so many!).
-	for f = g.Start; f != nil; f = f.Link {
+	var p *obj.Prog
+	var info ProgInfo
+	for f := g.Start; f != nil; f = f.Link {
 		p = f.Prog
 		Thearch.Proginfo(&info, p)
 
@@ -812,10 +758,13 @@ func mergetemp(firstp *obj.Prog) {
 		Dumpit("before", g.Start, 0)
 	}
 
-	nkill = 0
+	nkill := 0
 
 	// Special case.
-	for i = 0; i < len(var_); i++ {
+	var p1 *obj.Prog
+	var info1 ProgInfo
+	var f *Flow
+	for i := 0; i < len(var_); i++ {
 		v = &var_[i]
 		if v.addr != 0 {
 			continue
@@ -868,9 +817,9 @@ func mergetemp(firstp *obj.Prog) {
 	// Traverse live range of each variable to set start, end.
 	// Each flood uses a new value of gen so that we don't have
 	// to clear all the r->active words after each variable.
-	gen = 0
+	gen := int32(0)
 
-	for i = 0; i < len(var_); i++ {
+	for i := 0; i < len(var_); i++ {
 		v = &var_[i]
 		gen++
 		for f = v.use; f != nil; f = f.Data.(*Flow) {
@@ -885,9 +834,9 @@ func mergetemp(firstp *obj.Prog) {
 	}
 
 	// Sort variables by start.
-	bystart = make([]*TempVar, len(var_))
+	bystart := make([]*TempVar, len(var_))
 
-	for i = 0; i < len(var_); i++ {
+	for i := 0; i < len(var_); i++ {
 		bystart[i] = &var_[i]
 	}
 	sort.Sort(startcmp(bystart[:len(var_)]))
@@ -898,11 +847,14 @@ func mergetemp(firstp *obj.Prog) {
 	// In theory we should use a sorted tree so that insertions are
 	// guaranteed O(log n) and then the loop is guaranteed O(n log n).
 	// In practice, it doesn't really matter.
-	inuse = make([]*TempVar, len(var_))
+	inuse := make([]*TempVar, len(var_))
 
-	ninuse = 0
-	nfree = len(var_)
-	for i = 0; i < len(var_); i++ {
+	ninuse := 0
+	nfree := len(var_)
+	var t *Type
+	var v1 *TempVar
+	var j int
+	for i := 0; i < len(var_); i++ {
 		v = bystart[i]
 		if debugmerge > 0 && Debug['v'] != 0 {
 			fmt.Printf("consider %v: removed=%d\n", Nconv(v.node, obj.FmtSharp), v.removed)
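The in-use/free bookkeeping begun above is a classic interval-allocation scan: sort by start of live range, retire variables whose range has ended, and fold a new variable into a retired slot of matching size. A reduced sketch with TempVar cut down to a named range plus a size:

```go
package main

import (
	"fmt"
	"sort"
)

type tempVar struct {
	name       string
	start, end int
	size       int64
	merged     *tempVar // non-nil if this var reuses another's slot
}

func merge(vars []*tempVar) {
	bystart := append([]*tempVar(nil), vars...)
	sort.Slice(bystart, func(i, j int) bool { return bystart[i].start < bystart[j].start })

	var inuse, free []*tempVar
	for _, v := range bystart {
		// Retire variables whose live range ended before v starts.
		for i := 0; i < len(inuse); {
			if inuse[i].end <= v.start {
				free = append(free, inuse[i])
				inuse = append(inuse[:i], inuse[i+1:]...)
				continue
			}
			i++
		}
		// Reuse a freed slot of the same size if one exists.
		for i, u := range free {
			if u.size == v.size {
				v.merged = u
				if u.end < v.end {
					u.end = v.end // slot stays busy through v's range
				}
				free = append(free[:i], free[i+1:]...)
				inuse = append(inuse, u)
				break
			}
		}
		if v.merged == nil {
			inuse = append(inuse, v)
		}
	}
}

func main() {
	a := &tempVar{name: "a", start: 0, end: 5, size: 8}
	b := &tempVar{name: "b", start: 2, end: 9, size: 8}
	c := &tempVar{name: "c", start: 6, end: 12, size: 8}
	merge([]*tempVar{a, b, c})
	for _, v := range []*tempVar{a, b, c} {
		if v.merged != nil {
			fmt.Printf("%s reuses %s\n", v.name, v.merged.name) // c reuses a
		}
	}
}
```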
@@ -965,7 +917,8 @@ func mergetemp(firstp *obj.Prog) {
 
 	if debugmerge > 0 && Debug['v'] != 0 {
 		fmt.Printf("%v [%d - %d]\n", Sconv(Curfn.Nname.Sym, 0), len(var_), nkill)
-		for i = 0; i < len(var_); i++ {
+		var v *TempVar
+		for i := 0; i < len(var_); i++ {
 			v = &var_[i]
 			fmt.Printf("var %v %v %d-%d", Nconv(v.node, obj.FmtSharp), Tconv(v.node.Type, 0), v.start, v.end)
 			if v.addr != 0 {
@@ -989,7 +942,7 @@ func mergetemp(firstp *obj.Prog) {
 	}
 
 	// Update node references to use merged temporaries.
-	for f = g.Start; f != nil; f = f.Link {
+	for f := g.Start; f != nil; f = f.Link {
 		p = f.Prog
 		n, _ = p.From.Node.(*Node)
 		if n != nil {
@@ -1008,7 +961,8 @@ func mergetemp(firstp *obj.Prog) {
 	}
 
 	// Delete merged nodes from declaration list.
-	for lp = &Curfn.Dcl; ; {
+	var l *NodeList
+	for lp := &Curfn.Dcl; ; {
 		l = *lp
 		if l == nil {
 			break
@@ -1026,7 +980,7 @@ func mergetemp(firstp *obj.Prog) {
 	}
 
 	// Clear aux structures.
-	for i = 0; i < len(var_); i++ {
+	for i := 0; i < len(var_); i++ {
 		var_[i].node.Opt = nil
 	}
 
@@ -1036,8 +990,6 @@ func mergetemp(firstp *obj.Prog) {
 func mergewalk(v *TempVar, f0 *Flow, gen uint32) {
 	var p *obj.Prog
 	var f1 *Flow
-	var f *Flow
-	var f2 *Flow
 
 	for f1 = f0; f1 != nil; f1 = f1.P1 {
 		if uint32(f1.Active) == gen {
@@ -1054,7 +1006,8 @@ func mergewalk(v *TempVar, f0 *Flow, gen uint32) {
 		}
 	}
 
-	for f = f0; f != f1; f = f.P1 {
+	var f2 *Flow
+	for f := f0; f != f1; f = f.P1 {
 		for f2 = f.P2; f2 != nil; f2 = f2.P2link {
 			mergewalk(v, f2, gen)
 		}
@@ -1064,7 +1017,6 @@ func mergewalk(v *TempVar, f0 *Flow, gen uint32) {
 func varkillwalk(v *TempVar, f0 *Flow, gen uint32) {
 	var p *obj.Prog
 	var f1 *Flow
-	var f *Flow
 
 	for f1 = f0; f1 != nil; f1 = f1.S1 {
 		if uint32(f1.Active) == gen {
@@ -1083,7 +1035,7 @@ func varkillwalk(v *TempVar, f0 *Flow, gen uint32) {
 		}
 	}
 
-	for f = f0; f != f1; f = f.S1 {
+	for f := f0; f != f1; f = f.S1 {
 		varkillwalk(v, f.S2, gen)
 	}
 }
@@ -1107,13 +1059,7 @@ type NilVar struct {
 var killed int // f->data is either nil or &killed
 
 func nilopt(firstp *obj.Prog) {
-	var f *Flow
-	var p *obj.Prog
-	var g *Graph
-	var ncheck int
-	var nkill int
-
-	g = Flowstart(firstp, nil)
+	g := Flowstart(firstp, nil)
 	if g == nil {
 		return
 	}
@@ -1122,9 +1068,10 @@ func nilopt(firstp *obj.Prog) {
 		Dumpit("nilopt", g.Start, 0)
 	}
 
-	ncheck = 0
-	nkill = 0
-	for f = g.Start; f != nil; f = f.Link {
+	ncheck := 0
+	nkill := 0
+	var p *obj.Prog
+	for f := g.Start; f != nil; f = f.Link {
 		p = f.Prog
 		if p.As != obj.ACHECKNIL || !Thearch.Regtyp(&p.From) {
 			continue
@@ -1155,7 +1102,7 @@ func nilopt(firstp *obj.Prog) {
 		}
 	}
 
-	for f = g.Start; f != nil; f = f.Link {
+	for f := g.Start; f != nil; f = f.Link {
 		if f.Data != nil {
 			nkill++
 			Thearch.Excise(f)
@@ -1172,9 +1119,8 @@ func nilopt(firstp *obj.Prog) {
 func nilwalkback(fcheck *Flow) {
 	var p *obj.Prog
 	var info ProgInfo
-	var f *Flow
 
-	for f = fcheck; f != nil; f = Uniqp(f) {
+	for f := fcheck; f != nil; f = Uniqp(f) {
 		p = f.Prog
 		Thearch.Proginfo(&info, p)
 		if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
@@ -1231,8 +1177,6 @@ for(f = f0; f != f1; f = f->p1)
 		nilwalkback(fcheck, f2, gen);
 */
 func nilwalkfwd(fcheck *Flow) {
-	var f *Flow
-	var last *Flow
 	var p *obj.Prog
 	var info ProgInfo
 
@@ -1243,9 +1187,9 @@ func nilwalkfwd(fcheck *Flow) {
 	// avoid problems like:
 	//	_ = *x // should panic
 	//	for {} // no writes but infinite loop may be considered visible
-	last = nil
+	last := (*Flow)(nil)
 
-	for f = Uniqs(fcheck); f != nil; f = Uniqs(f) {
+	for f := Uniqs(fcheck); f != nil; f = Uniqs(f) {
 		p = f.Prog
 		Thearch.Proginfo(&info, p)
 
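nilwalkback's rule, restated: walking unique predecessors back from a CHECKNIL, an earlier check of the same register with no intervening write to it makes the new check redundant. A toy model over a straight-line CFG; the op/flow types are hypothetical and collapse the real ProgInfo flag tests:

```go
package main

import "fmt"

type op int

const (
	opChecknil op = iota
	opWrite
	opOther
)

type flow struct {
	op   op
	reg  string
	prev *flow // unique predecessor, nil if none or several
}

// redundant reports whether the nil check at f is preceded by a check
// of the same register with no intervening write to it.
func redundant(f *flow) bool {
	for p := f.prev; p != nil; p = p.prev {
		if p.op == opWrite && p.reg == f.reg {
			return false // register redefined; earlier checks don't count
		}
		if p.op == opChecknil && p.reg == f.reg {
			return true
		}
	}
	return false
}

func main() {
	a := &flow{op: opChecknil, reg: "R1"}
	b := &flow{op: opOther, prev: a}
	c := &flow{op: opChecknil, reg: "R1", prev: b}
	fmt.Println(redundant(c)) // true: R1 already checked at a
}
```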
diff --git a/src/cmd/internal/gc/racewalk.go b/src/cmd/internal/gc/racewalk.go
index 582f6b42db84415a6c2670f9218c70f7a050813e..3619fed28048e2f466eed44dada4ce9eb2d68f6d 100644
--- a/src/cmd/internal/gc/racewalk.go
+++ b/src/cmd/internal/gc/racewalk.go
@@ -31,10 +31,8 @@ var omit_pkgs = []string{"runtime", "runtime/race"}
 var noinst_pkgs = []string{"sync", "sync/atomic"}
 
 func ispkgin(pkgs []string) bool {
-	var i int
-
 	if myimportpath != "" {
-		for i = 0; i < len(pkgs); i++ {
+		for i := 0; i < len(pkgs); i++ {
 			if myimportpath == pkgs[i] {
 				return true
 			}
@@ -54,10 +52,6 @@ func isforkfunc(fn *Node) bool {
 }
 
 func racewalk(fn *Node) {
-	var nd *Node
-	var nodpc *Node
-	var s string
-
 	if ispkgin(omit_pkgs) || isforkfunc(fn) {
 		return
 	}
@@ -72,18 +66,18 @@ func racewalk(fn *Node) {
 	// nodpc is the PC of the caller as extracted by
 	// getcallerpc. We use -widthptr(FP) for x86.
 	// BUG: this will not work on arm.
-	nodpc = Nod(OXXX, nil, nil)
+	nodpc := Nod(OXXX, nil, nil)
 
 	*nodpc = *nodfp
 	nodpc.Type = Types[TUINTPTR]
 	nodpc.Xoffset = int64(-Widthptr)
-	nd = mkcall("racefuncenter", nil, nil, nodpc)
+	nd := mkcall("racefuncenter", nil, nil, nodpc)
 	fn.Enter = concat(list1(nd), fn.Enter)
 	nd = mkcall("racefuncexit", nil, nil)
 	fn.Exit = list(fn.Exit, nd)
 
 	if Debug['W'] != 0 {
-		s = fmt.Sprintf("after racewalk %v", Sconv(fn.Nname.Sym, 0))
+		s := fmt.Sprintf("after racewalk %v", Sconv(fn.Nname.Sym, 0))
 		dumplist(s, fn.Nbody)
 		s = fmt.Sprintf("enter %v", Sconv(fn.Nname.Sym, 0))
 		dumplist(s, fn.Enter)
@@ -110,12 +104,7 @@ func racewalklist(l *NodeList, init **NodeList) {
 // walks the tree and adds calls to the
 // instrumentation code to top-level (statement) nodes' init
 func racewalknode(np **Node, init **NodeList, wr int, skip int) {
-	var n *Node
-	var n1 *Node
-	var l *NodeList
-	var fini *NodeList
-
-	n = *np
+	n := *np
 
 	if n == nil {
 		return
@@ -132,7 +121,7 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
 		// If init == &n->ninit and n->ninit is non-nil,
 		// racewalknode might append it to itself.
 		// nil it out and handle it separately before putting it back.
-		l = n.Ninit
+		l := n.Ninit
 
 		n.Ninit = nil
 		racewalklist(l, nil)
@@ -174,7 +163,7 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
 			OCALLINTER:
 			racewalknode(&n.List.N, &n.List.N.Ninit, 0, 0)
 
-			fini = nil
+			fini := (*NodeList)(nil)
 			racewalklist(n.List.Next, &fini)
 			n.List = concat(n.List, fini)
 
@@ -204,6 +193,7 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
 		if n.Left.Sym != nil && n.Left.Sym.Pkg == Runtimepkg && (strings.HasPrefix(n.Left.Sym.Name, "writebarrier") || n.Left.Sym.Name == "typedmemmove") {
 			// Find the dst argument.
 			// The list can be reordered, so it's not necessary just the first or the second element.
+			var l *NodeList
 			for l = n.List; l != nil; l = l.Next {
 				if n.Left.Sym.Name == "typedmemmove" {
 					if l.N.Left.Xoffset == int64(Widthptr) {
@@ -263,7 +253,7 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
 		OCAP:
 		racewalknode(&n.Left, init, 0, 0)
 		if Istype(n.Left.Type, TMAP) {
-			n1 = Nod(OCONVNOP, n.Left, nil)
+			n1 := Nod(OCONVNOP, n.Left, nil)
 			n1.Type = Ptrto(Types[TUINT8])
 			n1 = Nod(OIND, n1, nil)
 			typecheck(&n1, Erv)
@@ -495,15 +485,7 @@ func isartificial(n *Node) bool {
 }
 
 func callinstr(np **Node, init **NodeList, wr int, skip int) bool {
-	var name string
-	var f *Node
-	var b *Node
-	var n *Node
-	var t *Type
-	var class int
-	var hascalls int
-
-	n = *np
+	n := *np
 
 	//print("callinstr for %+N [ %O ] etype=%E class=%d\n",
 	//	  n, n->op, n->type ? n->type->etype : -1, n->class);
@@ -511,25 +493,25 @@ func callinstr(np **Node, init **NodeList, wr int, skip int) bool {
 	if skip != 0 || n.Type == nil || n.Type.Etype >= TIDEAL {
 		return false
 	}
-	t = n.Type
+	t := n.Type
 	if isartificial(n) {
 		return false
 	}
 
-	b = outervalue(n)
+	b := outervalue(n)
 
 	// it skips e.g. stores to ... parameter array
 	if isartificial(b) {
 		return false
 	}
-	class = int(b.Class)
+	class := int(b.Class)
 
 	// BUG: we _may_ want to instrument PAUTO sometimes
 	// e.g. if we've got a local variable/method receiver
 	// that has got a pointer inside. Whether it points to
 	// the heap or not is impossible to know at compile time
 	if (class&PHEAP != 0) || class == PPARAMREF || class == PEXTERN || b.Op == OINDEX || b.Op == ODOTPTR || b.Op == OIND {
-		hascalls = 0
+		hascalls := 0
 		foreach(n, hascallspred, &hascalls)
 		if hascalls != 0 {
 			n = detachexpr(n, init)
@@ -538,14 +520,15 @@ func callinstr(np **Node, init **NodeList, wr int, skip int) bool {
 
 		n = treecopy(n)
 		makeaddable(n)
+		var f *Node
 		if t.Etype == TSTRUCT || Isfixedarray(t) {
-			name = "racereadrange"
+			name := "racereadrange"
 			if wr != 0 {
 				name = "racewriterange"
 			}
 			f = mkcall(name, nil, init, uintptraddr(n), Nodintconst(t.Width))
 		} else {
-			name = "raceread"
+			name := "raceread"
 			if wr != 0 {
 				name = "racewrite"
 			}
@@ -592,9 +575,7 @@ func makeaddable(n *Node) {
 }
 
 func uintptraddr(n *Node) *Node {
-	var r *Node
-
-	r = Nod(OADDR, n, nil)
+	r := Nod(OADDR, n, nil)
 	r.Bounded = true
 	r = conv(r, Types[TUNSAFEPTR])
 	r = conv(r, Types[TUINTPTR])
@@ -602,18 +583,13 @@ func uintptraddr(n *Node) *Node {
 }
 
 func detachexpr(n *Node, init **NodeList) *Node {
-	var addr *Node
-	var as *Node
-	var ind *Node
-	var l *Node
-
-	addr = Nod(OADDR, n, nil)
-	l = temp(Ptrto(n.Type))
-	as = Nod(OAS, l, addr)
+	addr := Nod(OADDR, n, nil)
+	l := temp(Ptrto(n.Type))
+	as := Nod(OAS, l, addr)
 	typecheck(&as, Etop)
 	walkexpr(&as, init)
 	*init = list(*init, as)
-	ind = Nod(OIND, l, nil)
+	ind := Nod(OIND, l, nil)
 	typecheck(&ind, Erv)
 	walkexpr(&ind, init)
 	return ind
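What detachexpr builds, expressed as the source-level transformation it corresponds to: capture the lvalue's address once in a temporary, then use the dereference in its place so instrumentation never re-evaluates the operand:

```go
package main

import "fmt"

type point struct{ x, y int }

func main() {
	p := point{1, 2}

	// detachexpr turns an instrumented use of p.x into:
	l := &p.x // addr = Nod(OADDR, n, nil); l = temp(Ptrto(n.Type))
	*l = 42   // ind = Nod(OIND, l, nil) stands in for the original p.x

	fmt.Println(p.x) // 42
}
```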
@@ -656,13 +632,11 @@ func hascallspred(n *Node, c interface{}) {
 // appendinit is like addinit in subr.c
 // but appends rather than prepends.
 func appendinit(np **Node, init *NodeList) {
-	var n *Node
-
 	if init == nil {
 		return
 	}
 
-	n = *np
+	n := *np
 	switch n.Op {
 	// There may be multiple refs to this node;
 	// introduce OCONVNOP to hold init list.
diff --git a/src/cmd/internal/gc/range.go b/src/cmd/internal/gc/range.go
index bb30bcf283f0cab6d9de01af833fdd19561e5ffb..616c859a5ad1ffb0c3aa53eff66012f0fb481a7e 100644
--- a/src/cmd/internal/gc/range.go
+++ b/src/cmd/internal/gc/range.go
@@ -12,12 +12,10 @@ import "cmd/internal/obj"
 func typecheckrange(n *Node) {
 	var toomany int
 	var why string
-	var t *Type
 	var t1 *Type
 	var t2 *Type
 	var v1 *Node
 	var v2 *Node
-	var ll *NodeList
 
 	// Typechecking order is important here:
 	// 0. first typecheck range expression (slice/map/chan),
@@ -31,13 +29,13 @@ func typecheckrange(n *Node) {
 
 	typecheck(&n.Right, Erv)
 
-	t = n.Right.Type
+	t := n.Right.Type
 	if t == nil {
 		goto out
 	}
 
 	// delicate little dance.  see typecheckas2
-	for ll = n.List; ll != nil; ll = ll.Next {
+	for ll := n.List; ll != nil; ll = ll.Next {
 		if ll.N.Defn != n {
 			typecheck(&ll.N, Erv|Easgn)
 		}
@@ -125,7 +123,7 @@ func typecheckrange(n *Node) {
 out:
 	n.Typecheck = 1
 
-	for ll = n.List; ll != nil; ll = ll.Next {
+	for ll := n.List; ll != nil; ll = ll.Next {
 		if ll.N.Typecheck == 0 {
 			typecheck(&ll.N, Erv|Easgn)
 		}
@@ -137,40 +135,17 @@ out:
 }
 
 func walkrange(n *Node) {
-	var ohv1 *Node
-	var hv1 *Node // hidden (old) val 1, 2 // hidden aggregate, iterator // hidden len, pointer // hidden bool // not hidden aggregate, val 1, 2
-	var hv2 *Node
-	var ha *Node
-	var hit *Node
-	var hn *Node
-	var hp *Node
-	var hb *Node
-	var a *Node
-	var v1 *Node
-	var v2 *Node
-	var fn *Node
-	var tmp *Node
-	var keyname *Node
-	var valname *Node
-	var key *Node
-	var val *Node
-	var body *NodeList
-	var init *NodeList
-	var th *Type
-	var t *Type
-	var lno int
+	t := n.Type
+	init := (*NodeList)(nil)
 
-	t = n.Type
-	init = nil
+	a := n.Right
+	lno := int(setlineno(a))
 
-	a = n.Right
-	lno = int(setlineno(a))
-
-	v1 = nil
+	v1 := (*Node)(nil)
 	if n.List != nil {
 		v1 = n.List.N
 	}
-	v2 = nil
+	v2 := (*Node)(nil)
 	if n.List != nil && n.List.Next != nil && !isblank(n.List.Next.N) {
 		v2 = n.List.Next.N
 	}
@@ -179,8 +154,9 @@ func walkrange(n *Node) {
 	// to avoid erroneous processing by racewalk.
 	n.List = nil
 
-	hv2 = nil
+	hv2 := (*Node)(nil)
 
+	var body *NodeList
 	switch t.Etype {
 	default:
 		Fatal("walkrange")
@@ -202,7 +178,7 @@ func walkrange(n *Node) {
 						if n.Nbody != nil {
 							if n.Nbody.N != nil { // at least one statement in body
 								if n.Nbody.Next == nil { // at most one statement in body
-									tmp = n.Nbody.N // first statement of body
+									tmp := n.Nbody.N // first statement of body
 									if tmp.Op == OAS {
 										if tmp.Left.Op == OINDEX {
 											if samesafeexpr(tmp.Left.Left, a) {
@@ -223,9 +199,9 @@ func walkrange(n *Node) {
 															n.Nincr = nil
 
 															// hp = &a[0]
-															hp = temp(Ptrto(Types[TUINT8]))
+															hp := temp(Ptrto(Types[TUINT8]))
 
-															tmp = Nod(OINDEX, a, Nodintconst(0))
+															tmp := Nod(OINDEX, a, Nodintconst(0))
 															tmp.Bounded = true
 															tmp = Nod(OADDR, tmp, nil)
 															tmp = Nod(OCONVNOP, tmp, nil)
@@ -233,7 +209,7 @@ func walkrange(n *Node) {
 															n.Nbody = list(n.Nbody, Nod(OAS, hp, tmp))
 
 															// hn = len(a) * sizeof(elem(a))
-															hn = temp(Types[TUINTPTR])
+															hn := temp(Types[TUINTPTR])
 
 															tmp = Nod(OLEN, a, nil)
 															tmp = Nod(OMUL, tmp, Nodintconst(t.Type.Width))
@@ -241,7 +217,7 @@ func walkrange(n *Node) {
 															n.Nbody = list(n.Nbody, Nod(OAS, hn, tmp))
 
 															// memclr(hp, hn)
-															fn = mkcall("memclr", nil, nil, hp, hn)
+															fn := mkcall("memclr", nil, nil, hp, hn)
 
 															n.Nbody = list(n.Nbody, fn)
 
@@ -270,17 +246,17 @@ func walkrange(n *Node) {
 		}
 
 		// orderstmt arranged for a copy of the array/slice variable if needed.
-		ha = a
+		ha := a
 
-		hv1 = temp(Types[TINT])
-		hn = temp(Types[TINT])
-		hp = nil
+		hv1 := temp(Types[TINT])
+		hn := temp(Types[TINT])
+		hp := (*Node)(nil)
 
 		init = list(init, Nod(OAS, hv1, nil))
 		init = list(init, Nod(OAS, hn, Nod(OLEN, ha, nil)))
 		if v2 != nil {
 			hp = temp(Ptrto(n.Type.Type))
-			tmp = Nod(OINDEX, ha, Nodintconst(0))
+			tmp := Nod(OINDEX, ha, Nodintconst(0))
 			tmp.Bounded = true
 			init = list(init, Nod(OAS, hp, Nod(OADDR, tmp, nil)))
 		}
@@ -292,7 +268,7 @@ func walkrange(n *Node) {
 		} else if v2 == nil {
 			body = list1(Nod(OAS, v1, hv1))
 		} else {
-			a = Nod(OAS2, nil, nil)
+			a := Nod(OAS2, nil, nil)
 			a.List = list(list1(v1), v2)
 			a.Rlist = list(list1(hv1), Nod(OIND, hp, nil))
 			body = list1(a)
@@ -305,7 +281,7 @@ func walkrange(n *Node) {
 			// Advancing during the increment ensures that the pointer p only points
 		// past the end of the array during the final "p++; i++; if(i >= len(x)) break;",
 			// after which p is dead, so it cannot confuse the collector.
-			tmp = Nod(OADD, hp, Nodintconst(t.Type.Width))
+			tmp := Nod(OADD, hp, Nodintconst(t.Type.Width))
 
 			tmp.Type = hp.Type
 			tmp.Typecheck = 1
@@ -319,16 +295,16 @@ func walkrange(n *Node) {
 		// orderstmt allocated the iterator for us.
 	// we only use a once, so no copy needed.
 	case TMAP:
-		ha = a
+		ha := a
 
-		th = hiter(t)
-		hit = n.Alloc
+		th := hiter(t)
+		hit := n.Alloc
 		hit.Type = th
 		n.Left = nil
-		keyname = newname(th.Type.Sym)      // depends on layout of iterator struct.  See reflect.c:hiter
-		valname = newname(th.Type.Down.Sym) // ditto
+		keyname := newname(th.Type.Sym)      // depends on layout of iterator struct.  See reflect.c:hiter
+		valname := newname(th.Type.Down.Sym) // ditto
 
-		fn = syslook("mapiterinit", 1)
+		fn := syslook("mapiterinit", 1)
 
 		argtype(fn, t.Down)
 		argtype(fn, t.Type)
@@ -340,16 +316,16 @@ func walkrange(n *Node) {
 		argtype(fn, th)
 		n.Nincr = mkcall1(fn, nil, nil, Nod(OADDR, hit, nil))
 
-		key = Nod(ODOT, hit, keyname)
+		key := Nod(ODOT, hit, keyname)
 		key = Nod(OIND, key, nil)
 		if v1 == nil {
 			body = nil
 		} else if v2 == nil {
 			body = list1(Nod(OAS, v1, key))
 		} else {
-			val = Nod(ODOT, hit, valname)
+			val := Nod(ODOT, hit, valname)
 			val = Nod(OIND, val, nil)
-			a = Nod(OAS2, nil, nil)
+			a := Nod(OAS2, nil, nil)
 			a.List = list(list1(v1), v2)
 			a.Rlist = list(list1(key), val)
 			body = list1(a)
@@ -357,19 +333,19 @@ func walkrange(n *Node) {
 
 		// orderstmt arranged for a copy of the channel variable.
 	case TCHAN:
-		ha = a
+		ha := a
 
 		n.Ntest = nil
 
-		hv1 = temp(t.Type)
+		hv1 := temp(t.Type)
 		hv1.Typecheck = 1
 		if haspointers(t.Type) {
 			init = list(init, Nod(OAS, hv1, nil))
 		}
-		hb = temp(Types[TBOOL])
+		hb := temp(Types[TBOOL])
 
 		n.Ntest = Nod(ONE, hb, Nodbool(false))
-		a = Nod(OAS2RECV, nil, nil)
+		a := Nod(OAS2RECV, nil, nil)
 		a.Typecheck = 1
 		a.List = list(list1(hv1), hb)
 		a.Rlist = list1(Nod(ORECV, ha, nil))
@@ -382,20 +358,21 @@ func walkrange(n *Node) {
 
 		// orderstmt arranged for a copy of the string variable.
 	case TSTRING:
-		ha = a
+		ha := a
 
-		ohv1 = temp(Types[TINT])
+		ohv1 := temp(Types[TINT])
 
-		hv1 = temp(Types[TINT])
+		hv1 := temp(Types[TINT])
 		init = list(init, Nod(OAS, hv1, nil))
 
+		var a *Node
 		if v2 == nil {
 			a = Nod(OAS, hv1, mkcall("stringiter", Types[TINT], nil, ha, hv1))
 		} else {
 			hv2 = temp(runetype)
 			a = Nod(OAS2, nil, nil)
 			a.List = list(list1(hv1), hv2)
-			fn = syslook("stringiter2", 0)
+			fn := syslook("stringiter2", 0)
 			a.Rlist = list1(mkcall1(fn, getoutargx(fn.Type), nil, ha, hv1))
 		}
 
diff --git a/src/cmd/internal/gc/reflect.go b/src/cmd/internal/gc/reflect.go
index ee080404b3f7656714aca8beeb066058ef07f685..6059e35045e2c22ee5e36ee028958893847ec531 100644
--- a/src/cmd/internal/gc/reflect.go
+++ b/src/cmd/internal/gc/reflect.go
@@ -15,9 +15,7 @@ import (
 var signatlist *NodeList
 
 func sigcmp(a *Sig, b *Sig) int {
-	var i int
-
-	i = stringsCompare(a.name, b.name)
+	i := stringsCompare(a.name, b.name)
 	if i != 0 {
 		return i
 	}
@@ -34,16 +32,12 @@ func sigcmp(a *Sig, b *Sig) int {
 }
 
 func lsort(l *Sig, f func(*Sig, *Sig) int) *Sig {
-	var l1 *Sig
-	var l2 *Sig
-	var le *Sig
-
 	if l == nil || l.link == nil {
 		return l
 	}
 
-	l1 = l
-	l2 = l
+	l1 := l
+	l2 := l
 	for {
 		l2 = l2.link
 		if l2 == nil {
@@ -70,7 +64,7 @@ func lsort(l *Sig, f func(*Sig, *Sig) int) *Sig {
 		l2 = l2.link
 	}
 
-	le = l
+	le := l
 
 	for {
 		if l1 == nil {
@@ -121,9 +115,7 @@ const (
 )
 
 func makefield(name string, t *Type) *Type {
-	var f *Type
-
-	f = typ(TFIELD)
+	f := typ(TFIELD)
 	f.Type = t
 	f.Sym = new(Sym)
 	f.Sym.Name = name
@@ -131,20 +123,13 @@ func makefield(name string, t *Type) *Type {
 }
 
 func mapbucket(t *Type) *Type {
-	var keytype *Type
-	var valtype *Type
-	var bucket *Type
-	var arr *Type
-	var field [4]*Type
-	var n int32
-
 	if t.Bucket != nil {
 		return t.Bucket
 	}
 
-	bucket = typ(TSTRUCT)
-	keytype = t.Down
-	valtype = t.Type
+	bucket := typ(TSTRUCT)
+	keytype := t.Down
+	valtype := t.Type
 	dowidth(keytype)
 	dowidth(valtype)
 	if keytype.Width > MAXKEYSIZE {
@@ -155,10 +140,11 @@ func mapbucket(t *Type) *Type {
 	}
 
 	// The first field is: uint8 topbits[BUCKETSIZE].
-	arr = typ(TARRAY)
+	arr := typ(TARRAY)
 
 	arr.Type = Types[TUINT8]
 	arr.Bound = BUCKETSIZE
+	var field [4]*Type
 	field[0] = makefield("topbits", arr)
 	arr = typ(TARRAY)
 	arr.Type = keytype
@@ -175,7 +161,7 @@ func mapbucket(t *Type) *Type {
 
 	bucket.Local = t.Local
 	bucket.Type = field[0]
-	for n = 0; n < int32(len(field)-1); n++ {
+	for n := int32(0); n < int32(len(field)-1); n++ {
 		field[n].Down = field[n+1]
 	}
 	field[len(field)-1].Down = nil
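The bucket being assembled field by field here, written out as ordinary Go (generics are purely illustrative; the compiler builds a concrete struct per map type). Keys and values live in parallel arrays so a key/value pair never carries internal padding; BUCKETSIZE is assumed to be the runtime's 8:

```go
package main

import (
	"fmt"
	"unsafe"
)

const bucketSize = 8 // assumed BUCKETSIZE, matching the runtime

// bucket mirrors the struct mapbucket synthesizes for map[K]V.
type bucket[K comparable, V any] struct {
	topbits  [bucketSize]uint8
	keys     [bucketSize]K
	values   [bucketSize]V
	overflow unsafe.Pointer // *bucket[K, V]; kept untyped in the runtime
}

func main() {
	var b bucket[int64, int32]
	fmt.Println(unsafe.Sizeof(b)) // 8 + 8*8 + 8*4 + 8 = 112 on 64-bit
}
```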
@@ -201,16 +187,12 @@ func mapbucket(t *Type) *Type {
 // Builds a type representing a Hmap structure for the given map type.
 // Make sure this stays in sync with ../../runtime/hashmap.go!
 func hmap(t *Type) *Type {
-	var h *Type
-	var bucket *Type
-	var field [8]*Type
-	var n int32
-
 	if t.Hmap != nil {
 		return t.Hmap
 	}
 
-	bucket = mapbucket(t)
+	bucket := mapbucket(t)
+	var field [8]*Type
 	field[0] = makefield("count", Types[TINT])
 	field[1] = makefield("flags", Types[TUINT8])
 	field[2] = makefield("B", Types[TUINT8])
@@ -220,11 +202,11 @@ func hmap(t *Type) *Type {
 	field[6] = makefield("nevacuate", Types[TUINTPTR])
 	field[7] = makefield("overflow", Types[TUNSAFEPTR])
 
-	h = typ(TSTRUCT)
+	h := typ(TSTRUCT)
 	h.Noalg = 1
 	h.Local = t.Local
 	h.Type = field[0]
-	for n = 0; n < int32(len(field)-1); n++ {
+	for n := int32(0); n < int32(len(field)-1); n++ {
 		field[n].Down = field[n+1]
 	}
 	field[len(field)-1].Down = nil
@@ -235,10 +217,6 @@ func hmap(t *Type) *Type {
 }
 
 func hiter(t *Type) *Type {
-	var n int32
-	var field [12]*Type
-	var i *Type
-
 	if t.Hiter != nil {
 		return t.Hiter
 	}
@@ -259,6 +237,7 @@ func hiter(t *Type) *Type {
 	//    checkBucket uintptr
 	// }
 	// must match ../../runtime/hashmap.c:hash_iter.
+	var field [12]*Type
 	field[0] = makefield("key", Ptrto(t.Down))
 
 	field[1] = makefield("val", Ptrto(t.Type))
@@ -274,11 +253,11 @@ func hiter(t *Type) *Type {
 	field[11] = makefield("checkBucket", Types[TUINTPTR])
 
 	// build iterator struct holding the above fields
-	i = typ(TSTRUCT)
+	i := typ(TSTRUCT)
 
 	i.Noalg = 1
 	i.Type = field[0]
-	for n = 0; n < int32(len(field)-1); n++ {
+	for n := int32(0); n < int32(len(field)-1); n++ {
 		field[n].Down = field[n+1]
 	}
 	field[len(field)-1].Down = nil
@@ -296,33 +275,29 @@ func hiter(t *Type) *Type {
  * return function type, receiver as first argument (or not).
  */
 func methodfunc(f *Type, receiver *Type) *Type {
-	var in *NodeList
-	var out *NodeList
-	var d *Node
-	var t *Type
-
-	in = nil
+	in := (*NodeList)(nil)
 	if receiver != nil {
-		d = Nod(ODCLFIELD, nil, nil)
+		d := Nod(ODCLFIELD, nil, nil)
 		d.Type = receiver
 		in = list(in, d)
 	}
 
-	for t = getinargx(f).Type; t != nil; t = t.Down {
+	var d *Node
+	for t := getinargx(f).Type; t != nil; t = t.Down {
 		d = Nod(ODCLFIELD, nil, nil)
 		d.Type = t.Type
 		d.Isddd = t.Isddd
 		in = list(in, d)
 	}
 
-	out = nil
-	for t = getoutargx(f).Type; t != nil; t = t.Down {
+	out := (*NodeList)(nil)
+	for t := getoutargx(f).Type; t != nil; t = t.Down {
 		d = Nod(ODCLFIELD, nil, nil)
 		d.Type = t.Type
 		out = list(out, d)
 	}
 
-	t = functype(nil, in, out)
+	t := functype(nil, in, out)
 	if f.Nname != nil {
 		// Link to name of original method function.
 		t.Nname = f.Nname
@@ -336,16 +311,8 @@ func methodfunc(f *Type, receiver *Type) *Type {
  * generates stub functions as needed.
  */
 func methods(t *Type) *Sig {
-	var f *Type
-	var mt *Type
-	var it *Type
-	var this *Type
-	var a *Sig
-	var b *Sig
-	var method *Sym
-
 	// method type
-	mt = methtype(t, 0)
+	mt := methtype(t, 0)
 
 	if mt == nil {
 		return nil
@@ -353,7 +320,7 @@ func methods(t *Type) *Sig {
 	expandmeth(mt)
 
 	// type stored in interface word
-	it = t
+	it := t
 
 	if !isdirectiface(it) {
 		it = Ptrto(t)
@@ -361,9 +328,12 @@ func methods(t *Type) *Sig {
 
 	// make list of methods for t,
 	// generating code if necessary.
-	a = nil
+	a := (*Sig)(nil)
 
-	for f = mt.Xmethod; f != nil; f = f.Down {
+	var this *Type
+	var b *Sig
+	var method *Sym
+	for f := mt.Xmethod; f != nil; f = f.Down {
 		if f.Etype != TFIELD {
 			Fatal("methods: not field %v", Tconv(f, 0))
 		}
@@ -439,15 +409,12 @@ func methods(t *Type) *Sig {
  */
 func imethods(t *Type) *Sig {
 	var a *Sig
-	var all *Sig
-	var last *Sig
-	var f *Type
 	var method *Sym
 	var isym *Sym
 
-	all = nil
-	last = nil
-	for f = t.Type; f != nil; f = f.Down {
+	all := (*Sig)(nil)
+	last := (*Sig)(nil)
+	for f := t.Type; f != nil; f = f.Down {
 		if f.Etype != TFIELD {
 			Fatal("imethods: not field")
 		}
@@ -501,9 +468,6 @@ func imethods(t *Type) *Sig {
 var dimportpath_gopkg *Pkg
 
 func dimportpath(p *Pkg) {
-	var nam string
-	var n *Node
-
 	if p.Pathsym != nil {
 		return
 	}
@@ -513,9 +477,9 @@ func dimportpath(p *Pkg) {
 		dimportpath_gopkg.Name = "go"
 	}
 
-	nam = fmt.Sprintf("importpath.%s.", p.Prefix)
+	nam := fmt.Sprintf("importpath.%s.", p.Prefix)
 
-	n = Nod(ONAME, nil, nil)
+	n := Nod(ONAME, nil, nil)
 	n.Sym = Pkglookup(nam, dimportpath_gopkg)
 
 	n.Class = PEXTERN
@@ -552,13 +516,7 @@ func dgopkgpath(s *Sym, ot int, pkg *Pkg) int {
  * ../../runtime/type.go:/uncommonType
  */
 func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
-	var ot int
-	var n int
-	var s *Sym
-	var a *Sig
-	var m *Sig
-
-	m = methods(t)
+	m := methods(t)
 	if t.Sym == nil && m == nil {
 		return off
 	}
@@ -568,14 +526,14 @@ func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
 
 	dsymptr(sym, ptroff, sym, off)
 
-	n = 0
-	for a = m; a != nil; a = a.link {
+	n := 0
+	for a := m; a != nil; a = a.link {
 		dtypesym(a.type_)
 		n++
 	}
 
-	ot = off
-	s = sym
+	ot := off
+	s := sym
 	if t.Sym != nil {
 		ot = dgostringptr(s, ot, t.Sym.Name)
 		if t != Types[t.Etype] && t != errortype {
@@ -595,7 +553,7 @@ func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
 	ot = duintxx(s, ot, uint64(n), Widthint)
 
 	// methods
-	for a = m; a != nil; a = a.link {
+	for a := m; a != nil; a = a.link {
 		// method
 		// ../../runtime/type.go:/method
 		ot = dgostringptr(s, ot, a.name)
@@ -648,13 +606,11 @@ var kinds = []int{
 }
 
 func haspointers(t *Type) bool {
-	var t1 *Type
-	var ret bool
-
 	if t.Haspointers != 0 {
 		return t.Haspointers-1 != 0
 	}
 
+	var ret bool
 	switch t.Etype {
 	case TINT,
 		TUINT,
@@ -689,7 +645,7 @@ func haspointers(t *Type) bool {
 
 	case TSTRUCT:
 		ret = false
-		for t1 = t.Type; t1 != nil; t1 = t1.Down {
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
 			if haspointers(t1.Type) {
 				ret = true
 				break
@@ -721,36 +677,22 @@ func haspointers(t *Type) bool {
 var dcommontype_algarray *Sym
 
 func dcommontype(s *Sym, ot int, t *Type) int {
-	var i int
-	var alg int
-	var sizeofAlg int
-	var gcprog bool
-	var sptr *Sym
-	var algsym *Sym
-	var zero *Sym
-	var gcprog0 *Sym
-	var gcprog1 *Sym
-	var sbits *Sym
-	var gcmask [16]uint8
-	var x1 uint64
-	var x2 uint64
-	var p string
-
 	if ot != 0 {
 		Fatal("dcommontype %d", ot)
 	}
 
-	sizeofAlg = 2 * Widthptr
+	sizeofAlg := 2 * Widthptr
 	if dcommontype_algarray == nil {
 		dcommontype_algarray = Pkglookup("algarray", Runtimepkg)
 	}
 	dowidth(t)
-	alg = algtype(t)
-	algsym = nil
+	alg := algtype(t)
+	algsym := (*Sym)(nil)
 	if alg < 0 || alg == AMEM {
 		algsym = dalgsym(t)
 	}
 
+	var sptr *Sym
 	if t.Sym != nil && Isptr[t.Etype] == 0 {
 		sptr = dtypesym(Ptrto(t))
 	} else {
@@ -762,7 +704,7 @@ func dcommontype(s *Sym, ot int, t *Type) int {
 	// might be returned by a runtime call (map access return value,
 	// 2-arg type cast) declares the size of the zerovalue it needs.
 	// The linker magically takes the max of all the sizes.
-	zero = Pkglookup("zerovalue", Runtimepkg)
+	zero := Pkglookup("zerovalue", Runtimepkg)
 
 	// We use size 0 here so we get the pointer to the zero value,
 	// but don't allocate space for the zero value unless we need it.
@@ -791,7 +733,7 @@ func dcommontype(s *Sym, ot int, t *Type) int {
 	ot = duint8(s, ot, 0) // unused
 
 	// runtime (and common sense) expects alignment to be a power of two.
-	i = int(t.Align)
+	i := int(t.Align)
 
 	if i == 0 {
 		i = 1
@@ -802,7 +744,7 @@ func dcommontype(s *Sym, ot int, t *Type) int {
 	ot = duint8(s, ot, t.Align) // align
 	ot = duint8(s, ot, t.Align) // fieldAlign
 
-	gcprog = usegcprog(t)
+	gcprog := usegcprog(t)
 
 	i = kinds[t.Etype]
 	if t.Etype == TARRAY && t.Bound < 0 {
@@ -826,6 +768,8 @@ func dcommontype(s *Sym, ot int, t *Type) int {
 
 	// gc
 	if gcprog {
+		var gcprog1 *Sym
+		var gcprog0 *Sym
 		gengcprog(t, &gcprog0, &gcprog1)
 		if gcprog0 != nil {
 			ot = dsymptr(s, ot, gcprog0, 0)
@@ -834,25 +778,27 @@ func dcommontype(s *Sym, ot int, t *Type) int {
 		}
 		ot = dsymptr(s, ot, gcprog1, 0)
 	} else {
+		var gcmask [16]uint8
 		gengcmask(t, gcmask[:])
-		x1 = 0
-		for i = 0; i < 8; i++ {
+		x1 := uint64(0)
+		for i := 0; i < 8; i++ {
 			x1 = x1<<8 | uint64(gcmask[i])
 		}
+		var p string
 		if Widthptr == 4 {
 			p = fmt.Sprintf("gcbits.0x%016x", x1)
 		} else {
-			x2 = 0
-			for i = 0; i < 8; i++ {
+			x2 := uint64(0)
+			for i := 0; i < 8; i++ {
 				x2 = x2<<8 | uint64(gcmask[i+8])
 			}
 			p = fmt.Sprintf("gcbits.0x%016x%016x", x1, x2)
 		}
 
-		sbits = Pkglookup(p, Runtimepkg)
+		sbits := Pkglookup(p, Runtimepkg)
 		if sbits.Flags&SymUniq == 0 {
 			sbits.Flags |= SymUniq
-			for i = 0; i < 2*Widthptr; i++ {
+			for i := 0; i < 2*Widthptr; i++ {
 				duint8(sbits, i, gcmask[i])
 			}
 			ggloblsym(sbits, 2*int32(Widthptr), obj.DUPOK|obj.RODATA)
@@ -862,7 +808,7 @@ func dcommontype(s *Sym, ot int, t *Type) int {
 		ot = duintptr(s, ot, 0)
 	}
 
-	p = fmt.Sprintf("%v", Tconv(t, obj.FmtLeft|obj.FmtUnsigned))
+	p := fmt.Sprintf("%v", Tconv(t, obj.FmtLeft|obj.FmtUnsigned))
 
 	//print("dcommontype: %s\n", p);
 	ot = dgostringptr(s, ot, p) // string
@@ -879,11 +825,8 @@ func dcommontype(s *Sym, ot int, t *Type) int {
 }
 
 func typesym(t *Type) *Sym {
-	var p string
-	var s *Sym
-
-	p = fmt.Sprintf("%v", Tconv(t, obj.FmtLeft))
-	s = Pkglookup(p, typepkg)
+	p := fmt.Sprintf("%v", Tconv(t, obj.FmtLeft))
+	s := Pkglookup(p, typepkg)
 
 	//print("typesym: %s -> %+S\n", p, s);
 
@@ -891,19 +834,13 @@ func typesym(t *Type) *Sym {
 }
 
 func tracksym(t *Type) *Sym {
-	var p string
-	var s *Sym
-
-	p = fmt.Sprintf("%v.%s", Tconv(t.Outer, obj.FmtLeft), t.Sym.Name)
-	s = Pkglookup(p, trackpkg)
+	p := fmt.Sprintf("%v.%s", Tconv(t.Outer, obj.FmtLeft), t.Sym.Name)
+	s := Pkglookup(p, trackpkg)
 
 	return s
 }
 
 func typelinksym(t *Type) *Sym {
-	var p string
-	var s *Sym
-
 	// %-uT is what the generated Type's string field says.
 	// It uses (ambiguous) package names instead of import paths.
 	// %-T is the complete, unambiguous type name.
@@ -912,9 +849,9 @@ func typelinksym(t *Type) *Sym {
 	// disambiguate. The names are a little long but they are
 	// discarded by the linker and do not end up in the symbol
 	// table of the final binary.
-	p = fmt.Sprintf("%v/%v", Tconv(t, obj.FmtLeft|obj.FmtUnsigned), Tconv(t, obj.FmtLeft))
+	p := fmt.Sprintf("%v/%v", Tconv(t, obj.FmtLeft|obj.FmtUnsigned), Tconv(t, obj.FmtLeft))
 
-	s = Pkglookup(p, typelinkpkg)
+	s := Pkglookup(p, typelinkpkg)
 
 	//print("typelinksym: %s -> %+S\n", p, s);
 
@@ -922,11 +859,8 @@ func typelinksym(t *Type) *Sym {
 }
 
 func typesymprefix(prefix string, t *Type) *Sym {
-	var p string
-	var s *Sym
-
-	p = fmt.Sprintf("%s.%v", prefix, Tconv(t, obj.FmtLeft))
-	s = Pkglookup(p, typepkg)
+	p := fmt.Sprintf("%s.%v", prefix, Tconv(t, obj.FmtLeft))
+	s := Pkglookup(p, typepkg)
 
 	//print("algsym: %s -> %+S\n", p, s);
 
@@ -934,15 +868,12 @@ func typesymprefix(prefix string, t *Type) *Sym {
 }
 
 func typenamesym(t *Type) *Sym {
-	var s *Sym
-	var n *Node
-
 	if t == nil || (Isptr[t.Etype] != 0 && t.Type == nil) || isideal(t) {
 		Fatal("typename %v", Tconv(t, 0))
 	}
-	s = typesym(t)
+	s := typesym(t)
 	if s.Def == nil {
-		n = Nod(ONAME, nil, nil)
+		n := Nod(ONAME, nil, nil)
 		n.Sym = s
 		n.Type = Types[TUINT8]
 		n.Addable = 1
@@ -959,11 +890,8 @@ func typenamesym(t *Type) *Sym {
 }
 
 func typename(t *Type) *Node {
-	var s *Sym
-	var n *Node
-
-	s = typenamesym(t)
-	n = Nod(OADDR, s.Def, nil)
+	s := typenamesym(t)
+	n := Nod(OADDR, s.Def, nil)
 	n.Type = Ptrto(s.Def.Type)
 	n.Addable = 1
 	n.Ullman = 2
@@ -972,11 +900,8 @@ func typename(t *Type) *Node {
 }
 
 func weaktypesym(t *Type) *Sym {
-	var p string
-	var s *Sym
-
-	p = fmt.Sprintf("%v", Tconv(t, obj.FmtLeft))
-	s = Pkglookup(p, weaktypepkg)
+	p := fmt.Sprintf("%v", Tconv(t, obj.FmtLeft))
+	s := Pkglookup(p, weaktypepkg)
 
 	//print("weaktypesym: %s -> %+S\n", p, s);
 
@@ -988,7 +913,6 @@ func weaktypesym(t *Type) *Sym {
  * That is, if x==x for all x of type t.
  */
 func isreflexive(t *Type) bool {
-	var t1 *Type
 	switch t.Etype {
 	case TBOOL,
 		TINT,
@@ -1023,7 +947,7 @@ func isreflexive(t *Type) bool {
 		return isreflexive(t.Type)
 
 	case TSTRUCT:
-		for t1 = t.Type; t1 != nil; t1 = t1.Down {
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
 			if !isreflexive(t1.Type) {
 				return false
 			}
@@ -1038,22 +962,8 @@ func isreflexive(t *Type) bool {
 }
 
 func dtypesym(t *Type) *Sym {
-	var ot int
-	var xt int
 	var n int
-	var isddd int
-	var dupok int
-	var s *Sym
-	var s1 *Sym
-	var s2 *Sym
-	var s3 *Sym
-	var s4 *Sym
-	var slink *Sym
-	var a *Sig
-	var m *Sig
 	var t1 *Type
-	var tbase *Type
-	var t2 *Type
 
 	// Replace byte, rune aliases with real type.
 	// They've been separate internally to make error messages
@@ -1066,7 +976,7 @@ func dtypesym(t *Type) *Sym {
 		Fatal("dtypesym %v", Tconv(t, 0))
 	}
 
-	s = typesym(t)
+	s := typesym(t)
 	if s.Flags&SymSiggen != 0 {
 		return s
 	}
@@ -1075,12 +985,12 @@ func dtypesym(t *Type) *Sym {
 	// special case (look for runtime below):
 	// when compiling package runtime,
 	// emit the type structures for int, float, etc.
-	tbase = t
+	tbase := t
 
 	if Isptr[t.Etype] != 0 && t.Sym == nil && t.Type.Sym != nil {
 		tbase = t.Type
 	}
-	dupok = 0
+	dupok := 0
 	if tbase.Sym == nil {
 		dupok = obj.DUPOK
 	}
@@ -1098,8 +1008,8 @@ func dtypesym(t *Type) *Sym {
 	}
 
 ok:
-	ot = 0
-	xt = 0
+	ot := 0
+	xt := 0
 	switch t.Etype {
 	default:
 		ot = dcommontype(s, ot, t)
@@ -1108,12 +1018,12 @@ ok:
 	case TARRAY:
 		if t.Bound >= 0 {
 			// ../../runtime/type.go:/ArrayType
-			s1 = dtypesym(t.Type)
+			s1 := dtypesym(t.Type)
 
-			t2 = typ(TARRAY)
+			t2 := typ(TARRAY)
 			t2.Type = t.Type
 			t2.Bound = -1 // slice
-			s2 = dtypesym(t2)
+			s2 := dtypesym(t2)
 			ot = dcommontype(s, ot, t)
 			xt = ot - 3*Widthptr
 			ot = dsymptr(s, ot, s1, 0)
@@ -1121,7 +1031,7 @@ ok:
 			ot = duintptr(s, ot, uint64(t.Bound))
 		} else {
 			// ../../runtime/type.go:/SliceType
-			s1 = dtypesym(t.Type)
+			s1 := dtypesym(t.Type)
 
 			ot = dcommontype(s, ot, t)
 			xt = ot - 3*Widthptr
@@ -1130,7 +1040,7 @@ ok:
 
 		// ../../runtime/type.go:/ChanType
 	case TCHAN:
-		s1 = dtypesym(t.Type)
+		s1 := dtypesym(t.Type)
 
 		ot = dcommontype(s, ot, t)
 		xt = ot - 3*Widthptr
@@ -1141,7 +1051,7 @@ ok:
 		for t1 = getthisx(t).Type; t1 != nil; t1 = t1.Down {
 			dtypesym(t1.Type)
 		}
-		isddd = 0
+		isddd := 0
 		for t1 = getinargx(t).Type; t1 != nil; t1 = t1.Down {
 			isddd = int(t1.Isddd)
 			dtypesym(t1.Type)
@@ -1178,9 +1088,9 @@ ok:
 		}
 
 	case TINTER:
-		m = imethods(t)
+		m := imethods(t)
 		n = 0
-		for a = m; a != nil; a = a.link {
+		for a := m; a != nil; a = a.link {
 			dtypesym(a.type_)
 			n++
 		}
@@ -1192,7 +1102,7 @@ ok:
 		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
 		ot = duintxx(s, ot, uint64(n), Widthint)
 		ot = duintxx(s, ot, uint64(n), Widthint)
-		for a = m; a != nil; a = a.link {
+		for a := m; a != nil; a = a.link {
 			// ../../runtime/type.go:/imethod
 			ot = dgostringptr(s, ot, a.name)
 
@@ -1202,11 +1112,11 @@ ok:
 
 		// ../../runtime/type.go:/MapType
 	case TMAP:
-		s1 = dtypesym(t.Down)
+		s1 := dtypesym(t.Down)
 
-		s2 = dtypesym(t.Type)
-		s3 = dtypesym(mapbucket(t))
-		s4 = dtypesym(hmap(t))
+		s2 := dtypesym(t.Type)
+		s3 := dtypesym(mapbucket(t))
+		s4 := dtypesym(hmap(t))
 		ot = dcommontype(s, ot, t)
 		xt = ot - 3*Widthptr
 		ot = dsymptr(s, ot, s1, 0)
@@ -1242,7 +1152,7 @@ ok:
 		}
 
 		// ../../runtime/type.go:/PtrType
-		s1 = dtypesym(t.Type)
+		s1 := dtypesym(t.Type)
 
 		ot = dcommontype(s, ot, t)
 		xt = ot - 3*Widthptr
@@ -1300,7 +1210,7 @@ ok:
 		case TARRAY,
 			TCHAN,
 			TMAP:
-			slink = typelinksym(t)
+			slink := typelinksym(t)
 			dsymptr(slink, 0, s, 0)
 			ggloblsym(slink, int32(Widthptr), int8(dupok|obj.RODATA))
 		}
@@ -1310,14 +1220,10 @@ ok:
 }
 
 func dumptypestructs() {
-	var i int
-	var l *NodeList
 	var n *Node
-	var t *Type
-	var p *Pkg
 
 	// copy types from externdcl list to signatlist
-	for l = externdcl; l != nil; l = l.Next {
+	for l := externdcl; l != nil; l = l.Next {
 		n = l.N
 		if n.Op != OTYPE {
 			continue
@@ -1326,7 +1232,8 @@ func dumptypestructs() {
 	}
 
 	// process signatlist
-	for l = signatlist; l != nil; l = l.Next {
+	var t *Type
+	for l := signatlist; l != nil; l = l.Next {
 		n = l.N
 		if n.Op != OTYPE {
 			continue
@@ -1339,7 +1246,8 @@ func dumptypestructs() {
 	}
 
 	// generate import strings for imported packages
-	for i = 0; i < len(phash); i++ {
+	var p *Pkg
+	for i := 0; i < len(phash); i++ {
 		for p = phash[i]; p != nil; p = p.Link {
 			if p.Direct != 0 {
 				dimportpath(p)
@@ -1354,7 +1262,7 @@ func dumptypestructs() {
 	// another possible choice would be package main,
 	// but using runtime means fewer copies in .6 files.
 	if compiling_runtime != 0 {
-		for i = 1; i <= TBOOL; i++ {
+		for i := 1; i <= TBOOL; i++ {
 			dtypesym(Ptrto(Types[i]))
 		}
 		dtypesym(Ptrto(Types[TSTRING]))
@@ -1377,20 +1285,16 @@ func dumptypestructs() {
 }
 
 func dalgsym(t *Type) *Sym {
-	var ot int
 	var s *Sym
-	var hash *Sym
 	var hashfunc *Sym
-	var eq *Sym
 	var eqfunc *Sym
-	var p string
 
 	// dalgsym is only called for a type that needs an algorithm table,
 	// which implies that the type is comparable (or else it would use ANOEQ).
 
 	if algtype(t) == AMEM {
 		// we use one algorithm table for all AMEM types of a given size
-		p = fmt.Sprintf(".alg%d", t.Width)
+		p := fmt.Sprintf(".alg%d", t.Width)
 
 		s = Pkglookup(p, typepkg)
 
@@ -1404,7 +1308,7 @@ func dalgsym(t *Type) *Sym {
 
 		hashfunc = Pkglookup(p, typepkg)
 
-		ot = 0
+		ot := 0
 		ot = dsymptr(hashfunc, ot, Pkglookup("memhash_varlen", Runtimepkg), 0)
 		ot = duintxx(hashfunc, ot, uint64(t.Width), Widthptr) // size encoded in closure
 		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)
@@ -1422,8 +1326,8 @@ func dalgsym(t *Type) *Sym {
 		// generate an alg table specific to this type
 		s = typesymprefix(".alg", t)
 
-		hash = typesymprefix(".hash", t)
-		eq = typesymprefix(".eq", t)
+		hash := typesymprefix(".hash", t)
+		eq := typesymprefix(".eq", t)
 		hashfunc = typesymprefix(".hashfunc", t)
 		eqfunc = typesymprefix(".eqfunc", t)
 
@@ -1439,7 +1343,7 @@ func dalgsym(t *Type) *Sym {
 	}
 
 	// ../../runtime/alg.go:/typeAlg
-	ot = 0
+	ot := 0
 
 	ot = dsymptr(s, ot, hashfunc, 0)
 	ot = dsymptr(s, ot, eqfunc, 0)
@@ -1448,9 +1352,6 @@ func dalgsym(t *Type) *Sym {
 }
 
 func usegcprog(t *Type) bool {
-	var size int64
-	var nptr int64
-
 	if !haspointers(t) {
 		return false
 	}
@@ -1459,9 +1360,9 @@ func usegcprog(t *Type) bool {
 	}
 
 	// Calculate size of the unrolled GC mask.
-	nptr = (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
+	nptr := (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
 
-	size = nptr
+	size := nptr
 	if size%2 != 0 {
 		size *= 2 // repeated
 	}
@@ -1478,16 +1379,7 @@ func usegcprog(t *Type) bool {
 
 // Generates sparse GC bitmask (4 bits per word).
 func gengcmask(t *Type, gcmask []byte) {
-	var vec *Bvec
-	var xoffset int64
-	var nptr int64
-	var i int64
-	var j int64
-	var half bool
-	var bits uint8
-	var pos []byte
-
-	for i = 0; i < 16; i++ {
+	for i := int64(0); i < 16; i++ {
 		gcmask[i] = 0
 	}
 	if !haspointers(t) {
@@ -1495,21 +1387,23 @@ func gengcmask(t *Type, gcmask []byte) {
 	}
 
 	// Generate compact mask as stacks use.
-	xoffset = 0
+	xoffset := int64(0)
 
-	vec = bvalloc(2 * int32(Widthptr) * 8)
+	vec := bvalloc(2 * int32(Widthptr) * 8)
 	twobitwalktype1(t, &xoffset, vec)
 
 	// Unfold the mask for the GC bitmap format:
 	// 4 bits per word, 2 high bits encode pointer info.
-	pos = gcmask
+	pos := gcmask
 
-	nptr = (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
-	half = false
+	nptr := (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
+	half := false
 
 	// If number of words is odd, repeat the mask.
 	// This makes simpler handling of arrays in runtime.
-	for j = 0; j <= (nptr % 2); j++ {
+	var i int64
+	var bits uint8
+	for j := int64(0); j <= (nptr % 2); j++ {
 		for i = 0; i < nptr; i++ {
 			bits = uint8(bvget(vec, int32(i*obj.BitsPerPointer)) | bvget(vec, int32(i*obj.BitsPerPointer+1))<<1)
 
@@ -1553,16 +1447,13 @@ func proggenemit(g *ProgGen, v uint8) {
 
 // Emits insData block from g->data.
 func proggendataflush(g *ProgGen) {
-	var i int32
-	var s int32
-
 	if g.datasize == 0 {
 		return
 	}
 	proggenemit(g, obj.InsData)
 	proggenemit(g, uint8(g.datasize))
-	s = (g.datasize + obj.PointersPerByte - 1) / obj.PointersPerByte
-	for i = 0; i < s; i++ {
+	s := (g.datasize + obj.PointersPerByte - 1) / obj.PointersPerByte
+	for i := int32(0); i < s; i++ {
 		proggenemit(g, g.data[i])
 	}
 	g.datasize = 0
@@ -1579,9 +1470,7 @@ func proggendata(g *ProgGen, d uint8) {
 
 // Skip v bytes due to alignment, etc.
 func proggenskip(g *ProgGen, off int64, v int64) {
-	var i int64
-
-	for i = off; i < off+v; i++ {
+	for i := off; i < off+v; i++ {
 		if (i % int64(Widthptr)) == 0 {
 			proggendata(g, obj.BitsScalar)
 		}
@@ -1612,16 +1501,8 @@ func proggenfini(g *ProgGen) int64 {
 
 // Generates GC program for large types.
 func gengcprog(t *Type, pgc0 **Sym, pgc1 **Sym) {
-	var gc0 *Sym
-	var gc1 *Sym
-	var nptr int64
-	var size int64
-	var ot int64
-	var xoffset int64
-	var g ProgGen
-
-	nptr = (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
-	size = nptr
+	nptr := (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
+	size := nptr
 	if size%2 != 0 {
 		size *= 2 // repeated twice
 	}
@@ -1633,30 +1514,25 @@ func gengcprog(t *Type, pgc0 **Sym, pgc1 **Sym) {
 
 	// Don't generate it if it's too large, runtime will unroll directly into GC bitmap.
 	if size <= obj.MaxGCMask {
-		gc0 = typesymprefix(".gc", t)
+		gc0 := typesymprefix(".gc", t)
 		ggloblsym(gc0, int32(size), obj.DUPOK|obj.NOPTR)
 		*pgc0 = gc0
 	}
 
 	// program in RODATA
-	gc1 = typesymprefix(".gcprog", t)
+	gc1 := typesymprefix(".gcprog", t)
 
+	var g ProgGen
 	proggeninit(&g, gc1)
-	xoffset = 0
+	xoffset := int64(0)
 	gengcprog1(&g, t, &xoffset)
-	ot = proggenfini(&g)
+	ot := proggenfini(&g)
 	ggloblsym(gc1, int32(ot), obj.DUPOK|obj.RODATA)
 	*pgc1 = gc1
 }
 
 // Recursively walks type t and writes GC program into g.
 func gengcprog1(g *ProgGen, t *Type, xoffset *int64) {
-	var fieldoffset int64
-	var i int64
-	var o int64
-	var n int64
-	var t1 *Type
-
 	switch t.Etype {
 	case TINT8,
 		TUINT8,
@@ -1704,16 +1580,16 @@ func gengcprog1(g *ProgGen, t *Type, xoffset *int64) {
 			proggendata(g, obj.BitsScalar)
 			proggendata(g, obj.BitsScalar)
 		} else {
-			t1 = t.Type
+			t1 := t.Type
 			if t1.Width == 0 {
 			}
 			// ignore
 			if t.Bound <= 1 || t.Bound*t1.Width < int64(32*Widthptr) {
-				for i = 0; i < t.Bound; i++ {
+				for i := int64(0); i < t.Bound; i++ {
 					gengcprog1(g, t1, xoffset)
 				}
 			} else if !haspointers(t1) {
-				n = t.Width
+				n := t.Width
 				n -= -*xoffset & (int64(Widthptr) - 1) // skip to next ptr boundary
 				proggenarray(g, (n+int64(Widthptr)-1)/int64(Widthptr))
 				proggendata(g, obj.BitsScalar)
@@ -1728,8 +1604,9 @@ func gengcprog1(g *ProgGen, t *Type, xoffset *int64) {
 		}
 
 	case TSTRUCT:
-		o = 0
-		for t1 = t.Type; t1 != nil; t1 = t1.Down {
+		o := int64(0)
+		var fieldoffset int64
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
 			fieldoffset = t1.Width
 			proggenskip(g, *xoffset, fieldoffset-o)
 			*xoffset += fieldoffset - o
diff --git a/src/cmd/internal/gc/reg.go b/src/cmd/internal/gc/reg.go
index 37a394c3a9e178b601d565bfe688d59b5e134c58..42a65daf54c7c7e3ce5efe5a325df981dbe04bd3 100644
--- a/src/cmd/internal/gc/reg.go
+++ b/src/cmd/internal/gc/reg.go
@@ -51,11 +51,8 @@ func (x rcmp) Swap(i, j int) {
 }
 
 func (x rcmp) Less(i, j int) bool {
-	var p1 *Rgn
-	var p2 *Rgn
-
-	p1 = &x[i]
-	p2 = &x[j]
+	p1 := &x[i]
+	p2 := &x[j]
 	if p1.cost != p2.cost {
 		return int(p2.cost)-int(p1.cost) < 0
 	}
@@ -96,7 +93,6 @@ var regnodes [64]*Node
 
 func walkvardef(n *Node, f *Flow, active int) {
 	var f1 *Flow
-	var f2 *Flow
 	var bn int
 	var v *Var
 
@@ -118,7 +114,7 @@ func walkvardef(n *Node, f *Flow, active int) {
 		}
 	}
 
-	for f2 = f; f2 != f1; f2 = f2.S1 {
+	for f2 := f; f2 != f1; f2 = f2.S1 {
 		if f2.S2 != nil {
 			walkvardef(n, f2.S2, active)
 		}
@@ -130,23 +126,18 @@ func walkvardef(n *Node, f *Flow, active int) {
  * just after r
  */
 func addmove(r *Flow, bn int, rn int, f int) {
-	var p *obj.Prog
-	var p1 *obj.Prog
-	var a *obj.Addr
-	var v *Var
-
-	p1 = Ctxt.NewProg()
+	p1 := Ctxt.NewProg()
 	Clearp(p1)
 	p1.Pc = 9999
 
-	p = r.Prog
+	p := r.Prog
 	p1.Link = p.Link
 	p.Link = p1
 	p1.Lineno = p.Lineno
 
-	v = &var_[bn:][0]
+	v := &var_[bn:][0]
 
-	a = &p1.To
+	a := &p1.To
 	a.Offset = v.offset
 	a.Etype = uint8(v.etype)
 	a.Type = obj.TYPE_MEM
@@ -183,11 +174,8 @@ func addmove(r *Flow, bn int, rn int, f int) {
 }
 
 func overlap_reg(o1 int64, w1 int, o2 int64, w2 int) bool {
-	var t1 int64
-	var t2 int64
-
-	t1 = o1 + int64(w1)
-	t2 = o2 + int64(w2)
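+	// t1 and t2 are the exclusive end offsets of the two ranges.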
+	t1 := o1 + int64(w1)
+	t2 := o2 + int64(w2)
 
 	if t1 <= o2 || t2 <= o1 {
 		return false
@@ -201,10 +189,8 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
 	var i int
 	var n int
 	var et int
-	var z int
 	var flag int
 	var w int64
-	var regu uint64
 	var o int64
 	var bit Bits
 	var node *Node
@@ -222,16 +208,17 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
 
 	switch a.Type {
 	default:
-		regu = Thearch.Doregbits(int(a.Reg)) | Thearch.RtoB(int(a.Reg)) // TODO: Use RtoB
+		regu := Thearch.Doregbits(int(a.Reg)) | Thearch.RtoB(int(a.Reg)) // TODO: Use RtoB
 		if regu == 0 {
 			goto none
 		}
-		bit = zbits
+		bit := zbits
 		bit.b[0] = regu
 		return bit
 
 		// TODO(rsc): Remove special case here.
 	case obj.TYPE_ADDR:
+		var bit Bits
 		if Thearch.Thechar == '9' || Thearch.Thechar == '5' {
 			goto memcase
 		}
@@ -285,7 +272,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
 	}
 
 	flag = 0
-	for i = 0; i < nvar; i++ {
+	for i := 0; i < nvar; i++ {
 		v = &var_[i:][0]
 		if v.node == node && int(v.name) == n {
 			if v.offset == o {
@@ -324,7 +311,8 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
 		// having its address taken, so that we keep the whole thing
 		// live at all calls. otherwise we might optimize away part of
 		// a variable but not all of it.
-		for i = 0; i < nvar; i++ {
+		var v *Var
+		for i := 0; i < nvar; i++ {
 			v = &var_[i:][0]
 			if v.node == node {
 				v.addr = 1
@@ -355,23 +343,23 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
 
 	bit = blsh(uint(i))
 	if n == obj.NAME_EXTERN || n == obj.NAME_STATIC {
-		for z = 0; z < BITS; z++ {
+		for z := 0; z < BITS; z++ {
 			externs.b[z] |= bit.b[z]
 		}
 	}
 	if n == obj.NAME_PARAM {
-		for z = 0; z < BITS; z++ {
+		for z := 0; z < BITS; z++ {
 			params.b[z] |= bit.b[z]
 		}
 	}
 
 	if node.Class == PPARAM {
-		for z = 0; z < BITS; z++ {
+		for z := 0; z < BITS; z++ {
 			ivar.b[z] |= bit.b[z]
 		}
 	}
 	if node.Class == PPARAMOUT {
-		for z = 0; z < BITS; z++ {
+		for z := 0; z < BITS; z++ {
 			ovar.b[z] |= bit.b[z]
 		}
 	}
@@ -420,8 +408,6 @@ none:
 
 func prop(f *Flow, ref Bits, cal Bits) {
 	var f1 *Flow
-	var f2 *Flow
-	var r *Reg
 	var r1 *Reg
 	var z int
 	var i int
@@ -533,6 +519,8 @@ func prop(f *Flow, ref Bits, cal Bits) {
 		f1.Active = 1
 	}
 
+	var r *Reg
+	var f2 *Flow
 	for ; f != f1; f = f.P1 {
 		r = f.Data.(*Reg)
 		for f2 = f.P2; f2 != nil; f2 = f2.P2link {
@@ -542,11 +530,10 @@ func prop(f *Flow, ref Bits, cal Bits) {
 }
 
 func synch(f *Flow, dif Bits) {
-	var f1 *Flow
 	var r1 *Reg
 	var z int
 
-	for f1 = f; f1 != nil; f1 = f1.S1 {
+	for f1 := f; f1 != nil; f1 = f1.S1 {
 		r1 = f1.Data.(*Reg)
 		for z = 0; z < BITS; z++ {
 			dif.b[z] = dif.b[z]&^(^r1.refbehind.b[z]&r1.refahead.b[z]) | r1.set.b[z] | r1.regdiff.b[z]
@@ -570,10 +557,7 @@ func synch(f *Flow, dif Bits) {
 }
 
 func allreg(b uint64, r *Rgn) uint64 {
-	var v *Var
-	var i int
-
-	v = &var_[r.varno:][0]
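+	// Pick a register for the region's variable: integer registers for
+	// integer/pointer types, floating-point registers for floats.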
+	v := &var_[r.varno:][0]
 	r.regno = 0
 	switch v.etype {
 	default:
@@ -593,7 +577,7 @@ func allreg(b uint64, r *Rgn) uint64 {
 		TBOOL,
 		TPTR32,
 		TPTR64:
-		i = Thearch.BtoR(^b)
+		i := Thearch.BtoR(^b)
 		if i != 0 && r.cost > 0 {
 			r.regno = int16(i)
 			return Thearch.RtoB(i)
@@ -601,7 +585,7 @@ func allreg(b uint64, r *Rgn) uint64 {
 
 	case TFLOAT32,
 		TFLOAT64:
-		i = Thearch.BtoF(^b)
+		i := Thearch.BtoF(^b)
 		if i != 0 && r.cost > 0 {
 			r.regno = int16(i)
 			return Thearch.FtoB(i)
@@ -620,18 +604,14 @@ func STORE(r *Reg, z int) uint64 {
 }
 
 func paint1(f *Flow, bn int) {
-	var f1 *Flow
-	var r *Reg
-	var r1 *Reg
-	var z int
-	var bb uint64
-
-	z = bn / 64
-	bb = 1 << uint(bn%64)
-	r = f.Data.(*Reg)
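+	// z is the word index and bb the single-bit mask for variable bn within a Bits vector.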
+	z := bn / 64
+	bb := uint64(1 << uint(bn%64))
+	r := f.Data.(*Reg)
 	if r.act.b[z]&bb != 0 {
 		return
 	}
+	var f1 *Flow
+	var r1 *Reg
 	for {
 		if r.refbehind.b[z]&bb == 0 {
 			break
@@ -703,20 +683,15 @@ func paint1(f *Flow, bn int) {
 }
 
 func paint2(f *Flow, bn int, depth int) uint64 {
-	var f1 *Flow
-	var r *Reg
-	var r1 *Reg
-	var z int
-	var bb uint64
-	var vreg uint64
-
-	z = bn / 64
-	bb = 1 << uint(bn%64)
-	vreg = regbits
-	r = f.Data.(*Reg)
+	z := bn / 64
+	bb := uint64(1 << uint(bn%64))
+	vreg := regbits
+	r := f.Data.(*Reg)
 	if r.act.b[z]&bb == 0 {
 		return vreg
 	}
+	var r1 *Reg
+	var f1 *Flow
 	for {
 		if r.refbehind.b[z]&bb == 0 {
 			break
@@ -779,19 +754,14 @@ func paint2(f *Flow, bn int, depth int) uint64 {
 }
 
 func paint3(f *Flow, bn int, rb uint64, rn int) {
-	var f1 *Flow
-	var r *Reg
-	var r1 *Reg
-	var p *obj.Prog
-	var z int
-	var bb uint64
-
-	z = bn / 64
-	bb = 1 << uint(bn%64)
-	r = f.Data.(*Reg)
+	z := bn / 64
+	bb := uint64(1 << uint(bn%64))
+	r := f.Data.(*Reg)
 	if r.act.b[z]&bb != 0 {
 		return
 	}
+	var r1 *Reg
+	var f1 *Flow
 	for {
 		if r.refbehind.b[z]&bb == 0 {
 			break
@@ -814,6 +784,7 @@ func paint3(f *Flow, bn int, rb uint64, rn int) {
 	if LOAD(r, z)&^(r.set.b[z]&^(r.use1.b[z]|r.use2.b[z]))&bb != 0 {
 		addmove(f, bn, rn, 0)
 	}
+	var p *obj.Prog
 	for {
 		r.act.b[z] |= bb
 		p = f.Prog
@@ -886,14 +857,11 @@ func addreg(a *obj.Addr, rn int) {
 }
 
 func dumpone(f *Flow, isreg int) {
-	var z int
-	var bit Bits
-	var r *Reg
-
 	fmt.Printf("%d:%v", f.Loop, f.Prog)
 	if isreg != 0 {
-		r = f.Data.(*Reg)
-		for z = 0; z < BITS; z++ {
+		r := f.Data.(*Reg)
+		var bit Bits
+		for z := 0; z < BITS; z++ {
 			bit.b[z] = r.set.b[z] | r.use1.b[z] | r.use2.b[z] | r.refbehind.b[z] | r.refahead.b[z] | r.calbehind.b[z] | r.calahead.b[z] | r.regdiff.b[z] | r.act.b[z] | 0
 		}
 		if bany(&bit) {
@@ -932,11 +900,10 @@ func dumpone(f *Flow, isreg int) {
 }
 
 func Dumpit(str string, r0 *Flow, isreg int) {
-	var r *Flow
 	var r1 *Flow
 
 	fmt.Printf("\n%s\n", str)
-	for r = r0; r != nil; r = r.Link {
+	for r := r0; r != nil; r = r.Link {
 		dumpone(r, isreg)
 		r1 = r.P2
 		if r1 != nil {
@@ -967,23 +934,6 @@ func Dumpit(str string, r0 *Flow, isreg int) {
 }
 
 func regopt(firstp *obj.Prog) {
-	var f *Flow
-	var f1 *Flow
-	var r *Reg
-	var p *obj.Prog
-	var g *Graph
-	var info ProgInfo
-	var i int
-	var z int
-	var active int
-	var vreg uint64
-	var usedreg uint64
-	var mask uint64
-	var nreg int
-	var regnames []string
-	var bit Bits
-	var rgp *Rgn
-
 	if first != 0 {
 		first = 0
 	}
@@ -995,13 +945,14 @@ func regopt(firstp *obj.Prog) {
 	 * than in generated c code.  define pseudo-variables for
 	 * registers, so we have complete register usage information.
 	 */
-	regnames = Thearch.Regnames(&nreg)
+	var nreg int
+	regnames := Thearch.Regnames(&nreg)
 
 	nvar = nreg
-	for i = 0; i < nreg; i++ {
+	for i := 0; i < nreg; i++ {
 		var_[i] = Var{}
 	}
-	for i = 0; i < nreg; i++ {
+	for i := 0; i < nreg; i++ {
 		if regnodes[i] == nil {
 			regnodes[i] = newname(Lookup(regnames[i]))
 		}
@@ -1022,10 +973,10 @@ func regopt(firstp *obj.Prog) {
 	 * allocate pcs
 	 * find use and set of variables
 	 */
-	g = Flowstart(firstp, func() interface{} { return new(Reg) })
+	g := Flowstart(firstp, func() interface{} { return new(Reg) })
 
 	if g == nil {
-		for i = 0; i < nvar; i++ {
+		for i := 0; i < nvar; i++ {
 			var_[i].node.Opt = nil
 		}
 		return
@@ -1033,7 +984,12 @@ func regopt(firstp *obj.Prog) {
 
 	firstf = g.Start
 
-	for f = firstf; f != nil; f = f.Link {
+	var r *Reg
+	var info ProgInfo
+	var p *obj.Prog
+	var bit Bits
+	var z int
+	for f := firstf; f != nil; f = f.Link {
 		p = f.Prog
 		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
 			continue
@@ -1097,9 +1053,8 @@ func regopt(firstp *obj.Prog) {
 		}
 	}
 
-	for i = 0; i < nvar; i++ {
-		var v *Var
-		v = &var_[i:][0]
+	for i := 0; i < nvar; i++ {
+		v := &var_[i:][0]
 		if v.addr != 0 {
 			bit = blsh(uint(i))
 			for z = 0; z < BITS; z++ {
@@ -1133,15 +1088,15 @@ func regopt(firstp *obj.Prog) {
 	 * (r->act will be reused in pass 5 for something else,
 	 * but we'll be done with it by then.)
 	 */
-	active = 0
+	active := 0
 
-	for f = firstf; f != nil; f = f.Link {
+	for f := firstf; f != nil; f = f.Link {
 		f.Active = 0
 		r = f.Data.(*Reg)
 		r.act = zbits
 	}
 
-	for f = firstf; f != nil; f = f.Link {
+	for f := firstf; f != nil; f = f.Link {
 		p = f.Prog
 		if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) && ((p.To.Node).(*Node)).Opt != nil {
 			active++
@@ -1154,6 +1109,9 @@ func regopt(firstp *obj.Prog) {
 	 * iterate propagating usage
 	 * 	back until flow graph is complete
 	 */
+	var f1 *Flow
+	var i int
+	var f *Flow
 loop1:
 	change = 0
 
@@ -1213,8 +1171,8 @@ loop2:
 	 * pass 4.5
 	 * move register pseudo-variables into regu.
 	 */
-	mask = (1 << uint(nreg)) - 1
-	for f = firstf; f != nil; f = f.Link {
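+	// The low nreg bits stand for the register pseudo-variables defined above.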
+	mask := uint64((1 << uint(nreg)) - 1)
+	for f := firstf; f != nil; f = f.Link {
 		r = f.Data.(*Reg)
 		r.regu = (r.refbehind.b[0] | r.set.b[0]) & mask
 		r.set.b[0] &^= mask
@@ -1240,8 +1198,8 @@ loop2:
 	f = firstf
 
 	if f != nil {
-		r = f.Data.(*Reg)
-		for z = 0; z < BITS; z++ {
+		r := f.Data.(*Reg)
+		for z := 0; z < BITS; z++ {
 			bit.b[z] = (r.refahead.b[z] | r.calahead.b[z]) &^ (externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z])
 		}
 		if bany(&bit) && f.Refset == 0 {
@@ -1253,11 +1211,12 @@ loop2:
 		}
 	}
 
-	for f = firstf; f != nil; f = f.Link {
+	for f := firstf; f != nil; f = f.Link {
 		(f.Data.(*Reg)).act = zbits
 	}
 	nregion = 0
-	for f = firstf; f != nil; f = f.Link {
+	var rgp *Rgn
+	for f := firstf; f != nil; f = f.Link {
 		r = f.Data.(*Reg)
 		for z = 0; z < BITS; z++ {
 			bit.b[z] = r.set.b[z] &^ (r.refahead.b[z] | r.calahead.b[z] | addrs.b[z])
@@ -1311,7 +1270,9 @@ brk:
 	if Debug['R'] != 0 && Debug['v'] != 0 {
 		fmt.Printf("\nregisterizing\n")
 	}
-	for i = 0; i < nregion; i++ {
+	var usedreg uint64
+	var vreg uint64
+	for i := 0; i < nregion; i++ {
 		rgp = &region[i]
 		if Debug['R'] != 0 && Debug['v'] != 0 {
 			fmt.Printf("region %d: cost %d varno %d enter %d\n", i, rgp.cost, rgp.varno, rgp.enter.Prog.Pc)
@@ -1321,9 +1282,7 @@ brk:
 		vreg = allreg(usedreg, rgp)
 		if rgp.regno != 0 {
 			if Debug['R'] != 0 && Debug['v'] != 0 {
-				var v *Var
-
-				v = &var_[rgp.varno:][0]
+				v := &var_[rgp.varno:][0]
 				fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", Nconv(v.node, 0), v.offset, rgp.varno, Econv(int(v.etype), 0), Ctxt.Rconv(int(rgp.regno)), usedreg, vreg)
 			}
 
@@ -1334,7 +1293,7 @@ brk:
 	/*
 	 * free aux structures. peep allocates new ones.
 	 */
-	for i = 0; i < nvar; i++ {
+	for i := 0; i < nvar; i++ {
 		var_[i].node.Opt = nil
 	}
 	Flowend(g)
@@ -1342,7 +1301,7 @@ brk:
 
 	if Debug['R'] != 0 && Debug['v'] != 0 {
 		// Rebuild flow graph, since we inserted instructions
-		g = Flowstart(firstp, nil)
+		g := Flowstart(firstp, nil)
 
 		firstf = g.Start
 		Dumpit("pass6", firstf, 0)
@@ -1361,7 +1320,7 @@ brk:
 	/*
 	 * eliminate nops
 	 */
-	for p = firstp; p != nil; p = p.Link {
+	for p := firstp; p != nil; p = p.Link {
 		for p.Link != nil && p.Link.As == obj.ANOP {
 			p.Link = p.Link.Link
 		}
diff --git a/src/cmd/internal/gc/select.go b/src/cmd/internal/gc/select.go
index ab7a1445be6d2f5aa1db7c271850d8b498f8cacf..ca6a21def9581393e58325c09577f9ee9cbe7665 100644
--- a/src/cmd/internal/gc/select.go
+++ b/src/cmd/internal/gc/select.go
@@ -10,16 +10,12 @@ package gc
 func typecheckselect(sel *Node) {
 	var ncase *Node
 	var n *Node
-	var def *Node
-	var l *NodeList
-	var lno int
-	var count int
-
-	def = nil
-	lno = int(setlineno(sel))
-	count = 0
+
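+	// def records the default: clause, if any.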
+	def := (*Node)(nil)
+	lno := int(setlineno(sel))
+	count := 0
 	typechecklist(sel.Ninit, Etop)
-	for l = sel.List; l != nil; l = l.Next {
+	for l := sel.List; l != nil; l = l.Next {
 		count++
 		ncase = l.N
 		setlineno(ncase)
@@ -94,27 +90,20 @@ func typecheckselect(sel *Node) {
 }
 
 func walkselect(sel *Node) {
-	var lno int
-	var i int
-	var n *Node
-	var r *Node
-	var a *Node
-	var var_ *Node
-	var selv *Node
-	var cas *Node
-	var dflt *Node
-	var ch *Node
-	var l *NodeList
-	var init *NodeList
-
 	if sel.List == nil && sel.Xoffset != 0 {
 		Fatal("double walkselect") // already rewrote
 	}
 
-	lno = int(setlineno(sel))
-	i = count(sel.List)
+	lno := int(setlineno(sel))
+	i := count(sel.List)
 
+	var init *NodeList
+	var r *Node
+	var n *Node
+	var var_ *Node
+	var selv *Node
+	var cas *Node
+
 	// optimization: zero-case select
 	if i == 0 {
 		sel.Nbody = list1(mkcall("block", nil, nil))
 		goto out
@@ -124,13 +113,14 @@ func walkselect(sel *Node) {
 	// TODO(rsc): Reenable optimization once order.c can handle it.
 	// golang.org/issue/7672.
 	if i == 1 {
-		cas = sel.List.N
+		cas := sel.List.N
 		setlineno(cas)
-		l = cas.Ninit
+		l := cas.Ninit
 		if cas.Left != nil { // not default:
-			n = cas.Left
+			n := cas.Left
 			l = concat(l, n.Ninit)
 			n.Ninit = nil
+			var ch *Node
 			switch n.Op {
 			default:
 				Fatal("select %v", Oconv(int(n.Op), 0))
@@ -167,7 +157,7 @@ func walkselect(sel *Node) {
 			}
 
 			// if ch == nil { block() }; n;
-			a = Nod(OIF, nil, nil)
+			a := Nod(OIF, nil, nil)
 
 			a.Ntest = Nod(OEQ, ch, nodnil())
 			a.Nbody = list1(mkcall("block", nil, &l))
@@ -183,7 +173,7 @@ func walkselect(sel *Node) {
 
 	// convert case value arguments to addresses.
 	// this rewrite is used by both the general code and the next optimization.
-	for l = sel.List; l != nil; l = l.Next {
+	for l := sel.List; l != nil; l = l.Next {
 		cas = l.N
 		setlineno(cas)
 		n = cas.Left
@@ -216,6 +206,8 @@ func walkselect(sel *Node) {
 
 	// optimization: two-case select but one is default: single non-blocking op.
 	if i == 2 && (sel.List.N.Left == nil || sel.List.Next.N.Left == nil) {
+		var cas *Node
+		var dflt *Node
 		if sel.List.N.Left == nil {
 			cas = sel.List.Next.N
 			dflt = sel.List.N
@@ -224,9 +216,9 @@ func walkselect(sel *Node) {
 			cas = sel.List.N
 		}
 
-		n = cas.Left
+		n := cas.Left
 		setlineno(n)
-		r = Nod(OIF, nil, nil)
+		r := Nod(OIF, nil, nil)
 		r.Ninit = cas.Ninit
 		switch n.Op {
 		default:
@@ -234,7 +226,7 @@ func walkselect(sel *Node) {
 
 			// if selectnbsend(c, v) { body } else { default body }
 		case OSEND:
-			ch = n.Left
+			ch := n.Left
 
 			r.Ntest = mkcall1(chanfn("selectnbsend", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), ch, n.Right)
 
@@ -243,7 +235,7 @@ func walkselect(sel *Node) {
 			r = Nod(OIF, nil, nil)
 
 			r.Ninit = cas.Ninit
-			ch = n.Right.Left
+			ch := n.Right.Left
 			r.Ntest = mkcall1(chanfn("selectnbrecv", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, ch)
 
 			// if c != nil && selectnbrecv2(&v, c) { body } else { default body }
@@ -251,7 +243,7 @@ func walkselect(sel *Node) {
 			r = Nod(OIF, nil, nil)
 
 			r.Ninit = cas.Ninit
-			ch = n.Right.Left
+			ch := n.Right.Left
 			r.Ntest = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, n.Ntest, ch)
 		}
 
@@ -278,7 +270,7 @@ func walkselect(sel *Node) {
 	init = list(init, r)
 
 	// register cases
-	for l = sel.List; l != nil; l = l.Next {
+	for l := sel.List; l != nil; l = l.Next {
 		cas = l.N
 		setlineno(cas)
 		n = cas.Left
@@ -334,14 +326,9 @@ out:
 
 // Keep in sync with src/runtime/chan.h.
 func selecttype(size int32) *Type {
-	var sel *Node
-	var sudog *Node
-	var scase *Node
-	var arr *Node
-
 	// TODO(dvyukov): it's possible to generate SudoG and Scase only once
 	// and then cache; and also cache Select per size.
-	sudog = Nod(OTSTRUCT, nil, nil)
+	sudog := Nod(OTSTRUCT, nil, nil)
 
 	sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("g")), typenod(Ptrto(Types[TUINT8]))))
 	sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("selectdone")), typenod(Ptrto(Types[TUINT8]))))
@@ -355,7 +342,7 @@ func selecttype(size int32) *Type {
 	sudog.Type.Noalg = 1
 	sudog.Type.Local = 1
 
-	scase = Nod(OTSTRUCT, nil, nil)
+	scase := Nod(OTSTRUCT, nil, nil)
 	scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("elem")), typenod(Ptrto(Types[TUINT8]))))
 	scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("chan")), typenod(Ptrto(Types[TUINT8]))))
 	scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("pc")), typenod(Types[TUINTPTR])))
@@ -367,12 +354,12 @@ func selecttype(size int32) *Type {
 	scase.Type.Noalg = 1
 	scase.Type.Local = 1
 
-	sel = Nod(OTSTRUCT, nil, nil)
+	sel := Nod(OTSTRUCT, nil, nil)
 	sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("tcase")), typenod(Types[TUINT16])))
 	sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("ncase")), typenod(Types[TUINT16])))
 	sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("pollorder")), typenod(Ptrto(Types[TUINT8]))))
 	sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("lockorder")), typenod(Ptrto(Types[TUINT8]))))
-	arr = Nod(OTARRAY, Nodintconst(int64(size)), scase)
+	arr := Nod(OTARRAY, Nodintconst(int64(size)), scase)
 	sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("scase")), arr))
 	arr = Nod(OTARRAY, Nodintconst(int64(size)), typenod(Ptrto(Types[TUINT8])))
 	sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("lockorderarr")), arr))
diff --git a/src/cmd/internal/gc/sinit.go b/src/cmd/internal/gc/sinit.go
index 57422b44b10490424e20e1e05027e696abf8375c..ca8db418619cb22d3078c3dba6bdd42e4161d683 100644
--- a/src/cmd/internal/gc/sinit.go
+++ b/src/cmd/internal/gc/sinit.go
@@ -23,15 +23,12 @@ var initlist *NodeList
 // init1 walks the AST starting at n, and accumulates in out
 // the list of definitions needing init code in dependency order.
 func init1(n *Node, out **NodeList) {
-	var l *NodeList
-	var nv *Node
-
 	if n == nil {
 		return
 	}
 	init1(n.Left, out)
 	init1(n.Right, out)
-	for l = n.List; l != nil; l = l.Next {
+	for l := n.List; l != nil; l = l.Next {
 		init1(l.N, out)
 	}
 
@@ -72,12 +69,13 @@ func init1(n *Node, out **NodeList) {
 		// Conversely, if there exists an initialization cycle involving
 		// a variable in the program, the tree walk will reach a cycle
 		// involving that variable.
+		var nv *Node
 		if n.Class != PFUNC {
 			nv = n
 			goto foundinitloop
 		}
 
-		for l = initlist; l.N != n; l = l.Next {
+		for l := initlist; l.N != n; l = l.Next {
 			if l.N.Class != PFUNC {
 				nv = l.N
 				goto foundinitloop
@@ -103,13 +101,14 @@ func init1(n *Node, out **NodeList) {
 		fmt.Printf("%v: initialization loop:\n", nv.Line())
 
 		// Build back pointers in initlist.
-		for l = initlist; l != nil; l = l.Next {
+		for l := initlist; l != nil; l = l.Next {
 			if l.Next != nil {
 				l.Next.End = l
 			}
 		}
 
+		var l *NodeList
 		// Print nv -> ... -> n.
 		for l = initlist; l.N != nv; l = l.Next {
 		}
 		for ; l != nil; l = l.End {
@@ -129,7 +128,7 @@ func init1(n *Node, out **NodeList) {
 	// reached a new unvisited node.
 	n.Initorder = InitPending
 
-	l = new(NodeList)
+	l := new(NodeList)
 	if l == nil {
 		Flusherrors()
 		Yyerror("out of memory")
@@ -181,7 +180,7 @@ func init1(n *Node, out **NodeList) {
 				break
 			}
 			n.Defn.Initorder = InitDone
-			for l = n.Defn.Rlist; l != nil; l = l.Next {
+			for l := n.Defn.Rlist; l != nil; l = l.Next {
 				init1(l.N, out)
 			}
 			if Debug['%'] != 0 {
@@ -261,11 +260,8 @@ func initreorder(l *NodeList, out **NodeList) {
 // declarations and outputs the corresponding list of statements
 // to include in the init() function body.
 func initfix(l *NodeList) *NodeList {
-	var lout *NodeList
-	var lno int
-
-	lout = nil
-	lno = int(lineno)
+	lout := (*NodeList)(nil)
+	lno := int(lineno)
 	initreorder(l, &lout)
 	lineno = int32(lno)
 	return lout
@@ -276,31 +272,19 @@ func initfix(l *NodeList) *NodeList {
  * into DATA statements if at all possible.
  */
 func staticinit(n *Node, out **NodeList) bool {
-	var l *Node
-	var r *Node
-
 	if n.Op != ONAME || n.Class != PEXTERN || n.Defn == nil || n.Defn.Op != OAS {
 		Fatal("staticinit")
 	}
 
 	lineno = n.Lineno
-	l = n.Defn.Left
-	r = n.Defn.Right
+	l := n.Defn.Left
+	r := n.Defn.Right
 	return staticassign(l, r, out)
 }
 
 // like staticassign but we are copying an already
 // initialized value r.
 func staticcopy(l *Node, r *Node, out **NodeList) bool {
-	var i int
-	var e *InitEntry
-	var p *InitPlan
-	var a *Node
-	var ll *Node
-	var rr *Node
-	var orig *Node
-	var n1 Node
-
 	if r.Op != ONAME || r.Class != PEXTERN || r.Sym.Pkg != localpkg {
 		return false
 	}
@@ -310,7 +294,7 @@ func staticcopy(l *Node, r *Node, out **NodeList) bool {
 	if r.Defn.Op != OAS {
 		return false
 	}
-	orig = r
+	orig := r
 	r = r.Defn.Right
 
 	switch r.Op {
@@ -353,9 +337,9 @@ func staticcopy(l *Node, r *Node, out **NodeList) bool {
 	case OARRAYLIT:
 		if Isslice(r.Type) {
 			// copy slice
-			a = r.Nname
+			a := r.Nname
 
-			n1 = *l
+			n1 := *l
 			n1.Xoffset = l.Xoffset + int64(Array_array)
 			gdata(&n1, Nod(OADDR, a, nil), Widthptr)
 			n1.Xoffset = l.Xoffset + int64(Array_nel)
@@ -368,10 +352,13 @@ func staticcopy(l *Node, r *Node, out **NodeList) bool {
 
 		// fall through
 	case OSTRUCTLIT:
-		p = r.Initplan
+		p := r.Initplan
 
-		n1 = *l
-		for i = 0; i < len(p.E); i++ {
+		n1 := *l
+		var e *InitEntry
+		var ll *Node
+		var rr *Node
+		for i := 0; i < len(p.E); i++ {
 			e = &p.E[i]
 			n1.Xoffset = l.Xoffset + e.Xoffset
 			n1.Type = e.Expr.Type
@@ -402,14 +389,7 @@ func staticcopy(l *Node, r *Node, out **NodeList) bool {
 }
 
 func staticassign(l *Node, r *Node, out **NodeList) bool {
-	var a *Node
 	var n1 Node
-	var nam Node
-	var ta *Type
-	var p *InitPlan
-	var e *InitEntry
-	var i int
-	var sval *Strlit
 
 	switch r.Op {
 	//dump("not static", r);
@@ -429,8 +409,9 @@ func staticassign(l *Node, r *Node, out **NodeList) bool {
 		return true
 
 	case OADDR:
+		var nam Node
 		if stataddr(&nam, r.Left) {
-			n1 = *r
+			n1 := *r
 			n1.Left = &nam
 			gdata(l, &n1, int(l.Type.Width))
 			return true
@@ -447,7 +428,7 @@ func staticassign(l *Node, r *Node, out **NodeList) bool {
 		case OARRAYLIT,
 			OMAPLIT,
 			OSTRUCTLIT:
-			a = staticname(r.Left.Type, 1)
+			a := staticname(r.Left.Type, 1)
 
 			r.Nname = a
 			gdata(l, Nod(OADDR, a, nil), int(l.Type.Width))
@@ -461,7 +442,7 @@ func staticassign(l *Node, r *Node, out **NodeList) bool {
 
 	case OSTRARRAYBYTE:
 		if l.Class == PEXTERN && r.Left.Op == OLITERAL {
-			sval = r.Left.Val.U.Sval
+			sval := r.Left.Val.U.Sval
 			slicebytes(l, sval.S, len(sval.S))
 			return true
 		}
@@ -470,11 +451,11 @@ func staticassign(l *Node, r *Node, out **NodeList) bool {
 		initplan(r)
 		if Isslice(r.Type) {
 			// Init slice.
-			ta = typ(TARRAY)
+			ta := typ(TARRAY)
 
 			ta.Type = r.Type.Type
 			ta.Bound = Mpgetfix(r.Right.Val.U.Xval)
-			a = staticname(ta, 1)
+			a := staticname(ta, 1)
 			r.Nname = a
 			n1 = *l
 			n1.Xoffset = l.Xoffset + int64(Array_array)
@@ -493,9 +474,11 @@ func staticassign(l *Node, r *Node, out **NodeList) bool {
 	case OSTRUCTLIT:
 		initplan(r)
 
-		p = r.Initplan
+		p := r.Initplan
 		n1 = *l
-		for i = 0; i < len(p.E); i++ {
+		var e *InitEntry
+		var a *Node
+		for i := 0; i < len(p.E); i++ {
 			e = &p.E[i]
 			n1.Xoffset = l.Xoffset + e.Xoffset
 			n1.Type = e.Expr.Type
@@ -529,11 +512,9 @@ func staticassign(l *Node, r *Node, out **NodeList) bool {
  * part of the composite literal.
  */
 func staticname(t *Type, ctxt int) *Node {
-	var n *Node
-
 	namebuf = fmt.Sprintf("statictmp_%.4d", statuniqgen)
 	statuniqgen++
-	n = newname(Lookup(namebuf))
+	n := newname(Lookup(namebuf))
 	if ctxt == 0 {
 		n.Readonly = 1
 	}
@@ -570,9 +551,7 @@ no:
 }
 
 func litas(l *Node, r *Node, init **NodeList) {
-	var a *Node
-
-	a = Nod(OAS, l, r)
+	a := Nod(OAS, l, r)
 	typecheck(&a, Etop)
 	walkexpr(&a, init)
 	*init = list(*init, a)
@@ -584,11 +563,7 @@ const (
 )
 
 func getdyn(n *Node, top int) int {
-	var nl *NodeList
-	var value *Node
-	var mode int
-
-	mode = 0
+	mode := 0
 	switch n.Op {
 	default:
 		if isliteral(n) {
@@ -606,7 +581,8 @@ func getdyn(n *Node, top int) int {
 		break
 	}
 
-	for nl = n.List; nl != nil; nl = nl.Next {
+	var value *Node
+	for nl := n.List; nl != nil; nl = nl.Next {
 		value = nl.N.Right
 		mode |= getdyn(value, 0)
 		if mode == MODEDYNAM|MODECONST {
@@ -620,11 +596,10 @@ func getdyn(n *Node, top int) int {
 func structlit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
 	var r *Node
 	var a *Node
-	var nl *NodeList
 	var index *Node
 	var value *Node
 
-	for nl = n.List; nl != nil; nl = nl.Next {
+	for nl := n.List; nl != nil; nl = nl.Next {
 		r = nl.N
 		if r.Op != OKEY {
 			Fatal("structlit: rhs not OKEY: %v", Nconv(r, 0))
@@ -688,11 +663,10 @@ func structlit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
 func arraylit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
 	var r *Node
 	var a *Node
-	var l *NodeList
 	var index *Node
 	var value *Node
 
-	for l = n.List; l != nil; l = l.Next {
+	for l := n.List; l != nil; l = l.Next {
 		r = l.N
 		if r.Op != OKEY {
 			Fatal("arraylit: rhs not OKEY: %v", Nconv(r, 0))
@@ -754,18 +728,8 @@ func arraylit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
 }
 
 func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
-	var r *Node
-	var a *Node
-	var l *NodeList
-	var t *Type
-	var vstat *Node
-	var vauto *Node
-	var index *Node
-	var value *Node
-	var mode int
-
 	// make an array type
-	t = shallow(n.Type)
+	t := shallow(n.Type)
 
 	t.Bound = Mpgetfix(n.Right.Val.U.Xval)
 	t.Width = 0
@@ -775,13 +739,13 @@ func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 
 	if ctxt != 0 {
 		// put everything into static array
-		vstat = staticname(t, ctxt)
+		vstat := staticname(t, ctxt)
 
 		arraylit(ctxt, 1, n, vstat, init)
 		arraylit(ctxt, 2, n, vstat, init)
 
 		// copy static to slice
-		a = Nod(OSLICE, vstat, Nod(OKEY, nil, nil))
+		a := Nod(OSLICE, vstat, Nod(OKEY, nil, nil))
 
 		a = Nod(OAS, var_, a)
 		typecheck(&a, Etop)
@@ -811,18 +775,19 @@ func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 
 	// if the literal contains constants,
 	// make static initialized array (1),(2)
-	vstat = nil
+	vstat := (*Node)(nil)
 
-	mode = getdyn(n, 1)
+	mode := getdyn(n, 1)
 	if mode&MODECONST != 0 {
 		vstat = staticname(t, ctxt)
 		arraylit(ctxt, 1, n, vstat, init)
 	}
 
 	// make new auto *array (3 declare)
-	vauto = temp(Ptrto(t))
+	vauto := temp(Ptrto(t))
 
+	var a *Node
 	// set auto to point at new temp or heap (3 assign)
 	if n.Alloc != nil {
 		// temp allocated during order.c for dddarg
 		n.Alloc.Type = t
@@ -873,7 +838,10 @@ func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 	*init = list(*init, a)
 
+	var value *Node
+	var r *Node
+	var index *Node
+
 	// put dynamics into slice (6)
-	for l = n.List; l != nil; l = l.Next {
+	for l := n.List; l != nil; l = l.Next {
 		r = l.N
 		if r.Op != OKEY {
 			Fatal("slicelit: rhs not OKEY: %v", Nconv(r, 0))
@@ -914,35 +882,22 @@ func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 
 func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 	var r *Node
-	var a *Node
-	var l *NodeList
-	var nerr int
-	var b int64
-	var t *Type
-	var tk *Type
-	var tv *Type
-	var t1 *Type
-	var vstat *Node
 	var index *Node
 	var value *Node
-	var key *Node
-	var val *Node
-	var syma *Sym
-	var symb *Sym
 
 	ctxt = 0
 
 	// make the map var
-	nerr = nerrors
+	nerr := nerrors
 
-	a = Nod(OMAKE, nil, nil)
+	a := Nod(OMAKE, nil, nil)
 	a.List = list1(typenod(n.Type))
 	litas(var_, a, init)
 
 	// count the initializers
-	b = 0
+	b := int64(0)
 
-	for l = n.List; l != nil; l = l.Next {
+	for l := n.List; l != nil; l = l.Next {
 		r = l.N
 
 		if r.Op != OKEY {
@@ -958,18 +913,18 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 
 	if b != 0 {
 		// build type [count]struct { a Tindex, b Tvalue }
-		t = n.Type
+		t := n.Type
 
-		tk = t.Down
-		tv = t.Type
+		tk := t.Down
+		tv := t.Type
 
-		symb = Lookup("b")
+		symb := Lookup("b")
 		t = typ(TFIELD)
 		t.Type = tv
 		t.Sym = symb
 
-		syma = Lookup("a")
-		t1 = t
+		syma := Lookup("a")
+		t1 := t
 		t = typ(TFIELD)
 		t.Type = tk
 		t.Sym = syma
@@ -987,10 +942,13 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 		dowidth(t)
 
 		// make and initialize static array
-		vstat = staticname(t, ctxt)
+		vstat := staticname(t, ctxt)
 
-		b = 0
-		for l = n.List; l != nil; l = l.Next {
+		b := int64(0)
+		var index *Node
+		var r *Node
+		var value *Node
+		for l := n.List; l != nil; l = l.Next {
 			r = l.N
 
 			if r.Op != OKEY {
@@ -1056,10 +1014,10 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 	}
 
 	// put in dynamic entries one-at-a-time
-	key = nil
+	key := (*Node)(nil)
 
-	val = nil
-	for l = n.List; l != nil; l = l.Next {
+	val := (*Node)(nil)
+	for l := n.List; l != nil; l = l.Next {
 		r = l.N
 
 		if r.Op != OKEY {
@@ -1109,12 +1067,7 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 }
 
 func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
-	var t *Type
-	var a *Node
-	var vstat *Node
-	var r *Node
-
-	t = n.Type
+	t := n.Type
 	switch n.Op {
 	default:
 		Fatal("anylit: not lit")
@@ -1124,6 +1077,7 @@ func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 			Fatal("anylit: not ptr")
 		}
 
+		var r *Node
 		if n.Right != nil {
 			r = Nod(OADDR, n.Right, nil)
 			typecheck(&r, Erv)
@@ -1135,7 +1089,7 @@ func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 		}
 
 		walkexpr(&r, init)
-		a = Nod(OAS, var_, r)
+		a := Nod(OAS, var_, r)
 
 		typecheck(&a, Etop)
 		*init = list(*init, a)
@@ -1152,12 +1106,12 @@ func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 		if simplename(var_) && count(n.List) > 4 {
 			if ctxt == 0 {
 				// lay out static data
-				vstat = staticname(t, ctxt)
+				vstat := staticname(t, ctxt)
 
 				structlit(ctxt, 1, n, vstat, init)
 
 				// copy static to var
-				a = Nod(OAS, var_, vstat)
+				a := Nod(OAS, var_, vstat)
 
 				typecheck(&a, Etop)
 				walkexpr(&a, init)
@@ -1176,7 +1130,7 @@ func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 
 		// initialize of not completely specified
 		if simplename(var_) || count(n.List) < structcount(t) {
-			a = Nod(OAS, var_, nil)
+			a := Nod(OAS, var_, nil)
 			typecheck(&a, Etop)
 			walkexpr(&a, init)
 			*init = list(*init, a)
@@ -1196,12 +1150,12 @@ func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 		if simplename(var_) && count(n.List) > 4 {
 			if ctxt == 0 {
 				// lay out static data
-				vstat = staticname(t, ctxt)
+				vstat := staticname(t, ctxt)
 
 				arraylit(1, 1, n, vstat, init)
 
 				// copy static to automatic
-				a = Nod(OAS, var_, vstat)
+				a := Nod(OAS, var_, vstat)
 
 				typecheck(&a, Etop)
 				walkexpr(&a, init)
@@ -1220,7 +1174,7 @@ func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
 
 		// initialize of not completely specified
 		if simplename(var_) || int64(count(n.List)) < t.Bound {
-			a = Nod(OAS, var_, nil)
+			a := Nod(OAS, var_, nil)
 			typecheck(&a, Etop)
 			walkexpr(&a, init)
 			*init = list(*init, a)
@@ -1289,8 +1243,6 @@ func getlit(lit *Node) int {
 }
 
 func stataddr(nam *Node, n *Node) bool {
-	var l int
-
 	if n == nil {
 		goto no
 	}
@@ -1315,7 +1267,7 @@ func stataddr(nam *Node, n *Node) bool {
 		if !stataddr(nam, n.Left) {
 			break
 		}
-		l = getlit(n.Right)
+		l := getlit(n.Right)
 		if l < 0 {
 			break
 		}
@@ -1334,21 +1286,18 @@ no:
 }
 
 func initplan(n *Node) {
-	var p *InitPlan
-	var a *Node
-	var l *NodeList
-
 	if n.Initplan != nil {
 		return
 	}
-	p = new(InitPlan)
+	p := new(InitPlan)
 	n.Initplan = p
 	switch n.Op {
 	default:
 		Fatal("initplan")
 
 	case OARRAYLIT:
-		for l = n.List; l != nil; l = l.Next {
+		var a *Node
+		for l := n.List; l != nil; l = l.Next {
 			a = l.N
 			if a.Op != OKEY || !Smallintconst(a.Left) {
 				Fatal("initplan arraylit")
@@ -1357,7 +1306,8 @@ func initplan(n *Node) {
 		}
 
 	case OSTRUCTLIT:
-		for l = n.List; l != nil; l = l.Next {
+		var a *Node
+		for l := n.List; l != nil; l = l.Next {
 			a = l.N
 			if a.Op != OKEY || a.Left.Type == nil {
 				Fatal("initplan structlit")
@@ -1366,7 +1316,8 @@ func initplan(n *Node) {
 		}
 
 	case OMAPLIT:
-		for l = n.List; l != nil; l = l.Next {
+		var a *Node
+		for l := n.List; l != nil; l = l.Next {
 			a = l.N
 			if a.Op != OKEY {
 				Fatal("initplan maplit")
@@ -1377,10 +1328,6 @@ func initplan(n *Node) {
 }
 
 func addvalue(p *InitPlan, xoffset int64, key *Node, n *Node) {
-	var i int
-	var q *InitPlan
-	var e *InitEntry
-
 	// special case: zero can be dropped entirely
 	if iszero(n) {
 		p.Zero += n.Type.Width
@@ -1390,8 +1337,9 @@ func addvalue(p *InitPlan, xoffset int64, key *Node, n *Node) {
 	// special case: inline struct and array (not slice) literals
 	if isvaluelit(n) {
 		initplan(n)
-		q = n.Initplan
-		for i = 0; i < len(q.E); i++ {
+		q := n.Initplan
+		var e *InitEntry
+		for i := 0; i < len(q.E); i++ {
 			e = entry(p)
 			*e = q.E[i]
 			e.Xoffset += xoffset
@@ -1407,14 +1355,12 @@ func addvalue(p *InitPlan, xoffset int64, key *Node, n *Node) {
 		p.Expr += n.Type.Width
 	}
 
-	e = entry(p)
+	e := entry(p)
 	e.Xoffset = xoffset
 	e.Expr = n
 }
 
 func iszero(n *Node) bool {
-	var l *NodeList
-
 	switch n.Op {
 	case OLITERAL:
 		switch n.Val.Ctype {
@@ -1450,7 +1396,7 @@ func iszero(n *Node) bool {
 
 		// fall through
 	case OSTRUCTLIT:
-		for l = n.List; l != nil; l = l.Next {
+		for l := n.List; l != nil; l = l.Next {
 			if !iszero(l.N.Right) {
 				return false
 			}
@@ -1483,6 +1429,7 @@ func gen_as_init(n *Node) bool {
 	nr = n.Right
 	nl = n.Left
 	if nr == nil {
+		var nam Node
 		if !stataddr(&nam, nl) {
 			goto no
 		}
diff --git a/src/cmd/internal/gc/subr.go b/src/cmd/internal/gc/subr.go
index dba7dc338fec01414bb1309d2fc32d133404156b..f7c075840015042d08ab993b81d70df4980fa96a 100644
--- a/src/cmd/internal/gc/subr.go
+++ b/src/cmd/internal/gc/subr.go
@@ -44,12 +44,10 @@ func parserline() int {
 }
 
 func adderrorname(n *Node) {
-	var old string
-
 	if n.Op != ODOT {
 		return
 	}
-	old = fmt.Sprintf("%v: undefined: %v\n", n.Line(), Nconv(n.Left, 0))
+	old := fmt.Sprintf("%v: undefined: %v\n", n.Line(), Nconv(n.Left, 0))
 	if len(errors) > 0 && int32(errors[len(errors)-1].lineno) == n.Lineno && errors[len(errors)-1].msg == old {
 		errors[len(errors)-1].msg = fmt.Sprintf("%v: undefined: %v in %v\n", n.Line(), Nconv(n.Left, 0), Nconv(n, 0))
 	}
@@ -74,11 +72,8 @@ func (x errcmp) Swap(i, j int) {
 }
 
 func (x errcmp) Less(i, j int) bool {
-	var a *Error
-	var b *Error
-
-	a = &x[i]
-	b = &x[j]
+	a := &x[i]
+	b := &x[j]
 	if a.lineno != b.lineno {
 		return a.lineno-b.lineno < 0
 	}
@@ -89,14 +84,12 @@ func (x errcmp) Less(i, j int) bool {
 }
 
 func Flusherrors() {
-	var i int
-
 	obj.Bflush(&bstdout)
 	if len(errors) == 0 {
 		return
 	}
 	sort.Sort(errcmp(errors[:len(errors)]))
-	for i = 0; i < len(errors); i++ {
+	for i := 0; i < len(errors); i++ {
 		if i == 0 || errors[i].msg != errors[i-1].msg {
 			fmt.Printf("%s", errors[i].msg)
 		}
@@ -134,8 +127,6 @@ var yychar_subr int
 var yyerror_lastsyntax int
 
 func Yyerror(fmt_ string, args ...interface{}) {
-	var i int
-
 	if fmt_ == "%s" && len(args) == 1 && args[0] == "syntax error" {
 		nsyntaxerrors++
 	}
@@ -169,7 +160,7 @@ func Yyerror(fmt_ string, args ...interface{}) {
 		}
 
 		// look for parse state-specific errors in list (see go.errors).
-		for i = 0; i < len(yymsg); i++ {
+		for i := 0; i < len(yymsg); i++ {
 			if yymsg[i].yystate == yystate && yymsg[i].yychar == yychar_subr {
 				yyerrorl(int(lexlineno), "syntax error: %s", yymsg[i].msg)
 				return
@@ -204,7 +195,6 @@ func Yyerror(fmt_ string, args ...interface{}) {
 }
 
 func Warn(fmt_ string, args ...interface{}) {
-
 	adderr(parserline(), fmt_, args)
 
 	hcrash()
@@ -218,7 +208,6 @@ func Warnl(line int, fmt_ string, args ...interface{}) {
 }
 
 func Fatal(fmt_ string, args ...interface{}) {
-
 	Flusherrors()
 
 	fmt.Printf("%v: internal compiler error: ", Ctxt.Line(int(lineno)))
@@ -259,9 +248,7 @@ func linehist(file string, off int32, relative int) {
 }
 
 func setlineno(n *Node) int32 {
-	var lno int32
-
-	lno = lineno
+	lno := lineno
 	if n != nil {
 		switch n.Op {
 		case ONAME,
@@ -285,10 +272,9 @@ func setlineno(n *Node) int32 {
 }
 
 func stringhash(p string) uint32 {
-	var h uint32
 	var c int
 
-	h = 0
+	h := uint32(0)
 	for {
 		c, p = intstarstringplusplus(p)
 		if c == 0 {
@@ -312,13 +298,9 @@ func Lookup(name string) *Sym {
 }
 
 func Pkglookup(name string, pkg *Pkg) *Sym {
-	var s *Sym
-	var h uint32
-	var c int
-
-	h = stringhash(name) % NHASH
-	c = int(name[0])
-	for s = hash[h]; s != nil; s = s.Link {
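+	// Search the bucket's chain for an existing symbol before allocating a new one.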
+	h := stringhash(name) % NHASH
+	c := int(name[0])
+	for s := hash[h]; s != nil; s = s.Link {
 		if int(s.Name[0]) != c || s.Pkg != pkg {
 			continue
 		}
@@ -327,7 +309,7 @@ func Pkglookup(name string, pkg *Pkg) *Sym {
 		}
 	}
 
-	s = new(Sym)
+	s := new(Sym)
 	s.Name = name
 
 	s.Pkg = pkg
@@ -351,12 +333,10 @@ func restrictlookup(name string, pkg *Pkg) *Sym {
 func importdot(opkg *Pkg, pack *Node) {
 	var s *Sym
 	var s1 *Sym
-	var h uint32
-	var n int
 	var pkgerror string
 
-	n = 0
-	for h = 0; h < NHASH; h++ {
+	n := 0
+	for h := uint32(0); h < NHASH; h++ {
 		for s = hash[h]; s != nil; s = s.Link {
 			if s.Pkg != opkg {
 				continue
@@ -389,14 +369,11 @@ func importdot(opkg *Pkg, pack *Node) {
 }
 
 func gethunk() {
-	var h string
-	var nh int32
-
-	nh = NHUNK
+	nh := int32(NHUNK)
 	if thunk >= 10*NHUNK {
 		nh = 10 * NHUNK
 	}
-	h = string(make([]byte, nh))
+	h := string(make([]byte, nh))
 	if h == "" {
 		Flusherrors()
 		Yyerror("out of memory")
@@ -409,9 +386,7 @@ func gethunk() {
 }
 
 func Nod(op int, nleft *Node, nright *Node) *Node {
-	var n *Node
-
-	n = new(Node)
+	n := new(Node)
 	n.Op = uint8(op)
 	n.Left = nleft
 	n.Right = nright
@@ -423,12 +398,10 @@ func Nod(op int, nleft *Node, nright *Node) *Node {
 }
 
 func saveorignode(n *Node) {
-	var norig *Node
-
 	if n.Orig != nil {
 		return
 	}
-	norig = Nod(int(n.Op), nil, nil)
+	norig := Nod(int(n.Op), nil, nil)
 	*norig = *n
 	n.Orig = norig
 }
@@ -447,10 +420,6 @@ func ispaddedfield(t *Type, total int64) bool {
 }
 
 func algtype1(t *Type, bad **Type) int {
-	var a int
-	var ret int
-	var t1 *Type
-
 	if bad != nil {
 		*bad = nil
 	}
@@ -523,7 +492,7 @@ func algtype1(t *Type, bad **Type) int {
 			return ANOEQ
 		}
 
-		a = algtype1(t.Type, bad)
+		a := algtype1(t.Type, bad)
 		if a == ANOEQ || a == AMEM {
 			if a == ANOEQ && bad != nil {
 				*bad = t
@@ -539,8 +508,9 @@ func algtype1(t *Type, bad **Type) int {
 			return algtype1(t.Type.Type, bad)
 		}
 
-		ret = AMEM
-		for t1 = t.Type; t1 != nil; t1 = t1.Down {
+		ret := AMEM
+		var a int
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
 			// All fields must be comparable.
 			a = algtype1(t1.Type, bad)
 
@@ -564,9 +534,7 @@ func algtype1(t *Type, bad **Type) int {
 }
 
 func algtype(t *Type) int {
-	var a int
-
-	a = algtype1(t, nil)
+	a := algtype1(t, nil)
 	if a == AMEM || a == ANOEQ {
 		if Isslice(t) {
 			return ASLICE
@@ -596,13 +564,10 @@ func algtype(t *Type) int {
 }
 
 func maptype(key *Type, val *Type) *Type {
-	var t *Type
-	var bad *Type
-	var atype int
-	var mtype int
-
 	if key != nil {
-		atype = algtype1(key, &bad)
+		var bad *Type
+		atype := algtype1(key, &bad)
+		var mtype int
 		if bad == nil {
 			mtype = int(key.Etype)
 		} else {
@@ -631,16 +596,14 @@ func maptype(key *Type, val *Type) *Type {
 		}
 	}
 
-	t = typ(TMAP)
+	t := typ(TMAP)
 	t.Down = key
 	t.Type = val
 	return t
 }
 
 func typ(et int) *Type {
-	var t *Type
-
-	t = new(Type)
+	t := new(Type)
 	t.Etype = uint8(et)
 	t.Width = BADWIDTH
 	t.Lineno = int(lineno)
@@ -659,12 +622,8 @@ func (x methcmp) Swap(i, j int) {
 }
 
 func (x methcmp) Less(i, j int) bool {
-	var a *Type
-	var b *Type
-	var k int
-
-	a = x[i]
-	b = x[j]
+	a := x[i]
+	b := x[j]
 	if a.Sym == nil && b.Sym == nil {
 		return false
 	}
@@ -674,12 +633,12 @@ func (x methcmp) Less(i, j int) bool {
 	if b.Sym == nil {
 		return 1 < 0
 	}
-	k = stringsCompare(a.Sym.Name, b.Sym.Name)
+	k := stringsCompare(a.Sym.Name, b.Sym.Name)
 	if k != 0 {
 		return k < 0
 	}
 	if !exportname(a.Sym.Name) {
-		k = stringsCompare(a.Sym.Pkg.Path.S, b.Sym.Pkg.Path.S)
+		k := stringsCompare(a.Sym.Pkg.Path.S, b.Sym.Pkg.Path.S)
 		if k != 0 {
 			return k < 0
 		}
@@ -689,20 +648,17 @@ func (x methcmp) Less(i, j int) bool {
 }
 
 func sortinter(t *Type) *Type {
-	var f *Type
-	var i int
-	var a []*Type
-
 	if t.Type == nil || t.Type.Down == nil {
 		return t
 	}
 
-	i = 0
-	for f = t.Type; f != nil; f = f.Down {
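+	// Count the interface's methods to size the slice for sorting.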
+	i := 0
+	for f := t.Type; f != nil; f = f.Down {
 		i++
 	}
-	a = make([]*Type, i)
+	a := make([]*Type, i)
 	i = 0
+	var f *Type
 	for f = t.Type; f != nil; f = f.Down {
 		a[i] = f
 		i++
@@ -723,9 +679,7 @@ func sortinter(t *Type) *Type {
 }
 
 func Nodintconst(v int64) *Node {
-	var c *Node
-
-	c = Nod(OLITERAL, nil, nil)
+	c := Nod(OLITERAL, nil, nil)
 	c.Addable = 1
 	c.Val.U.Xval = new(Mpint)
 	Mpmovecfix(c.Val.U.Xval, v)
@@ -736,9 +690,7 @@ func Nodintconst(v int64) *Node {
 }
 
 func nodfltconst(v *Mpflt) *Node {
-	var c *Node
-
-	c = Nod(OLITERAL, nil, nil)
+	c := Nod(OLITERAL, nil, nil)
 	c.Addable = 1
 	c.Val.U.Fval = new(Mpflt)
 	mpmovefltflt(c.Val.U.Fval, v)
@@ -764,18 +716,14 @@ func Nodconst(n *Node, t *Type, v int64) {
 }
 
 func nodnil() *Node {
-	var c *Node
-
-	c = Nodintconst(0)
+	c := Nodintconst(0)
 	c.Val.Ctype = CTNIL
 	c.Type = Types[TNIL]
 	return c
 }
 
 func Nodbool(b bool) *Node {
-	var c *Node
-
-	c = Nodintconst(0)
+	c := Nodintconst(0)
 	c.Val.Ctype = CTBOOL
 	c.Val.U.Bval = int16(bool2int(b))
 	c.Type = idealbool
@@ -783,10 +731,7 @@ func Nodbool(b bool) *Node {
 }
 
 func aindex(b *Node, t *Type) *Type {
-	var r *Type
-	var bound int64
-
-	bound = -1 // open bound
+	bound := int64(-1) // open bound
 	typecheck(&b, Erv)
 	if b != nil {
 		switch consttype(b) {
@@ -803,7 +748,7 @@ func aindex(b *Node, t *Type) *Type {
 	}
 
 	// fixed array
-	r = typ(TARRAY)
+	r := typ(TARRAY)
 
 	r.Type = t
 	r.Bound = bound
@@ -811,12 +756,11 @@ func aindex(b *Node, t *Type) *Type {
 }
 
 func treecopy(n *Node) *Node {
-	var m *Node
-
 	if n == nil {
 		return nil
 	}
 
+	var m *Node
 	switch n.Op {
 	default:
 		m = Nod(OXXX, nil, nil)
@@ -1023,8 +967,6 @@ func Eqtype(t1 *Type, t2 *Type) bool {
 }
 
 func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) bool {
-	var l TypePairList
-
 	if t1 == t2 {
 		return true
 	}
@@ -1053,6 +995,7 @@ func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) bool {
 	if onlist(assumed_equal, t1, t2) {
 		return true
 	}
+	var l TypePairList
 	l.next = assumed_equal
 	l.t1 = t1
 	l.t2 = t2
@@ -1159,10 +1102,6 @@ func eqtypenoname(t1 *Type, t2 *Type) bool {
 // If so, return op code to use in conversion.
 // If not, return 0.
 func assignop(src *Type, dst *Type, why *string) int {
-	var missing *Type
-	var have *Type
-	var ptr int
-
 	if why != nil {
 		*why = ""
 	}
@@ -1197,6 +1136,9 @@ func assignop(src *Type, dst *Type, why *string) int {
 
 	// 3. dst is an interface type and src implements dst.
 	if dst.Etype == TINTER && src.Etype != TNIL {
+		var missing *Type
+		var ptr int
+		var have *Type
 		if implements(src, dst, &missing, &have, &ptr) {
 			return OCONVIFACE
 		}
@@ -1233,6 +1175,9 @@ func assignop(src *Type, dst *Type, why *string) int {
 	}
 
 	if src.Etype == TINTER && dst.Etype != TBLANK {
+		var have *Type
+		var ptr int
+		var missing *Type
 		if why != nil && implements(dst, src, &missing, &have, &ptr) {
 			*why = ": need type assertion"
 		}
@@ -1281,8 +1226,6 @@ func assignop(src *Type, dst *Type, why *string) int {
 // If so, return op code to use in conversion (maybe OCONVNOP).
 // If not, return 0.
 func convertop(src *Type, dst *Type, why *string) int {
-	var op int
-
 	if why != nil {
 		*why = ""
 	}
@@ -1295,7 +1238,7 @@ func convertop(src *Type, dst *Type, why *string) int {
 	}
 
 	// 1. src can be assigned to dst.
-	op = assignop(src, dst, why)
+	op := assignop(src, dst, why)
 	if op != 0 {
 		return op
 	}
@@ -1381,11 +1324,6 @@ func convertop(src *Type, dst *Type, why *string) int {
 
 // Convert node n for assignment to type t.
 func assignconv(n *Node, t *Type, context string) *Node {
-	var op int
-	var r *Node
-	var old *Node
-	var why string
-
 	if n == nil || n.Type == nil || n.Type.Broke != 0 {
 		return n
 	}
@@ -1394,7 +1332,7 @@ func assignconv(n *Node, t *Type, context string) *Node {
 		Yyerror("use of untyped nil")
 	}
 
-	old = n
+	old := n
 	old.Diag++ // silence errors about n; we'll issue one below
 	defaultlit(&n, t)
 	old.Diag--
@@ -1406,7 +1344,7 @@ func assignconv(n *Node, t *Type, context string) *Node {
 	// if the next step is non-bool (like interface{}).
 	if n.Type == idealbool && t.Etype != TBOOL {
 		if n.Op == ONAME || n.Op == OLITERAL {
-			r = Nod(OCONVNOP, n, nil)
+			r := Nod(OCONVNOP, n, nil)
 			r.Type = Types[TBOOL]
 			r.Typecheck = 1
 			r.Implicit = 1
@@ -1418,13 +1356,14 @@ func assignconv(n *Node, t *Type, context string) *Node {
 		return n
 	}
 
-	op = assignop(n.Type, t, &why)
+	var why string
+	op := assignop(n.Type, t, &why)
 	if op == 0 {
 		Yyerror("cannot use %v as type %v in %s%s", Nconv(n, obj.FmtLong), Tconv(t, 0), context, why)
 		op = OCONV
 	}
 
-	r = Nod(op, n, nil)
+	r := Nod(op, n, nil)
 	r.Type = t
 	r.Typecheck = 1
 	r.Implicit = 1
@@ -1517,11 +1456,8 @@ func Is64(t *Type) bool {
  * Is a conversion between t1 and t2 a no-op?
  */
 func Noconv(t1 *Type, t2 *Type) bool {
-	var e1 int
-	var e2 int
-
-	e1 = int(Simtype[t1.Etype])
-	e2 = int(Simtype[t2.Etype])
+	e1 := int(Simtype[t1.Etype])
+	e2 := int(Simtype[t2.Etype])
 
 	switch e1 {
 	case TINT8,
@@ -1560,12 +1496,10 @@ func argtype(on *Node, t *Type) {
 }
 
 func shallow(t *Type) *Type {
-	var nt *Type
-
 	if t == nil {
 		return nil
 	}
-	nt = typ(0)
+	nt := typ(0)
 	*nt = *t
 	if t.Orig == t {
 		nt.Orig = nt
@@ -1574,13 +1508,11 @@ func shallow(t *Type) *Type {
 }
 
 func deep(t *Type) *Type {
-	var nt *Type
-	var xt *Type
-
 	if t == nil {
 		return nil
 	}
 
+	var nt *Type
 	switch t.Etype {
 	default:
 		nt = t // share from here down
@@ -1610,7 +1542,7 @@ func deep(t *Type) *Type {
 	case TSTRUCT:
 		nt = shallow(t)
 		nt.Type = shallow(t.Type)
-		xt = nt.Type
+		xt := nt.Type
 
 		for t = t.Type; t != nil; t = t.Down {
 			xt.Type = deep(t.Type)
@@ -1623,10 +1555,7 @@ func deep(t *Type) *Type {
 }
 
 func syslook(name string, copy int) *Node {
-	var s *Sym
-	var n *Node
-
-	s = Pkglookup(name, Runtimepkg)
+	s := Pkglookup(name, Runtimepkg)
 	if s == nil || s.Def == nil {
 		Fatal("syslook: can't find runtime.%s", name)
 	}
@@ -1635,7 +1564,7 @@ func syslook(name string, copy int) *Node {
 		return s.Def
 	}
 
-	n = Nod(0, nil, nil)
+	n := Nod(0, nil, nil)
 	*n = *s.Def
 	n.Type = deep(s.Def.Type)
 
@@ -1656,7 +1585,6 @@ func syslook(name string, copy int) *Node {
  */
 func typehash(t *Type) uint32 {
 	var p string
-	var d MD5
 
 	if t.Thistuple != 0 {
 		// hide method receiver from Tpretty
@@ -1669,6 +1597,7 @@ func typehash(t *Type) uint32 {
 	}
 
 	//print("typehash: %s\n", p);
+	var d MD5
 	md5reset(&d)
 
 	md5write(&d, []byte(p), len(p))
@@ -1677,12 +1606,10 @@ func typehash(t *Type) uint32 {
 }
 
 func Ptrto(t *Type) *Type {
-	var t1 *Type
-
 	if Tptr == 0 {
 		Fatal("ptrto: no tptr")
 	}
-	t1 = typ(Tptr)
+	t1 := typ(Tptr)
 	t1.Type = t
 	t1.Width = int64(Widthptr)
 	t1.Align = uint8(Widthptr)
@@ -1691,8 +1618,6 @@ func Ptrto(t *Type) *Type {
 
 func frame(context int) {
 	var l *NodeList
-	var n *Node
-	var w int64
 
 	if context != 0 {
 		fmt.Printf("--- external frame ---\n")
@@ -1704,6 +1629,8 @@ func frame(context int) {
 		return
 	}
 
+	var n *Node
+	var w int64
 	for ; l != nil; l = l.Next {
 		n = l.N
 		w = -1
@@ -1727,13 +1654,12 @@ func frame(context int) {
  * hardest side first to minimize registers.
  */
 func ullmancalc(n *Node) {
-	var ul int
-	var ur int
-
 	if n == nil {
 		return
 	}
 
+	var ul int
+	var ur int
 	if n.Ninit != nil {
 		ul = UINF
 		goto out
@@ -1788,10 +1714,7 @@ out:
 }
 
 func badtype(o int, tl *Type, tr *Type) {
-	var fmt_ string
-	var s string
-
-	fmt_ = ""
+	fmt_ := ""
 	if tl != nil {
 		fmt_ += fmt.Sprintf("\n\t%v", Tconv(tl, 0))
 	}
@@ -1808,7 +1731,7 @@ func badtype(o int, tl *Type, tr *Type) {
 		}
 	}
 
-	s = fmt_
+	s := fmt_
 	Yyerror("illegal types for operand: %v%s", Oconv(int(o), 0), s)
 }
 
@@ -1816,10 +1739,9 @@ func badtype(o int, tl *Type, tr *Type) {
  * iterator to walk a structure declaration
  */
 func Structfirst(s *Iter, nn **Type) *Type {
-	var n *Type
 	var t *Type
 
-	n = *nn
+	n := *nn
 	if n == nil {
 		goto bad
 	}
@@ -1854,11 +1776,8 @@ rnil:
 }
 
 func structnext(s *Iter) *Type {
-	var n *Type
-	var t *Type
-
-	n = s.T
-	t = n.Down
+	n := s.T
+	t := n.Down
 	if t == nil {
 		goto rnil
 	}
@@ -1907,9 +1826,7 @@ bad:
 }
 
 func funcnext(s *Iter) *Type {
-	var fp *Type
-
-	fp = structnext(s)
+	fp := structnext(s)
 	if fp == nil && s.Done == 0 {
 		s.Done = 1
 		fp = Structfirst(s, getinarg(s.Tfunc))
@@ -2004,10 +1921,6 @@ func Brrev(a int) int {
  * result is assignable if n is.
  */
 func safeexpr(n *Node, init **NodeList) *Node {
-	var l *Node
-	var r *Node
-	var a *Node
-
 	if n == nil {
 		return nil
 	}
@@ -2024,11 +1937,11 @@ func safeexpr(n *Node, init **NodeList) *Node {
 		return n
 
 	case ODOT:
-		l = safeexpr(n.Left, init)
+		l := safeexpr(n.Left, init)
 		if l == n.Left {
 			return n
 		}
-		r = Nod(OXXX, nil, nil)
+		r := Nod(OXXX, nil, nil)
 		*r = *n
 		r.Left = l
 		typecheck(&r, Erv)
@@ -2037,11 +1950,11 @@ func safeexpr(n *Node, init **NodeList) *Node {
 
 	case ODOTPTR,
 		OIND:
-		l = safeexpr(n.Left, init)
+		l := safeexpr(n.Left, init)
 		if l == n.Left {
 			return n
 		}
-		a = Nod(OXXX, nil, nil)
+		a := Nod(OXXX, nil, nil)
 		*a = *n
 		a.Left = l
 		walkexpr(&a, init)
@@ -2049,12 +1962,12 @@ func safeexpr(n *Node, init **NodeList) *Node {
 
 	case OINDEX,
 		OINDEXMAP:
-		l = safeexpr(n.Left, init)
-		r = safeexpr(n.Right, init)
+		l := safeexpr(n.Left, init)
+		r := safeexpr(n.Right, init)
 		if l == n.Left && r == n.Right {
 			return n
 		}
-		a = Nod(OXXX, nil, nil)
+		a := Nod(OXXX, nil, nil)
 		*a = *n
 		a.Left = l
 		a.Right = r
@@ -2070,11 +1983,8 @@ func safeexpr(n *Node, init **NodeList) *Node {
 }
 
 func copyexpr(n *Node, t *Type, init **NodeList) *Node {
-	var a *Node
-	var l *Node
-
-	l = temp(t)
-	a = Nod(OAS, l, n)
+	l := temp(t)
+	a := Nod(OAS, l, n)
 	typecheck(&a, Etop)
 	walkexpr(&a, init)
 	*init = list(*init, a)
@@ -2109,10 +2019,8 @@ func localexpr(n *Node, t *Type, init **NodeList) *Node {
 }
 
 func Setmaxarg(t *Type, extra int32) {
-	var w int64
-
 	dowidth(t)
-	w = t.Argwid
+	w := t.Argwid
 	if w >= Thearch.MAXWIDTH {
 		Fatal("bad argwid %v", Tconv(t, 0))
 	}
@@ -2138,18 +2046,14 @@ func Setmaxarg(t *Type, extra int32) {
 // return count of fields+methods
 // found with a given name
 func lookdot0(s *Sym, t *Type, save **Type, ignorecase int) int {
-	var f *Type
-	var u *Type
-	var c int
-
-	u = t
+	u := t
 	if Isptr[u.Etype] != 0 {
 		u = u.Type
 	}
 
-	c = 0
+	c := 0
 	if u.Etype == TSTRUCT || u.Etype == TINTER {
-		for f = u.Type; f != nil; f = f.Down {
+		for f := u.Type; f != nil; f = f.Down {
 			if f.Sym == s || (ignorecase != 0 && f.Type.Etype == TFUNC && f.Type.Thistuple > 0 && strings.EqualFold(f.Sym.Name, s.Name)) {
 				if save != nil {
 					*save = f
@@ -2161,7 +2065,7 @@ func lookdot0(s *Sym, t *Type, save **Type, ignorecase int) int {
 
 	u = methtype(t, 0)
 	if u != nil {
-		for f = u.Method; f != nil; f = f.Down {
+		for f := u.Method; f != nil; f = f.Down {
 			if f.Embedded == 0 && (f.Sym == s || (ignorecase != 0 && strings.EqualFold(f.Sym.Name, s.Name))) {
 				if save != nil {
 					*save = f
@@ -2180,16 +2084,14 @@ func lookdot0(s *Sym, t *Type, save **Type, ignorecase int) int {
 // answer is in dotlist array and
 // count of number of ways is returned.
 func adddot1(s *Sym, t *Type, d int, save **Type, ignorecase int) int {
-	var f *Type
-	var u *Type
-	var c int
-	var a int
-
 	if t.Trecur != 0 {
 		return 0
 	}
 	t.Trecur = 1
 
+	var c int
+	var u *Type
+	var a int
 	if d == 0 {
 		c = lookdot0(s, t, save, ignorecase)
 		goto out
@@ -2205,7 +2107,7 @@ func adddot1(s *Sym, t *Type, d int, save **Type, ignorecase int) int {
 	}
 
 	d--
-	for f = u.Type; f != nil; f = f.Down {
+	for f := u.Type; f != nil; f = f.Down {
 		if f.Embedded == 0 {
 			continue
 		}
@@ -2229,14 +2131,13 @@ out:
 // will give shortest unique addressing.
 // modify the tree with missing type names.
 func adddot(n *Node) *Node {
-	var t *Type
 	var s *Sym
 	var c int
 	var d int
 
 	typecheck(&n.Left, Etype|Erv)
 	n.Diag |= n.Left.Diag
-	t = n.Left.Type
+	t := n.Left.Type
 	if t == nil {
 		goto ret
 	}
@@ -2270,7 +2171,7 @@ out:
 	}
 
 	// rebuild elided dots
-	for c = d - 1; c >= 0; c-- {
+	for c := d - 1; c >= 0; c-- {
 		n.Left = Nod(ODOT, n.Left, newname(dotlist[c].field.Sym))
 	}
 
@@ -2298,18 +2199,15 @@ type Symlink struct {
 var slist *Symlink
 
 func expand0(t *Type, followptr int) {
-	var f *Type
-	var u *Type
-	var sl *Symlink
-
-	u = t
+	u := t
 	if Isptr[u.Etype] != 0 {
 		followptr = 1
 		u = u.Type
 	}
 
 	if u.Etype == TINTER {
-		for f = u.Type; f != nil; f = f.Down {
+		var sl *Symlink
+		for f := u.Type; f != nil; f = f.Down {
 			if f.Sym.Flags&SymUniq != 0 {
 				continue
 			}
@@ -2326,7 +2224,8 @@ func expand0(t *Type, followptr int) {
 
 	u = methtype(t, 0)
 	if u != nil {
-		for f = u.Method; f != nil; f = f.Down {
+		var sl *Symlink
+		for f := u.Method; f != nil; f = f.Down {
 			if f.Sym.Flags&SymUniq != 0 {
 				continue
 			}
@@ -2341,9 +2240,6 @@ func expand0(t *Type, followptr int) {
 }
 
 func expand1(t *Type, d int, followptr int) {
-	var f *Type
-	var u *Type
-
 	if t.Trecur != 0 {
 		return
 	}
@@ -2356,7 +2252,7 @@ func expand1(t *Type, d int, followptr int) {
 		expand0(t, followptr)
 	}
 
-	u = t
+	u := t
 	if Isptr[u.Etype] != 0 {
 		followptr = 1
 		u = u.Type
@@ -2366,7 +2262,7 @@ func expand1(t *Type, d int, followptr int) {
 		goto out
 	}
 
-	for f = u.Type; f != nil; f = f.Down {
+	for f := u.Type; f != nil; f = f.Down {
 		if f.Embedded == 0 {
 			continue
 		}
@@ -2381,17 +2277,13 @@ out:
 }
 
 func expandmeth(t *Type) {
-	var sl *Symlink
-	var f *Type
-	var c int
-	var d int
-
 	if t == nil || t.Xmethod != nil {
 		return
 	}
 
 	// mark top-level method symbols
 	// so that expand1 doesn't consider them.
+	var f *Type
 	for f = t.Method; f != nil; f = f.Down {
 		f.Sym.Flags |= SymUniq
 	}
@@ -2402,7 +2294,9 @@ func expandmeth(t *Type) {
 	expand1(t, len(dotlist)-1, 0)
 
 	// check each method to be uniquely reachable
-	for sl = slist; sl != nil; sl = sl.link {
+	var c int
+	var d int
+	for sl := slist; sl != nil; sl = sl.link {
 		sl.field.Sym.Flags &^= SymUniq
 		for d = 0; d < len(dotlist); d++ {
 			c = adddot1(sl.field.Sym, t, d, &f, 0)
@@ -2426,7 +2320,7 @@ func expandmeth(t *Type) {
 	}
 
 	t.Xmethod = t.Method
-	for sl = slist; sl != nil; sl = sl.link {
+	for sl := slist; sl != nil; sl = sl.link {
 		if sl.good != 0 {
 			// add it to the base type method list
 			f = typ(TFIELD)
@@ -2449,14 +2343,11 @@ func structargs(tl **Type, mustname int) *NodeList {
 	var savet Iter
 	var a *Node
 	var n *Node
-	var args *NodeList
-	var t *Type
 	var buf string
-	var gen int
 
-	args = nil
-	gen = 0
-	for t = Structfirst(&savet, tl); t != nil; t = structnext(&savet) {
+	args := (*NodeList)(nil)
+	gen := 0
+	for t := Structfirst(&savet, tl); t != nil; t = structnext(&savet) {
 		n = nil
 		if mustname != 0 && (t.Sym == nil || t.Sym.Name == "_") {
 			// invent a name so that we can refer to it in the trampoline
@@ -2505,23 +2396,6 @@ func structargs(tl **Type, mustname int) *NodeList {
 var genwrapper_linehistdone int = 0
 
 func genwrapper(rcvr *Type, method *Type, newnam *Sym, iface int) {
-	var this *Node
-	var fn *Node
-	var call *Node
-	var n *Node
-	var t *Node
-	var pad *Node
-	var dot *Node
-	var as *Node
-	var l *NodeList
-	var args *NodeList
-	var in *NodeList
-	var out *NodeList
-	var tpad *Type
-	var methodrcvr *Type
-	var isddd int
-	var v Val
-
 	if false && Debug['r'] != 0 {
 		fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", Tconv(rcvr, 0), Tconv(method, 0), Sconv(newnam, 0))
 	}
@@ -2538,31 +2412,31 @@ func genwrapper(rcvr *Type, method *Type, newnam *Sym, iface int) {
 	dclcontext = PEXTERN
 	markdcl()
 
-	this = Nod(ODCLFIELD, newname(Lookup(".this")), typenod(rcvr))
+	this := Nod(ODCLFIELD, newname(Lookup(".this")), typenod(rcvr))
 	this.Left.Ntype = this.Right
-	in = structargs(getinarg(method.Type), 1)
-	out = structargs(Getoutarg(method.Type), 0)
+	in := structargs(getinarg(method.Type), 1)
+	out := structargs(Getoutarg(method.Type), 0)
 
-	t = Nod(OTFUNC, nil, nil)
-	l = list1(this)
+	t := Nod(OTFUNC, nil, nil)
+	l := list1(this)
 	if iface != 0 && rcvr.Width < Types[Tptr].Width {
 		// We are building a method for an interface table and
 		// the receiver is smaller than the single pointer-sized
 		// word that the interface call will pass in.
 		// Add a dummy padding argument after the
 		// receiver to make up the difference.
-		tpad = typ(TARRAY)
+		tpad := typ(TARRAY)
 
 		tpad.Type = Types[TUINT8]
 		tpad.Bound = Types[Tptr].Width - rcvr.Width
-		pad = Nod(ODCLFIELD, newname(Lookup(".pad")), typenod(tpad))
+		pad := Nod(ODCLFIELD, newname(Lookup(".pad")), typenod(tpad))
 		l = list(l, pad)
 	}
 
 	t.List = concat(l, in)
 	t.Rlist = out
 
-	fn = Nod(ODCLFUNC, nil, nil)
+	fn := Nod(ODCLFUNC, nil, nil)
 	fn.Nname = newname(newnam)
 	fn.Nname.Defn = fn
 	fn.Nname.Ntype = t
@@ -2570,27 +2444,28 @@ func genwrapper(rcvr *Type, method *Type, newnam *Sym, iface int) {
 	funchdr(fn)
 
 	// arg list
-	args = nil
+	args := (*NodeList)(nil)
 
-	isddd = 0
-	for l = in; l != nil; l = l.Next {
+	isddd := 0
+	for l := in; l != nil; l = l.Next {
 		args = list(args, l.N.Left)
 		isddd = int(l.N.Left.Isddd)
 	}
 
-	methodrcvr = getthisx(method.Type).Type.Type
+	methodrcvr := getthisx(method.Type).Type.Type
 
 	// generate nil pointer check for better error
 	if Isptr[rcvr.Etype] != 0 && rcvr.Type == methodrcvr {
 		// generating wrapper from *T to T.
-		n = Nod(OIF, nil, nil)
+		n := Nod(OIF, nil, nil)
 
 		n.Ntest = Nod(OEQ, this.Left, nodnil())
 
 		// these strings are already in the reflect tables,
 		// so no space cost to use them here.
-		l = nil
+		l := (*NodeList)(nil)
 
+		var v Val
 		v.Ctype = CTSTR
 		v.U.Sval = newstrlit(rcvr.Type.Sym.Pkg.Name) // package name
 		l = list(l, nodlit(v))
@@ -2598,13 +2473,13 @@ func genwrapper(rcvr *Type, method *Type, newnam *Sym, iface int) {
 		l = list(l, nodlit(v))
 		v.U.Sval = newstrlit(method.Sym.Name)
 		l = list(l, nodlit(v)) // method name
-		call = Nod(OCALL, syslook("panicwrap", 0), nil)
+		call := Nod(OCALL, syslook("panicwrap", 0), nil)
 		call.List = l
 		n.Nbody = list1(call)
 		fn.Nbody = list(fn.Nbody, n)
 	}
 
-	dot = adddot(Nod(OXDOT, this.Left, newname(method.Sym)))
+	dot := adddot(Nod(OXDOT, this.Left, newname(method.Sym)))
 
 	// generate call
 	if flag_race == 0 && Isptr[rcvr.Etype] != 0 && Isptr[methodrcvr.Etype] != 0 && method.Embedded != 0 && !isifacemethod(method.Type) {
@@ -2613,19 +2488,19 @@ func genwrapper(rcvr *Type, method *Type, newnam *Sym, iface int) {
 		if Isptr[dotlist[0].field.Type.Etype] == 0 {
 			dot = Nod(OADDR, dot, nil)
 		}
-		as = Nod(OAS, this.Left, Nod(OCONVNOP, dot, nil))
+		as := Nod(OAS, this.Left, Nod(OCONVNOP, dot, nil))
 		as.Right.Type = rcvr
 		fn.Nbody = list(fn.Nbody, as)
-		n = Nod(ORETJMP, nil, nil)
+		n := Nod(ORETJMP, nil, nil)
 		n.Left = newname(methodsym(method.Sym, methodrcvr, 0))
 		fn.Nbody = list(fn.Nbody, n)
 	} else {
 		fn.Wrapper = 1 // ignore frame for panic+recover matching
-		call = Nod(OCALL, dot, nil)
+		call := Nod(OCALL, dot, nil)
 		call.List = args
 		call.Isddd = uint8(isddd)
 		if method.Type.Outtuple > 0 {
-			n = Nod(ORETURN, nil, nil)
+			n := Nod(ORETURN, nil, nil)
 			n.List = list1(call)
 			call = n
 		}
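Another annotation: a hand-written analogue of the wrapper this function builds for a value method promoted to a pointer receiver, to make the nil-check branch concrete. The panic string mirrors the shape runtime.panicwrap assembles from the package, type, and method names pushed onto l above; treat the exact wording as an approximation.

package main

import "fmt"

type T struct{ n int }

func (t T) N() int { return t.n }

// roughly what genwrapper emits for (*T).N: check the receiver for nil,
// then forward to the value method.
func wrapperN(this *T) int {
	if this == nil {
		// the real wrapper calls runtime.panicwrap with the package,
		// type, and method strings assembled above
		panic("value method main.T.N called using nil *T pointer")
	}
	return (*this).N()
}

func main() {
	fmt.Println(wrapperN(&T{n: 7})) // 7
}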
@@ -2662,15 +2537,11 @@ func genwrapper(rcvr *Type, method *Type, newnam *Sym, iface int) {
 }
 
 func hashmem(t *Type) *Node {
-	var tfn *Node
-	var n *Node
-	var sym *Sym
-
-	sym = Pkglookup("memhash", Runtimepkg)
+	sym := Pkglookup("memhash", Runtimepkg)
 
-	n = newname(sym)
+	n := newname(sym)
 	n.Class = PFUNC
-	tfn = Nod(OTFUNC, nil, nil)
+	tfn := Nod(OTFUNC, nil, nil)
 	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
 	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
 	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
@@ -2681,12 +2552,9 @@ func hashmem(t *Type) *Node {
 }
 
 func hashfor(t *Type) *Node {
-	var a int
 	var sym *Sym
-	var tfn *Node
-	var n *Node
 
-	a = algtype1(t, nil)
+	a := algtype1(t, nil)
 	switch a {
 	case AMEM:
 		Fatal("hashfor with AMEM type")
@@ -2716,9 +2584,9 @@ func hashfor(t *Type) *Node {
 		sym = typesymprefix(".hash", t)
 	}
 
-	n = newname(sym)
+	n := newname(sym)
 	n.Class = PFUNC
-	tfn = Nod(OTFUNC, nil, nil)
+	tfn := Nod(OTFUNC, nil, nil)
 	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
 	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
 	tfn.Rlist = list(tfn.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
@@ -2731,24 +2599,6 @@ func hashfor(t *Type) *Node {
  * Generate a helper function to compute the hash of a value of type t.
  */
 func genhash(sym *Sym, t *Type) {
-	var n *Node
-	var fn *Node
-	var np *Node
-	var nh *Node
-	var ni *Node
-	var call *Node
-	var nx *Node
-	var na *Node
-	var tfn *Node
-	var r *Node
-	var hashel *Node
-	var first *Type
-	var t1 *Type
-	var old_safemode int
-	var size int64
-	var mul int64
-	var offend int64
-
 	if Debug['r'] != 0 {
 		fmt.Printf("genhash %v %v\n", Sconv(sym, 0), Tconv(t, 0))
 	}
@@ -2758,19 +2608,19 @@ func genhash(sym *Sym, t *Type) {
 	markdcl()
 
 	// func sym(p *T, h uintptr) uintptr
-	fn = Nod(ODCLFUNC, nil, nil)
+	fn := Nod(ODCLFUNC, nil, nil)
 
 	fn.Nname = newname(sym)
 	fn.Nname.Class = PFUNC
-	tfn = Nod(OTFUNC, nil, nil)
+	tfn := Nod(OTFUNC, nil, nil)
 	fn.Nname.Ntype = tfn
 
-	n = Nod(ODCLFIELD, newname(Lookup("p")), typenod(Ptrto(t)))
+	n := Nod(ODCLFIELD, newname(Lookup("p")), typenod(Ptrto(t)))
 	tfn.List = list(tfn.List, n)
-	np = n.Left
+	np := n.Left
 	n = Nod(ODCLFIELD, newname(Lookup("h")), typenod(Types[TUINTPTR]))
 	tfn.List = list(tfn.List, n)
-	nh = n.Left
+	nh := n.Left
 	n = Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])) // return value
 	tfn.Rlist = list(tfn.Rlist, n)
 
@@ -2792,10 +2642,10 @@ func genhash(sym *Sym, t *Type) {
 		// An array of pure memory would be handled by the
 		// standard algorithm, so the element type must not be
 		// pure memory.
-		hashel = hashfor(t.Type)
+		hashel := hashfor(t.Type)
 
-		n = Nod(ORANGE, nil, Nod(OIND, np, nil))
-		ni = newname(Lookup("i"))
+		n := Nod(ORANGE, nil, Nod(OIND, np, nil))
+		ni := newname(Lookup("i"))
 		ni.Type = Types[TINT]
 		n.List = list1(ni)
 		n.Colas = 1
@@ -2809,6 +2659,7 @@ func genhash(sym *Sym, t *Type) {
 
 		// h *= mul
 		// Same multipliers as in runtime.memhash.
+		var mul int64
 		if Widthptr == 4 {
 			mul = 3267000013
 		} else {
@@ -2817,11 +2668,11 @@ func genhash(sym *Sym, t *Type) {
 		n.Nbody = list(n.Nbody, Nod(OAS, nh, Nod(OMUL, nh, Nodintconst(mul))))
 
 		// h = hashel(&p[i], h)
-		call = Nod(OCALL, hashel, nil)
+		call := Nod(OCALL, hashel, nil)
 
-		nx = Nod(OINDEX, np, ni)
+		nx := Nod(OINDEX, np, ni)
 		nx.Bounded = true
-		na = Nod(OADDR, nx, nil)
+		na := Nod(OADDR, nx, nil)
 		na.Etype = 1 // no escape to heap
 		call.List = list(call.List, na)
 		call.List = list(call.List, nh)
@@ -2832,10 +2683,15 @@ func genhash(sym *Sym, t *Type) {
 	// Walk the struct using memhash for runs of AMEM
 	// and calling specific hash functions for the others.
 	case TSTRUCT:
-		first = nil
-
-		offend = 0
-		for t1 = t.Type; ; t1 = t1.Down {
+		first := (*Type)(nil)
+
+		offend := int64(0)
+		var size int64
+		var call *Node
+		var nx *Node
+		var na *Node
+		var hashel *Node
+		for t1 := t.Type; ; t1 = t1.Down {
 			if t1 != nil && algtype1(t1.Type, nil) == AMEM && !isblanksym(t1.Sym) {
 				offend = t1.Width + t1.Type.Width
 				if first == nil {
@@ -2906,7 +2762,7 @@ func genhash(sym *Sym, t *Type) {
 		}
 	}
 
-	r = Nod(ORETURN, nil, nil)
+	r := Nod(ORETURN, nil, nil)
 	r.List = list(r.List, nh)
 	fn.Nbody = list(fn.Nbody, r)
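To make the TSTRUCT walk concrete, here is a hand-written version of the helper genhash would produce for a struct mixing a plain-memory run with a string field. The memhash and strhash below are simplified FNV-1a stand-ins declared locally, not the runtime's real helpers; only the shape is the point: one call per AMEM run, one per special field, with the seed threaded through.

package main

import (
	"fmt"
	"unsafe"
)

// simplified stand-ins for the runtime hash helpers (FNV-1a over bytes)
func memhash(p unsafe.Pointer, seed, size uintptr) uintptr {
	h := seed ^ 2166136261
	for i := uintptr(0); i < size; i++ {
		h = (h ^ uintptr(*(*byte)(unsafe.Add(p, i)))) * 16777619
	}
	return h
}

func strhash(s string, seed uintptr) uintptr {
	h := seed ^ 2166136261
	for i := 0; i < len(s); i++ {
		h = (h ^ uintptr(s[i])) * 16777619
	}
	return h
}

type S struct {
	A, B int32 // one AMEM run: hashed as a single 8-byte block
	C    string
}

// the shape of the code genhash emits for S
func hashS(p *S, h uintptr) uintptr {
	h = memhash(unsafe.Pointer(&p.A), h, 8) // covers A and B together
	h = strhash(p.C, h)
	return h
}

func main() {
	fmt.Println(hashS(&S{1, 2, "x"}, 0) == hashS(&S{1, 2, "x"}, 0)) // true
}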
 
@@ -2926,7 +2782,7 @@ func genhash(sym *Sym, t *Type) {
 	// In this case it can happen if we need to generate an ==
 	// for a struct containing a reflect.Value, which itself has
 	// an unexported field of type unsafe.Pointer.
-	old_safemode = safemode
+	old_safemode := safemode
 
 	safemode = 0
 	funccompile(fn)
@@ -2936,23 +2792,17 @@ func genhash(sym *Sym, t *Type) {
 // Return node for
 //	if p.field != q.field { return false }
 func eqfield(p *Node, q *Node, field *Node) *Node {
-	var nif *Node
-	var nx *Node
-	var ny *Node
-	var r *Node
-
-	nx = Nod(OXDOT, p, field)
-	ny = Nod(OXDOT, q, field)
-	nif = Nod(OIF, nil, nil)
+	nx := Nod(OXDOT, p, field)
+	ny := Nod(OXDOT, q, field)
+	nif := Nod(OIF, nil, nil)
 	nif.Ntest = Nod(ONE, nx, ny)
-	r = Nod(ORETURN, nil, nil)
+	r := Nod(ORETURN, nil, nil)
 	r.List = list(r.List, Nodbool(false))
 	nif.Nbody = list(nif.Nbody, r)
 	return nif
 }
 
 func eqmemfunc(size int64, type_ *Type, needsize *int) *Node {
-	var buf string
 	var fn *Node
 
 	switch size {
@@ -2965,7 +2815,7 @@ func eqmemfunc(size int64, type_ *Type, needsize *int) *Node {
 		4,
 		8,
 		16:
-		buf = fmt.Sprintf("memequal%d", int(size)*8)
+		buf := fmt.Sprintf("memequal%d", int(size)*8)
 		fn = syslook(buf, 1)
 		*needsize = 0
 	}
@@ -2978,31 +2828,26 @@ func eqmemfunc(size int64, type_ *Type, needsize *int) *Node {
 // Return node for
 //	if !memequal(&p.field, &q.field [, size]) { return false }
 func eqmem(p *Node, q *Node, field *Node, size int64) *Node {
-	var nif *Node
-	var nx *Node
-	var ny *Node
-	var call *Node
-	var r *Node
 	var needsize int
 
-	nx = Nod(OADDR, Nod(OXDOT, p, field), nil)
+	nx := Nod(OADDR, Nod(OXDOT, p, field), nil)
 	nx.Etype = 1 // does not escape
-	ny = Nod(OADDR, Nod(OXDOT, q, field), nil)
+	ny := Nod(OADDR, Nod(OXDOT, q, field), nil)
 	ny.Etype = 1 // does not escape
 	typecheck(&nx, Erv)
 	typecheck(&ny, Erv)
 
-	call = Nod(OCALL, eqmemfunc(size, nx.Type.Type, &needsize), nil)
+	call := Nod(OCALL, eqmemfunc(size, nx.Type.Type, &needsize), nil)
 	call.List = list(call.List, nx)
 	call.List = list(call.List, ny)
 	if needsize != 0 {
 		call.List = list(call.List, Nodintconst(size))
 	}
 
-	nif = Nod(OIF, nil, nil)
+	nif := Nod(OIF, nil, nil)
 	nif.Ninit = list(nif.Ninit, call)
 	nif.Ntest = Nod(ONOT, call, nil)
-	r = Nod(ORETURN, nil, nil)
+	r := Nod(ORETURN, nil, nil)
 	r.List = list(r.List, Nodbool(false))
 	nif.Nbody = list(nif.Nbody, r)
 	return nif
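The equality counterpart, written out by hand for a small struct: eqmem turns a memory-comparable run into a single memequal-style comparison (the byte-array cast below stands in for that runtime call), and eqfield turns the remaining fields into "if p.field != q.field { return false }" tests.

package main

import (
	"fmt"
	"unsafe"
)

type P struct {
	A, B int32
	S    string
}

func eqP(p, q *P) bool {
	// eqmem: one comparison over the 8-byte run A,B (stands in for memequal)
	if *(*[8]byte)(unsafe.Pointer(&p.A)) != *(*[8]byte)(unsafe.Pointer(&q.A)) {
		return false
	}
	// eqfield: if p.field != q.field { return false }
	if p.S != q.S {
		return false
	}
	return true
}

func main() {
	fmt.Println(eqP(&P{1, 2, "a"}, &P{1, 2, "a"})) // true
	fmt.Println(eqP(&P{1, 2, "a"}, &P{1, 3, "a"})) // false
}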
@@ -3012,23 +2857,6 @@ func eqmem(p *Node, q *Node, field *Node, size int64) *Node {
  * Generate a helper function to check equality of two values of type t.
  */
 func geneq(sym *Sym, t *Type) {
-	var n *Node
-	var fn *Node
-	var np *Node
-	var nq *Node
-	var tfn *Node
-	var nif *Node
-	var ni *Node
-	var nx *Node
-	var ny *Node
-	var nrange *Node
-	var r *Node
-	var t1 *Type
-	var first *Type
-	var old_safemode int
-	var size int64
-	var offend int64
-
 	if Debug['r'] != 0 {
 		fmt.Printf("geneq %v %v\n", Sconv(sym, 0), Tconv(t, 0))
 	}
@@ -3038,19 +2866,19 @@ func geneq(sym *Sym, t *Type) {
 	markdcl()
 
 	// func sym(p, q *T) bool
-	fn = Nod(ODCLFUNC, nil, nil)
+	fn := Nod(ODCLFUNC, nil, nil)
 
 	fn.Nname = newname(sym)
 	fn.Nname.Class = PFUNC
-	tfn = Nod(OTFUNC, nil, nil)
+	tfn := Nod(OTFUNC, nil, nil)
 	fn.Nname.Ntype = tfn
 
-	n = Nod(ODCLFIELD, newname(Lookup("p")), typenod(Ptrto(t)))
+	n := Nod(ODCLFIELD, newname(Lookup("p")), typenod(Ptrto(t)))
 	tfn.List = list(tfn.List, n)
-	np = n.Left
+	np := n.Left
 	n = Nod(ODCLFIELD, newname(Lookup("q")), typenod(Ptrto(t)))
 	tfn.List = list(tfn.List, n)
-	nq = n.Left
+	nq := n.Left
 	n = Nod(ODCLFIELD, nil, typenod(Types[TBOOL]))
 	tfn.Rlist = list(tfn.Rlist, n)
 
@@ -3073,9 +2901,9 @@ func geneq(sym *Sym, t *Type) {
 		// pure memory.  Even if we unrolled the range loop,
 		// each iteration would be a function call, so don't bother
 		// unrolling.
-		nrange = Nod(ORANGE, nil, Nod(OIND, np, nil))
+		nrange := Nod(ORANGE, nil, Nod(OIND, np, nil))
 
-		ni = newname(Lookup("i"))
+		ni := newname(Lookup("i"))
 		ni.Type = Types[TINT]
 		nrange.List = list1(ni)
 		nrange.Colas = 1
@@ -3083,15 +2911,15 @@ func geneq(sym *Sym, t *Type) {
 		ni = nrange.List.N
 
 		// if p[i] != q[i] { return false }
-		nx = Nod(OINDEX, np, ni)
+		nx := Nod(OINDEX, np, ni)
 
 		nx.Bounded = true
-		ny = Nod(OINDEX, nq, ni)
+		ny := Nod(OINDEX, nq, ni)
 		ny.Bounded = true
 
-		nif = Nod(OIF, nil, nil)
+		nif := Nod(OIF, nil, nil)
 		nif.Ntest = Nod(ONE, nx, ny)
-		r = Nod(ORETURN, nil, nil)
+		r := Nod(ORETURN, nil, nil)
 		r.List = list(r.List, Nodbool(false))
 		nif.Nbody = list(nif.Nbody, r)
 		nrange.Nbody = list(nrange.Nbody, nif)
@@ -3101,10 +2929,11 @@ func geneq(sym *Sym, t *Type) {
 	// and calling specific equality tests for the others.
 	// Skip blank-named fields.
 	case TSTRUCT:
-		first = nil
+		first := (*Type)(nil)
 
-		offend = 0
-		for t1 = t.Type; ; t1 = t1.Down {
+		offend := int64(0)
+		var size int64
+		for t1 := t.Type; ; t1 = t1.Down {
 			if t1 != nil && algtype1(t1.Type, nil) == AMEM && !isblanksym(t1.Sym) {
 				offend = t1.Width + t1.Type.Width
 				if first == nil {
@@ -3153,7 +2982,7 @@ func geneq(sym *Sym, t *Type) {
 	}
 
 	// return true
-	r = Nod(ORETURN, nil, nil)
+	r := Nod(ORETURN, nil, nil)
 
 	r.List = list(r.List, Nodbool(true))
 	fn.Nbody = list(fn.Nbody, r)
@@ -3174,7 +3003,7 @@ func geneq(sym *Sym, t *Type) {
 	// In this case it can happen if we need to generate an ==
 	// for a struct containing a reflect.Value, which itself has
 	// an unexported field of type unsafe.Pointer.
-	old_safemode = safemode
+	old_safemode := safemode
 
 	safemode = 0
 	funccompile(fn)
@@ -3182,18 +3011,16 @@ func geneq(sym *Sym, t *Type) {
 }
 
 func ifacelookdot(s *Sym, t *Type, followptr *int, ignorecase int) *Type {
-	var i int
-	var c int
-	var d int
-	var m *Type
-
 	*followptr = 0
 
 	if t == nil {
 		return nil
 	}
 
-	for d = 0; d < len(dotlist); d++ {
+	var m *Type
+	var i int
+	var c int
+	for d := 0; d < len(dotlist); d++ {
 		c = adddot1(s, t, d, &m, ignorecase)
 		if c > 1 {
 			Yyerror("%v.%v is ambiguous", Tconv(t, 0), Sconv(s, 0))
@@ -3221,14 +3048,7 @@ func ifacelookdot(s *Sym, t *Type, followptr *int, ignorecase int) *Type {
 }
 
 func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool {
-	var t0 *Type
-	var im *Type
-	var tm *Type
-	var rcvr *Type
-	var imtype *Type
-	var followptr int
-
-	t0 = t
+	t0 := t
 	if t == nil {
 		return false
 	}
@@ -3238,7 +3058,8 @@ func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool
 	// and then do one loop.
 
 	if t.Etype == TINTER {
-		for im = iface.Type; im != nil; im = im.Down {
+		var tm *Type
+		for im := iface.Type; im != nil; im = im.Down {
 			for tm = t.Type; tm != nil; tm = tm.Down {
 				if tm.Sym == im.Sym {
 					if Eqtype(tm.Type, im.Type) {
@@ -3265,7 +3086,11 @@ func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool
 	if t != nil {
 		expandmeth(t)
 	}
-	for im = iface.Type; im != nil; im = im.Down {
+	var tm *Type
+	var imtype *Type
+	var followptr int
+	var rcvr *Type
+	for im := iface.Type; im != nil; im = im.Down {
 		imtype = methodfunc(im.Type, nil)
 		tm = ifacelookdot(im.Sym, t, &followptr, 0)
 		if tm == nil || tm.Nointerface || !Eqtype(methodfunc(tm.Type, nil), imtype) {
@@ -3303,13 +3128,11 @@ func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool
  * all the invalid conversions (like ptr -> bool)
  */
 func Simsimtype(t *Type) int {
-	var et int
-
 	if t == nil {
 		return 0
 	}
 
-	et = int(Simtype[t.Etype])
+	et := int(Simtype[t.Etype])
 	switch et {
 	case TPTR32:
 		et = TUINT32
@@ -3339,21 +3162,19 @@ func concat(a *NodeList, b *NodeList) *NodeList {
 }
 
 func list1(n *Node) *NodeList {
-	var l *NodeList
-
 	if n == nil {
 		return nil
 	}
 	if n.Op == OBLOCK && n.Ninit == nil {
 		// Flatten list and steal storage.
 		// Poison pointer to catch errant uses.
-		l = n.List
+		l := n.List
 
 		n.List = nil
 		return l
 	}
 
-	l = new(NodeList)
+	l := new(NodeList)
 	l.N = n
 	l.End = l
 	return l
@@ -3364,16 +3185,12 @@ func list(l *NodeList, n *Node) *NodeList {
 }
 
 func listsort(l **NodeList, f func(*Node, *Node) int) {
-	var l1 *NodeList
-	var l2 *NodeList
-	var le *NodeList
-
 	if *l == nil || (*l).Next == nil {
 		return
 	}
 
-	l1 = *l
-	l2 = *l
+	l1 := *l
+	l2 := *l
 	for {
 		l2 = l2.Next
 		if l2 == nil {
@@ -3405,6 +3222,7 @@ func listsort(l **NodeList, f func(*Node, *Node) int) {
 
 	// now l1 == *l; and l1 < l2
 
+	var le *NodeList
 	for (l1 != nil) && (l2 != nil) {
 		for (l1.Next != nil) && f(l1.Next.N, l2.N) < 0 {
 			l1 = l1.Next
@@ -3427,9 +3245,7 @@ func listsort(l **NodeList, f func(*Node, *Node) int) {
 }
 
 func listtreecopy(l *NodeList) *NodeList {
-	var out *NodeList
-
-	out = nil
+	out := (*NodeList)(nil)
 	for ; l != nil; l = l.Next {
 		out = list(out, treecopy(l.N))
 	}
@@ -3437,9 +3253,7 @@ func listtreecopy(l *NodeList) *NodeList {
 }
 
 func liststmt(l *NodeList) *Node {
-	var n *Node
-
-	n = Nod(OBLOCK, nil, nil)
+	n := Nod(OBLOCK, nil, nil)
 	n.List = l
 	if l != nil {
 		n.Lineno = l.N.Lineno
@@ -3451,9 +3265,7 @@ func liststmt(l *NodeList) *Node {
  * return nelem of list
  */
 func count(l *NodeList) int {
-	var n int64
-
-	n = 0
+	n := int64(0)
 	for ; l != nil; l = l.Next {
 		n++
 	}
@@ -3468,10 +3280,9 @@ func count(l *NodeList) int {
  * return nelem of list
  */
 func structcount(t *Type) int {
-	var v int
 	var s Iter
 
-	v = 0
+	v := 0
 	for t = Structfirst(&s, &t); t != nil; t = structnext(&s) {
 		v++
 	}
@@ -3486,7 +3297,6 @@ func structcount(t *Type) int {
 func powtwo(n *Node) int {
 	var v uint64
 	var b uint64
-	var i int
 
 	if n == nil || n.Op != OLITERAL || n.Type == nil {
 		goto no
@@ -3497,7 +3307,7 @@ func powtwo(n *Node) int {
 
 	v = uint64(Mpgetfix(n.Val.U.Xval))
 	b = 1
-	for i = 0; i < 64; i++ {
+	for i := 0; i < 64; i++ {
 		if b == v {
 			return i
 		}
@@ -3510,7 +3320,7 @@ func powtwo(n *Node) int {
 
 	v = -v
 	b = 1
-	for i = 0; i < 64; i++ {
+	for i := 0; i < 64; i++ {
 		if b == v {
 			return i + 1000
 		}
@@ -3559,17 +3369,7 @@ func tounsigned(t *Type) *Type {
  * see hacker's delight chapter 10
  */
 func Smagic(m *Magic) {
-	var p int
-	var ad uint64
-	var anc uint64
-	var delta uint64
-	var q1 uint64
-	var r1 uint64
-	var q2 uint64
-	var r2 uint64
-	var t uint64
 	var mask uint64
-	var two31 uint64
 
 	m.Bad = 0
 	switch m.W {
@@ -3590,10 +3390,10 @@ func Smagic(m *Magic) {
 		mask = 0xffffffffffffffff
 	}
 
-	two31 = mask ^ (mask >> 1)
+	two31 := mask ^ (mask >> 1)
 
-	p = m.W - 1
-	ad = uint64(m.Sd)
+	p := m.W - 1
+	ad := uint64(m.Sd)
 	if m.Sd < 0 {
 		ad = -uint64(m.Sd)
 	}
@@ -3604,22 +3404,23 @@ func Smagic(m *Magic) {
 		return
 	}
 
-	t = two31
+	t := two31
 	ad &= mask
 
-	anc = t - 1 - t%ad
+	anc := t - 1 - t%ad
 	anc &= mask
 
-	q1 = two31 / anc
-	r1 = two31 - q1*anc
+	q1 := two31 / anc
+	r1 := two31 - q1*anc
 	q1 &= mask
 	r1 &= mask
 
-	q2 = two31 / ad
-	r2 = two31 - q2*ad
+	q2 := two31 / ad
+	r2 := two31 - q2*ad
 	q2 &= mask
 	r2 &= mask
 
+	var delta uint64
 	for {
 		p++
 		q1 <<= 1
@@ -3665,15 +3466,7 @@ func Smagic(m *Magic) {
  * see hacker's delight chapter 10
  */
 func Umagic(m *Magic) {
-	var p int
-	var nc uint64
-	var delta uint64
-	var q1 uint64
-	var r1 uint64
-	var q2 uint64
-	var r2 uint64
 	var mask uint64
-	var two31 uint64
 
 	m.Bad = 0
 	m.Ua = 0
@@ -3696,7 +3489,7 @@ func Umagic(m *Magic) {
 		mask = 0xffffffffffffffff
 	}
 
-	two31 = mask ^ (mask >> 1)
+	two31 := mask ^ (mask >> 1)
 
 	m.Ud &= mask
 	if m.Ud == 0 || m.Ud == two31 {
@@ -3704,19 +3497,20 @@ func Umagic(m *Magic) {
 		return
 	}
 
-	nc = mask - (-m.Ud&mask)%m.Ud
-	p = m.W - 1
+	nc := mask - (-m.Ud&mask)%m.Ud
+	p := m.W - 1
 
-	q1 = two31 / nc
-	r1 = two31 - q1*nc
+	q1 := two31 / nc
+	r1 := two31 - q1*nc
 	q1 &= mask
 	r1 &= mask
 
-	q2 = (two31 - 1) / m.Ud
-	r2 = (two31 - 1) - q2*m.Ud
+	q2 := (two31 - 1) / m.Ud
+	r2 := (two31 - 1) - q2*m.Ud
 	q2 &= mask
 	r2 &= mask
 
+	var delta uint64
 	for {
 		p++
 		if r1 >= nc-r1 {
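A worked instance, between hunks, of what Smagic and Umagic exist for: replacing x/d with a multiply and a shift. The constants below are derived by hand for d = 5 at width 32 (m = ceil(2^34/5) = 0xCCCCCCCD); the routines above derive the multiplier, shift, and fix-up flags for arbitrary divisors with the iterative Hacker's Delight method instead.

package main

import "fmt"

func main() {
	const d = 5
	const s = 2                       // extra shift chosen so the error bound holds for d = 5
	const m = (1<<(32+s) + d - 1) / d // ceil(2^34/5) = 0xCCCCCCCD
	for _, x := range []uint32{0, 1, 4, 5, 6, 99, 1<<31 + 7, 1<<32 - 1} {
		q := uint32(uint64(x) * m >> (32 + s))
		if q != x/d {
			panic("magic constant wrong")
		}
	}
	fmt.Printf("x/5 == (x*%#x)>>%d for all uint32 x\n", uint64(m), 32+s)
}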
@@ -3811,17 +3605,14 @@ escape:
 }
 
 func mkpkg(path_ *Strlit) *Pkg {
-	var p *Pkg
-	var h int
-
-	h = int(stringhash(path_.S) & uint32(len(phash)-1))
-	for p = phash[h]; p != nil; p = p.Link {
+	h := int(stringhash(path_.S) & uint32(len(phash)-1))
+	for p := phash[h]; p != nil; p = p.Link {
 		if p.Path.S == path_.S {
 			return p
 		}
 	}
 
-	p = new(Pkg)
+	p := new(Pkg)
 	p.Path = path_
 	p.Prefix = pathtoprefix(path_.S)
 	p.Link = phash[h]
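mkpkg is a small intern table: a fixed, power-of-two array of buckets with chained links, so each import path maps to exactly one *Pkg and pointer comparison suffices afterwards. A self-contained sketch of the same pattern, with gc's stringhash replaced by a local FNV-1a stand-in:

package main

import "fmt"

type pkg struct {
	path string
	link *pkg
}

var phash [128]*pkg // power of two, so the mask below is a cheap mod

func stringhash(s string) uint32 { // local stand-in (FNV-1a)
	h := uint32(2166136261)
	for i := 0; i < len(s); i++ {
		h = (h ^ uint32(s[i])) * 16777619
	}
	return h
}

func mkpkg(path string) *pkg {
	h := int(stringhash(path) & uint32(len(phash)-1))
	for p := phash[h]; p != nil; p = p.link {
		if p.path == path {
			return p
		}
	}
	p := &pkg{path: path, link: phash[h]}
	phash[h] = p
	return p
}

func main() {
	fmt.Println(mkpkg("fmt") == mkpkg("fmt")) // true: interned
}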
@@ -3836,13 +3627,11 @@ func newstrlit(s string) *Strlit {
 }
 
 func addinit(np **Node, init *NodeList) {
-	var n *Node
-
 	if init == nil {
 		return
 	}
 
-	n = *np
+	n := *np
 	switch n.Op {
 	// There may be multiple refs to this node;
 	// introduce OCONVNOP to hold init list.
@@ -3865,23 +3654,21 @@ var reservedimports = []string{
 }
 
 func isbadimport(path_ *Strlit) bool {
-	var i int
-	var s string
-	var r uint
-
-	if len(path_.S) != len(path_.S) {
-		Yyerror("import path contains NUL")
-		return true
-	}
+	// scan for an embedded NUL byte directly: the C original compared
+	// strlen(path->s) with the stored length, which the mechanical
+	// translation turned into an always-false self-comparison
+	for i := 0; i < len(path_.S); i++ {
+		if path_.S[i] == 0 {
+			Yyerror("import path contains NUL")
+			return true
+		}
+	}
 
-	for i = 0; i < len(reservedimports); i++ {
+	for i := 0; i < len(reservedimports); i++ {
 		if path_.S == reservedimports[i] {
 			Yyerror("import path \"%s\" is reserved and cannot be used", path_.S)
 			return true
 		}
 	}
 
-	_ = s
-	_ = r
 	for _, r := range path_.S {
 		if r == utf8.RuneError {
@@ -3914,14 +3701,12 @@ func isbadimport(path_ *Strlit) bool {
 }
 
 func checknil(x *Node, init **NodeList) {
-	var n *Node
-
 	if Isinter(x.Type) {
 		x = Nod(OITAB, x, nil)
 		typecheck(&x, Erv)
 	}
 
-	n = Nod(OCHECKNIL, x, nil)
+	n := Nod(OCHECKNIL, x, nil)
 	n.Typecheck = 1
 	*init = list(*init, n)
 }
diff --git a/src/cmd/internal/gc/swt.go b/src/cmd/internal/gc/swt.go
index 7c25041b461496fffcda03dfad0c457e281d168b..81eb56c3a6e0594a018c3769d12ad1e9b7527f1a 100644
--- a/src/cmd/internal/gc/swt.go
+++ b/src/cmd/internal/gc/swt.go
@@ -35,9 +35,7 @@ type Case struct {
 var C *Case
 
 func dumpcase(c0 *Case) {
-	var c *Case
-
-	for c = c0; c != nil; c = c.link {
+	for c := c0; c != nil; c = c.link {
 		switch c.type_ {
 		case Tdefault:
 			fmt.Printf("case-default\n")
@@ -104,11 +102,6 @@ func ordlcmp(c1 *Case, c2 *Case) int {
 }
 
 func exprcmp(c1 *Case, c2 *Case) int {
-	var ct int
-	var n int
-	var n1 *Node
-	var n2 *Node
-
 	// sort non-constants last
 	if c1.type_ != Texprconst {
 		return +1
@@ -117,11 +110,11 @@ func exprcmp(c1 *Case, c2 *Case) int {
 		return -1
 	}
 
-	n1 = c1.node.Left
-	n2 = c2.node.Left
+	n1 := c1.node.Left
+	n2 := c2.node.Left
 
 	// sort by type (for switches on interface)
-	ct = int(n1.Val.Ctype)
+	ct := int(n1.Val.Ctype)
 
 	if ct != int(n2.Val.Ctype) {
 		return ct - int(n2.Val.Ctype)
@@ -135,7 +128,7 @@ func exprcmp(c1 *Case, c2 *Case) int {
 	}
 
 	// sort by constant value
-	n = 0
+	n := 0
 
 	switch ct {
 	case CTFLT:
@@ -181,16 +174,12 @@ func typecmp(c1 *Case, c2 *Case) int {
 }
 
 func csort(l *Case, f func(*Case, *Case) int) *Case {
-	var l1 *Case
-	var l2 *Case
-	var le *Case
-
 	if l == nil || l.link == nil {
 		return l
 	}
 
-	l1 = l
-	l2 = l
+	l1 := l
+	l2 := l
 	for {
 		l2 = l2.link
 		if l2 == nil {
@@ -217,7 +206,7 @@ func csort(l *Case, f func(*Case, *Case) int) *Case {
 		l2 = l2.link
 	}
 
-	le = l
+	le := l
 
 	for {
 		if l1 == nil {
@@ -270,31 +259,24 @@ func newlabel_swt() *Node {
  * deal with fallthrough, break, unreachable statements
  */
 func casebody(sw *Node, typeswvar *Node) {
-	var n *Node
-	var c *Node
-	var last *Node
-	var def *Node
-	var cas *NodeList
-	var stat *NodeList
-	var l *NodeList
-	var lc *NodeList
-	var go_ *Node
-	var br *Node
-	var lno int32
-	var needvar bool
-
 	if sw.List == nil {
 		return
 	}
 
-	lno = setlineno(sw)
+	lno := setlineno(sw)
 
-	cas = nil  // cases
-	stat = nil // statements
-	def = nil  // defaults
-	br = Nod(OBREAK, nil, nil)
+	cas := (*NodeList)(nil)  // cases
+	stat := (*NodeList)(nil) // statements
+	def := (*Node)(nil)      // defaults
+	br := Nod(OBREAK, nil, nil)
 
-	for l = sw.List; l != nil; l = l.Next {
+	var c *Node
+	var go_ *Node
+	var needvar bool
+	var lc *NodeList
+	var last *Node
+	var n *Node
+	for l := sw.List; l != nil; l = l.Next {
 		n = l.N
 		setlineno(n)
 		if n.Op != OXCASE {
@@ -333,9 +315,7 @@ func casebody(sw *Node, typeswvar *Node) {
 
 		stat = list(stat, Nod(OLABEL, go_.Left, nil))
 		if typeswvar != nil && needvar && n.Nname != nil {
-			var l *NodeList
-
-			l = list1(Nod(ODCL, n.Nname, nil))
+			l := list1(Nod(ODCL, n.Nname, nil))
 			l = list(l, Nod(OAS, n.Nname, typeswvar))
 			typechecklist(l, Etop)
 			stat = concat(stat, l)
@@ -375,16 +355,12 @@ func casebody(sw *Node, typeswvar *Node) {
 
 func mkcaselist(sw *Node, arg int) *Case {
 	var n *Node
-	var c *Case
 	var c1 *Case
-	var c2 *Case
-	var l *NodeList
-	var ord int
 
-	c = nil
-	ord = 0
+	c := (*Case)(nil)
+	ord := 0
 
-	for l = sw.List; l != nil; l = l.Next {
+	for l := sw.List; l != nil; l = l.Next {
 		n = l.N
 		c1 = new(Case)
 		c1.link = c
@@ -444,7 +420,8 @@ func mkcaselist(sw *Node, arg int) *Case {
 	switch arg {
 	case Stype:
 		c = csort(c, typecmp)
-		for c1 = c; c1 != nil; c1 = c1.link {
+		var c2 *Case
+		for c1 := c; c1 != nil; c1 = c1.link {
 			for c2 = c1.link; c2 != nil && c2.hash == c1.hash; c2 = c2.link {
 				if c1.type_ == Ttypenil || c1.type_ == Tdefault {
 					break
@@ -463,7 +440,7 @@ func mkcaselist(sw *Node, arg int) *Case {
 		Strue,
 		Sfalse:
 		c = csort(c, exprcmp)
-		for c1 = c; c1.link != nil; c1 = c1.link {
+		for c1 := c; c1.link != nil; c1 = c1.link {
 			if exprcmp(c1, c1.link) != 0 {
 				continue
 			}
@@ -481,17 +458,12 @@ func mkcaselist(sw *Node, arg int) *Case {
 var exprname *Node
 
 func exprbsw(c0 *Case, ncase int, arg int) *Node {
-	var cas *NodeList
-	var a *Node
-	var n *Node
-	var c *Case
-	var i int
-	var half int
-	var lno int
-
-	cas = nil
+	cas := (*NodeList)(nil)
 	if ncase < Ncase {
-		for i = 0; i < ncase; i++ {
+		var a *Node
+		var n *Node
+		var lno int
+		for i := 0; i < ncase; i++ {
 			n = c0.node
 			lno = int(setlineno(n))
 
@@ -520,13 +492,13 @@ func exprbsw(c0 *Case, ncase int, arg int) *Node {
 	}
 
 	// find the middle and recur
-	c = c0
+	c := c0
 
-	half = ncase >> 1
-	for i = 1; i < half; i++ {
+	half := ncase >> 1
+	for i := 1; i < half; i++ {
 		c = c.link
 	}
-	a = Nod(OIF, nil, nil)
+	a := Nod(OIF, nil, nil)
 	a.Ntest = Nod(OLE, exprname, c.node.Left)
 	typecheck(&a.Ntest, Erv)
 	a.Nbody = list1(exprbsw(c0, half, arg))
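exprbsw's strategy, interpreted at run time rather than emitted as an AST: runs shorter than Ncase become a linear chain of equality tests, and anything longer splits at the middle case on x <= mid and recurses into each half. A minimal sketch under those assumptions (ncaseLinear stands in for the compiler's Ncase threshold; the case values must already be sorted, which csort guarantees):

package main

import "fmt"

const ncaseLinear = 4 // stand-in for the compiler's Ncase threshold

// dispatch returns the index of the matching case, or -1.
func dispatch(cases []int, x int) int {
	if len(cases) < ncaseLinear {
		for i, c := range cases { // linear chain of equality tests
			if x == c {
				return i
			}
		}
		return -1
	}
	half := len(cases) >> 1
	if x <= cases[half-1] { // the OLE test at the middle case
		return dispatch(cases[:half], x)
	}
	r := dispatch(cases[half:], x)
	if r < 0 {
		return r
	}
	return half + r
}

func main() {
	cases := []int{1, 3, 7, 9, 12, 20, 31, 44}
	fmt.Println(dispatch(cases, 12)) // 4
	fmt.Println(dispatch(cases, 5))  // -1
}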
@@ -539,19 +511,9 @@ func exprbsw(c0 *Case, ncase int, arg int) *Node {
  * rebuild case statements into if .. goto
  */
 func exprswitch(sw *Node) {
-	var def *Node
-	var cas *NodeList
-	var a *Node
-	var c0 *Case
-	var c *Case
-	var c1 *Case
-	var t *Type
-	var arg int
-	var ncase int
-
 	casebody(sw, nil)
 
-	arg = Snorm
+	arg := Snorm
 	if Isconst(sw.Ntest, CTBOOL) {
 		arg = Strue
 		if sw.Ntest.Val.U.Bval == 0 {
@@ -560,7 +522,7 @@ func exprswitch(sw *Node) {
 	}
 
 	walkexpr(&sw.Ntest, &sw.Ninit)
-	t = sw.Type
+	t := sw.Type
 	if t == nil {
 		return
 	}
@@ -570,7 +532,7 @@ func exprswitch(sw *Node) {
 	 */
 	exprname = nil
 
-	cas = nil
+	cas := (*NodeList)(nil)
 	if arg == Strue || arg == Sfalse {
 		exprname = Nodbool(arg == Strue)
 	} else if consttype(sw.Ntest) >= 0 {
@@ -582,7 +544,8 @@ func exprswitch(sw *Node) {
 		typechecklist(cas, Etop)
 	}
 
-	c0 = mkcaselist(sw, arg)
+	c0 := mkcaselist(sw, arg)
+	var def *Node
 	if c0 != nil && c0.type_ == Tdefault {
 		def = c0.node.Right
 		c0 = c0.link
@@ -590,6 +553,10 @@ func exprswitch(sw *Node) {
 		def = Nod(OBREAK, nil, nil)
 	}
 
+	var c *Case
+	var a *Node
+	var ncase int
+	var c1 *Case
 loop:
 	if c0 == nil {
 		cas = list(cas, def)
@@ -639,13 +606,8 @@ var facename *Node
 var boolname *Node
 
 func typeone(t *Node) *Node {
-	var init *NodeList
-	var a *Node
-	var b *Node
-	var var_ *Node
-
-	var_ = t.Nname
-	init = nil
+	var_ := t.Nname
+	init := (*NodeList)(nil)
 	if var_ == nil {
 		typecheck(&nblank, Erv|Easgn)
 		var_ = nblank
@@ -653,9 +615,9 @@ func typeone(t *Node) *Node {
 		init = list1(Nod(ODCL, var_, nil))
 	}
 
-	a = Nod(OAS2, nil, nil)
+	a := Nod(OAS2, nil, nil)
 	a.List = list(list1(var_), boolname) // var,bool =
-	b = Nod(ODOTTYPE, facename, nil)
+	b := Nod(ODOTTYPE, facename, nil)
 	b.Type = t.Left.Type // interface.(type)
 	a.Rlist = list1(b)
 	typecheck(&a, Etop)
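typeone's output per case, written as source: each case becomes the comma-ok form of a type assertion (the OAS2 of var, ok = iface.(T) built above) guarding the case body. For a two-case type switch the expansion looks like:

package main

import "fmt"

func describe(x interface{}) string {
	if s, ok := x.(string); ok { // case string:
		return "string " + s
	}
	if n, ok := x.(int); ok { // case int:
		return fmt.Sprintf("int %d", n)
	}
	return "default" // the def node
}

func main() {
	fmt.Println(describe("hi"), describe(3), describe(1.5))
}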
@@ -669,17 +631,12 @@ func typeone(t *Node) *Node {
 }
 
 func typebsw(c0 *Case, ncase int) *Node {
-	var cas *NodeList
-	var a *Node
-	var n *Node
-	var c *Case
-	var i int
-	var half int
-
-	cas = nil
+	cas := (*NodeList)(nil)
 
 	if ncase < Ncase {
-		for i = 0; i < ncase; i++ {
+		var n *Node
+		var a *Node
+		for i := 0; i < ncase; i++ {
 			n = c0.node
 			if c0.type_ != Ttypeconst {
 				Fatal("typebsw")
@@ -696,13 +653,13 @@ func typebsw(c0 *Case, ncase int) *Node {
 	}
 
 	// find the middle and recur
-	c = c0
+	c := c0
 
-	half = ncase >> 1
-	for i = 1; i < half; i++ {
+	half := ncase >> 1
+	for i := 1; i < half; i++ {
 		c = c.link
 	}
-	a = Nod(OIF, nil, nil)
+	a := Nod(OIF, nil, nil)
 	a.Ntest = Nod(OLE, hashname, Nodintconst(int64(c.hash)))
 	typecheck(&a.Ntest, Erv)
 	a.Nbody = list1(typebsw(c0, half))
@@ -716,18 +673,6 @@ func typebsw(c0 *Case, ncase int) *Node {
  * into if statements
  */
 func typeswitch(sw *Node) {
-	var def *Node
-	var cas *NodeList
-	var hash *NodeList
-	var a *Node
-	var n *Node
-	var c *Case
-	var c0 *Case
-	var c1 *Case
-	var ncase int
-	var t *Type
-	var v Val
-
 	if sw.Ntest == nil {
 		return
 	}
@@ -743,7 +688,7 @@ func typeswitch(sw *Node) {
 		return
 	}
 
-	cas = nil
+	cas := (*NodeList)(nil)
 
 	/*
 	 * predeclare temporary variables
@@ -751,7 +696,7 @@ func typeswitch(sw *Node) {
 	 */
 	facename = temp(sw.Ntest.Right.Type)
 
-	a = Nod(OAS, facename, sw.Ntest.Right)
+	a := Nod(OAS, facename, sw.Ntest.Right)
 	typecheck(&a, Etop)
 	cas = list(cas, a)
 
@@ -763,7 +708,7 @@ func typeswitch(sw *Node) {
 	hashname = temp(Types[TUINT32])
 	typecheck(&hashname, Erv)
 
-	t = sw.Ntest.Right.Type
+	t := sw.Ntest.Right.Type
 	if isnilinter(t) {
 		a = syslook("efacethash", 1)
 	} else {
@@ -776,7 +721,8 @@ func typeswitch(sw *Node) {
 	typecheck(&a, Etop)
 	cas = list(cas, a)
 
-	c0 = mkcaselist(sw, Stype)
+	c0 := mkcaselist(sw, Stype)
+	var def *Node
 	if c0 != nil && c0.type_ == Tdefault {
 		def = c0.node.Right
 		c0 = c0.link
@@ -787,7 +733,9 @@ func typeswitch(sw *Node) {
 	/*
 	 * insert if statement into each case block
 	 */
-	for c = c0; c != nil; c = c.link {
+	var v Val
+	var n *Node
+	for c := c0; c != nil; c = c.link {
 		n = c.node
 		switch c.type_ {
 		case Ttypenil:
@@ -807,6 +755,10 @@ func typeswitch(sw *Node) {
 	/*
 	 * generate list of if statements, binary search for constant sequences
 	 */
+	var ncase int
+	var c1 *Case
+	var hash *NodeList
+	var c *Case
 	for c0 != nil {
 		if c0.type_ != Ttypeconst {
 			n = c0.node
@@ -895,22 +847,11 @@ func walkswitch(sw *Node) {
  */
 func typecheckswitch(n *Node) {
 	var top int
-	var lno int
-	var ptr int
-	var nilonly string
 	var t *Type
-	var badtype *Type
-	var missing *Type
-	var have *Type
-	var l *NodeList
-	var ll *NodeList
-	var ncase *Node
-	var nvar *Node
-	var def *Node
 
-	lno = int(lineno)
+	lno := int(lineno)
 	typechecklist(n.Ninit, Etop)
-	nilonly = ""
+	nilonly := ""
 
 	if n.Ntest != nil && n.Ntest.Op == OTYPESW {
 		// type switch
@@ -933,6 +874,7 @@ func typecheckswitch(n *Node) {
 			t = Types[TBOOL]
 		}
 		if t != nil {
+			var badtype *Type
 			if okforeq[t.Etype] == 0 {
 				Yyerror("cannot switch on %v", Nconv(n.Ntest, obj.FmtLong))
 			} else if t.Etype == TARRAY && !Isfixedarray(t) {
@@ -951,8 +893,14 @@ func typecheckswitch(n *Node) {
 
 	n.Type = t
 
-	def = nil
-	for l = n.List; l != nil; l = l.Next {
+	def := (*Node)(nil)
+	var ptr int
+	var have *Type
+	var nvar *Node
+	var ll *NodeList
+	var missing *Type
+	var ncase *Node
+	for l := n.List; l != nil; l = l.Next {
 		ncase = l.N
 		setlineno(n)
 		if ncase.List == nil {
diff --git a/src/cmd/internal/gc/typecheck.go b/src/cmd/internal/gc/typecheck.go
index 3cd740850d0b5d89383e209f588ec59b94f4e276..9fa19300af8342acbffd96f92a97cd6a5b8a7081 100644
--- a/src/cmd/internal/gc/typecheck.go
+++ b/src/cmd/internal/gc/typecheck.go
@@ -24,10 +24,8 @@ var typecheckdefstack *NodeList
  * resolve ONONAME to definition, if any.
  */
 func resolve(n *Node) *Node {
-	var r *Node
-
 	if n != nil && n.Op == ONONAME && n.Sym != nil {
-		r = n.Sym.Def
+		r := n.Sym.Def
 		if r != nil {
 			if r.Op != OIOTA {
 				n = r
@@ -80,15 +78,12 @@ var _typekind = []string{
 var typekind_buf string
 
 func typekind(t *Type) string {
-	var et int
-	var s string
-
 	if Isslice(t) {
 		return "slice"
 	}
-	et = int(t.Etype)
+	et := int(t.Etype)
 	if 0 <= et && et < len(_typekind) {
-		s = _typekind[et]
+		s := _typekind[et]
 		if s != "" {
 			return s
 		}
@@ -104,9 +99,7 @@ func typekind(t *Type) string {
  * to print constant definition loops.
  */
 func sprint_depchain(fmt_ *string, stack *NodeList, cur *Node, first *Node) {
-	var l *NodeList
-
-	for l = stack; l != nil; l = l.Next {
+	for l := stack; l != nil; l = l.Next {
 		if l.N.Op == cur.Op {
 			if l.N != first {
 				sprint_depchain(fmt_, l.Next, l.N, first)
@@ -127,22 +120,17 @@ var typecheck_tcstack *NodeList
 var typecheck_tcfree *NodeList
 
 func typecheck(np **Node, top int) *Node {
-	var n *Node
-	var lno int
-	var fmt_ string
-	var l *NodeList
-
 	// cannot type check until all the source has been parsed
 	if typecheckok == 0 {
 		Fatal("early typecheck")
 	}
 
-	n = *np
+	n := *np
 	if n == nil {
 		return nil
 	}
 
-	lno = int(setlineno(n))
+	lno := int(setlineno(n))
 
 	// Skip over parens.
 	for n.Op == OPAREN {
@@ -173,6 +161,7 @@ func typecheck(np **Node, top int) *Node {
 	if n.Typecheck == 2 {
 		// Typechecking loop. Try printing a meaningful message,
 		// otherwise a stack trace of typechecking.
+		var fmt_ string
 		switch n.Op {
 		// We can already diagnose variables used as types.
 		case ONAME:
@@ -193,7 +182,7 @@ func typecheck(np **Node, top int) *Node {
 
 		if nsavederrors+nerrors == 0 {
 			fmt_ = ""
-			for l = typecheck_tcstack; l != nil; l = l.Next {
+			for l := typecheck_tcstack; l != nil; l = l.Next {
 				fmt_ += fmt.Sprintf("\n\t%v %v", l.N.Line(), Nconv(l.N, 0))
 			}
 			Yyerror("typechecking loop involving %v%s", Nconv(n, 0), fmt_)
@@ -205,6 +194,7 @@ func typecheck(np **Node, top int) *Node {
 
 	n.Typecheck = 2
 
+	var l *NodeList
 	if typecheck_tcfree != nil {
 		l = typecheck_tcfree
 		typecheck_tcfree = l.Next
@@ -270,9 +260,7 @@ func callrecvlist(l *NodeList) bool {
 // except for constants of numerical kind, which are acceptable
 // whenever they can be represented by a value of type int.
 func indexlit(np **Node) {
-	var n *Node
-
-	n = *np
+	n := *np
 	if n == nil || !isideal(n.Type) {
 		return
 	}
@@ -292,13 +280,11 @@ func typecheck1(np **Node, top int) {
 	var aop int
 	var op int
 	var ptr int
-	var n *Node
 	var l *Node
 	var r *Node
 	var lo *Node
 	var mid *Node
 	var hi *Node
-	var args *NodeList
 	var ok int
 	var ntop int
 	var t *Type
@@ -308,11 +294,9 @@ func typecheck1(np **Node, top int) {
 	var badtype *Type
 	var v Val
 	var why string
-	var desc string
-	var descbuf string
 	var x int64
 
-	n = *np
+	n := *np
 
 	if n.Sym != nil {
 		if n.Op == ONAME && n.Etype != 0 && top&Ecall == 0 {
@@ -1046,7 +1030,7 @@ reswitch:
 		}
 
 		defaultlit(&n.Left, nil)
-		l = n.Left
+		l := n.Left
 		if l.Op == OTYPE {
 			if n.Isddd != 0 || l.Type.Bound == -100 {
 				if l.Type.Broke == 0 {
@@ -1074,7 +1058,7 @@ reswitch:
 		} else {
 			typechecklist(n.List, Erv)
 		}
-		t = l.Type
+		t := l.Type
 		if t == nil {
 			goto error
 		}
@@ -1091,7 +1075,7 @@ reswitch:
 			// information further down the call chain to know if we
 			// were testing a method receiver for unexported fields.
 			// It isn't necessary, so just do a sanity check.
-			tp = getthisx(t).Type.Type
+			tp := getthisx(t).Type.Type
 
 			if l.Left == nil || !Eqtype(l.Left.Type, tp) {
 				Fatal("method receiver")
@@ -1105,8 +1089,8 @@ reswitch:
 			}
 		}
 
-		descbuf = fmt.Sprintf("argument to %v", Nconv(n.Left, 0))
-		desc = descbuf
+		descbuf := fmt.Sprintf("argument to %v", Nconv(n.Left, 0))
+		desc := descbuf
 		typecheckaste(OCALL, n.Left, int(n.Isddd), getinargx(t), n.List, desc)
 		ok |= Etop
 		if t.Outtuple == 0 {
@@ -1114,7 +1098,7 @@ reswitch:
 		}
 		ok |= Erv
 		if t.Outtuple == 1 {
-			t = getoutargx(l.Type).Type
+			t := getoutargx(l.Type).Type
 			if t == nil {
 				goto error
 			}
@@ -1145,8 +1129,8 @@ reswitch:
 		typecheck(&n.Left, Erv)
 		defaultlit(&n.Left, nil)
 		implicitstar(&n.Left)
-		l = n.Left
-		t = l.Type
+		l := n.Left
+		t := l.Type
 		if t == nil {
 			goto error
 		}
@@ -1167,7 +1151,7 @@ reswitch:
 				goto badcall1
 			}
 			if Isconst(l, CTCPLX) {
-				r = n
+				r := n
 				if n.Op == OREAL {
 					n = nodfltconst(&l.Val.U.Cval.Real)
 				} else {
@@ -1184,7 +1168,7 @@ reswitch:
 		switch t.Etype {
 		case TSTRING:
 			if Isconst(l, CTSTR) {
-				r = Nod(OXXX, nil, nil)
+				r := Nod(OXXX, nil, nil)
 				Nodconst(r, Types[TINT], int64(len(l.Val.U.Sval.S)))
 				r.Orig = n
 				n = r
@@ -1197,7 +1181,7 @@ reswitch:
 			if callrecv(l) { // has call or receive
 				break
 			}
-			r = Nod(OXXX, nil, nil)
+			r := Nod(OXXX, nil, nil)
 			Nodconst(r, Types[TINT], t.Bound)
 			r.Orig = n
 			n = r
@@ -1208,6 +1192,8 @@ reswitch:
 
 	case OCOMPLEX:
 		ok |= Erv
+		var r *Node
+		var l *Node
 		if count(n.List) == 1 {
 			typechecklist(n.List, Efnstruct)
 			if n.List.N.Op != OCALLFUNC && n.List.N.Op != OCALLMETH {
@@ -1215,7 +1201,7 @@ reswitch:
 				goto error
 			}
 
-			t = n.List.N.Left.Type
+			t := n.List.N.Left.Type
 			if t.Outtuple != 2 {
 				Yyerror("invalid operation: complex expects two arguments, %v returns %d results", Nconv(n.List.N, 0), t.Outtuple)
 				goto error
@@ -1246,6 +1232,7 @@ reswitch:
 			goto error
 		}
 
+		var t *Type
 		switch l.Type.Etype {
 		default:
-			Yyerror("invalid operation: %v (arguments have type %v, expected floating-point)", Nconv(n, 0), Tconv(l.Type, 0), r.Type)
+			Yyerror("invalid operation: %v (arguments have type %v, expected floating-point)", Nconv(n, 0), Tconv(l.Type, 0))
@@ -1278,8 +1265,8 @@ reswitch:
 		}
 		typecheck(&n.Left, Erv)
 		defaultlit(&n.Left, nil)
-		l = n.Left
-		t = l.Type
+		l := n.Left
+		t := l.Type
 		if t == nil {
 			goto error
 		}
@@ -1297,7 +1284,7 @@ reswitch:
 		goto ret
 
 	case ODELETE:
-		args = n.List
+		args := n.List
 		if args == nil {
 			Yyerror("missing arguments to delete")
 			goto error
@@ -1315,8 +1302,8 @@ reswitch:
 
 		ok |= Etop
 		typechecklist(args, Erv)
-		l = args.N
-		r = args.Next.N
+		l := args.N
+		r := args.Next.N
 		if l.Type != nil && l.Type.Etype != TMAP {
 			Yyerror("first argument to delete must be map; have %v", Tconv(l.Type, obj.FmtLong))
 			goto error
@@ -1327,7 +1314,7 @@ reswitch:
 
 	case OAPPEND:
 		ok |= Erv
-		args = n.List
+		args := n.List
 		if args == nil {
 			Yyerror("missing arguments to append")
 			goto error
@@ -1339,7 +1326,7 @@ reswitch:
 			typechecklist(args, Erv)
 		}
 
-		t = args.N.Type
+		t := args.N.Type
 		if t == nil {
 			goto error
 		}
@@ -1394,7 +1381,7 @@ reswitch:
 
 	case OCOPY:
 		ok |= Etop | Erv
-		args = n.List
+		args := n.List
 		if args == nil || args.Next == nil {
 			Yyerror("missing arguments to copy")
 			goto error
@@ -1452,17 +1439,17 @@ reswitch:
 
 	case OMAKE:
 		ok |= Erv
-		args = n.List
+		args := n.List
 		if args == nil {
 			Yyerror("missing argument to make")
 			goto error
 		}
 
 		n.List = nil
-		l = args.N
+		l := args.N
 		args = args.Next
 		typecheck(&l, Etype)
-		t = l.Type
+		t := l.Type
 		if t == nil {
 			goto error
 		}
@@ -1486,7 +1473,7 @@ reswitch:
 			l = args.N
 			args = args.Next
 			typecheck(&l, Erv)
-			r = nil
+			r := (*Node)(nil)
 			if args != nil {
 				r = args.N
 				args = args.Next
@@ -1496,7 +1483,7 @@ reswitch:
 			if l.Type == nil || (r != nil && r.Type == nil) {
 				goto error
 			}
-			et = bool2int(checkmake(t, "len", l) < 0)
+			et := bool2int(checkmake(t, "len", l) < 0)
 			et |= bool2int(r != nil && checkmake(t, "cap", r) < 0)
 			if et != 0 {
 				goto error
@@ -1559,15 +1546,15 @@ reswitch:
 
 	case ONEW:
 		ok |= Erv
-		args = n.List
+		args := n.List
 		if args == nil {
 			Yyerror("missing argument to new")
 			goto error
 		}
 
-		l = args.N
+		l := args.N
 		typecheck(&l, Etype)
-		t = l.Type
+		t := l.Type
 		if t == nil {
 			goto error
 		}
@@ -1584,7 +1571,7 @@ reswitch:
 		OPRINTN:
 		ok |= Etop
 		typechecklist(n.List, Erv|Eindir) // Eindir: address does not escape
-		for args = n.List; args != nil; args = args.Next {
+		for args := n.List; args != nil; args = args.Next {
 			// Special case for print: int constant is int64, not int.
 			if Isconst(args.N, CTINT) {
 				defaultlit(&args.N, Types[TINT64])
@@ -1628,7 +1615,7 @@ reswitch:
 	case OITAB:
 		ok |= Erv
 		typecheck(&n.Left, Erv)
-		t = n.Left.Type
+		t := n.Left.Type
 		if t == nil {
 			goto error
 		}
@@ -1641,7 +1628,7 @@ reswitch:
 	case OSPTR:
 		ok |= Erv
 		typecheck(&n.Left, Erv)
-		t = n.Left.Type
+		t := n.Left.Type
 		if t == nil {
 			goto error
 		}
@@ -1724,7 +1711,7 @@ reswitch:
 		decldepth++
 		typecheck(&n.Ntest, Erv)
 		if n.Ntest != nil {
-			t = n.Ntest.Type
+			t := n.Ntest.Type
 			if t != nil && t.Etype != TBOOL {
 				Yyerror("non-bool %v used as for condition", Nconv(n.Ntest, obj.FmtLong))
 			}
@@ -1739,7 +1726,7 @@ reswitch:
 		typechecklist(n.Ninit, Etop)
 		typecheck(&n.Ntest, Erv)
 		if n.Ntest != nil {
-			t = n.Ntest.Type
+			t := n.Ntest.Type
 			if t != nil && t.Etype != TBOOL {
 				Yyerror("non-bool %v used as if condition", Nconv(n.Ntest, obj.FmtLong))
 			}
@@ -2049,7 +2036,7 @@ doconv:
 	switch n.Op {
 	case OCONVNOP:
 		if n.Left.Op == OLITERAL && n.Type != Types[TBOOL] {
-			r = Nod(OXXX, nil, nil)
+			r := Nod(OXXX, nil, nil)
 			n.Op = OCONV
 			n.Orig = r
 			*r = *n
@@ -2135,9 +2122,7 @@ out:
 }
 
 func checksliceindex(l *Node, r *Node, tp *Type) int {
-	var t *Type
-
-	t = r.Type
+	t := r.Type
 	if t == nil {
 		return -1
 	}
@@ -2175,9 +2160,7 @@ func checksliceconst(lo *Node, hi *Node) int {
 }
 
 func checkdefergo(n *Node) {
-	var what string
-
-	what = "defer"
+	what := "defer"
 	if n.Op == OPROC {
 		what = "go"
 	}
@@ -2231,13 +2214,10 @@ func checkdefergo(n *Node) {
 }
 
 func implicitstar(nn **Node) {
-	var t *Type
-	var n *Node
-
 	// insert implicit * if needed for fixed array
-	n = *nn
+	n := *nn
 
-	t = n.Type
+	t := n.Type
 	if t == nil || Isptr[t.Etype] == 0 {
 		return
 	}
@@ -2255,19 +2235,17 @@ func implicitstar(nn **Node) {
 }
 
 func onearg(n *Node, f string, args ...interface{}) int {
-	var p string
-
 	if n.Left != nil {
 		return 0
 	}
 	if n.List == nil {
-		p = fmt.Sprintf(f, args...)
+		p := fmt.Sprintf(f, args...)
 		Yyerror("missing argument to %s: %v", p, Nconv(n, 0))
 		return -1
 	}
 
 	if n.List.Next != nil {
-		p = fmt.Sprintf(f, args...)
+		p := fmt.Sprintf(f, args...)
 		Yyerror("too many arguments to %s: %v", p, Nconv(n, 0))
 		n.Left = n.List.N
 		n.List = nil
@@ -2307,9 +2285,7 @@ func twoarg(n *Node) int {
 }
 
 func lookdot1(errnode *Node, s *Sym, t *Type, f *Type, dostrcmp int) *Type {
-	var r *Type
-
-	r = nil
+	r := (*Type)(nil)
 	for ; f != nil; f = f.Down {
 		if dostrcmp != 0 && f.Sym.Name == s.Name {
 			return f
@@ -2335,14 +2311,10 @@ func lookdot1(errnode *Node, s *Sym, t *Type, f *Type, dostrcmp int) *Type {
 }
 
 func looktypedot(n *Node, t *Type, dostrcmp int) bool {
-	var f1 *Type
-	var f2 *Type
-	var s *Sym
-
-	s = n.Right.Sym
+	s := n.Right.Sym
 
 	if t.Etype == TINTER {
-		f1 = lookdot1(n, s, t, t.Type, dostrcmp)
+		f1 := lookdot1(n, s, t, t.Type, dostrcmp)
 		if f1 == nil {
 			return false
 		}
@@ -2356,7 +2328,7 @@ func looktypedot(n *Node, t *Type, dostrcmp int) bool {
 
 	// Find the base type: methtype will fail if t
 	// is not of the form T or *T.
-	f2 = methtype(t, 0)
+	f2 := methtype(t, 0)
 
 	if f2 == nil {
 		return false
@@ -2389,21 +2361,15 @@ func derefall(t *Type) *Type {
 }
 
 func lookdot(n *Node, t *Type, dostrcmp int) bool {
-	var f1 *Type
-	var f2 *Type
-	var tt *Type
-	var rcvr *Type
-	var s *Sym
-
-	s = n.Right.Sym
+	s := n.Right.Sym
 
 	dowidth(t)
-	f1 = nil
+	f1 := (*Type)(nil)
 	if t.Etype == TSTRUCT || t.Etype == TINTER {
 		f1 = lookdot1(n, s, t, t.Type, dostrcmp)
 	}
 
-	f2 = nil
+	f2 := (*Type)(nil)
 	if n.Left.Type == t || n.Left.Type.Sym == nil {
 		f2 = methtype(t, 0)
 		if f2 != nil {
@@ -2437,9 +2403,9 @@ func lookdot(n *Node, t *Type, dostrcmp int) bool {
 	}
 
 	if f2 != nil {
-		tt = n.Left.Type
+		tt := n.Left.Type
 		dowidth(tt)
-		rcvr = getthisx(f2.Type).Type.Type
+		rcvr := getthisx(f2.Type).Type.Type
 		if !Eqtype(rcvr, tt) {
 			if int(rcvr.Etype) == Tptr && Eqtype(rcvr.Type, tt) {
 				checklvalue(n.Left, "call pointer method on")
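The rcvr/tt comparison above is the automatic receiver adjustment: a pointer method named through an addressable value gets an inserted address-of (hence the checklvalue call), and a value method named through a pointer gets an inserted dereference. In source terms:

package main

import "fmt"

type C struct{ n int }

func (c *C) Inc()    { c.n++ }
func (c C) Get() int { return c.n }

func main() {
	v := C{}
	v.Inc() // addressable value, pointer method: compiled as (&v).Inc()
	p := &C{n: 5}
	fmt.Println(p.Get()) // pointer, value method: compiled as (*p).Get()
	fmt.Println(v.n)     // 1
}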
@@ -2490,9 +2456,7 @@ func nokeys(l *NodeList) bool {
 }
 
 func hasddd(t *Type) bool {
-	var tl *Type
-
-	for tl = t.Type; tl != nil; tl = tl.Down {
+	for tl := t.Type; tl != nil; tl = tl.Down {
 		if tl.Isddd != 0 {
 			return true
 		}
@@ -2502,11 +2466,8 @@ func hasddd(t *Type) bool {
 }
 
 func downcount(t *Type) int {
-	var tl *Type
-	var n int
-
-	n = 0
-	for tl = t.Type; tl != nil; tl = tl.Down {
+	n := 0
+	for tl := t.Type; tl != nil; tl = tl.Down {
 		n++
 	}
 
@@ -2518,15 +2479,11 @@ func downcount(t *Type) int {
  */
 func typecheckaste(op int, call *Node, isddd int, tstruct *Type, nl *NodeList, desc string) {
 	var t *Type
-	var tl *Type
-	var tn *Type
 	var n *Node
-	var lno int
-	var why string
 	var n1 int
 	var n2 int
 
-	lno = int(lineno)
+	lno := int(lineno)
 
 	if tstruct.Broke != 0 {
 		goto out
@@ -2538,8 +2495,8 @@ func typecheckaste(op int, call *Node, isddd int, tstruct *Type, nl *NodeList, d
 		if n.Type != nil {
 			if n.Type.Etype == TSTRUCT && n.Type.Funarg != 0 {
 				if !hasddd(tstruct) {
-					n1 = downcount(tstruct)
-					n2 = downcount(n.Type)
+					n1 := downcount(tstruct)
+					n2 := downcount(n.Type)
 					if n2 > n1 {
 						goto toomany
 					}
@@ -2548,8 +2505,9 @@ func typecheckaste(op int, call *Node, isddd int, tstruct *Type, nl *NodeList, d
 					}
 				}
 
-				tn = n.Type.Type
-				for tl = tstruct.Type; tl != nil; tl = tl.Down {
+				tn := n.Type.Type
+				var why string
+				for tl := tstruct.Type; tl != nil; tl = tl.Down {
 					if tl.Isddd != 0 {
 						for ; tn != nil; tn = tn.Down {
 							if assignop(tn.Type, tl.Type.Type, &why) == 0 {
@@ -2610,7 +2568,7 @@ func typecheckaste(op int, call *Node, isddd int, tstruct *Type, nl *NodeList, d
 		}
 	}
 
-	for tl = tstruct.Type; tl != nil; tl = tl.Down {
+	for tl := tstruct.Type; tl != nil; tl = tl.Down {
 		t = tl.Type
 		if tl.Isddd != 0 {
 			if isddd != 0 {
@@ -2692,16 +2650,12 @@ toomany:
  * type check composite
  */
 func fielddup(n *Node, hash []*Node) {
-	var h uint
-	var s string
-	var a *Node
-
 	if n.Op != ONAME {
 		Fatal("fielddup: not ONAME")
 	}
-	s = n.Sym.Name
-	h = uint(stringhash(s) % uint32(len(hash)))
-	for a = hash[h]; a != nil; a = a.Ntest {
+	s := n.Sym.Name
+	h := uint(stringhash(s) % uint32(len(hash)))
+	for a := hash[h]; a != nil; a = a.Ntest {
 		if a.Sym.Name == s {
 			Yyerror("duplicate field name in struct literal: %s", s)
 			return
@@ -2713,16 +2667,7 @@ func fielddup(n *Node, hash []*Node) {
 }
 
 func keydup(n *Node, hash []*Node) {
-	var h uint
-	var b uint32
-	var d float64
-	var i int
-	var a *Node
-	var orign *Node
-	var cmp Node
-	var s string
-
-	orign = n
+	orign := n
 	if n.Op == OCONVIFACE {
 		n = n.Left
 	}
@@ -2731,6 +2676,7 @@ func keydup(n *Node, hash []*Node) {
 		return // we don't check variables
 	}
 
+	var b uint32
 	switch n.Val.Ctype {
 	default: // unknown, bool, nil
 		b = 23
@@ -2740,7 +2686,7 @@ func keydup(n *Node, hash []*Node) {
 		b = uint32(Mpgetfix(n.Val.U.Xval))
 
 	case CTFLT:
-		d = mpgetflt(n.Val.U.Fval)
+		d := mpgetflt(n.Val.U.Fval)
 		x := math.Float64bits(d)
 		for i := 0; i < 8; i++ {
 			b = b*PRIME1 + uint32(x&0xFF)
@@ -2749,16 +2695,16 @@ func keydup(n *Node, hash []*Node) {
 
 	case CTSTR:
 		b = 0
-		s = n.Val.U.Sval.S
-		for i = len(n.Val.U.Sval.S); i > 0; i-- {
+		s := n.Val.U.Sval.S
+		for i := len(n.Val.U.Sval.S); i > 0; i-- {
 			b = b*PRIME1 + uint32(s[0])
 			s = s[1:]
 		}
 	}
 
-	h = uint(b % uint32(len(hash)))
-	cmp = Node{}
-	for a = hash[h]; a != nil; a = a.Ntest {
+	h := uint(b % uint32(len(hash)))
+	cmp := Node{}
+	for a := hash[h]; a != nil; a = a.Ntest {
 		cmp.Op = OEQ
 		cmp.Left = n
 		b = 0
@@ -2785,18 +2731,14 @@ func keydup(n *Node, hash []*Node) {
 }
 
 func indexdup(n *Node, hash []*Node) {
-	var h uint
-	var a *Node
-	var b uint32
-	var c uint32
-
 	if n.Op != OLITERAL {
 		Fatal("indexdup: not OLITERAL")
 	}
 
-	b = uint32(Mpgetfix(n.Val.U.Xval))
-	h = uint(b % uint32(len(hash)))
-	for a = hash[h]; a != nil; a = a.Ntest {
+	b := uint32(Mpgetfix(n.Val.U.Xval))
+	h := uint(b % uint32(len(hash)))
+	var c uint32
+	for a := hash[h]; a != nil; a = a.Ntest {
 		c = uint32(Mpgetfix(a.Val.U.Xval))
 		if b == c {
 			Yyerror("duplicate index in array literal: %d", b)
@@ -2809,9 +2751,7 @@ func indexdup(n *Node, hash []*Node) {
 }
 
 func prime(h uint32, sr uint32) bool {
-	var n uint32
-
-	for n = 3; n <= sr; n += 2 {
+	for n := uint32(3); n <= sr; n += 2 {
 		if h%n == 0 {
 			return false
 		}
@@ -2820,15 +2760,10 @@ func prime(h uint32, sr uint32) bool {
 }
 
 func inithash(n *Node, autohash []*Node) []*Node {
-	var h uint32
-	var sr uint32
-	var ll *NodeList
-	var i int
-
 	// count the number of entries
-	h = 0
+	h := uint32(0)
 
-	for ll = n.List; ll != nil; ll = ll.Next {
+	for ll := n.List; ll != nil; ll = ll.Next {
 		h++
 	}
 
@@ -2847,9 +2782,9 @@ func inithash(n *Node, autohash []*Node) []*Node {
 	h |= 1
 
 	// calculate sqrt of h
-	sr = h / 2
+	sr := h / 2
 
-	for i = 0; i < 5; i++ {
+	for i := 0; i < 5; i++ {
 		sr = (sr + h/sr) / 2
 	}
 
@@ -2900,25 +2835,14 @@ func pushtype(n *Node, t *Type) {
 }
 
 func typecheckcomplit(np **Node) {
-	var bad int
-	var i int
 	var nerr int
-	var length int64
 	var l *Node
-	var n *Node
 	var norig *Node
 	var r *Node
-	var hash []*Node
-	var ll *NodeList
 	var t *Type
-	var f *Type
-	var s *Sym
-	var s1 *Sym
-	var lno int32
-	var autohash [101]*Node
 
-	n = *np
-	lno = lineno
+	n := *np
+	lno := lineno
 
 	if n.Right == nil {
 		if n.List != nil {
@@ -2965,11 +2889,13 @@ func typecheckcomplit(np **Node) {
 		n.Type = nil
 
 	case TARRAY:
-		hash = inithash(n, autohash[:])
+		var autohash [101]*Node
+		hash := inithash(n, autohash[:])
 
-		length = 0
-		i = 0
-		for ll = n.List; ll != nil; ll = ll.Next {
+		length := int64(0)
+		i := 0
+		var l *Node
+		for ll := n.List; ll != nil; ll = ll.Next {
 			l = ll.N
 			setlineno(l)
 			if l.Op != OKEY {
@@ -3017,9 +2943,11 @@ func typecheckcomplit(np **Node) {
 		n.Op = OARRAYLIT
 
 	case TMAP:
-		hash = inithash(n, autohash[:])
+		var autohash [101]*Node
+		hash := inithash(n, autohash[:])
 
-		for ll = n.List; ll != nil; ll = ll.Next {
+		var l *Node
+		for ll := n.List; ll != nil; ll = ll.Next {
 			l = ll.N
 			setlineno(l)
 			if l.Op != OKEY {
@@ -3045,12 +2973,13 @@ func typecheckcomplit(np **Node) {
 		n.Op = OMAPLIT
 
 	case TSTRUCT:
-		bad = 0
+		bad := 0
 		if n.List != nil && nokeys(n.List) {
 			// simple list of variables
-			f = t.Type
+			f := t.Type
 
-			for ll = n.List; ll != nil; ll = ll.Next {
+			var s *Sym
+			for ll := n.List; ll != nil; ll = ll.Next {
 				setlineno(ll.N)
 				typecheck(&ll.N, Erv)
 				if f == nil {
@@ -3080,10 +3009,15 @@ func typecheckcomplit(np **Node) {
 				Yyerror("too few values in struct initializer")
 			}
 		} else {
-			hash = inithash(n, autohash[:])
+			var autohash [101]*Node
+			hash := inithash(n, autohash[:])
 
 			// keyed list
-			for ll = n.List; ll != nil; ll = ll.Next {
+			var s *Sym
+			var f *Type
+			var l *Node
+			var s1 *Sym
+			for ll := n.List; ll != nil; ll = ll.Next {
 				l = ll.N
 				setlineno(l)
 				if l.Op != OKEY {
@@ -3201,12 +3135,10 @@ func checklvalue(n *Node, verb string) {
 }
 
 func checkassign(stmt *Node, n *Node) {
-	var r *Node
-	var l *Node
-
 	// Variables declared in ORANGE are assigned on every iteration.
 	if n.Defn != stmt || stmt.Op == ORANGE {
-		r = outervalue(n)
+		r := outervalue(n)
+		var l *Node
 		for l = n; l != r; l = l.Left {
 			l.Assigned = 1
 			if l.Closure != nil {
@@ -3345,14 +3277,8 @@ func checkassignto(src *Type, dst *Node) {
 }
 
 func typecheckas2(n *Node) {
-	var cl int
-	var cr int
 	var ll *NodeList
 	var lr *NodeList
-	var l *Node
-	var r *Node
-	var s Iter
-	var t *Type
 
 	for ll = n.List; ll != nil; ll = ll.Next {
 		// delicate little dance.
@@ -3363,8 +3289,8 @@ func typecheckas2(n *Node) {
 		}
 	}
 
-	cl = count(n.List)
-	cr = count(n.Rlist)
+	cl := count(n.List)
+	cr := count(n.Rlist)
 	if cl > 1 && cr == 1 {
 		typecheck(&n.Rlist.N, Erv|Efnstruct)
 	} else {
@@ -3372,6 +3298,8 @@ func typecheckas2(n *Node) {
 	}
 	checkassignlist(n, n.List)
 
+	var l *Node
+	var r *Node
 	if cl == cr {
 		// easy
 		ll = n.List
@@ -3409,7 +3337,8 @@ func typecheckas2(n *Node) {
 				goto mismatch
 			}
 			n.Op = OAS2FUNC
-			t = Structfirst(&s, &r.Type)
+			var s Iter
+			t := Structfirst(&s, &r.Type)
 			for ll = n.List; ll != nil; ll = ll.Next {
 				if t.Type != nil && ll.N.Type != nil {
 					checkassignto(t.Type, ll.N)
@@ -3451,7 +3380,7 @@ func typecheckas2(n *Node) {
 			if l.Defn == n {
 				l.Type = r.Type
 			}
-			l = n.List.Next.N
+			l := n.List.Next.N
 			if l.Type != nil && l.Type.Etype != TBOOL {
 				checkassignto(Types[TBOOL], l)
 			}
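The cl/cr counting and the OAS2FUNC, OAS2MAPR, OAS2RECV, and OAS2DOTTYPE cases distinguish these source forms; in each comma-ok shape the second variable is checked against bool, which is the nearby checkassignto(Types[TBOOL], l). The forms side by side:

package main

import "fmt"

func pair() (int, string) { return 1, "a" }

func main() {
	a, b := pair() // cl=2, cr=1, function call: OAS2FUNC
	m := map[string]int{"k": 9}
	v, ok := m["k"] // OAS2MAPR: ok must be assignable to bool
	var i interface{} = 7
	n, ok2 := i.(int) // OAS2DOTTYPE
	ch := make(chan int, 1)
	ch <- 3
	r, ok3 := <-ch // OAS2RECV
	fmt.Println(a, b, v, ok, n, ok2, r, ok3)
}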
@@ -3480,23 +3409,19 @@ out:
  * type check function definition
  */
 func typecheckfunc(n *Node) {
-	var t *Type
-	var rcvr *Type
-	var l *NodeList
-
 	typecheck(&n.Nname, Erv|Easgn)
-	t = n.Nname.Type
+	t := n.Nname.Type
 	if t == nil {
 		return
 	}
 	n.Type = t
 	t.Nname = n.Nname
-	rcvr = getthisx(t).Type
+	rcvr := getthisx(t).Type
 	if rcvr != nil && n.Shortname != nil && !isblank(n.Shortname) {
 		addmethod(n.Shortname.Sym, t, true, n.Nname.Nointerface)
 	}
 
-	for l = n.Dcl; l != nil; l = l.Next {
+	for l := n.Dcl; l != nil; l = l.Next {
 		if l.N.Op == ONAME && (l.N.Class == PPARAM || l.N.Class == PPARAMOUT) {
 			l.N.Decldepth = 1
 		}
@@ -3536,10 +3461,7 @@ var ntypecheckdeftype int
 var methodqueue *NodeList
 
 func domethod(n *Node) {
-	var nt *Node
-	var t *Type
-
-	nt = n.Type.Nname
+	nt := n.Type.Nname
 	typecheck(&nt, Etype)
 	if nt.Type == nil {
 		// type check failed; leave empty func
@@ -3557,7 +3479,7 @@ func domethod(n *Node) {
 	// value of its argument, a specific implementation of I may
 	// care.  The _ would suppress the assignment to that argument
 	// while generating a call, so remove it.
-	for t = getinargx(nt.Type).Type; t != nil; t = t.Down {
+	for t := getinargx(nt.Type).Type; t != nil; t = t.Down {
 		if t.Sym != nil && t.Sym.Name == "_" {
 			t.Sym = nil
 		}
@@ -3571,11 +3493,6 @@ func domethod(n *Node) {
 var mapqueue *NodeList
 
 func copytype(n *Node, t *Type) {
-	var maplineno int
-	var embedlineno int
-	var lno int
-	var l *NodeList
-
 	if t.Etype == TFORW {
 		// This type isn't computed yet; when it is, update n.
 		t.Copyto = list(t.Copyto, n)
@@ -3583,10 +3500,10 @@ func copytype(n *Node, t *Type) {
 		return
 	}
 
-	maplineno = int(n.Type.Maplineno)
-	embedlineno = int(n.Type.Embedlineno)
+	maplineno := int(n.Type.Maplineno)
+	embedlineno := int(n.Type.Embedlineno)
 
-	l = n.Type.Copyto
+	l := n.Type.Copyto
 	*n.Type = *t
 
 	t = n.Type
@@ -3607,7 +3524,7 @@ func copytype(n *Node, t *Type) {
 	}
 
 	// Double-check use of type as embedded type.
-	lno = int(lineno)
+	lno := int(lineno)
 
 	if embedlineno != 0 {
 		lineno = int32(embedlineno)
@@ -3626,17 +3543,13 @@ func copytype(n *Node, t *Type) {
 }
 
 func typecheckdeftype(n *Node) {
-	var lno int
-	var t *Type
-	var l *NodeList
-
 	ntypecheckdeftype++
-	lno = int(lineno)
+	lno := int(lineno)
 	setlineno(n)
 	n.Type.Sym = n.Sym
 	n.Typecheck = 1
 	typecheck(&n.Ntype, Etype)
-	t = n.Ntype.Type
+	t := n.Ntype.Type
 	if t == nil {
 		n.Diag = 1
 		n.Type = nil
@@ -3661,6 +3574,7 @@ ret:
 	// try to resolve the method types for the interfaces
 	// we just read.
 	if ntypecheckdeftype == 1 {
+		var l *NodeList
 		for {
 			l = methodqueue
 			if l == nil {
@@ -3672,7 +3586,7 @@ ret:
 			}
 		}
 
-		for l = mapqueue; l != nil; l = l.Next {
+		for l := mapqueue; l != nil; l = l.Next {
 			lineno = l.N.Type.Maplineno
 			maptype(l.N.Type, Types[TBOOL])
 		}
@@ -3693,13 +3607,7 @@ func queuemethod(n *Node) {
 }
 
 func typecheckdef(n *Node) *Node {
-	var lno int
-	var nerrors0 int
-	var e *Node
-	var t *Type
-	var l *NodeList
-
-	lno = int(lineno)
+	lno := int(lineno)
 	setlineno(n)
 
 	if n.Op == ONONAME {
@@ -3721,7 +3629,7 @@ func typecheckdef(n *Node) *Node {
 		return n
 	}
 
-	l = new(NodeList)
+	l := new(NodeList)
 	l.N = n
 	l.Next = typecheckdefstack
 	typecheckdefstack = l
@@ -3729,7 +3637,7 @@ func typecheckdef(n *Node) *Node {
 	if n.Walkdef == 2 {
 		Flusherrors()
 		fmt.Printf("typecheckdef loop:")
-		for l = typecheckdefstack; l != nil; l = l.Next {
+		for l := typecheckdefstack; l != nil; l = l.Next {
 			fmt.Printf(" %v", Sconv(l.N.Sym, 0))
 		}
 		fmt.Printf("\n")
@@ -3762,7 +3670,7 @@ func typecheckdef(n *Node) *Node {
 			}
 		}
 
-		e = n.Defn
+		e := n.Defn
 		n.Defn = nil
 		if e == nil {
 			lineno = n.Lineno
@@ -3785,7 +3693,7 @@ func typecheckdef(n *Node) *Node {
 			goto ret
 		}
 
-		t = n.Type
+		t := n.Type
 		if t != nil {
 			if okforconst[t.Etype] == 0 {
 				Yyerror("invalid constant type %v", Tconv(t, 0))
@@ -3847,7 +3755,7 @@ func typecheckdef(n *Node) *Node {
 		n.Walkdef = 1
 		n.Type = typ(TFORW)
 		n.Type.Sym = n.Sym
-		nerrors0 = nerrors
+		nerrors0 := nerrors
 		typecheckdeftype(n)
 		if n.Type.Etype == TFORW && nerrors > nerrors0 {
 			// Something went wrong during type-checking,
@@ -3920,8 +3828,6 @@ func checkmake(t *Type, arg string, n *Node) int {
 }
 
 func markbreak(n *Node, implicit *Node) {
-	var lab *Label
-
 	if n == nil {
 		return
 	}
@@ -3933,7 +3839,7 @@ func markbreak(n *Node, implicit *Node) {
 				implicit.Hasbreak = 1
 			}
 		} else {
-			lab = n.Left.Sym.Label
+			lab := n.Left.Sym.Label
 			if lab != nil {
 				lab.Def.Hasbreak = 1
 			}
@@ -3990,9 +3896,6 @@ func markbreaklist(l *NodeList, implicit *Node) {
 }
 
 func isterminating(l *NodeList, top int) bool {
-	var def int
-	var n *Node
-
 	if l == nil {
 		return false
 	}
@@ -4006,7 +3909,7 @@ func isterminating(l *NodeList, top int) bool {
 	for l.Next != nil {
 		l = l.Next
 	}
-	n = l.N
+	n := l.N
 
 	if n == nil {
 		return false
@@ -4046,7 +3949,7 @@ func isterminating(l *NodeList, top int) bool {
 		if n.Hasbreak != 0 {
 			return false
 		}
-		def = 0
+		def := 0
 		for l = n.List; l != nil; l = l.Next {
 			if !isterminating(l.N.Nbody, 0) {
 				return false
diff --git a/src/cmd/internal/gc/unsafe.go b/src/cmd/internal/gc/unsafe.go
index 3970468285bca380df05ae20f2ee772936230333..7f0a33f2a702e232b92798d01f5f2a404867bb5d 100644
--- a/src/cmd/internal/gc/unsafe.go
+++ b/src/cmd/internal/gc/unsafe.go
@@ -15,19 +15,11 @@ import "cmd/internal/obj"
  */
 func unsafenmagic(nn *Node) *Node {
 	var r *Node
-	var n *Node
-	var base *Node
-	var r1 *Node
 	var s *Sym
-	var t *Type
-	var tr *Type
 	var v int64
-	var val Val
-	var fn *Node
-	var args *NodeList
 
-	fn = nn.Left
-	args = nn.List
+	fn := nn.Left
+	args := nn.List
 
 	if safemode != 0 || fn == nil || fn.Op != ONAME {
 		goto no
@@ -50,7 +42,7 @@ func unsafenmagic(nn *Node) *Node {
 	if s.Name == "Sizeof" {
 		typecheck(&r, Erv)
 		defaultlit(&r, nil)
-		tr = r.Type
+		tr := r.Type
 		if tr == nil {
 			goto bad
 		}
@@ -70,7 +62,7 @@ func unsafenmagic(nn *Node) *Node {
 		// first to track it correctly.
 		typecheck(&r.Left, Erv)
 
-		base = r.Left
+		base := r.Left
 		typecheck(&r, Erv)
 		switch r.Op {
 		case ODOT,
@@ -89,6 +81,7 @@ func unsafenmagic(nn *Node) *Node {
 		v = 0
 
 		// add offsets for inserted dots.
+		var r1 *Node
 		for r1 = r; r1.Left != base; r1 = r1.Left {
 			switch r1.Op {
 			case ODOT:
@@ -112,13 +105,13 @@ func unsafenmagic(nn *Node) *Node {
 	if s.Name == "Alignof" {
 		typecheck(&r, Erv)
 		defaultlit(&r, nil)
-		tr = r.Type
+		tr := r.Type
 		if tr == nil {
 			goto bad
 		}
 
 		// make struct { byte; T; }
-		t = typ(TSTRUCT)
+		t := typ(TSTRUCT)
 
 		t.Type = typ(TFIELD)
 		t.Type.Type = Types[TUINT8]
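
The struct { byte; T; } built above is the standard alignment probe: the padding the compiler must insert before the second field equals T's alignment. The same trick in user code, with complex128 standing in for T:

	package main

	import (
		"fmt"
		"unsafe"
	)

	type probe struct {
		b byte
		t complex128 // stand-in for T
	}

	func main() {
		var p probe
		fmt.Println(unsafe.Offsetof(p.t) == unsafe.Alignof(p.t)) // true by construction
	}
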
@@ -149,11 +142,12 @@ yes:
 
 	// any side effects disappear; ignore init
 ret:
+	var val Val
 	val.Ctype = CTINT
 
 	val.U.Xval = new(Mpint)
 	Mpmovecfix(val.U.Xval, v)
-	n = Nod(OLITERAL, nil, nil)
+	n := Nod(OLITERAL, nil, nil)
 	n.Orig = nn
 	n.Val = val
 	n.Type = Types[TUINTPTR]
diff --git a/src/cmd/internal/gc/walk.go b/src/cmd/internal/gc/walk.go
index dfb965e35ec48ccd3f391083a496b1a448af535e..b242fd42aa22f5b29a04838899df7242008bac06 100644
--- a/src/cmd/internal/gc/walk.go
+++ b/src/cmd/internal/gc/walk.go
@@ -18,35 +18,31 @@ const (
 )
 
 func walk(fn *Node) {
-	var s string
-	var l *NodeList
-	var lno int
-
 	Curfn = fn
 
 	if Debug['W'] != 0 {
-		s = fmt.Sprintf("\nbefore %v", Sconv(Curfn.Nname.Sym, 0))
+		s := fmt.Sprintf("\nbefore %v", Sconv(Curfn.Nname.Sym, 0))
 		dumplist(s, Curfn.Nbody)
 	}
 
-	lno = int(lineno)
+	lno := int(lineno)
 
 	// Final typecheck for any unused variables.
 	// It's hard to be on the heap when not-used, but best to be consistent about &~PHEAP here and below.
-	for l = fn.Dcl; l != nil; l = l.Next {
+	for l := fn.Dcl; l != nil; l = l.Next {
 		if l.N.Op == ONAME && l.N.Class&^PHEAP == PAUTO {
 			typecheck(&l.N, Erv|Easgn)
 		}
 	}
 
 	// Propagate the used flag for typeswitch variables up to the NONAME in its definition.
-	for l = fn.Dcl; l != nil; l = l.Next {
+	for l := fn.Dcl; l != nil; l = l.Next {
 		if l.N.Op == ONAME && l.N.Class&^PHEAP == PAUTO && l.N.Defn != nil && l.N.Defn.Op == OTYPESW && l.N.Used != 0 {
 			l.N.Defn.Left.Used++
 		}
 	}
 
-	for l = fn.Dcl; l != nil; l = l.Next {
+	for l := fn.Dcl; l != nil; l = l.Next {
 		if l.N.Op != ONAME || l.N.Class&^PHEAP != PAUTO || l.N.Sym.Name[0] == '&' || l.N.Used != 0 {
 			continue
 		}
@@ -69,13 +65,13 @@ func walk(fn *Node) {
 	}
 	walkstmtlist(Curfn.Nbody)
 	if Debug['W'] != 0 {
-		s = fmt.Sprintf("after walk %v", Sconv(Curfn.Nname.Sym, 0))
+		s := fmt.Sprintf("after walk %v", Sconv(Curfn.Nname.Sym, 0))
 		dumplist(s, Curfn.Nbody)
 	}
 
 	heapmoves()
 	if Debug['W'] != 0 && Curfn.Enter != nil {
-		s = fmt.Sprintf("enter %v", Sconv(Curfn.Nname.Sym, 0))
+		s := fmt.Sprintf("enter %v", Sconv(Curfn.Nname.Sym, 0))
 		dumplist(s, Curfn.Enter)
 	}
 }
@@ -96,9 +92,7 @@ func samelist(a *NodeList, b *NodeList) bool {
 }
 
 func paramoutheap(fn *Node) int {
-	var l *NodeList
-
-	for l = fn.Dcl; l != nil; l = l.Next {
+	for l := fn.Dcl; l != nil; l = l.Next {
 		switch l.N.Class {
 		case PPARAMOUT,
 			PPARAMOUT | PHEAP:
@@ -117,13 +111,11 @@ func paramoutheap(fn *Node) int {
 // adds "adjust" to all the argument locations for the call n.
 // n must be a defer or go node that has already been walked.
 func adjustargs(n *Node, adjust int) {
-	var callfunc *Node
 	var arg *Node
 	var lhs *Node
-	var args *NodeList
 
-	callfunc = n.Left
-	for args = callfunc.List; args != nil; args = args.Next {
+	callfunc := n.Left
+	for args := callfunc.List; args != nil; args = args.Next {
 		arg = args.N
 		if arg.Op != OAS {
 			Yyerror("call arg not assignment")
@@ -147,14 +139,7 @@ func adjustargs(n *Node, adjust int) {
 }
 
 func walkstmt(np **Node) {
-	var init *NodeList
-	var ll *NodeList
-	var rl *NodeList
-	var cl int
-	var n *Node
-	var f *Node
-
-	n = *np
+	n := *np
 	if n == nil {
 		return
 	}
@@ -198,7 +183,7 @@ func walkstmt(np **Node) {
 		if n.Typecheck == 0 {
 			Fatal("missing typecheck: %v", Nconv(n, obj.FmtSign))
 		}
-		init = n.Ninit
+		init := n.Ninit
 		n.Ninit = nil
 		walkexpr(&n, &init)
 		addinit(&n, init)
@@ -212,7 +197,7 @@ func walkstmt(np **Node) {
 		if n.Typecheck == 0 {
 			Fatal("missing typecheck: %v", Nconv(n, obj.FmtSign))
 		}
-		init = n.Ninit
+		init := n.Ninit
 		n.Ninit = nil
 
 		walkexpr(&n.Left, &init)
@@ -264,7 +249,7 @@ func walkstmt(np **Node) {
 	case OFOR:
 		if n.Ntest != nil {
 			walkstmtlist(n.Ntest.Ninit)
-			init = n.Ntest.Ninit
+			init := n.Ntest.Ninit
 			n.Ntest.Ninit = nil
 			walkexpr(&n.Ntest, &init)
 			addinit(&n.Ntest, init)
@@ -302,9 +287,10 @@ func walkstmt(np **Node) {
 		if (Curfn.Type.Outnamed != 0 && count(n.List) > 1) || paramoutheap(Curfn) != 0 {
 			// assign to the function out parameters,
 			// so that reorder3 can fix up conflicts
-			rl = nil
+			rl := (*NodeList)(nil)
 
-			for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+			var cl int
+			for ll := Curfn.Dcl; ll != nil; ll = ll.Next {
 				cl = int(ll.N.Class) &^ PHEAP
 				if cl == PAUTO {
 					break
@@ -323,7 +309,7 @@ func walkstmt(np **Node) {
 
 			if count(n.List) == 1 && count(rl) > 1 {
 				// OAS2FUNC in disguise
-				f = n.List.N
+				f := n.List.N
 
 				if f.Op != OCALLFUNC && f.Op != OCALLMETH && f.Op != OCALLINTER {
 					Fatal("expected return of call, have %v", Nconv(f, 0))
@@ -335,12 +321,12 @@ func walkstmt(np **Node) {
 			// move function calls out, to make reorder3's job easier.
 			walkexprlistsafe(n.List, &n.Ninit)
 
-			ll = ascompatee(int(n.Op), rl, n.List, &n.Ninit)
+			ll := ascompatee(int(n.Op), rl, n.List, &n.Ninit)
 			n.List = reorder3(ll)
 			break
 		}
 
-		ll = ascompatte(int(n.Op), nil, 0, Getoutarg(Curfn.Type), n.List, 1, &n.Ninit)
+		ll := ascompatte(int(n.Op), nil, 0, Getoutarg(Curfn.Type), n.List, 1, &n.Ninit)
 		n.List = ll
 
 	case ORETJMP:
@@ -395,31 +381,7 @@ func walkexprlistcheap(l *NodeList, init **NodeList) {
 }
 
 func walkexpr(np **Node, init **NodeList) {
-	var r *Node
-	var l *Node
-	var var_ *Node
-	var a *Node
-	var ok *Node
-	var map_ *Node
-	var key *Node
-	var ll *NodeList
-	var lr *NodeList
-	var t *Type
-	var et int
-	var old_safemode int
-	var v int64
-	var lno int32
-	var n *Node
-	var fn *Node
-	var n1 *Node
-	var n2 *Node
-	var sym *Sym
-	var buf string
-	var p string
-	var from string
-	var to string
-
-	n = *np
+	n := *np
 
 	if n == nil {
 		return
@@ -445,7 +407,7 @@ func walkexpr(np **Node, init **NodeList) {
 		return
 	}
 
-	lno = setlineno(n)
+	lno := setlineno(n)
 
 	if Debug['w'] > 1 {
 		Dump("walk-before", n)
@@ -515,7 +477,7 @@ func walkexpr(np **Node, init **NodeList) {
 
 		// replace len(*[10]int) with 10.
 		// delayed until now to preserve side effects.
-		t = n.Left.Type
+		t := n.Left.Type
 
 		if Isptr[t.Etype] != 0 {
 			t = t.Type
@@ -532,7 +494,7 @@ func walkexpr(np **Node, init **NodeList) {
 		ORSH:
 		walkexpr(&n.Left, init)
 		walkexpr(&n.Right, init)
-		t = n.Left.Type
+		t := n.Left.Type
 		n.Bounded = bounded(n.Right, 8*t.Width)
 		if Debug['m'] != 0 && n.Etype != 0 && !Isconst(n.Right, CTINT) {
 			Warn("shift bounds check elided")
@@ -576,7 +538,7 @@ func walkexpr(np **Node, init **NodeList) {
 		// In this case it can happen if we need to generate an ==
 		// for a struct containing a reflect.Value, which itself has
 		// an unexported field of type unsafe.Pointer.
-		old_safemode = safemode
+		old_safemode := safemode
 
 		safemode = 0
 		walkcompare(&n, init)
@@ -590,7 +552,7 @@ func walkexpr(np **Node, init **NodeList) {
 		// cannot put side effects from n->right on init,
 		// because they cannot run before n->left is checked.
 		// save elsewhere and store on the eventual n->right.
-		ll = nil
+		ll := (*NodeList)(nil)
 
 		walkexpr(&n.Right, &ll)
 		addinit(&n.Right, ll)
@@ -626,13 +588,13 @@ func walkexpr(np **Node, init **NodeList) {
 		goto ret
 
 	case OCALLINTER:
-		t = n.Left.Type
+		t := n.Left.Type
 		if n.List != nil && n.List.N.Op == OAS {
 			goto ret
 		}
 		walkexpr(&n.Left, init)
 		walkexprlist(n.List, init)
-		ll = ascompatte(int(n.Op), n, int(n.Isddd), getinarg(t), n.List, 0, init)
+		ll := ascompatte(int(n.Op), n, int(n.Isddd), getinarg(t), n.List, 0, init)
 		n.List = reorder1(ll)
 		goto ret
 
@@ -652,7 +614,7 @@ func walkexpr(np **Node, init **NodeList) {
 			// Update type of OCALLFUNC node.
 			// Output arguments had not changed, but their offsets could.
 			if n.Left.Type.Outtuple == 1 {
-				t = getoutargx(n.Left.Type).Type
+				t := getoutargx(n.Left.Type).Type
 				if t.Etype == TFIELD {
 					t = t.Type
 				}
@@ -662,7 +624,7 @@ func walkexpr(np **Node, init **NodeList) {
 			}
 		}
 
-		t = n.Left.Type
+		t := n.Left.Type
 		if n.List != nil && n.List.N.Op == OAS {
 			goto ret
 		}
@@ -670,19 +632,19 @@ func walkexpr(np **Node, init **NodeList) {
 		walkexpr(&n.Left, init)
 		walkexprlist(n.List, init)
 
-		ll = ascompatte(int(n.Op), n, int(n.Isddd), getinarg(t), n.List, 0, init)
+		ll := ascompatte(int(n.Op), n, int(n.Isddd), getinarg(t), n.List, 0, init)
 		n.List = reorder1(ll)
 		goto ret
 
 	case OCALLMETH:
-		t = n.Left.Type
+		t := n.Left.Type
 		if n.List != nil && n.List.N.Op == OAS {
 			goto ret
 		}
 		walkexpr(&n.Left, init)
 		walkexprlist(n.List, init)
-		ll = ascompatte(int(n.Op), n, 0, getthis(t), list1(n.Left.Left), 0, init)
-		lr = ascompatte(int(n.Op), n, int(n.Isddd), getinarg(t), n.List, 0, init)
+		ll := ascompatte(int(n.Op), n, 0, getthis(t), list1(n.Left.Left), 0, init)
+		lr := ascompatte(int(n.Op), n, int(n.Isddd), getinarg(t), n.List, 0, init)
 		ll = concat(ll, lr)
 		n.Left.Left = nil
 		ullmancalc(n.Left)
@@ -713,12 +675,12 @@ func walkexpr(np **Node, init **NodeList) {
 		case ODOTTYPE:
 			walkexpr(&n.Right.Left, init)
 
-			n1 = Nod(OADDR, n.Left, nil)
-			r = n.Right // i.(T)
+			n1 := Nod(OADDR, n.Left, nil)
+			r := n.Right // i.(T)
 
-			from = "I"
+			from := "I"
 
-			to = "T"
+			to := "T"
 			if isnilinter(r.Left.Type) {
 				from = "E"
 			}
@@ -728,9 +690,9 @@ func walkexpr(np **Node, init **NodeList) {
 				to = "I"
 			}
 
-			buf = fmt.Sprintf("assert%s2%s", from, to)
+			buf := fmt.Sprintf("assert%s2%s", from, to)
 
-			fn = syslook(buf, 1)
+			fn := syslook(buf, 1)
 			argtype(fn, r.Left.Type)
 			argtype(fn, r.Type)
 
@@ -743,15 +705,15 @@ func walkexpr(np **Node, init **NodeList) {
 		case ORECV:
 			walkexpr(&n.Right.Left, init)
 
-			n1 = Nod(OADDR, n.Left, nil)
-			r = n.Right.Left // the channel
+			n1 := Nod(OADDR, n.Left, nil)
+			r := n.Right.Left // the channel
 			n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, typename(r.Type), r, n1)
 			walkexpr(&n, init)
 			goto ret
 		}
 
 		if n.Left != nil && n.Right != nil {
-			r = convas(Nod(OAS, n.Left, n.Right), init)
+			r := convas(Nod(OAS, n.Left, n.Right), init)
 			r.Dodata = n.Dodata
 			n = r
 			n = applywritebarrier(n, init)
@@ -764,9 +726,9 @@ func walkexpr(np **Node, init **NodeList) {
 		n.Ninit = nil
 		walkexprlistsafe(n.List, init)
 		walkexprlistsafe(n.Rlist, init)
-		ll = ascompatee(OAS, n.List, n.Rlist, init)
+		ll := ascompatee(OAS, n.List, n.Rlist, init)
 		ll = reorder3(ll)
-		for lr = ll; lr != nil; lr = lr.Next {
+		for lr := ll; lr != nil; lr = lr.Next {
 			lr.N = applywritebarrier(lr.N, init)
 		}
 		n = liststmt(ll)
@@ -777,12 +739,12 @@ func walkexpr(np **Node, init **NodeList) {
 		*init = concat(*init, n.Ninit)
 
 		n.Ninit = nil
-		r = n.Rlist.N
+		r := n.Rlist.N
 		walkexprlistsafe(n.List, init)
 		walkexpr(&r, init)
 
-		ll = ascompatet(int(n.Op), n.List, &r.Type, 0, init)
-		for lr = ll; lr != nil; lr = lr.Next {
+		ll := ascompatet(int(n.Op), n.List, &r.Type, 0, init)
+		for lr := ll; lr != nil; lr = lr.Next {
 			lr.N = applywritebarrier(lr.N, init)
 		}
 		n = liststmt(concat(list1(r), ll))
@@ -794,16 +756,17 @@ func walkexpr(np **Node, init **NodeList) {
 		*init = concat(*init, n.Ninit)
 
 		n.Ninit = nil
-		r = n.Rlist.N
+		r := n.Rlist.N
 		walkexprlistsafe(n.List, init)
 		walkexpr(&r.Left, init)
+		var n1 *Node
 		if isblank(n.List.N) {
 			n1 = nodnil()
 		} else {
 			n1 = Nod(OADDR, n.List.N, nil)
 		}
 		n1.Etype = 1 // addr does not escape
-		fn = chanfn("chanrecv2", 2, r.Left.Type)
+		fn := chanfn("chanrecv2", 2, r.Left.Type)
 		r = mkcall1(fn, n.List.Next.N.Type, init, typename(r.Left.Type), r.Left, n1)
 		n = Nod(OAS, n.List.Next.N, r)
 		typecheck(&n, Etop)
@@ -814,12 +777,12 @@ func walkexpr(np **Node, init **NodeList) {
 		*init = concat(*init, n.Ninit)
 
 		n.Ninit = nil
-		r = n.Rlist.N
+		r := n.Rlist.N
 		walkexprlistsafe(n.List, init)
 		walkexpr(&r.Left, init)
 		walkexpr(&r.Right, init)
-		t = r.Left.Type
-		p = ""
+		t := r.Left.Type
+		p := ""
 		if t.Type.Width <= 128 { // Check ../../runtime/hashmap.go:maxValueSize before changing.
 			switch Simsimtype(t.Down) {
 			case TINT32,
@@ -835,6 +798,7 @@ func walkexpr(np **Node, init **NodeList) {
 			}
 		}
 
+		var key *Node
 		if p != "" {
 			// fast versions take key by value
 			key = r.Right
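
The width cap and key-kind switch above pick a specialized map access routine when the key can be passed by value; otherwise the generic path takes the key by address. A sketch of that selection logic (the fast32/fast64/faststr suffixes are assumed from the runtime map implementation of this era):

	// mapFastSuffix mirrors the walker's choice; an empty result means
	// the generic routine, which takes the key by address.
	func mapFastSuffix(keyKind string, valueWidth int64) string {
		const maxValueSize = 128 // see ../../runtime/hashmap.go:maxValueSize
		if valueWidth > maxValueSize {
			return ""
		}
		switch keyKind {
		case "int32", "uint32":
			return "fast32"
		case "int64", "uint64":
			return "fast64"
		case "string":
			return "faststr"
		}
		return ""
	}
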
@@ -851,9 +815,9 @@ func walkexpr(np **Node, init **NodeList) {
 		// to:
 		//   var,b = mapaccess2*(t, m, i)
 		//   a = *var
-		a = n.List.N
+		a := n.List.N
 
-		fn = mapfn(p, t)
+		fn := mapfn(p, t)
 		r = mkcall1(fn, getoutargx(fn.Type), init, typename(t), r.Left, key)
 
 		// mapaccess2* returns a typed bool, but due to spec changes,
@@ -867,7 +831,7 @@ func walkexpr(np **Node, init **NodeList) {
 
 		// don't generate a = *var if a is _
 		if !isblank(a) {
-			var_ = temp(Ptrto(t.Type))
+			var_ := temp(Ptrto(t.Type))
 			var_.Typecheck = 1
 			n.List.N = var_
 			walkexpr(&n, init)
@@ -889,15 +853,15 @@ func walkexpr(np **Node, init **NodeList) {
 	case ODELETE:
 		*init = concat(*init, n.Ninit)
 		n.Ninit = nil
-		map_ = n.List.N
-		key = n.List.Next.N
+		map_ := n.List.N
+		key := n.List.Next.N
 		walkexpr(&map_, init)
 		walkexpr(&key, init)
 
 		// orderstmt made sure key is addressable.
 		key = Nod(OADDR, key, nil)
 
-		t = map_.Type
+		t := map_.Type
 		n = mkcall1(mapfndel("mapdelete", t), nil, init, typename(t), map_, key)
 		goto ret
 
@@ -907,9 +871,10 @@ func walkexpr(np **Node, init **NodeList) {
 		*init = concat(*init, n.Ninit)
 
 		n.Ninit = nil
-		r = n.Rlist.N
+		r := n.Rlist.N
 		walkexprlistsafe(n.List, init)
 		walkexpr(&r.Left, init)
+		var n1 *Node
 		if isblank(n.List.N) {
 			n1 = nodnil()
 		} else {
@@ -917,9 +882,9 @@ func walkexpr(np **Node, init **NodeList) {
 		}
 		n1.Etype = 1 // addr does not escape
 
-		from = "I"
+		from := "I"
 
-		to = "T"
+		to := "T"
 		if isnilinter(r.Left.Type) {
 			from = "E"
 		}
@@ -928,14 +893,14 @@ func walkexpr(np **Node, init **NodeList) {
 		} else if Isinter(r.Type) {
 			to = "I"
 		}
-		buf = fmt.Sprintf("assert%s2%s2", from, to)
+		buf := fmt.Sprintf("assert%s2%s2", from, to)
 
-		fn = syslook(buf, 1)
+		fn := syslook(buf, 1)
 		argtype(fn, r.Left.Type)
 		argtype(fn, r.Type)
 
-		t = Types[TBOOL]
-		ok = n.List.Next.N
+		t := Types[TBOOL]
+		ok := n.List.Next.N
 		if !isblank(ok) {
 			t = ok.Type
 		}
@@ -953,7 +918,7 @@ func walkexpr(np **Node, init **NodeList) {
 
 		// Optimize convT2E as a two-word copy when T is pointer-shaped.
 		if isnilinter(n.Type) && isdirectiface(n.Left.Type) {
-			l = Nod(OEFACE, typename(n.Left.Type), n.Left)
+			l := Nod(OEFACE, typename(n.Left.Type), n.Left)
 			l.Type = n.Type
 			l.Typecheck = n.Typecheck
 			n = l
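
The fast path above means a conversion like the one below compiles to a two-word OEFACE copy, with no convT2E call and no allocation (a source-level sketch, not compiler output):

	// *int is pointer-shaped, so the empty interface is just
	// {type pointer, value pointer}.
	func toEface(p *int) interface{} {
		return p
	}
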
@@ -963,9 +928,9 @@ func walkexpr(np **Node, init **NodeList) {
 		// Build name of function: convI2E etc.
 		// Not all names are possible
 		// (e.g., we'll never generate convE2E or convE2I).
-		from = "T"
+		from := "T"
 
-		to = "I"
+		to := "I"
 		if isnilinter(n.Left.Type) {
 			from = "E"
 		} else if Isinter(n.Left.Type) {
@@ -974,10 +939,10 @@ func walkexpr(np **Node, init **NodeList) {
 		if isnilinter(n.Type) {
 			to = "E"
 		}
-		buf = fmt.Sprintf("conv%s2%s", from, to)
+		buf := fmt.Sprintf("conv%s2%s", from, to)
 
-		fn = syslook(buf, 1)
-		ll = nil
+		fn := syslook(buf, 1)
+		ll := (*NodeList)(nil)
 		if !Isinter(n.Left.Type) {
 			ll = list(ll, typename(n.Left.Type))
 		}
@@ -985,9 +950,9 @@ func walkexpr(np **Node, init **NodeList) {
 			ll = list(ll, typename(n.Type))
 		}
 		if !Isinter(n.Left.Type) && !isnilinter(n.Type) {
-			sym = Pkglookup(fmt.Sprintf("%v.%v", Tconv(n.Left.Type, obj.FmtLeft), Tconv(n.Type, obj.FmtLeft)), itabpkg)
+			sym := Pkglookup(fmt.Sprintf("%v.%v", Tconv(n.Left.Type, obj.FmtLeft), Tconv(n.Type, obj.FmtLeft)), itabpkg)
 			if sym.Def == nil {
-				l = Nod(ONAME, nil, nil)
+				l := Nod(ONAME, nil, nil)
 				l.Sym = sym
 				l.Type = Ptrto(Types[TUINT8])
 				l.Addable = 1
@@ -997,7 +962,7 @@ func walkexpr(np **Node, init **NodeList) {
 				ggloblsym(sym, int32(Widthptr), obj.DUPOK|obj.NOPTR)
 			}
 
-			l = Nod(OADDR, sym.Def, nil)
+			l := Nod(OADDR, sym.Def, nil)
 			l.Addable = 1
 			ll = list(ll, l)
 
@@ -1012,19 +977,19 @@ func walkexpr(np **Node, init **NodeList) {
 				 * The CONVIFACE expression is replaced with this:
 				 * 	OEFACE{tab, ptr};
 				 */
-				l = temp(Ptrto(Types[TUINT8]))
+				l := temp(Ptrto(Types[TUINT8]))
 
-				n1 = Nod(OAS, l, sym.Def)
+				n1 := Nod(OAS, l, sym.Def)
 				typecheck(&n1, Etop)
 				*init = list(*init, n1)
 
-				fn = syslook("typ2Itab", 1)
+				fn := syslook("typ2Itab", 1)
 				n1 = Nod(OCALL, fn, nil)
 				n1.List = ll
 				typecheck(&n1, Erv)
 				walkexpr(&n1, init)
 
-				n2 = Nod(OIF, nil, nil)
+				n2 := Nod(OIF, nil, nil)
 				n2.Ntest = Nod(OEQ, l, nodnil())
 				n2.Nbody = list1(Nod(OAS, l, n1))
 				n2.Likely = -1
@@ -1117,10 +1082,10 @@ func walkexpr(np **Node, init **NodeList) {
 		/*
 		 * rewrite complex div into function call.
 		 */
-		et = int(n.Left.Type.Etype)
+		et := int(n.Left.Type.Etype)
 
 		if Iscomplex[et] != 0 && n.Op == ODIV {
-			t = n.Type
+			t := n.Type
 			n = mkcall("complex128div", Types[TCOMPLEX128], init, conv(n.Left, Types[TCOMPLEX128]), conv(n.Right, Types[TCOMPLEX128]))
 			n = conv(n, t)
 			goto ret
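
The rewrite above routes every complex division through the 128-bit helper and converts back. Its source-level shape for complex64 operands:

	// a/b for complex64 widens to complex128 (complex128div in the
	// runtime) and narrows the result back to the original type.
	func divC64(a, b complex64) complex64 {
		return complex64(complex128(a) / complex128(b))
	}
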
@@ -1167,7 +1132,7 @@ func walkexpr(np **Node, init **NodeList) {
 
 		// save the original node for bounds checking elision.
 		// If it was a ODIV/OMOD walk might rewrite it.
-		r = n.Right
+		r := n.Right
 
 		walkexpr(&n.Right, init)
 
@@ -1176,7 +1141,7 @@ func walkexpr(np **Node, init **NodeList) {
 		if n.Bounded {
 			goto ret
 		}
-		t = n.Left.Type
+		t := n.Left.Type
 		if t != nil && Isptr[t.Etype] != 0 {
 			t = t.Type
 		}
@@ -1200,7 +1165,7 @@ func walkexpr(np **Node, init **NodeList) {
 					// replace "abc"[1] with 'b'.
 					// delayed until now because "abc"[1] is not
 					// an ideal constant.
-					v = Mpgetfix(n.Right.Val.U.Xval)
+					v := Mpgetfix(n.Right.Val.U.Xval)
 
 					Nodconst(n, n.Type, int64(n.Left.Val.U.Sval.S[v]))
 					n.Typecheck = 1
@@ -1222,8 +1187,8 @@ func walkexpr(np **Node, init **NodeList) {
 		walkexpr(&n.Left, init)
 		walkexpr(&n.Right, init)
 
-		t = n.Left.Type
-		p = ""
+		t := n.Left.Type
+		p := ""
 		if t.Type.Width <= 128 { // Check ../../runtime/hashmap.go:maxValueSize before changing.
 			switch Simsimtype(t.Down) {
 			case TINT32,
@@ -1239,6 +1204,7 @@ func walkexpr(np **Node, init **NodeList) {
 			}
 		}
 
+		var key *Node
 		if p != "" {
 			// fast versions take key by value
 			key = n.Right
@@ -1325,7 +1291,7 @@ func walkexpr(np **Node, init **NodeList) {
 
 	case ONEW:
 		if n.Esc == EscNone && n.Type.Type.Width < 1<<16 {
-			r = temp(n.Type.Type)
+			r := temp(n.Type.Type)
 			r = Nod(OAS, r, nil) // zero temp
 			typecheck(&r, Etop)
 			*init = list(*init, r)
@@ -1343,7 +1309,7 @@ func walkexpr(np **Node, init **NodeList) {
 	// without the function call.
 	case OCMPSTR:
 		if (Isconst(n.Left, CTSTR) && len(n.Left.Val.U.Sval.S) == 0) || (Isconst(n.Right, CTSTR) && len(n.Right.Val.U.Sval.S) == 0) {
-			r = Nod(int(n.Etype), Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil))
+			r := Nod(int(n.Etype), Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil))
 			typecheck(&r, Erv)
 			walkexpr(&r, init)
 			r.Type = n.Type
@@ -1353,7 +1319,7 @@ func walkexpr(np **Node, init **NodeList) {
 
 		// s + "badgerbadgerbadger" == "badgerbadgerbadger"
 		if (n.Etype == OEQ || n.Etype == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && cmpslit(n.Right, n.Left.List.Next.N) == 0 {
-			r = Nod(int(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
+			r := Nod(int(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
 			typecheck(&r, Erv)
 			walkexpr(&r, init)
 			r.Type = n.Type
@@ -1361,6 +1327,7 @@ func walkexpr(np **Node, init **NodeList) {
 			goto ret
 		}
 
+		var r *Node
 		if n.Etype == OEQ || n.Etype == ONE {
 			// prepare for rewrite below
 			n.Left = cheapexpr(n.Left, init)
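
Both rewrites above reduce a string comparison to a length test, so neither form below ever reaches the runtime comparison routine. Source-level equivalents:

	//	s == ""                      ->  len(s) == 0
	//	s+"badger..." == "badger..." ->  len(s) == 0
	func isEmpty(s string) bool {
		return len(s) == 0
	}
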
@@ -1416,7 +1383,7 @@ func walkexpr(np **Node, init **NodeList) {
 
 		// cannot use chanfn - closechan takes any, not chan any
 	case OCLOSE:
-		fn = syslook("closechan", 1)
+		fn := syslook("closechan", 1)
 
 		argtype(fn, n.Left.Type)
 		n = mkcall1(fn, nil, init, n.Left)
@@ -1427,15 +1394,15 @@ func walkexpr(np **Node, init **NodeList) {
 		goto ret
 
 	case OMAKEMAP:
-		t = n.Type
+		t := n.Type
 
-		fn = syslook("makemap", 1)
+		fn := syslook("makemap", 1)
 
-		a = nodnil() // hmap buffer
-		r = nodnil() // bucket buffer
+		a := nodnil() // hmap buffer
+		r := nodnil() // bucket buffer
 		if n.Esc == EscNone {
 			// Allocate hmap buffer on stack.
-			var_ = temp(hmap(t))
+			var_ := temp(hmap(t))
 
 			a = Nod(OAS, var_, nil) // zero temp
 			typecheck(&a, Etop)
@@ -1461,29 +1428,29 @@ func walkexpr(np **Node, init **NodeList) {
 		goto ret
 
 	case OMAKESLICE:
-		l = n.Left
-		r = n.Right
+		l := n.Left
+		r := n.Right
 		if r == nil {
 			r = safeexpr(l, init)
 			l = r
 		}
-		t = n.Type
+		t := n.Type
 		if n.Esc == EscNone && Smallintconst(l) && Smallintconst(r) && (t.Type.Width == 0 || Mpgetfix(r.Val.U.Xval) < (1<<16)/t.Type.Width) {
 			// var arr [r]T
 			// n = arr[:l]
 			t = aindex(r, t.Type) // [r]T
-			var_ = temp(t)
-			a = Nod(OAS, var_, nil) // zero temp
+			var_ := temp(t)
+			a := Nod(OAS, var_, nil) // zero temp
 			typecheck(&a, Etop)
 			*init = list(*init, a)
-			r = Nod(OSLICE, var_, Nod(OKEY, nil, l)) // arr[:l]
-			r = conv(r, n.Type)                      // in case n->type is named.
+			r := Nod(OSLICE, var_, Nod(OKEY, nil, l)) // arr[:l]
+			r = conv(r, n.Type)                       // in case n->type is named.
 			typecheck(&r, Erv)
 			walkexpr(&r, init)
 			n = r
 		} else {
 			// makeslice(t *Type, nel int64, max int64) (ary []any)
-			fn = syslook("makeslice", 1)
+			fn := syslook("makeslice", 1)
 
 			argtype(fn, t.Type) // any-1
 			n = mkcall1(fn, n.Type, init, typename(n.Type), conv(l, Types[TINT64]), conv(r, Types[TINT64]))
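
When the slice does not escape and both sizes are small constants, the branch above backs the slice with a stack array instead of calling makeslice. Roughly what the generated code looks like, with illustrative sizes:

	func sumFirst() int {
		// s := make([]int, 4, 8) with Esc == EscNone lowers to about:
		var arr [8]int // var arr [r]T
		s := arr[:4]   // s = arr[:l]
		total := 0
		for _, v := range s {
			total += v
		}
		return total
	}
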
@@ -1492,10 +1459,10 @@ func walkexpr(np **Node, init **NodeList) {
 		goto ret
 
 	case ORUNESTR:
-		a = nodnil()
+		a := nodnil()
 		if n.Esc == EscNone {
-			t = aindex(Nodintconst(4), Types[TUINT8])
-			var_ = temp(t)
+			t := aindex(Nodintconst(4), Types[TUINT8])
+			var_ := temp(t)
 			a = Nod(OADDR, var_, nil)
 		}
 
@@ -1505,10 +1472,10 @@ func walkexpr(np **Node, init **NodeList) {
 		goto ret
 
 	case OARRAYBYTESTR:
-		a = nodnil()
+		a := nodnil()
 		if n.Esc == EscNone {
 			// Create temporary buffer for string on stack.
-			t = aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+			t := aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
 
 			a = Nod(OADDR, temp(t), nil)
 		}
@@ -1526,11 +1493,11 @@ func walkexpr(np **Node, init **NodeList) {
 
 		// slicerunetostring(*[32]byte, []rune) string;
 	case OARRAYRUNESTR:
-		a = nodnil()
+		a := nodnil()
 
 		if n.Esc == EscNone {
 			// Create temporary buffer for string on stack.
-			t = aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+			t := aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
 
 			a = Nod(OADDR, temp(t), nil)
 		}
@@ -1540,11 +1507,11 @@ func walkexpr(np **Node, init **NodeList) {
 
 		// stringtoslicebyte(*32[byte], string) []byte;
 	case OSTRARRAYBYTE:
-		a = nodnil()
+		a := nodnil()
 
 		if n.Esc == EscNone {
 			// Create temporary buffer for slice on stack.
-			t = aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+			t := aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
 
 			a = Nod(OADDR, temp(t), nil)
 		}
@@ -1560,11 +1527,11 @@ func walkexpr(np **Node, init **NodeList) {
 
 		// stringtoslicerune(*[32]rune, string) []rune
 	case OSTRARRAYRUNE:
-		a = nodnil()
+		a := nodnil()
 
 		if n.Esc == EscNone {
 			// Create temporary buffer for slice on stack.
-			t = aindex(Nodintconst(tmpstringbufsize), Types[TINT32])
+			t := aindex(Nodintconst(tmpstringbufsize), Types[TINT32])
 
 			a = Nod(OADDR, temp(t), nil)
 		}
@@ -1577,6 +1544,7 @@ func walkexpr(np **Node, init **NodeList) {
 		if !Eqtype(n.Left.Type, n.Right.Type) {
 			Fatal("ifaceeq %v %v %v", Oconv(int(n.Op), 0), Tconv(n.Left.Type, 0), Tconv(n.Right.Type, 0))
 		}
+		var fn *Node
 		if isnilinter(n.Left.Type) {
 			fn = syslook("efaceeq", 1)
 		} else {
@@ -1587,7 +1555,7 @@ func walkexpr(np **Node, init **NodeList) {
 		n.Left = cheapexpr(n.Left, init)
 		argtype(fn, n.Right.Type)
 		argtype(fn, n.Left.Type)
-		r = mkcall1(fn, n.Type, init, n.Left, n.Right)
+		r := mkcall1(fn, n.Type, init, n.Left, n.Right)
 		if n.Etype == ONE {
 			r = Nod(ONOT, r, nil)
 		}
@@ -1608,13 +1576,13 @@ func walkexpr(np **Node, init **NodeList) {
 		OMAPLIT,
 		OSTRUCTLIT,
 		OPTRLIT:
-		var_ = temp(n.Type)
+		var_ := temp(n.Type)
 		anylit(0, n, var_, init)
 		n = var_
 		goto ret
 
 	case OSEND:
-		n1 = n.Right
+		n1 := n.Right
 		n1 = assignconv(n1, n.Left.Type.Type, "chan send")
 		walkexpr(&n1, init)
 		n1 = Nod(OADDR, n1, nil)
@@ -1638,7 +1606,7 @@ func walkexpr(np **Node, init **NodeList) {
 	// walk of y%1 may have replaced it by 0.
 	// Check whether n with its updated args is itself now a constant.
 ret:
-	t = n.Type
+	t := n.Type
 
 	evconst(n)
 	n.Type = t
@@ -1657,11 +1625,9 @@ ret:
 }
 
 func ascompatee1(op int, l *Node, r *Node, init **NodeList) *Node {
-	var n *Node
-
 	// convas will turn map assigns into function calls,
 	// making it impossible for reorder3 to work.
-	n = Nod(OAS, l, r)
+	n := Nod(OAS, l, r)
 
 	if l.Op == OINDEXMAP {
 		return n
@@ -1673,7 +1639,6 @@ func ascompatee1(op int, l *Node, r *Node, init **NodeList) *Node {
 func ascompatee(op int, nl *NodeList, nr *NodeList, init **NodeList) *NodeList {
 	var ll *NodeList
 	var lr *NodeList
-	var nn *NodeList
 
 	/*
 	 * check assign expression list to
@@ -1689,7 +1654,7 @@ func ascompatee(op int, nl *NodeList, nr *NodeList, init **NodeList) *NodeList {
 		lr.N = safeexpr(lr.N, init)
 	}
 
-	nn = nil
+	nn := (*NodeList)(nil)
 	ll = nl
 	lr = nr
 	for ; ll != nil && lr != nil; (func() { ll = ll.Next; lr = lr.Next })() {
@@ -1714,12 +1679,10 @@ func ascompatee(op int, nl *NodeList, nr *NodeList, init **NodeList) *NodeList {
  * in the conversion of the types
  */
 func fncall(l *Node, rt *Type) bool {
-	var r Node
-
 	if l.Ullman >= UINF || l.Op == OINDEXMAP {
 		return true
 	}
-	r = Node{}
+	r := Node{}
 	if needwritebarrier(l, &r) {
 		return true
 	}
@@ -1734,22 +1697,18 @@ func ascompatet(op int, nl *NodeList, nr **Type, fp int, init **NodeList) *NodeL
 	var tmp *Node
 	var a *Node
 	var ll *NodeList
-	var r *Type
 	var saver Iter
-	var ucount int
-	var nn *NodeList
-	var mm *NodeList
 
 	/*
 	 * check assign type list to
 	 * an expression list. called in
 	 *	expr-list = func()
 	 */
-	r = Structfirst(&saver, nr)
+	r := Structfirst(&saver, nr)
 
-	nn = nil
-	mm = nil
-	ucount = 0
+	nn := (*NodeList)(nil)
+	mm := (*NodeList)(nil)
+	ucount := 0
 	for ll = nl; ll != nil; ll = ll.Next {
 		if r == nil {
 			break
@@ -1798,20 +1757,16 @@ func ascompatet(op int, nl *NodeList, nr **Type, fp int, init **NodeList) *NodeL
 * package all the arguments that match a ... T parameter into a []T.
  */
 func mkdotargslice(lr0 *NodeList, nn *NodeList, l *Type, fp int, init **NodeList, ddd *Node) *NodeList {
-	var a *Node
-	var n *Node
-	var tslice *Type
-	var esc int
-
-	esc = EscUnknown
+	esc := EscUnknown
 	if ddd != nil {
 		esc = int(ddd.Esc)
 	}
 
-	tslice = typ(TARRAY)
+	tslice := typ(TARRAY)
 	tslice.Type = l.Type.Type
 	tslice.Bound = -1
 
+	var n *Node
 	if count(lr0) == 0 {
 		n = nodnil()
 		n.Type = tslice
@@ -1829,7 +1784,7 @@ func mkdotargslice(lr0 *NodeList, nn *NodeList, l *Type, fp int, init **NodeList
 		walkexpr(&n, init)
 	}
 
-	a = Nod(OAS, nodarg(l, fp), n)
+	a := Nod(OAS, nodarg(l, fp), n)
 	nn = list(nn, convas(a, init))
 	return nn
 }
@@ -1838,15 +1793,12 @@ func mkdotargslice(lr0 *NodeList, nn *NodeList, l *Type, fp int, init **NodeList
  * helpers for shape errors
  */
 func dumptypes(nl **Type, what string) string {
-	var first int
-	var l *Type
 	var savel Iter
-	var fmt_ string
 
-	fmt_ = ""
+	fmt_ := ""
 	fmt_ += fmt.Sprintf("\t")
-	first = 1
-	for l = Structfirst(&savel, nl); l != nil; l = structnext(&savel) {
+	first := 1
+	for l := Structfirst(&savel, nl); l != nil; l = structnext(&savel) {
 		if first != 0 {
 			first = 0
 		} else {
@@ -1862,13 +1814,11 @@ func dumptypes(nl **Type, what string) string {
 }
 
 func dumpnodetypes(l *NodeList, what string) string {
-	var first int
 	var r *Node
-	var fmt_ string
 
-	fmt_ = ""
+	fmt_ := ""
 	fmt_ += fmt.Sprintf("\t")
-	first = 1
+	first := 1
 	for ; l != nil; l = l.Next {
 		r = l.N
 		if first != 0 {
@@ -1892,30 +1842,25 @@ func dumpnodetypes(l *NodeList, what string) string {
  *	func(expr-list)
  */
 func ascompatte(op int, call *Node, isddd int, nl **Type, lr *NodeList, fp int, init **NodeList) *NodeList {
-	var l *Type
-	var ll *Type
-	var r *Node
-	var a *Node
-	var nn *NodeList
-	var lr0 *NodeList
-	var alist *NodeList
 	var savel Iter
-	var l1 string
-	var l2 string
 
-	lr0 = lr
-	l = Structfirst(&savel, nl)
-	r = nil
+	lr0 := lr
+	l := Structfirst(&savel, nl)
+	r := (*Node)(nil)
 	if lr != nil {
 		r = lr.N
 	}
-	nn = nil
+	nn := (*NodeList)(nil)
 
 	// f(g()) where g has multiple return values
+	var a *Node
+	var l2 string
+	var ll *Type
+	var l1 string
 	if r != nil && lr.Next == nil && r.Type.Etype == TSTRUCT && r.Type.Funarg != 0 {
 		// optimization - can do block copy
 		if eqtypenoname(r.Type, *nl) {
-			a = nodarg(*nl, fp)
+			a := nodarg(*nl, fp)
 			r = Nod(OCONVNOP, r, nil)
 			r.Type = a.Type
 			nn = list1(convas(Nod(OAS, a, r), init))
@@ -1924,9 +1869,9 @@ func ascompatte(op int, call *Node, isddd int, nl **Type, lr *NodeList, fp int,
 
 		// conversions involved.
 		// copy into temporaries.
-		alist = nil
+		alist := (*NodeList)(nil)
 
-		for l = Structfirst(&savel, &r.Type); l != nil; l = structnext(&savel) {
+		for l := Structfirst(&savel, &r.Type); l != nil; l = structnext(&savel) {
 			a = temp(l.Type)
 			alist = list(alist, a)
 		}
@@ -2007,26 +1952,21 @@ ret:
 func walkprint(nn *Node, init **NodeList) *Node {
 	var r *Node
 	var n *Node
-	var l *NodeList
-	var all *NodeList
 	var on *Node
 	var t *Type
-	var notfirst bool
 	var et int
-	var op int
-	var calls *NodeList
 
-	op = int(nn.Op)
-	all = nn.List
-	calls = nil
-	notfirst = false
+	op := int(nn.Op)
+	all := nn.List
+	calls := (*NodeList)(nil)
+	notfirst := false
 
 	// Hoist all the argument evaluation up before the lock.
 	walkexprlistcheap(all, init)
 
 	calls = list(calls, mkcall("printlock", nil, init))
 
-	for l = all; l != nil; l = l.Next {
+	for l := all; l != nil; l = l.Next {
 		if notfirst {
 			calls = list(calls, mkcall("printsp", nil, init))
 		}
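
A user-level mimic of the shape walkprint emits: all arguments are evaluated before the lock, then printed one at a time with separators (printlock and printsp appear above; the per-kind print routines and printunlock are assumed from the runtime):

	package main

	import (
		"fmt"
		"sync"
	)

	var printMu sync.Mutex // stands in for printlock/printunlock

	func myPrintln(args ...int) {
		printMu.Lock()
		defer printMu.Unlock()
		for i, a := range args {
			if i > 0 {
				fmt.Print(" ") // printsp between operands
			}
			fmt.Print(a) // printint for integer operands
		}
		fmt.Println() // printnl
	}

	func main() { myPrintln(1, 2, 3) }
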
@@ -2129,23 +2069,19 @@ func walkprint(nn *Node, init **NodeList) *Node {
 }
 
 func callnew(t *Type) *Node {
-	var fn *Node
-
 	dowidth(t)
-	fn = syslook("newobject", 1)
+	fn := syslook("newobject", 1)
 	argtype(fn, t)
 	return mkcall1(fn, Ptrto(t), nil, typename(t))
 }
 
 func isstack(n *Node) bool {
-	var defn *Node
-
 	n = outervalue(n)
 
 	// If n is *autotmp and autotmp = &foo, replace n with foo.
 	// We introduce such temps when initializing struct literals.
 	if n.Op == OIND && n.Left.Op == ONAME && strings.HasPrefix(n.Left.Sym.Name, "autotmp_") {
-		defn = n.Left.Defn
+		defn := n.Left.Defn
 		if defn != nil && defn.Op == OAS && defn.Right.Op == OADDR {
 			n = defn.Right.Left
 		}
@@ -2263,18 +2199,12 @@ func needwritebarrier(l *Node, r *Node) bool {
 var applywritebarrier_bv *Bvec
 
 func applywritebarrier(n *Node, init **NodeList) *Node {
-	var l *Node
-	var r *Node
-	var t *Type
-	var x int64
-	var name string
-
 	if n.Left != nil && n.Right != nil && needwritebarrier(n.Left, n.Right) {
 		if Curfn != nil && Curfn.Nowritebarrier {
 			Yyerror("write barrier prohibited")
 		}
-		t = n.Left.Type
-		l = Nod(OADDR, n.Left, nil)
+		t := n.Left.Type
+		l := Nod(OADDR, n.Left, nil)
 		l.Etype = 1 // addr does not escape
 		if t.Width == int64(Widthptr) {
 			n = mkcall1(writebarrierfn("writebarrierptr", t, n.Right.Type), nil, init, l, n.Right)
@@ -2285,7 +2215,7 @@ func applywritebarrier(n *Node, init **NodeList) *Node {
 		} else if Isinter(t) {
 			n = mkcall1(writebarrierfn("writebarrieriface", t, n.Right.Type), nil, init, l, n.Right)
 		} else if t.Width <= int64(4*Widthptr) {
-			x = 0
+			x := int64(0)
 			if applywritebarrier_bv == nil {
 				applywritebarrier_bv = bvalloc(obj.BitsPerPointer * 4)
 			}
@@ -2298,6 +2228,7 @@ func applywritebarrier(n *Node, init **NodeList) *Node {
 			if obj.BitsPointer != 1<<PtrBit {
 				Fatal("wrong PtrBit")
 			}
+			var name string
 			switch t.Width / int64(Widthptr) {
 			default:
 				Fatal("found writebarrierfat for %d-byte object of type %v", int(t.Width), Tconv(t, 0))
@@ -2314,7 +2245,7 @@ func applywritebarrier(n *Node, init **NodeList) *Node {
 
 			n = mkcall1(writebarrierfn(name, t, n.Right.Type), nil, init, l, nodnil(), n.Right)
 		} else {
-			r = n.Right
+			r := n.Right
 			for r.Op == OCONVNOP {
 				r = r.Left
 			}
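
The chain above selects a barrier routine from the width and shape of the destination: one pointer word, then the string/slice/interface header cases, then small aggregates of up to four words via a writebarrierfat variant keyed on the pointer bitmap, and finally a typed bulk copy for anything larger. A sketch of just the size dispatch (no helper names beyond those visible above are implied):

	// barrierKind mirrors the width checks; widthptr is 4 or 8.
	func barrierKind(width, widthptr int64) string {
		switch {
		case width == widthptr:
			return "ptr" // single pointer store
		case width <= 4*widthptr:
			return "fat" // small aggregate, pointer bitmap chooses the variant
		default:
			return "bulk" // typed copy of the whole object
		}
	}
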
@@ -2330,18 +2261,14 @@ func applywritebarrier(n *Node, init **NodeList) *Node {
 }
 
 func convas(n *Node, init **NodeList) *Node {
-	var lt *Type
-	var rt *Type
-	var map_ *Node
-	var key *Node
-	var val *Node
-
 	if n.Op != OAS {
 		Fatal("convas: not OAS %v", Oconv(int(n.Op), 0))
 	}
 
 	n.Typecheck = 1
 
+	var lt *Type
+	var rt *Type
 	if n.Left == nil || n.Right == nil {
 		goto out
 	}
@@ -2358,9 +2285,9 @@ func convas(n *Node, init **NodeList) *Node {
 	}
 
 	if n.Left.Op == OINDEXMAP {
-		map_ = n.Left.Left
-		key = n.Left.Right
-		val = n.Right
+		map_ := n.Left.Left
+		key := n.Left.Right
+		val := n.Right
 		walkexpr(&map_, init)
 		walkexpr(&key, init)
 		walkexpr(&val, init)
@@ -2392,20 +2319,12 @@ out:
  * make temp variables
  */
 func reorder1(all *NodeList) *NodeList {
-	var f *Node
-	var a *Node
 	var n *Node
-	var l *NodeList
-	var r *NodeList
-	var g *NodeList
-	var c int
-	var d int
-	var t int
 
-	c = 0 // function calls
-	t = 0 // total parameters
+	c := 0 // function calls
+	t := 0 // total parameters
 
-	for l = all; l != nil; l = l.Next {
+	for l := all; l != nil; l = l.Next {
 		n = l.N
 		t++
 		ullmancalc(n)
@@ -2418,11 +2337,12 @@ func reorder1(all *NodeList) *NodeList {
 		return all
 	}
 
-	g = nil // fncalls assigned to tempnames
-	f = nil // last fncall assigned to stack
-	r = nil // non fncalls and tempnames assigned to stack
-	d = 0
-	for l = all; l != nil; l = l.Next {
+	g := (*NodeList)(nil) // fncalls assigned to tempnames
+	f := (*Node)(nil)     // last fncall assigned to stack
+	r := (*NodeList)(nil) // non fncalls and tempnames assigned to stack
+	d := 0
+	var a *Node
+	for l := all; l != nil; l = l.Next {
 		n = l.N
 		if n.Ullman < UINF {
 			r = list(r, n)
@@ -2463,18 +2383,15 @@ func reorder1(all *NodeList) *NodeList {
  * function calls have been removed.
  */
 func reorder3(all *NodeList) *NodeList {
-	var list *NodeList
-	var early *NodeList
-	var mapinit *NodeList
 	var l *Node
 
 	// If a needed expression may be affected by an
 	// earlier assignment, make an early copy of that
 	// expression and use the copy instead.
-	early = nil
+	early := (*NodeList)(nil)
 
-	mapinit = nil
-	for list = all; list != nil; list = list.Next {
+	mapinit := (*NodeList)(nil)
+	for list := all; list != nil; list = list.Next {
 		l = list.N.Left
 
 		// Save subexpressions needed on left side.
@@ -2529,15 +2446,12 @@ func reorder3(all *NodeList) *NodeList {
  * replace *np with that temp.
  */
 func reorder3save(np **Node, all *NodeList, stop *NodeList, early **NodeList) {
-	var n *Node
-	var q *Node
-
-	n = *np
+	n := *np
 	if !aliased(n, all, stop) {
 		return
 	}
 
-	q = temp(n.Type)
+	q := temp(n.Type)
 	q = Nod(OAS, q, n)
 	typecheck(&q, Etop)
 	*early = list(*early, q)
@@ -2574,11 +2488,6 @@ func outervalue(n *Node) *Node {
  * affected by writes in all up to but not including stop?
  */
 func aliased(n *Node, all *NodeList, stop *NodeList) bool {
-	var memwrite int
-	var varwrite int
-	var a *Node
-	var l *NodeList
-
 	if n == nil {
 		return false
 	}
@@ -2588,10 +2497,11 @@ func aliased(n *Node, all *NodeList, stop *NodeList) bool {
 	// Also record whether there are any writes to main memory.
 	// Also record whether there are any writes to variables
 	// whose addresses have been taken.
-	memwrite = 0
+	memwrite := 0
 
-	varwrite = 0
-	for l = all; l != stop; l = l.Next {
+	varwrite := 0
+	var a *Node
+	for l := all; l != stop; l = l.Next {
 		a = outervalue(l.N.Left)
 		if a.Op != ONAME {
 			memwrite = 1
@@ -2698,8 +2608,6 @@ func varexpr(n *Node) bool {
  * is the name l mentioned in r?
  */
 func vmatch2(l *Node, r *Node) bool {
-	var ll *NodeList
-
 	if r == nil {
 		return false
 	}
@@ -2718,7 +2626,7 @@ func vmatch2(l *Node, r *Node) bool {
 	if vmatch2(l, r.Right) {
 		return true
 	}
-	for ll = r.List; ll != nil; ll = ll.Next {
+	for ll := r.List; ll != nil; ll = ll.Next {
 		if vmatch2(l, ll.N) {
 			return true
 		}
@@ -2731,8 +2639,6 @@ func vmatch2(l *Node, r *Node) bool {
  * called by sinit.c
  */
 func vmatch1(l *Node, r *Node) bool {
-	var ll *NodeList
-
 	/*
 	 * isolate all left sides
 	 */
@@ -2767,7 +2673,7 @@ func vmatch1(l *Node, r *Node) bool {
 	if vmatch1(l.Right, r) {
 		return true
 	}
-	for ll = l.List; ll != nil; ll = ll.Next {
+	for ll := l.List; ll != nil; ll = ll.Next {
 		if vmatch1(ll.N, r) {
 			return true
 		}
@@ -2781,14 +2687,12 @@ func vmatch1(l *Node, r *Node) bool {
  * copies of escaped parameters to the heap.
  */
 func paramstoheap(argin **Type, out int) *NodeList {
-	var t *Type
 	var savet Iter
 	var v *Node
 	var as *Node
-	var nn *NodeList
 
-	nn = nil
-	for t = Structfirst(&savet, argin); t != nil; t = structnext(&savet) {
+	nn := (*NodeList)(nil)
+	for t := Structfirst(&savet, argin); t != nil; t = structnext(&savet) {
 		v = t.Nname
 		if v != nil && v.Sym != nil && v.Sym.Name[0] == '~' && v.Sym.Name[1] == 'r' { // unnamed result
 			v = nil
@@ -2831,13 +2735,11 @@ func paramstoheap(argin **Type, out int) *NodeList {
  * walk through argout parameters copying back to stack
  */
 func returnsfromheap(argin **Type) *NodeList {
-	var t *Type
 	var savet Iter
 	var v *Node
-	var nn *NodeList
 
-	nn = nil
-	for t = Structfirst(&savet, argin); t != nil; t = structnext(&savet) {
+	nn := (*NodeList)(nil)
+	for t := Structfirst(&savet, argin); t != nil; t = structnext(&savet) {
 		v = t.Nname
 		if v == nil || v.Class != PHEAP|PPARAMOUT {
 			continue
@@ -2854,12 +2756,9 @@ func returnsfromheap(argin **Type) *NodeList {
  * curfn's before and after lists.
  */
 func heapmoves() {
-	var nn *NodeList
-	var lno int32
-
-	lno = lineno
+	lno := lineno
 	lineno = Curfn.Lineno
-	nn = paramstoheap(getthis(Curfn.Type), 0)
+	nn := paramstoheap(getthis(Curfn.Type), 0)
 	nn = concat(nn, paramstoheap(getinarg(Curfn.Type), 0))
 	nn = concat(nn, paramstoheap(Getoutarg(Curfn.Type), 1))
 	Curfn.Enter = concat(Curfn.Enter, nn)
@@ -2869,22 +2768,17 @@ func heapmoves() {
 }
 
 func vmkcall(fn *Node, t *Type, init **NodeList, va []*Node) *Node {
-	var i int
-	var n int
-	var r *Node
-	var args *NodeList
-
 	if fn.Type == nil || fn.Type.Etype != TFUNC {
 		Fatal("mkcall %v %v", Nconv(fn, 0), Tconv(fn.Type, 0))
 	}
 
-	args = nil
-	n = fn.Type.Intuple
-	for i = 0; i < n; i++ {
+	args := (*NodeList)(nil)
+	n := fn.Type.Intuple
+	for i := 0; i < n; i++ {
 		args = list(args, va[i])
 	}
 
-	r = Nod(OCALL, fn, nil)
+	r := Nod(OCALL, fn, nil)
 	r.List = args
 	if fn.Type.Outtuple > 0 {
 		typecheck(&r, Erv|Efnstruct)
@@ -2915,26 +2809,21 @@ func conv(n *Node, t *Type) *Node {
 }
 
 func chanfn(name string, n int, t *Type) *Node {
-	var fn *Node
-	var i int
-
 	if t.Etype != TCHAN {
 		Fatal("chanfn %v", Tconv(t, 0))
 	}
-	fn = syslook(name, 1)
-	for i = 0; i < n; i++ {
+	fn := syslook(name, 1)
+	for i := 0; i < n; i++ {
 		argtype(fn, t.Type)
 	}
 	return fn
 }
 
 func mapfn(name string, t *Type) *Node {
-	var fn *Node
-
 	if t.Etype != TMAP {
 		Fatal("mapfn %v", Tconv(t, 0))
 	}
-	fn = syslook(name, 1)
+	fn := syslook(name, 1)
 	argtype(fn, t.Down)
 	argtype(fn, t.Type)
 	argtype(fn, t.Down)
@@ -2943,12 +2832,10 @@ func mapfn(name string, t *Type) *Node {
 }
 
 func mapfndel(name string, t *Type) *Node {
-	var fn *Node
-
 	if t.Etype != TMAP {
 		Fatal("mapfn %v", Tconv(t, 0))
 	}
-	fn = syslook(name, 1)
+	fn := syslook(name, 1)
 	argtype(fn, t.Down)
 	argtype(fn, t.Type)
 	argtype(fn, t.Down)
@@ -2956,36 +2843,24 @@ func mapfndel(name string, t *Type) *Node {
 }
 
 func writebarrierfn(name string, l *Type, r *Type) *Node {
-	var fn *Node
-
-	fn = syslook(name, 1)
+	fn := syslook(name, 1)
 	argtype(fn, l)
 	argtype(fn, r)
 	return fn
 }
 
 func addstr(n *Node, init **NodeList) *Node {
-	var r *Node
-	var cat *Node
-	var slice *Node
-	var buf *Node
-	var args *NodeList
-	var l *NodeList
-	var c int
-	var sz int64
-	var t *Type
-
 	// orderexpr rewrote OADDSTR to have a list of strings.
-	c = count(n.List)
+	c := count(n.List)
 
 	if c < 2 {
 		Yyerror("addstr count %d too small", c)
 	}
 
-	buf = nodnil()
+	buf := nodnil()
 	if n.Esc == EscNone {
-		sz = 0
-		for l = n.List; l != nil; l = l.Next {
+		sz := int64(0)
+		for l := n.List; l != nil; l = l.Next {
 			if l.N.Op == OLITERAL {
 				sz += int64(len(l.N.Val.U.Sval.S))
 			}
@@ -2994,16 +2869,16 @@ func addstr(n *Node, init **NodeList) *Node {
 		// Don't allocate the buffer if the result won't fit.
 		if sz < tmpstringbufsize {
 			// Create temporary buffer for result string on stack.
-			t = aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+			t := aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
 
 			buf = Nod(OADDR, temp(t), nil)
 		}
 	}
 
 	// build list of string arguments
-	args = list1(buf)
+	args := list1(buf)
 
-	for l = n.List; l != nil; l = l.Next {
+	for l := n.List; l != nil; l = l.Next {
 		args = list(args, conv(l.N, Types[TSTRING]))
 	}
 
@@ -3015,10 +2890,10 @@ func addstr(n *Node, init **NodeList) *Node {
 		// large numbers of strings are passed to the runtime as a slice.
 		namebuf = "concatstrings"
 
-		t = typ(TARRAY)
+		t := typ(TARRAY)
 		t.Type = Types[TSTRING]
 		t.Bound = -1
-		slice = Nod(OCOMPLIT, nil, typenod(t))
+		slice := Nod(OCOMPLIT, nil, typenod(t))
 		slice.Alloc = n.Alloc
 		slice.List = args.Next // skip buf arg
 		args = list1(buf)
@@ -3026,8 +2901,8 @@ func addstr(n *Node, init **NodeList) *Node {
 		slice.Esc = EscNone
 	}
 
-	cat = syslook(namebuf, 1)
-	r = Nod(OCALL, cat, nil)
+	cat := syslook(namebuf, 1)
+	r := Nod(OCALL, cat, nil)
 	r.List = args
 	typecheck(&r, Erv)
 	walkexpr(&r, init)
@@ -3049,36 +2924,25 @@ func addstr(n *Node, init **NodeList) *Node {
 //
 // l2 is allowed to be a string.
 func appendslice(n *Node, init **NodeList) *Node {
-	var l *NodeList
-	var l1 *Node
-	var l2 *Node
-	var nt *Node
-	var nif *Node
-	var fn *Node
-	var nptr1 *Node
-	var nptr2 *Node
-	var nwid *Node
-	var s *Node
-
 	walkexprlistsafe(n.List, init)
 
 	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
 	// and n are name or literal, but those may index the slice we're
 	// modifying here.  Fix explicitly.
-	for l = n.List; l != nil; l = l.Next {
+	for l := n.List; l != nil; l = l.Next {
 		l.N = cheapexpr(l.N, init)
 	}
 
-	l1 = n.List.N
-	l2 = n.List.Next.N
+	l1 := n.List.N
+	l2 := n.List.Next.N
 
-	s = temp(l1.Type) // var s []T
-	l = nil
+	s := temp(l1.Type) // var s []T
+	l := (*NodeList)(nil)
 	l = list(l, Nod(OAS, s, l1)) // s = l1
 
-	nt = temp(Types[TINT])
+	nt := temp(Types[TINT])
 
-	nif = Nod(OIF, nil, nil)
+	nif := Nod(OIF, nil, nil)
 
 	// n := len(s) + len(l2) - cap(s)
 	nif.Ninit = list1(Nod(OAS, nt, Nod(OSUB, Nod(OADD, Nod(OLEN, s, nil), Nod(OLEN, l2, nil)), Nod(OCAP, s, nil))))
@@ -3086,7 +2950,7 @@ func appendslice(n *Node, init **NodeList) *Node {
 	nif.Ntest = Nod(OGT, nt, Nodintconst(0))
 
 	// instantiate growslice(Type*, []any, int64) []any
-	fn = syslook("growslice", 1)
+	fn := syslook("growslice", 1)
 
 	argtype(fn, s.Type.Type)
 	argtype(fn, s.Type.Type)
@@ -3098,22 +2962,23 @@ func appendslice(n *Node, init **NodeList) *Node {
 
 	if haspointers(l1.Type.Type) {
 		// copy(s[len(l1):len(l1)+len(l2)], l2)
-		nptr1 = Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))))
+		nptr1 := Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))))
 
 		nptr1.Etype = 1
-		nptr2 = l2
-		fn = syslook("typedslicecopy", 1)
+		nptr2 := l2
+		fn := syslook("typedslicecopy", 1)
 		argtype(fn, l1.Type)
 		argtype(fn, l2.Type)
-		nt = mkcall1(fn, Types[TINT], &l, typename(l1.Type.Type), nptr1, nptr2)
+		nt := mkcall1(fn, Types[TINT], &l, typename(l1.Type.Type), nptr1, nptr2)
 		l = list(l, nt)
 	} else if flag_race != 0 {
 		// rely on runtime to instrument copy.
 		// copy(s[len(l1):len(l1)+len(l2)], l2)
-		nptr1 = Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))))
+		nptr1 := Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))))
 
 		nptr1.Etype = 1
-		nptr2 = l2
+		nptr2 := l2
+		var fn *Node
 		if l2.Type.Etype == TSTRING {
 			fn = syslook("slicestringcopy", 1)
 		} else {
@@ -3121,25 +2986,25 @@ func appendslice(n *Node, init **NodeList) *Node {
 		}
 		argtype(fn, l1.Type)
 		argtype(fn, l2.Type)
-		nt = mkcall1(fn, Types[TINT], &l, nptr1, nptr2, Nodintconst(s.Type.Type.Width))
+		nt := mkcall1(fn, Types[TINT], &l, nptr1, nptr2, Nodintconst(s.Type.Type.Width))
 		l = list(l, nt)
 	} else {
 		// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
-		nptr1 = Nod(OINDEX, s, Nod(OLEN, l1, nil))
+		nptr1 := Nod(OINDEX, s, Nod(OLEN, l1, nil))
 
 		nptr1.Bounded = true
 		nptr1 = Nod(OADDR, nptr1, nil)
 
-		nptr2 = Nod(OSPTR, l2, nil)
+		nptr2 := Nod(OSPTR, l2, nil)
 
-		fn = syslook("memmove", 1)
+		fn := syslook("memmove", 1)
 		argtype(fn, s.Type.Type) // 1 old []any
 		argtype(fn, s.Type.Type) // 2 ret []any
 
-		nwid = cheapexpr(conv(Nod(OLEN, l2, nil), Types[TUINTPTR]), &l)
+		nwid := cheapexpr(conv(Nod(OLEN, l2, nil), Types[TUINTPTR]), &l)
 
 		nwid = Nod(OMUL, nwid, Nodintconst(s.Type.Type.Width))
-		nt = mkcall1(fn, nil, &l, nptr1, nptr2, nwid)
+		nt := mkcall1(fn, nil, &l, nptr1, nptr2, nwid)
 		l = list(l, nt)
 	}
 
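Taken together, the three branches above expand s = append(l1, l2...) into one capacity check plus one bulk copy. A source-level sketch of that shape, with make+copy standing in for the growslice call:

	func appendSlice(l1, l2 []byte) []byte {
		s := l1
		// n := len(s) + len(l2) - cap(s); grow only if n > 0.
		if n := len(s) + len(l2) - cap(s); n > 0 {
			grown := make([]byte, len(s), len(s)+len(l2))
			copy(grown, s)
			s = grown
		}
		s = s[:len(l1)+len(l2)]
		copy(s[len(l1):], l2) // typedslicecopy / slicecopy / memmove above
		return s
	}
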
@@ -3172,61 +3037,51 @@ func appendslice(n *Node, init **NodeList) *Node {
 //   }
 //   s
 func walkappend(n *Node, init **NodeList) *Node {
-	var l *NodeList
-	var a *NodeList
-	var nsrc *Node
-	var ns *Node
-	var nn *Node
-	var na *Node
-	var nx *Node
-	var fn *Node
-	var argc int
-
 	walkexprlistsafe(n.List, init)
 
 	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
 	// and n are name or literal, but those may index the slice we're
 	// modifying here.  Fix explicitly.
-	for l = n.List; l != nil; l = l.Next {
+	for l := n.List; l != nil; l = l.Next {
 		l.N = cheapexpr(l.N, init)
 	}
 
-	nsrc = n.List.N
+	nsrc := n.List.N
 
 	// Resolve slice type of multi-valued return.
 	if Istype(nsrc.Type, TSTRUCT) {
 		nsrc.Type = nsrc.Type.Type.Type
 	}
-	argc = count(n.List) - 1
+	argc := count(n.List) - 1
 	if argc < 1 {
 		return nsrc
 	}
 
-	l = nil
+	l := (*NodeList)(nil)
 
-	ns = temp(nsrc.Type)
+	ns := temp(nsrc.Type)
 	l = list(l, Nod(OAS, ns, nsrc)) // s = src
 
-	na = Nodintconst(int64(argc)) // const argc
-	nx = Nod(OIF, nil, nil)       // if cap(s) - len(s) < argc
+	na := Nodintconst(int64(argc)) // const argc
+	nx := Nod(OIF, nil, nil)       // if cap(s) - len(s) < argc
 	nx.Ntest = Nod(OLT, Nod(OSUB, Nod(OCAP, ns, nil), Nod(OLEN, ns, nil)), na)
 
-	fn = syslook("growslice", 1) //   growslice(<type>, old []T, n int64) (ret []T)
-	argtype(fn, ns.Type.Type)    // 1 old []any
-	argtype(fn, ns.Type.Type)    // 2 ret []any
+	fn := syslook("growslice", 1) //   growslice(<type>, old []T, n int64) (ret []T)
+	argtype(fn, ns.Type.Type)     // 1 old []any
+	argtype(fn, ns.Type.Type)     // 2 ret []any
 
 	nx.Nbody = list1(Nod(OAS, ns, mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type), ns, conv(na, Types[TINT64]))))
 
 	l = list(l, nx)
 
-	nn = temp(Types[TINT])
+	nn := temp(Types[TINT])
 	l = list(l, Nod(OAS, nn, Nod(OLEN, ns, nil))) // n = len(s)
 
 	nx = Nod(OSLICE, ns, Nod(OKEY, nil, Nod(OADD, nn, na))) // ...s[:n+argc]
 	nx.Etype = 1
 	l = list(l, Nod(OAS, ns, nx)) // s = s[:n+argc]
 
-	for a = n.List.Next; a != nil; a = a.Next {
+	for a := n.List.Next; a != nil; a = a.Next {
 		nx = Nod(OINDEX, ns, nn) // s[n] ...
 		nx.Bounded = true
 		l = list(l, Nod(OAS, nx, a.N)) // s[n] = arg
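
With a fixed argument count, the expansion above grows at most once, reslices, and stores each argument by index with the bounds check elided. The same shape written out by hand for two arguments:

	func appendTwo(s []int, a, b int) []int {
		const argc = 2
		if cap(s)-len(s) < argc { // if cap(s) - len(s) < argc
			grown := make([]int, len(s), 2*cap(s)+argc) // growslice in the real expansion
			copy(grown, s)
			s = grown
		}
		n := len(s)    // n = len(s)
		s = s[:n+argc] // s = s[:n+argc]
		s[n] = a       // s[n] = arg, with nx.Bounded = true
		s[n+1] = b
		return s
	}
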
@@ -3253,22 +3108,13 @@ func walkappend(n *Node, init **NodeList) *Node {
 // Also works if b is a string.
 //
 func copyany(n *Node, init **NodeList, runtimecall int) *Node {
-	var nl *Node
-	var nr *Node
-	var nfrm *Node
-	var nto *Node
-	var nif *Node
-	var nlen *Node
-	var nwid *Node
-	var fn *Node
-	var l *NodeList
-
 	if haspointers(n.Left.Type.Type) {
-		fn = writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type)
+		fn := writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type)
 		return mkcall1(fn, n.Type, init, typename(n.Left.Type.Type), n.Left, n.Right)
 	}
 
 	if runtimecall != 0 {
+		var fn *Node
 		if n.Right.Type.Etype == TSTRING {
 			fn = syslook("slicestringcopy", 1)
 		} else {
@@ -3281,33 +3127,33 @@ func copyany(n *Node, init **NodeList, runtimecall int) *Node {
 
 	walkexpr(&n.Left, init)
 	walkexpr(&n.Right, init)
-	nl = temp(n.Left.Type)
-	nr = temp(n.Right.Type)
-	l = nil
+	nl := temp(n.Left.Type)
+	nr := temp(n.Right.Type)
+	l := (*NodeList)(nil)
 	l = list(l, Nod(OAS, nl, n.Left))
 	l = list(l, Nod(OAS, nr, n.Right))
 
-	nfrm = Nod(OSPTR, nr, nil)
-	nto = Nod(OSPTR, nl, nil)
+	nfrm := Nod(OSPTR, nr, nil)
+	nto := Nod(OSPTR, nl, nil)
 
-	nlen = temp(Types[TINT])
+	nlen := temp(Types[TINT])
 
 	// n = len(to)
 	l = list(l, Nod(OAS, nlen, Nod(OLEN, nl, nil)))
 
 	// if n > len(frm) { n = len(frm) }
-	nif = Nod(OIF, nil, nil)
+	nif := Nod(OIF, nil, nil)
 
 	nif.Ntest = Nod(OGT, nlen, Nod(OLEN, nr, nil))
 	nif.Nbody = list(nif.Nbody, Nod(OAS, nlen, Nod(OLEN, nr, nil)))
 	l = list(l, nif)
 
 	// Call memmove.
-	fn = syslook("memmove", 1)
+	fn := syslook("memmove", 1)
 
 	argtype(fn, nl.Type.Type)
 	argtype(fn, nl.Type.Type)
-	nwid = temp(Types[TUINTPTR])
+	nwid := temp(Types[TUINTPTR])
 	l = list(l, Nod(OAS, nwid, conv(nlen, Types[TUINTPTR])))
 	nwid = Nod(OMUL, nwid, Nodintconst(nl.Type.Type.Width))
 	l = list(l, mkcall1(fn, nil, init, nto, nfrm, nwid))
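
The memmove path above clamps the count to the shorter slice before the bulk move. In user-level terms, with an element loop standing in for the single memmove of n*sizeof(T) bytes:

	func copyInts(to, from []int) int {
		n := len(to) // n = len(to)
		if n > len(from) {
			n = len(from) // if n > len(frm) { n = len(frm) }
		}
		for i := 0; i < n; i++ {
			to[i] = from[i]
		}
		return n
	}
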
@@ -3321,30 +3167,15 @@ func copyany(n *Node, init **NodeList, runtimecall int) *Node {
 // Generate frontend part for OSLICE[3][ARR|STR]
 //
 func sliceany(n *Node, init **NodeList) *Node {
-	var bounded int
-	var slice3 bool
-	var src *Node
-	var lb *Node
 	var hb *Node
 	var cb *Node
-	var bound *Node
-	var chk *Node
-	var chk0 *Node
-	var chk1 *Node
-	var chk2 *Node
-	var lbv int64
-	var hbv int64
-	var cbv int64
-	var bv int64
-	var w int64
-	var bt *Type
 
 	//	print("before sliceany: %+N\n", n);
 
-	src = n.Left
+	src := n.Left
 
-	lb = n.Right.Left
-	slice3 = n.Op == OSLICE3 || n.Op == OSLICE3ARR
+	lb := n.Right.Left
+	slice3 := n.Op == OSLICE3 || n.Op == OSLICE3ARR
 	if slice3 {
 		hb = n.Right.Right.Left
 		cb = n.Right.Right.Right
@@ -3353,8 +3184,9 @@ func sliceany(n *Node, init **NodeList) *Node {
 		cb = nil
 	}
 
-	bounded = int(n.Etype)
+	bounded := int(n.Etype)
 
+	var bound *Node
 	if n.Op == OSLICESTR {
 		bound = Nod(OLEN, src, nil)
 	} else {
@@ -3365,7 +3197,7 @@ func sliceany(n *Node, init **NodeList) *Node {
 	walkexpr(&bound, init) // if src is an array, bound will be a const now.
 
 	// static checks if possible
-	bv = 1 << 50
+	bv := int64(1 << 50)
 
 	if Isconst(bound, CTINT) {
 		if !Smallintconst(bound) {
@@ -3376,21 +3208,21 @@ func sliceany(n *Node, init **NodeList) *Node {
 	}
 
 	if Isconst(cb, CTINT) {
-		cbv = Mpgetfix(cb.Val.U.Xval)
+		cbv := Mpgetfix(cb.Val.U.Xval)
 		if cbv < 0 || cbv > bv {
 			Yyerror("slice index out of bounds")
 		}
 	}
 
 	if Isconst(hb, CTINT) {
-		hbv = Mpgetfix(hb.Val.U.Xval)
+		hbv := Mpgetfix(hb.Val.U.Xval)
 		if hbv < 0 || hbv > bv {
 			Yyerror("slice index out of bounds")
 		}
 	}
 
 	if Isconst(lb, CTINT) {
-		lbv = Mpgetfix(lb.Val.U.Xval)
+		lbv := Mpgetfix(lb.Val.U.Xval)
 		if lbv < 0 || lbv > bv {
 			Yyerror("slice index out of bounds")
 			lbv = -1
@@ -3403,14 +3235,12 @@ func sliceany(n *Node, init **NodeList) *Node {
 
 	// Checking src[lb:hb:cb] or src[lb:hb].
 	// if chk0 || chk1 || chk2 { panicslice() }
-	chk = nil
-
-	chk0 = nil // cap(src) < cb
-	chk1 = nil // cb < hb for src[lb:hb:cb]; cap(src) < hb for src[lb:hb]
-	chk2 = nil // hb < lb
+	chk0 := (*Node)(nil) // cap(src) < cb
+	chk1 := (*Node)(nil) // cb < hb for src[lb:hb:cb]; cap(src) < hb for src[lb:hb]
+	chk2 := (*Node)(nil) // hb < lb
 
 	// All comparisons are unsigned to avoid testing < 0.
-	bt = Types[Simtype[TUINT]]
+	bt := Types[Simtype[TUINT]]
 
 	if cb != nil && cb.Type.Width > 4 {
 		bt = Types[TUINT64]
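
Doing every comparison in an unsigned type, as the comment above says, makes a negative index fail the same test as an out-of-range one, so no separate < 0 checks are needed. The whole chk0/chk1/chk2 chain for src[lb:hb:cb] reduces to:

	func sliceOK(lb, hb, cb, capSrc int) bool {
		return uint(cb) <= uint(capSrc) && // !chk0: cap(src) < cb
			uint(hb) <= uint(cb) && // !chk1: cb < hb
			uint(lb) <= uint(hb) // !chk2: hb < lb
	}
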
@@ -3463,7 +3293,7 @@ func sliceany(n *Node, init **NodeList) *Node {
 	}
 
 	if chk0 != nil || chk1 != nil || chk2 != nil {
-		chk = Nod(OIF, nil, nil)
+		chk := Nod(OIF, nil, nil)
 		chk.Nbody = list1(mkcall("panicslice", nil, init))
 		chk.Likely = -1
 		if chk0 != nil {
@@ -3521,6 +3351,7 @@ func sliceany(n *Node, init **NodeList) *Node {
 
 	// offs = [width *] lo, but omit if zero
 	if lb != nil {
+		var w int64
 		if n.Op == OSLICESTR {
 			w = 1
 		} else {
@@ -3541,33 +3372,28 @@ func sliceany(n *Node, init **NodeList) *Node {
 }
 
 func eqfor(t *Type, needsize *int) *Node {
-	var a int
-	var n *Node
-	var ntype *Node
-	var sym *Sym
-
 	// Should only arrive here with large memory or
 	// a struct/array containing a non-memory field/element.
 	// Small memory is handled inline, and single non-memory
 	// is handled during type check (OCMPSTR etc).
-	a = algtype1(t, nil)
+	a := algtype1(t, nil)
 
 	if a != AMEM && a != -1 {
 		Fatal("eqfor %v", Tconv(t, 0))
 	}
 
 	if a == AMEM {
-		n = syslook("memequal", 1)
+		n := syslook("memequal", 1)
 		argtype(n, t)
 		argtype(n, t)
 		*needsize = 1
 		return n
 	}
 
-	sym = typesymprefix(".eq", t)
-	n = newname(sym)
+	sym := typesymprefix(".eq", t)
+	n := newname(sym)
 	n.Class = PFUNC
-	ntype = Nod(OTFUNC, nil, nil)
+	ntype := Nod(OTFUNC, nil, nil)
 	ntype.List = list(ntype.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
 	ntype.List = list(ntype.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
 	ntype.Rlist = list(ntype.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TBOOL])))
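// Editor's sketch (not part of this CL): the two shapes eqfor yields.
// For AMEM it is the runtime's memequal specialized to T; otherwise it
// names the generated function with the OTFUNC signature built above,
// i.e. func eq·T(p, q *T) bool. Illustrative T and stand-in body:
type T struct {
	s string
	n int
}

func eqTSketch(p, q *T) bool { return p.s == q.s && p.n == q.n }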
@@ -3578,36 +3404,15 @@ func eqfor(t *Type, needsize *int) *Node {
 }
 
 func countfield(t *Type) int {
-	var t1 *Type
-	var n int
-
-	n = 0
-	for t1 = t.Type; t1 != nil; t1 = t1.Down {
+	n := 0
+	for t1 := t.Type; t1 != nil; t1 = t1.Down {
 		n++
 	}
 	return n
 }
 
 func walkcompare(np **Node, init **NodeList) {
-	var n *Node
-	var l *Node
-	var r *Node
-	var call *Node
-	var a *Node
-	var li *Node
-	var ri *Node
-	var expr *Node
-	var cmpl *Node
-	var cmpr *Node
-	var x *Node
-	var ok *Node
-	var andor int
-	var i int
-	var needsize int
-	var t *Type
-	var t1 *Type
-
-	n = *np
+	n := *np
 
 	// Given interface value l and concrete value r, rewrite
 	//   l == r
@@ -3616,9 +3421,9 @@ func walkcompare(np **Node, init **NodeList) {
 	// Handle != similarly.
 	// This avoids the allocation that would be required
 	// to convert r to l for comparison.
-	l = nil
+	l := (*Node)(nil)
 
-	r = nil
+	r := (*Node)(nil)
 	if Isinter(n.Left.Type) && !Isinter(n.Right.Type) {
 		l = n.Left
 		r = n.Right
@@ -3627,17 +3432,25 @@ func walkcompare(np **Node, init **NodeList) {
 		r = n.Left
 	}
 
+	var call *Node
+	var a *Node
+	var cmpl *Node
+	var cmpr *Node
+	var andor int
+	var expr *Node
+	var needsize int
+	var t *Type
 	if l != nil {
-		x = temp(r.Type)
-		ok = temp(Types[TBOOL])
+		x := temp(r.Type)
+		ok := temp(Types[TBOOL])
 
 		// l.(type(r))
-		a = Nod(ODOTTYPE, l, nil)
+		a := Nod(ODOTTYPE, l, nil)
 
 		a.Type = r.Type
 
 		// x, ok := l.(type(r))
-		expr = Nod(OAS2, nil, nil)
+		expr := Nod(OAS2, nil, nil)
 
 		expr.List = list1(x)
 		expr.List = list(expr.List, ok)
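// Editor's sketch (not part of this CL): the rewrite described in the
// comment above, at source level with illustrative types. Comparing an
// interface against a concrete value this way needs no allocation:
func eqIfaceInt(l interface{}, r int) bool {
	x, ok := l.(int) // x, ok := l.(type of r)
	return ok && x == r
}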
@@ -3705,7 +3518,9 @@ func walkcompare(np **Node, init **NodeList) {
 	if t.Etype == TARRAY && t.Bound <= 4 && issimple[t.Type.Etype] != 0 {
 		// Four or fewer elements of a basic type.
 		// Unroll comparisons.
-		for i = 0; int64(i) < t.Bound; i++ {
+		var li *Node
+		var ri *Node
+		for i := 0; int64(i) < t.Bound; i++ {
 			li = Nod(OINDEX, l, Nodintconst(int64(i)))
 			ri = Nod(OINDEX, r, Nodintconst(int64(i)))
 			a = Nod(int(n.Op), li, ri)
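// Editor's sketch (not part of this CL): what the unrolling above
// produces for == on a [4]int32 (ORs of != for the != case):
func eqArr4(l, r [4]int32) bool {
	return l[0] == r[0] && l[1] == r[1] && l[2] == r[2] && l[3] == r[3]
}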
@@ -3726,7 +3541,9 @@ func walkcompare(np **Node, init **NodeList) {
 	if t.Etype == TSTRUCT && countfield(t) <= 4 {
 		// Struct of four or fewer fields.
 		// Inline comparisons.
-		for t1 = t.Type; t1 != nil; t1 = t1.Down {
+		var li *Node
+		var ri *Node
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
 			if isblanksym(t1.Sym) {
 				continue
 			}
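// Editor's sketch (not part of this CL): the inlined form for a small
// struct; blank fields are skipped by the isblanksym test above.
type xy struct {
	x int
	_ [4]byte // blank field: not compared
	y int
}

func eqXY(l, r xy) bool { return l.x == r.x && l.y == r.y }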
@@ -3809,24 +3626,16 @@ func samecheap(a *Node, b *Node) bool {
 }
 
 func walkrotate(np **Node) {
-	var w int
-	var sl int
-	var sr int
-	var s int
-	var l *Node
-	var r *Node
-	var n *Node
-
 	if Thearch.Thechar == '9' {
 		return
 	}
 
-	n = *np
+	n := *np
 
 	// Want << | >> or >> | << or << ^ >> or >> ^ << on unsigned value.
-	l = n.Left
+	l := n.Left
 
-	r = n.Right
+	r := n.Right
 	if (n.Op != OOR && n.Op != OXOR) || (l.Op != OLSH && l.Op != ORSH) || (r.Op != OLSH && r.Op != ORSH) || n.Type == nil || Issigned[n.Type.Etype] != 0 || l.Op == r.Op {
 		return
 	}
@@ -3837,12 +3646,12 @@ func walkrotate(np **Node) {
 	}
 
 	// Constants adding to width?
-	w = int(l.Type.Width * 8)
+	w := int(l.Type.Width * 8)
 
 	if Smallintconst(l.Right) && Smallintconst(r.Right) {
-		sl = int(Mpgetfix(l.Right.Val.U.Xval))
+		sl := int(Mpgetfix(l.Right.Val.U.Xval))
 		if sl >= 0 {
-			sr = int(Mpgetfix(r.Right.Val.U.Xval))
+			sr := int(Mpgetfix(r.Right.Val.U.Xval))
 			if sr >= 0 && sl+sr == w {
 				goto yes
 			}
@@ -3863,7 +3672,7 @@ yes:
 	n.Op = OLROT
 
 	// Remove rotate 0 and rotate w.
-	s = int(Mpgetfix(n.Right.Val.U.Xval))
+	s := int(Mpgetfix(n.Right.Val.U.Xval))
 
 	if s == 0 || s == w {
 		n = n.Left
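// Editor's sketch (not part of this CL): the shape walkrotate matches.
// On an unsigned operand, shifts in opposite directions whose constant
// counts sum to the type width w combine into a single OLROT:
func rotl(x uint32, s uint) uint32 {
	return x<<s | x>>(32-s) // sl + sr == 32 -> rotate left by s
}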
@@ -3877,18 +3686,13 @@ yes:
  * walkmul rewrites integer multiplication by powers of two as shifts.
  */
 func walkmul(np **Node, init **NodeList) {
-	var n *Node
-	var nl *Node
-	var nr *Node
-	var pow int
-	var neg int
-	var w int
-
-	n = *np
+	n := *np
 	if Isint[n.Type.Etype] == 0 {
 		return
 	}
 
+	var nr *Node
+	var nl *Node
 	if n.Right.Op == OLITERAL {
 		nl = n.Left
 		nr = n.Right
@@ -3899,9 +3703,11 @@ func walkmul(np **Node, init **NodeList) {
 		return
 	}
 
-	neg = 0
+	neg := 0
 
 	// x*0 is 0 (and side effects of x).
+	var pow int
+	var w int
 	if Mpgetfix(nr.Val.U.Xval) == 0 {
 		cheapexpr(nl, init)
 		Nodconst(n, n.Type, 0)
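// Editor's sketch (not part of this CL): the rewrites walkmul aims at,
// with pow the exponent of the constant and neg set when it is negative:
func mulExamples(x int64) (a, b, c int64) {
	a = 0         // x * 0 (x's side effects kept separately)
	b = x         // x * 1
	c = -(x << 3) // x * -8: shift by pow, then negate
	return
}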
@@ -3952,42 +3758,29 @@ ret:
  * operations.
  */
 func walkdiv(np **Node, init **NodeList) {
-	var n *Node
-	var nl *Node
-	var nr *Node
 	// pow, computed below: if >= 0, nr is 1<<pow; s is 1 if nr is negative.
-	var nc *Node
-	var n1 *Node
-	var n2 *Node
-	var n3 *Node
-	var n4 *Node
-	var pow int
-	var s int
-	var w int
-	var twide *Type
-	var m Magic
 
 	// TODO(minux)
 	if Thearch.Thechar == '9' {
 		return
 	}
 
-	n = *np
+	n := *np
 	if n.Right.Op != OLITERAL {
 		return
 	}
 
 	// nr is a constant.
-	nl = cheapexpr(n.Left, init)
+	nl := cheapexpr(n.Left, init)
 
-	nr = n.Right
+	nr := n.Right
 
 	// special cases of mod/div
 	// by a constant
-	w = int(nl.Type.Width * 8)
+	w := int(nl.Type.Width * 8)
 
-	s = 0
-	pow = powtwo(nr)
+	s := 0
+	pow := powtwo(nr)
 	if pow >= 1000 {
 		// negative power of 2
 		s = 1
@@ -4000,6 +3793,9 @@ func walkdiv(np **Node, init **NodeList) {
 		return
 	}
 
+	var n1 *Node
+	var m Magic
+	var n2 *Node
 	if pow < 0 {
 		goto divbymul
 	}
@@ -4025,32 +3821,32 @@ func walkdiv(np **Node, init **NodeList) {
 				// signed modulo 2^pow is like ANDing
 				// with the last pow bits, but if nl < 0,
 				// nl & (2^pow-1) is (nl+1)%2^pow - 1.
-				nc = Nod(OXXX, nil, nil)
+				nc := Nod(OXXX, nil, nil)
 
 				Nodconst(nc, Types[Simtype[TUINT]], int64(w)-1)
-				n1 = Nod(ORSH, nl, nc) // n1 = -1 iff nl < 0.
+				n1 := Nod(ORSH, nl, nc) // n1 = -1 iff nl < 0.
 				if pow == 1 {
 					typecheck(&n1, Erv)
 					n1 = cheapexpr(n1, init)
 
 					// n = (nl+ε)&1 - ε where ε=1 iff nl<0.
-					n2 = Nod(OSUB, nl, n1)
+					n2 := Nod(OSUB, nl, n1)
 
-					nc = Nod(OXXX, nil, nil)
+					nc := Nod(OXXX, nil, nil)
 					Nodconst(nc, nl.Type, 1)
-					n3 = Nod(OAND, n2, nc)
+					n3 := Nod(OAND, n2, nc)
 					n = Nod(OADD, n3, n1)
 				} else {
 					// n = (nl+ε)&(nr-1) - ε where ε=2^pow-1 iff nl<0.
-					nc = Nod(OXXX, nil, nil)
+					nc := Nod(OXXX, nil, nil)
 
 					Nodconst(nc, nl.Type, (1<<uint(pow))-1)
-					n2 = Nod(OAND, n1, nc) // n2 = 2^pow-1 iff nl<0.
+					n2 := Nod(OAND, n1, nc) // n2 = 2^pow-1 iff nl<0.
 					typecheck(&n2, Erv)
 					n2 = cheapexpr(n2, init)
 
-					n3 = Nod(OADD, nl, n2)
-					n4 = Nod(OAND, n3, nc)
+					n3 := Nod(OADD, nl, n2)
+					n4 := Nod(OAND, n3, nc)
 					n = Nod(OSUB, n4, n2)
 				}
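// Editor's sketch (not part of this CL): the ε trick above for
// pow > 1, concretely nl % 8 on int32 without a divide. It matches
// Go's truncated modulo for negative nl:
func mod8(nl int32) int32 {
	n1 := nl >> 31        // -1 iff nl < 0
	n2 := n1 & 7          // ε = 2^pow-1 iff nl < 0
	return (nl+n2)&7 - n2 // (nl+ε)&(nr-1) - ε == nl % 8
}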
 
@@ -4059,19 +3855,19 @@ func walkdiv(np **Node, init **NodeList) {
 				// arithmetic right shift does not give the correct rounding.
 				// if nl >= 0, nl >> n == nl / nr
 				// if nl < 0, we want to add 2^n-1 first.
-				nc = Nod(OXXX, nil, nil)
+				nc := Nod(OXXX, nil, nil)
 
 				Nodconst(nc, Types[Simtype[TUINT]], int64(w)-1)
-				n1 = Nod(ORSH, nl, nc) // n1 = -1 iff nl < 0.
+				n1 := Nod(ORSH, nl, nc) // n1 = -1 iff nl < 0.
 				if pow == 1 {
 					// nl+1 is nl-(-1)
 					n.Left = Nod(OSUB, nl, n1)
 				} else {
 				// Do a logical right shift on -1 to keep pow bits.
-					nc = Nod(OXXX, nil, nil)
+					nc := Nod(OXXX, nil, nil)
 
 					Nodconst(nc, Types[Simtype[TUINT]], int64(w)-int64(pow))
-					n2 = Nod(ORSH, conv(n1, tounsigned(nl.Type)), nc)
+					n2 := Nod(ORSH, conv(n1, tounsigned(nl.Type)), nc)
 					n.Left = Nod(OADD, nl, conv(n2, nl.Type))
 				}
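// Editor's sketch (not part of this CL): the rounding fix above for
// pow > 1, concretely nl / 8 on int32. The logical shift of -1 keeps
// pow bits, so the 2^pow-1 bias is added only for negative nl, making
// the arithmetic shift round toward zero the way Go division does:
func div8(nl int32) int32 {
	n1 := nl >> 31                      // -1 iff nl < 0
	n2 := int32(uint32(n1) >> (32 - 3)) // 7 iff nl < 0
	return (nl + n2) >> 3               // == nl / 8
}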
 
@@ -4090,7 +3886,7 @@ func walkdiv(np **Node, init **NodeList) {
 			break
 		}
 
-		nc = Nod(OXXX, nil, nil)
+		nc := Nod(OXXX, nil, nil)
 		if n.Op == OMOD {
 			// n = nl & (nr-1)
 			n.Op = OAND
@@ -4141,14 +3937,15 @@ divbymul:
 	case TUINT8,
 		TUINT16,
 		TUINT32:
-		nc = Nod(OXXX, nil, nil)
+		nc := Nod(OXXX, nil, nil)
 
 		Nodconst(nc, nl.Type, int64(m.Um))
-		n1 = Nod(OMUL, nl, nc)
+		n1 := Nod(OMUL, nl, nc)
 		typecheck(&n1, Erv)
 		n1.Op = OHMUL
 		if m.Ua != 0 {
 			// Select a Go type with (at least) twice the width.
+			var twide *Type
 			switch Simtype[nl.Type.Etype] {
 			default:
 				return
@@ -4170,16 +3967,16 @@ divbymul:
 
 			// add numerator (might overflow).
 			// n2 = (n1 + nl)
-			n2 = Nod(OADD, conv(n1, twide), conv(nl, twide))
+			n2 := Nod(OADD, conv(n1, twide), conv(nl, twide))
 
 			// shift by m.s
-			nc = Nod(OXXX, nil, nil)
+			nc := Nod(OXXX, nil, nil)
 
 			Nodconst(nc, Types[TUINT], int64(m.S))
 			n = conv(Nod(ORSH, n2, nc), nl.Type)
 		} else {
 			// n = n1 >> m.s
-			nc = Nod(OXXX, nil, nil)
+			nc := Nod(OXXX, nil, nil)
 
 			Nodconst(nc, Types[TUINT], int64(m.S))
 			n = Nod(ORSH, n1, nc)
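// Editor's sketch (not part of this CL): the m.Ua == 0 path above with
// the classic uint32 constants for division by 10 (Um = 0xCCCCCCCD,
// S = 3). OHMUL takes the high 32 bits of the full 64-bit product:
func div10(x uint32) uint32 {
	hi := uint32(uint64(x) * 0xCCCCCCCD >> 32) // n1 = OHMUL(x, Um)
	return hi >> 3                             // n = n1 >> m.S
}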
@@ -4189,10 +3986,10 @@ divbymul:
 	case TINT8,
 		TINT16,
 		TINT32:
-		nc = Nod(OXXX, nil, nil)
+		nc := Nod(OXXX, nil, nil)
 
 		Nodconst(nc, nl.Type, m.Sm)
-		n1 = Nod(OMUL, nl, nc)
+		n1 := Nod(OMUL, nl, nc)
 		typecheck(&n1, Erv)
 		n1.Op = OHMUL
 		if m.Sm < 0 {
@@ -4204,13 +4001,13 @@ divbymul:
 		nc = Nod(OXXX, nil, nil)
 
 		Nodconst(nc, Types[TUINT], int64(m.S))
-		n2 = conv(Nod(ORSH, n1, nc), nl.Type)
+		n2 := conv(Nod(ORSH, n1, nc), nl.Type)
 
 		// add 1 iff n1 is negative.
 		nc = Nod(OXXX, nil, nil)
 
 		Nodconst(nc, Types[TUINT], int64(w)-1)
-		n3 = Nod(ORSH, nl, nc) // n4 = -1 iff n1 is negative.
+		n3 := Nod(ORSH, nl, nc) // n3 = -1 iff nl is negative.
 		n = Nod(OSUB, n2, n3)
 
 		// apply sign.
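// Editor's sketch (not part of this CL): the signed path above with
// the classic int32 constants for division by 5 (Sm = 0x66666667,
// S = 1, Sm > 0 so no numerator add). The final subtraction adds 1
// exactly when nl is negative, rounding toward zero:
func div5(nl int32) int32 {
	n1 := int32(int64(nl) * 0x66666667 >> 32) // OHMUL(nl, Sm)
	n2 := n1 >> 1                             // n2 = n1 >> m.S
	n3 := nl >> 31                            // -1 iff nl < 0
	return n2 - n3                            // n = n2 - n3
}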
@@ -4237,25 +4034,21 @@ ret:
 
 // bounded reports whether integer n must be in range [0, max).
 func bounded(n *Node, max int64) bool {
-	var v int64
-	var bits int32
-	var sign int
-
 	if n.Type == nil || Isint[n.Type.Etype] == 0 {
 		return false
 	}
 
-	sign = int(Issigned[n.Type.Etype])
-	bits = int32(8 * n.Type.Width)
+	sign := int(Issigned[n.Type.Etype])
+	bits := int32(8 * n.Type.Width)
 
 	if Smallintconst(n) {
-		v = Mpgetfix(n.Val.U.Xval)
+		v := Mpgetfix(n.Val.U.Xval)
 		return 0 <= v && v < max
 	}
 
 	switch n.Op {
 	case OAND:
-		v = -1
+		v := int64(-1)
 		if Smallintconst(n.Left) {
 			v = Mpgetfix(n.Left.Val.U.Xval)
 		} else if Smallintconst(n.Right) {
@@ -4268,7 +4061,7 @@ func bounded(n *Node, max int64) bool {
 
 	case OMOD:
 		if sign == 0 && Smallintconst(n.Right) {
-			v = Mpgetfix(n.Right.Val.U.Xval)
+			v := Mpgetfix(n.Right.Val.U.Xval)
 			if 0 <= v && v <= max {
 				return true
 			}
@@ -4276,7 +4069,7 @@ func bounded(n *Node, max int64) bool {
 
 	case ODIV:
 		if sign == 0 && Smallintconst(n.Right) {
-			v = Mpgetfix(n.Right.Val.U.Xval)
+			v := Mpgetfix(n.Right.Val.U.Xval)
 			for bits > 0 && v >= 2 {
 				bits--
 				v >>= 1
@@ -4285,7 +4078,7 @@ func bounded(n *Node, max int64) bool {
 
 	case ORSH:
 		if sign == 0 && Smallintconst(n.Right) {
-			v = Mpgetfix(n.Right.Val.U.Xval)
+			v := Mpgetfix(n.Right.Val.U.Xval)
 			if v > int64(bits) {
 				return true
 			}
@@ -4301,9 +4094,6 @@ func bounded(n *Node, max int64) bool {
 }
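// Editor's sketch (not part of this CL): expression shapes bounded can
// prove to be in [0, max) without knowing the operand (unsigned
// operands for the %, /, >> cases):
func boundedShapes(x uint32) (a, b, c uint32) {
	a = x & 7   // OAND with constant: in [0, 8)
	b = x % 10  // OMOD by constant:  in [0, 10)
	c = x >> 29 // ORSH by constant:  in [0, 8)
	return
}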
 
 func usefield(n *Node) {
-	var field *Type
-	var l *Type
-
 	if obj.Fieldtrack_enabled == 0 {
 		return
 	}
@@ -4317,7 +4107,7 @@ func usefield(n *Node) {
 		break
 	}
 
-	field = n.Paramfld
+	field := n.Paramfld
 	if field == nil {
 		Fatal("usefield %v %v without paramfld", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, 0))
 	}
@@ -4341,7 +4131,7 @@ func usefield(n *Node) {
 		Yyerror("tracked field must be exported (upper case)")
 	}
 
-	l = typ(0)
+	l := typ(0)
 	l.Type = field
 	l.Down = Curfn.Paramfld
 	Curfn.Paramfld = l
@@ -4462,17 +4252,7 @@ func candiscard(n *Node) bool {
 var walkprintfunc_prgen int
 
 func walkprintfunc(np **Node, init **NodeList) {
-	var n *Node
-	var a *Node
-	var fn *Node
-	var t *Node
-	var oldfn *Node
-	var l *NodeList
-	var printargs *NodeList
-	var num int
-	var buf string
-
-	n = *np
+	n := *np
 
 	if n.Ninit != nil {
 		walkstmtlist(n.Ninit)
@@ -4480,10 +4260,12 @@ func walkprintfunc(np **Node, init **NodeList) {
 		n.Ninit = nil
 	}
 
-	t = Nod(OTFUNC, nil, nil)
-	num = 0
-	printargs = nil
-	for l = n.List; l != nil; l = l.Next {
+	t := Nod(OTFUNC, nil, nil)
+	num := 0
+	printargs := (*NodeList)(nil)
+	var a *Node
+	var buf string
+	for l := n.List; l != nil; l = l.Next {
 		buf = fmt.Sprintf("a%d", num)
 		num++
 		a = Nod(ODCLFIELD, newname(Lookup(buf)), typenod(l.N.Type))
@@ -4491,7 +4273,7 @@ func walkprintfunc(np **Node, init **NodeList) {
 		printargs = list(printargs, a.Left)
 	}
 
-	fn = Nod(ODCLFUNC, nil, nil)
+	fn := Nod(ODCLFUNC, nil, nil)
 	walkprintfunc_prgen++
 	buf = fmt.Sprintf("print·%d", walkprintfunc_prgen)
 	fn.Nname = newname(Lookup(buf))
@@ -4499,7 +4281,7 @@ func walkprintfunc(np **Node, init **NodeList) {
 	fn.Nname.Ntype = t
 	declare(fn.Nname, PFUNC)
 
-	oldfn = Curfn
+	oldfn := Curfn
 	Curfn = nil
 	funchdr(fn)
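// Editor's sketch (not part of this CL): what walkprintfunc builds
// for a deferred or spawned print, e.g. "go print(x, y)". Assuming
// that context, it wraps the print in a fresh print·N function over
// the argument types, so the arguments are evaluated at the call site
// and the print runs in the new goroutine. Roughly:
func print1Sketch(a0 int, a1 string) { print(a0, a1) }

func spawnSketch(x int, y string) { go print1Sketch(x, y) }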
 
diff --git a/src/cmd/internal/obj/data.go b/src/cmd/internal/obj/data.go
index 66995a3cd7b4392e87b40781faa19bbcd45206d2..39e1ce527c65ac316739dcb93bffd221b5482eff 100644
--- a/src/cmd/internal/obj/data.go
+++ b/src/cmd/internal/obj/data.go
@@ -41,8 +41,7 @@ func mangle(file string) {
 }
 
 func Symgrow(ctxt *Link, s *LSym, lsiz int64) {
-	var siz int
-	siz = int(lsiz)
+	siz := int(lsiz)
 	if int64(siz) != lsiz {
 		log.Fatal("Symgrow size %d too long", lsiz)
 	}
@@ -143,9 +142,7 @@ func Setuintxx(ctxt *Link, s *LSym, off int64, v uint64, wid int64) int64 {
 }
 
 func adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
-	var off int64
-
-	off = s.Size
+	off := s.Size
 	Setuintxx(ctxt, s, off, v, int64(wid))
 	return off
 }
@@ -183,17 +180,14 @@ func setuint64(ctxt *Link, s *LSym, r int64, v uint64) int64 {
 }
 
 func addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
-	var i int64
-	var r *Reloc
-
 	if s.Type == 0 {
 		s.Type = SDATA
 	}
 	s.Reachable = 1
-	i = s.Size
+	i := s.Size
 	s.Size += int64(ctxt.Arch.Ptrsize)
 	Symgrow(ctxt, s, s.Size)
-	r = Addrel(s)
+	r := Addrel(s)
 	r.Sym = t
 	r.Off = int32(i)
 	r.Siz = uint8(ctxt.Arch.Ptrsize)
@@ -203,17 +197,14 @@ func addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
 }
 
 func addpcrelplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
-	var i int64
-	var r *Reloc
-
 	if s.Type == 0 {
 		s.Type = SDATA
 	}
 	s.Reachable = 1
-	i = s.Size
+	i := s.Size
 	s.Size += 4
 	Symgrow(ctxt, s, s.Size)
-	r = Addrel(s)
+	r := Addrel(s)
 	r.Sym = t
 	r.Off = int32(i)
 	r.Add = add
@@ -227,8 +218,6 @@ func addaddr(ctxt *Link, s *LSym, t *LSym) int64 {
 }
 
 func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
-	var r *Reloc
-
 	if s.Type == 0 {
 		s.Type = SDATA
 	}
@@ -238,7 +227,7 @@ func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
 		Symgrow(ctxt, s, s.Size)
 	}
 
-	r = Addrel(s)
+	r := Addrel(s)
 	r.Sym = t
 	r.Off = int32(off)
 	r.Siz = uint8(ctxt.Arch.Ptrsize)
@@ -252,17 +241,14 @@ func setaddr(ctxt *Link, s *LSym, off int64, t *LSym) int64 {
 }
 
 func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
-	var i int64
-	var r *Reloc
-
 	if s.Type == 0 {
 		s.Type = SDATA
 	}
 	s.Reachable = 1
-	i = s.Size
+	i := s.Size
 	s.Size += int64(ctxt.Arch.Ptrsize)
 	Symgrow(ctxt, s, s.Size)
-	r = Addrel(s)
+	r := Addrel(s)
 	r.Sym = t
 	r.Off = int32(i)
 	r.Siz = uint8(ctxt.Arch.Ptrsize)
@@ -271,17 +257,14 @@ func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
 }
 
 func addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
-	var i int64
-	var r *Reloc
-
 	if s.Type == 0 {
 		s.Type = SDATA
 	}
 	s.Reachable = 1
-	i = s.Size
+	i := s.Size
 	s.Size += 4
 	Symgrow(ctxt, s, s.Size)
-	r = Addrel(s)
+	r := Addrel(s)
 	r.Sym = t
 	r.Off = int32(i)
 	r.Siz = 4
diff --git a/src/cmd/internal/obj/go.go b/src/cmd/internal/obj/go.go
index 1bd10fcab863418bf733d00d7f514d8ad9bedd99..496a5b83fff8f889555f0ca82ea9e9a027e047e2 100644
--- a/src/cmd/internal/obj/go.go
+++ b/src/cmd/internal/obj/go.go
@@ -36,9 +36,7 @@ var exper = []struct {
 }
 
 func addexp(s string) {
-	var i int
-
-	for i = 0; i < len(exper); i++ {
+	for i := 0; i < len(exper); i++ {
 		if exper[i].name == s {
 			if exper[i].val != nil {
 				*exper[i].val = 1
diff --git a/src/cmd/internal/obj/ld.go b/src/cmd/internal/obj/ld.go
index 99cf64a3edf3d6fe6eb6f7257bc3df2a2c18a9c8..4d2e4293aadd2a1323098cc5acca1b0052fa0dff 100644
--- a/src/cmd/internal/obj/ld.go
+++ b/src/cmd/internal/obj/ld.go
@@ -44,13 +44,11 @@ const (
 )
 
 func mkfwd(sym *LSym) {
-	var p *Prog
-	var i int
 	var dwn [LOG]int32
 	var cnt [LOG]int32
 	var lst [LOG]*Prog
 
-	for i = 0; i < LOG; i++ {
+	for i := 0; i < LOG; i++ {
 		if i == 0 {
 			cnt[i] = 1
 		} else {
@@ -60,8 +58,8 @@ func mkfwd(sym *LSym) {
 		lst[i] = nil
 	}
 
-	i = 0
-	for p = sym.Text; p != nil && p.Link != nil; p = p.Link {
+	i := 0
+	for p := sym.Text; p != nil && p.Link != nil; p = p.Link {
 		i--
 		if i < 0 {
 			i = LOG - 1
@@ -79,17 +77,13 @@ func mkfwd(sym *LSym) {
 }
 
 func Copyp(ctxt *Link, q *Prog) *Prog {
-	var p *Prog
-
-	p = ctxt.NewProg()
+	p := ctxt.NewProg()
 	*p = *q
 	return p
 }
 
 func Appendp(ctxt *Link, q *Prog) *Prog {
-	var p *Prog
-
-	p = ctxt.NewProg()
+	p := ctxt.NewProg()
 	p.Link = q.Link
 	q.Link = p
 	p.Lineno = q.Lineno
diff --git a/src/cmd/internal/obj/obj.go b/src/cmd/internal/obj/obj.go
index 214d9cae8b6ced5383954b2f167ff67922c42b73..05902e2e33f9494f4eeb096131b9c1d6ad8b0058 100644
--- a/src/cmd/internal/obj/obj.go
+++ b/src/cmd/internal/obj/obj.go
@@ -25,12 +25,8 @@ func Linklinefmt(ctxt *Link, lno0 int, showAll, showFullPath bool) string {
 	lno := int32(lno0)
 	lno1 := lno
 	var d int32
-	var i int
-	var n int
-	var h *Hist
-	n = 0
-	var fp string
-	for h = ctxt.Hist; h != nil; h = h.Link {
+	n := 0
+	for h := ctxt.Hist; h != nil; h = h.Link {
 		if h.Offset < 0 {
 			continue
 		}
@@ -65,7 +61,8 @@ func Linklinefmt(ctxt *Link, lno0 int, showAll, showFullPath bool) string {
 	if n > int(HISTSZ) {
 		n = int(HISTSZ)
 	}
-	for i = n - 1; i >= 0; i-- {
+	var fp string
+	for i := n - 1; i >= 0; i-- {
 		if i != n-1 {
 			if !showAll {
 				break
@@ -93,12 +90,12 @@ func Linklinefmt(ctxt *Link, lno0 int, showAll, showFullPath bool) string {
 // For portability, we allow ASCII case folding, so that haspathprefix("a/b/c", "A/B") is true.
 // Similarly, we allow slash folding, so that haspathprefix("a/b/c", "a\\b") is true.
 func haspathprefix(s string, t string) bool {
-	var i int
-	var cs int
-	var ct int
 	if len(t) > len(s) {
 		return false
 	}
+	var i int
+	var cs int
+	var ct int
 	for i = 0; i < len(t); i++ {
 		cs = int(s[i])
 		ct = int(t[i])
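// Editor's sketch (not part of this CL): the per-byte folding the
// loop above applies before comparing, so "A\\B" prefixes "a/b/c":
func foldPathByte(c byte) byte {
	if 'A' <= c && c <= 'Z' {
		c += 'a' - 'A' // ASCII case folding
	}
	if c == '\\' {
		c = '/' // slash folding
	}
	return c
}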
@@ -131,17 +128,10 @@ func linkgetline(ctxt *Link, line int32, f **LSym, l *int32) {
 		line *Hist
 		ldel int32
 	}
-	var lno int32
 	var d int32
-	var dlno int32
-	var n int
-	var h *Hist
-	var buf string
-	var buf1 string
-	var file string
-	lno = int32(line)
-	n = 0
-	for h = ctxt.Hist; h != nil; h = h.Link {
+	lno := int32(line)
+	n := 0
+	for h := ctxt.Hist; h != nil; h = h.Link {
 		if h.Offset < 0 {
 			continue
 		}
@@ -182,6 +172,8 @@ func linkgetline(ctxt *Link, line int32, f **LSym, l *int32) {
 		return
 	}
 	n--
+	var dlno int32
+	var file string
 	if a[n].line != nil {
 		file = a[n].line.Name
 		dlno = a[n].ldel - 1
@@ -189,6 +181,7 @@ func linkgetline(ctxt *Link, line int32, f **LSym, l *int32) {
 		file = a[n].incl.Name
 		dlno = a[n].idel - 1
 	}
+	var buf string
 	if filepath.IsAbs(file) || strings.HasPrefix(file, "<") {
 		buf = fmt.Sprintf("%s", file)
 	} else {
@@ -199,14 +192,14 @@ func linkgetline(ctxt *Link, line int32, f **LSym, l *int32) {
 		if len(buf) == len(ctxt.Trimpath) {
 			buf = "??"
 		} else {
-			buf1 = fmt.Sprintf("%s", buf[len(ctxt.Trimpath)+1:])
+			buf1 := fmt.Sprintf("%s", buf[len(ctxt.Trimpath)+1:])
 			if buf1[0] == '\x00' {
 				buf1 = "??"
 			}
 			buf = buf1
 		}
 	} else if ctxt.Goroot_final != "" && haspathprefix(buf, ctxt.Goroot) {
-		buf1 = fmt.Sprintf("%s%s", ctxt.Goroot_final, buf[len(ctxt.Goroot):])
+		buf1 := fmt.Sprintf("%s%s", ctxt.Goroot_final, buf[len(ctxt.Goroot):])
 		buf = buf1
 	}
 	lno -= dlno
@@ -215,8 +208,6 @@ func linkgetline(ctxt *Link, line int32, f **LSym, l *int32) {
 }
 
 func Linklinehist(ctxt *Link, lineno int, f string, offset int) {
-	var h *Hist
-
 	if false { // debug['f']
 		if f != "" {
 			if offset != 0 {
@@ -229,7 +220,7 @@ func Linklinehist(ctxt *Link, lineno int, f string, offset int) {
 		}
 	}
 
-	h = new(Hist)
+	h := new(Hist)
 	*h = Hist{}
 	h.Name = f
 	h.Line = int32(lineno)
@@ -248,12 +239,10 @@ func Linklinehist(ctxt *Link, lineno int, f string, offset int) {
 func Linkprfile(ctxt *Link, line int) {
 	l := int32(line)
 	var i int
-	var n int
 	var a [HISTSZ]Hist
-	var h *Hist
 	var d int32
-	n = 0
-	for h = ctxt.Hist; h != nil; h = h.Link {
+	n := 0
+	for h := ctxt.Hist; h != nil; h = h.Link {
 		if l < h.Line {
 			break
 		}
@@ -286,7 +275,7 @@ func Linkprfile(ctxt *Link, line int) {
 	if n > HISTSZ {
 		n = HISTSZ
 	}
-	for i = 0; i < n; i++ {
+	for i := 0; i < n; i++ {
 		fmt.Printf("%s:%d ", a[i].Name, int(l-a[i].Line+a[i].Offset+1))
 	}
 }
@@ -295,9 +284,7 @@ func Linkprfile(ctxt *Link, line int) {
  * start a new Prog list.
  */
 func Linknewplist(ctxt *Link) *Plist {
-	var pl *Plist
-
-	pl = new(Plist)
+	pl := new(Plist)
 	*pl = Plist{}
 	if ctxt.Plist == nil {
 		ctxt.Plist = pl
diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go
index b15bd007afa7d61c0dda8adf042867c8e212def8..6e76a39c5374a84fba0c970507324d4b175644c3 100644
--- a/src/cmd/internal/obj/objfile.go
+++ b/src/cmd/internal/obj/objfile.go
@@ -18,15 +18,7 @@ var outfile string
 // does not write out object files.
 func Writeobjdirect(ctxt *Link, b *Biobuf) {
 	var flag int
-	var found int
-	var h *Hist
 	var s *LSym
-	var text *LSym
-	var etext *LSym
-	var curtext *LSym
-	var data *LSym
-	var edata *LSym
-	var pl *Plist
 	var p *Prog
 	var plink *Prog
 	var a *Auto
@@ -34,13 +26,13 @@ func Writeobjdirect(ctxt *Link, b *Biobuf) {
 	// Build list of symbols, and assign instructions to lists.
 	// Ignore ctxt->plist boundaries. There are no guarantees there,
 	// and the C compilers and assemblers just use one big list.
-	text = nil
+	text := (*LSym)(nil)
 
-	curtext = nil
-	data = nil
-	etext = nil
-	edata = nil
-	for pl = ctxt.Plist; pl != nil; pl = pl.Link {
+	curtext := (*LSym)(nil)
+	data := (*LSym)(nil)
+	etext := (*LSym)(nil)
+	edata := (*LSym)(nil)
+	for pl := ctxt.Plist; pl != nil; pl = pl.Link {
 		for p = pl.Firstpc; p != nil; p = plink {
 			if ctxt.Debugasm != 0 && ctxt.Debugvlog != 0 {
 				fmt.Printf("obj: %v\n", p)
@@ -176,7 +168,8 @@ func Writeobjdirect(ctxt *Link, b *Biobuf) {
 	}
 
 	// Add reference to Go arguments for C or assembly functions without them.
-	for s = text; s != nil; s = s.Next {
+	var found int
+	for s := text; s != nil; s = s.Next {
 		if !strings.HasPrefix(s.Name, "\"\".") {
 			continue
 		}
@@ -200,7 +193,7 @@ func Writeobjdirect(ctxt *Link, b *Biobuf) {
 	}
 
 	// Turn functions into machine code images.
-	for s = text; s != nil; s = s.Next {
+	for s := text; s != nil; s = s.Next {
 		mkfwd(s)
 		linkpatch(ctxt, s)
 		ctxt.Arch.Follow(ctxt, s)
@@ -217,7 +210,7 @@ func Writeobjdirect(ctxt *Link, b *Biobuf) {
 	Bputc(b, 1) // version
 
 	// Emit autolib.
-	for h = ctxt.Hist; h != nil; h = h.Link {
+	for h := ctxt.Hist; h != nil; h = h.Link {
 		if h.Offset < 0 {
 			wrstring(b, h.Name)
 		}
@@ -225,10 +218,10 @@ func Writeobjdirect(ctxt *Link, b *Biobuf) {
 	wrstring(b, "")
 
 	// Emit symbols.
-	for s = text; s != nil; s = s.Next {
+	for s := text; s != nil; s = s.Next {
 		writesym(ctxt, b, s)
 	}
-	for s = data; s != nil; s = s.Next {
+	for s := data; s != nil; s = s.Next {
 		writesym(ctxt, b, s)
 	}
 
@@ -240,16 +233,6 @@ func Writeobjdirect(ctxt *Link, b *Biobuf) {
 }
 
 func writesym(ctxt *Link, b *Biobuf, s *LSym) {
-	var r *Reloc
-	var i int
-	var j int
-	var c int
-	var n int
-	var pc *Pcln
-	var p *Prog
-	var a *Auto
-	var name string
-
 	if ctxt.Debugasm != 0 {
 		fmt.Fprintf(ctxt.Bso, "%s ", s.Name)
 		if s.Version != 0 {
@@ -276,10 +259,12 @@ func writesym(ctxt *Link, b *Biobuf, s *LSym) {
 		}
 
 		fmt.Fprintf(ctxt.Bso, "\n")
-		for p = s.Text; p != nil; p = p.Link {
+		for p := s.Text; p != nil; p = p.Link {
 			fmt.Fprintf(ctxt.Bso, "\t%#04x %v\n", uint(int(p.Pc)), p)
 		}
-		for i = 0; i < len(s.P); {
+		var c int
+		var j int
+		for i := 0; i < len(s.P); {
 			fmt.Fprintf(ctxt.Bso, "\t%#04x", uint(i))
 			for j = i; j < i+16 && j < len(s.P); j++ {
 				fmt.Fprintf(ctxt.Bso, " %02x", s.P[j])
@@ -301,7 +286,9 @@ func writesym(ctxt *Link, b *Biobuf, s *LSym) {
 			i += 16
 		}
 
-		for i = 0; i < len(s.R); i++ {
+		var r *Reloc
+		var name string
+		for i := 0; i < len(s.R); i++ {
 			r = &s.R[i]
 			name = ""
 			if r.Sym != nil {
@@ -325,7 +312,8 @@ func writesym(ctxt *Link, b *Biobuf, s *LSym) {
 	wrdata(b, s.P)
 
 	wrint(b, int64(len(s.R)))
-	for i = 0; i < len(s.R); i++ {
+	var r *Reloc
+	for i := 0; i < len(s.R); i++ {
 		r = &s.R[i]
 		wrint(b, int64(r.Off))
 		wrint(b, int64(r.Siz))
@@ -341,12 +329,12 @@ func writesym(ctxt *Link, b *Biobuf, s *LSym) {
 		wrint(b, int64(s.Locals))
 		wrint(b, int64(s.Nosplit))
 		wrint(b, int64(s.Leaf)|int64(s.Cfunc)<<1)
-		n = 0
-		for a = s.Autom; a != nil; a = a.Link {
+		n := 0
+		for a := s.Autom; a != nil; a = a.Link {
 			n++
 		}
 		wrint(b, int64(n))
-		for a = s.Autom; a != nil; a = a.Link {
+		for a := s.Autom; a != nil; a = a.Link {
 			wrsym(b, a.Asym)
 			wrint(b, int64(a.Aoffset))
 			if a.Name == NAME_AUTO {
@@ -359,35 +347,33 @@ func writesym(ctxt *Link, b *Biobuf, s *LSym) {
 			wrsym(b, a.Gotype)
 		}
 
-		pc = s.Pcln
+		pc := s.Pcln
 		wrdata(b, pc.Pcsp.P)
 		wrdata(b, pc.Pcfile.P)
 		wrdata(b, pc.Pcline.P)
 		wrint(b, int64(len(pc.Pcdata)))
-		for i = 0; i < len(pc.Pcdata); i++ {
+		for i := 0; i < len(pc.Pcdata); i++ {
 			wrdata(b, pc.Pcdata[i].P)
 		}
 		wrint(b, int64(len(pc.Funcdataoff)))
-		for i = 0; i < len(pc.Funcdataoff); i++ {
+		for i := 0; i < len(pc.Funcdataoff); i++ {
 			wrsym(b, pc.Funcdata[i])
 		}
-		for i = 0; i < len(pc.Funcdataoff); i++ {
+		for i := 0; i < len(pc.Funcdataoff); i++ {
 			wrint(b, pc.Funcdataoff[i])
 		}
 		wrint(b, int64(len(pc.File)))
-		for i = 0; i < len(pc.File); i++ {
+		for i := 0; i < len(pc.File); i++ {
 			wrpathsym(ctxt, b, pc.File[i])
 		}
 	}
 }
 
 func wrint(b *Biobuf, sval int64) {
-	var uv uint64
 	var v uint64
 	var buf [10]uint8
-	var p []uint8
-	uv = (uint64(sval) << 1) ^ uint64(int64(sval>>63))
-	p = buf[:]
+	uv := (uint64(sval) << 1) ^ uint64(int64(sval>>63))
+	p := buf[:]
 	for v = uv; v >= 0x80; v >>= 7 {
 		p[0] = uint8(v | 0x80)
 		p = p[1:]
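// Editor's sketch (not part of this CL): wrint's encoding end to end.
// Zig-zag maps small negatives to small unsigned values, then base-128
// groups follow with the continuation bit set on all but the last:
func appendVarint(buf []byte, sval int64) []byte {
	uv := uint64(sval)<<1 ^ uint64(sval>>63) // zig-zag
	for ; uv >= 0x80; uv >>= 7 {
		buf = append(buf, byte(uv)|0x80)
	}
	return append(buf, byte(uv))
}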
diff --git a/src/cmd/internal/obj/pass.go b/src/cmd/internal/obj/pass.go
index 812e00b55780577f53ecf7f29b4a3f3dee96c03e..b03cd431a98587b448f4de9a3731f40271f025b4 100644
--- a/src/cmd/internal/obj/pass.go
+++ b/src/cmd/internal/obj/pass.go
@@ -33,9 +33,7 @@ package obj
 // Code and data passes.
 
 func Brchain(ctxt *Link, p *Prog) *Prog {
-	var i int
-
-	for i = 0; i < 20; i++ {
+	for i := 0; i < 20; i++ {
 		if p == nil || p.As != AJMP || p.Pcond == nil {
 			return p
 		}
@@ -46,10 +44,9 @@ func Brchain(ctxt *Link, p *Prog) *Prog {
 }
 
 func brloop(ctxt *Link, p *Prog) *Prog {
-	var c int
 	var q *Prog
 
-	c = 0
+	c := 0
 	for q = p; q != nil; q = q.Pcond {
 		if q.As != AJMP || q.Pcond == nil {
 			break
@@ -152,12 +149,11 @@ func checkaddr(ctxt *Link, p *Prog, a *Addr) {
 func linkpatch(ctxt *Link, sym *LSym) {
 	var c int32
 	var name string
-	var p *Prog
 	var q *Prog
 
 	ctxt.Cursym = sym
 
-	for p = sym.Text; p != nil; p = p.Link {
+	for p := sym.Text; p != nil; p = p.Link {
 		checkaddr(ctxt, p, &p.From)
 		checkaddr(ctxt, p, &p.From3)
 		checkaddr(ctxt, p, &p.To)
@@ -203,7 +199,7 @@ func linkpatch(ctxt *Link, sym *LSym) {
 		p.Pcond = q
 	}
 
-	for p = sym.Text; p != nil; p = p.Link {
+	for p := sym.Text; p != nil; p = p.Link {
 		p.Mark = 0 /* initialization for follow */
 		if p.Pcond != nil {
 			p.Pcond = brloop(ctxt, p.Pcond)
diff --git a/src/cmd/internal/obj/pcln.go b/src/cmd/internal/obj/pcln.go
index f5cdd3a099e58417c8bdcb7698ce04c870478769..13bb1cd85c55c59b0334455861328a8d2c13d90a 100644
--- a/src/cmd/internal/obj/pcln.go
+++ b/src/cmd/internal/obj/pcln.go
@@ -28,17 +28,8 @@ func addvarint(ctxt *Link, d *Pcdata, val uint32) {
 // where func is the function, val is the current value, p is the instruction being
 // considered, and arg can be used to further parameterize valfunc.
 func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(*Link, *LSym, int32, *Prog, int32, interface{}) int32, arg interface{}) {
-	var dbg int
-	var i int
-	var oldval int32
-	var val int32
-	var started int32
-	var delta uint32
-	var pc int64
-	var p *Prog
-
 	// To debug a specific function, uncomment second line and change name.
-	dbg = 0
+	dbg := 0
 
 	//dbg = strcmp(func->name, "main.main") == 0;
 	//dbg = strcmp(desc, "pctofile") == 0;
@@ -51,21 +42,22 @@ func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(*
 		fmt.Fprintf(ctxt.Bso, "funcpctab %s [valfunc=%s]\n", func_.Name, desc)
 	}
 
-	val = -1
-	oldval = val
+	val := int32(-1)
+	oldval := val
 	if func_.Text == nil {
 		ctxt.Debugpcln -= int32(dbg)
 		return
 	}
 
-	pc = func_.Text.Pc
+	pc := func_.Text.Pc
 
 	if ctxt.Debugpcln != 0 {
 		fmt.Fprintf(ctxt.Bso, "%6x %6d %v\n", uint64(pc), val, func_.Text)
 	}
 
-	started = 0
-	for p = func_.Text; p != nil; p = p.Link {
+	started := int32(0)
+	var delta uint32
+	for p := func_.Text; p != nil; p = p.Link {
 		// Update val. If it's not changing, keep going.
 		val = valfunc(ctxt, func_, val, p, 0, arg)
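// Editor's sketch (not part of this CL): the valfunc contract from the
// doc comment above. funcpctab walks the Progs, asks valfunc for the
// value at each one, and emits a (value delta, pc delta) pair whenever
// it changes. A trivial valfunc mapping every pc to its source line
// (the real pctofileline, below, is a fuller version of this):
func pctolineSketch(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 {
	return p.Lineno
}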
 
@@ -134,7 +126,7 @@ func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(*
 
 	if ctxt.Debugpcln != 0 {
 		fmt.Fprintf(ctxt.Bso, "wrote %d bytes to %p\n", len(dst.P), dst)
-		for i = 0; i < len(dst.P); i++ {
+		for i := 0; i < len(dst.P); i++ {
 			fmt.Fprintf(ctxt.Bso, " %02x", dst.P[i])
 		}
 		fmt.Fprintf(ctxt.Bso, "\n")
@@ -148,14 +140,11 @@ func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(*
 // Because p->lineno applies to p, phase == 0 (before p)
 // takes care of the update.
 func pctofileline(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 {
-	var i int32
-	var l int32
-	var f *LSym
-	var pcln *Pcln
-
 	if p.As == ATEXT || p.As == ANOP || p.As == AUSEFIELD || p.Lineno == 0 || phase == 1 {
 		return oldval
 	}
+	var l int32
+	var f *LSym
 	linkgetline(ctxt, p.Lineno, &f, &l)
 	if f == nil {
 		//	print("getline failed for %s %P\n", ctxt->cursym->name, p);
@@ -165,12 +154,13 @@ func pctofileline(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg
 	if arg == nil {
 		return l
 	}
-	pcln = arg.(*Pcln)
+	pcln := arg.(*Pcln)
 
 	if f == pcln.Lastfile {
 		return int32(pcln.Lastindex)
 	}
 
+	var i int32
 	for i = 0; i < int32(len(pcln.File)); i++ {
 		file := pcln.File[i]
 		if file == f {
@@ -222,20 +212,14 @@ func pctopcdata(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg i
 }
 
 func linkpcln(ctxt *Link, cursym *LSym) {
-	var p *Prog
-	var pcln *Pcln
-	var i int
-	var npcdata int
-	var nfuncdata int
-
 	ctxt.Cursym = cursym
 
-	pcln = new(Pcln)
+	pcln := new(Pcln)
 	cursym.Pcln = pcln
 
-	npcdata = 0
-	nfuncdata = 0
-	for p = cursym.Text; p != nil; p = p.Link {
+	npcdata := 0
+	nfuncdata := 0
+	for p := cursym.Text; p != nil; p = p.Link {
 		if p.As == APCDATA && p.From.Offset >= int64(npcdata) {
 			npcdata = int(p.From.Offset + 1)
 		}
@@ -257,7 +241,7 @@ func linkpcln(ctxt *Link, cursym *LSym) {
 	// tabulate which pc and func data we have.
 	havepc := make([]uint32, (npcdata+31)/32)
 	havefunc := make([]uint32, (nfuncdata+31)/32)
-	for p = cursym.Text; p != nil; p = p.Link {
+	for p := cursym.Text; p != nil; p = p.Link {
 		if p.As == AFUNCDATA {
 			if (havefunc[p.From.Offset/32]>>uint64(p.From.Offset%32))&1 != 0 {
 				ctxt.Diag("multiple definitions for FUNCDATA $%d", p.From.Offset)
@@ -271,7 +255,7 @@ func linkpcln(ctxt *Link, cursym *LSym) {
 	}
 
 	// pcdata.
-	for i = 0; i < npcdata; i++ {
+	for i := 0; i < npcdata; i++ {
 		if (havepc[i/32]>>uint(i%32))&1 == 0 {
 			continue
 		}
@@ -280,7 +264,8 @@ func linkpcln(ctxt *Link, cursym *LSym) {
 
 	// funcdata
 	if nfuncdata > 0 {
-		for p = cursym.Text; p != nil; p = p.Link {
+		var i int
+		for p := cursym.Text; p != nil; p = p.Link {
 			if p.As == AFUNCDATA {
 				i = int(p.From.Offset)
 				pcln.Funcdataoff[i] = p.To.Offset
@@ -297,13 +282,9 @@ func linkpcln(ctxt *Link, cursym *LSym) {
 // iteration over encoded pcdata tables.
 
 func getvarint(pp *[]byte) uint32 {
-	var p []byte
-	var shift int
-	var v uint32
-
-	v = 0
-	p = *pp
-	for shift = 0; ; shift += 7 {
+	v := uint32(0)
+	p := *pp
+	for shift := 0; ; shift += 7 {
 		v |= uint32(p[0]&0x7F) << uint(shift)
 		tmp7 := p
 		p = p[1:]
@@ -317,9 +298,6 @@ func getvarint(pp *[]byte) uint32 {
 }
 
 func pciternext(it *Pciter) {
-	var v uint32
-	var dv int32
-
 	it.pc = it.nextpc
 	if it.done != 0 {
 		return
@@ -330,7 +308,7 @@ func pciternext(it *Pciter) {
 	}
 
 	// value delta
-	v = getvarint(&it.p)
+	v := getvarint(&it.p)
 
 	if v == 0 && it.start == 0 {
 		it.done = 1
@@ -338,7 +316,7 @@ func pciternext(it *Pciter) {
 	}
 
 	it.start = 0
-	dv = int32(v>>1) ^ (int32(v<<31) >> 31)
+	dv := int32(v>>1) ^ (int32(v<<31) >> 31)
 	it.value += dv
 
 	// pc delta
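// Editor's sketch (not part of this CL): undoing the zig-zag above.
// int32(v<<31) >> 31 is an all-ones mask iff the low bit was set, so
// XOR restores the sign that wrint's encoder folded into bit 0:
func zigzagDecode(v uint32) int32 {
	return int32(v>>1) ^ (int32(v<<31) >> 31)
}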
diff --git a/src/cmd/internal/obj/sym.go b/src/cmd/internal/obj/sym.go
index 30d0eb1751c1abb976e976d4a48ca5408039c2c4..d9df440f6ed0c0eca7f845078d2415c5f3e1d9d7 100644
--- a/src/cmd/internal/obj/sym.go
+++ b/src/cmd/internal/obj/sym.go
@@ -101,9 +101,7 @@ var headers = []struct {
 }
 
 func headtype(name string) int {
-	var i int
-
-	for i = 0; i < len(headers); i++ {
+	for i := 0; i < len(headers); i++ {
 		if name == headers[i].name {
 			return headers[i].val
 		}
@@ -114,9 +112,7 @@ func headtype(name string) int {
 var headstr_buf string
 
 func Headstr(v int) string {
-	var i int
-
-	for i = 0; i < len(headers); i++ {
+	for i := 0; i < len(headers); i++ {
 		if v == headers[i].val {
 			return headers[i].name
 		}
@@ -126,13 +122,11 @@ func Headstr(v int) string {
 }
 
 func Linknew(arch *LinkArch) *Link {
-	var ctxt *Link
-	var p string
 	var buf string
 
 	linksetexp()
 
-	ctxt = new(Link)
+	ctxt := new(Link)
 	ctxt.Arch = arch
 	ctxt.Version = HistVersion
 	ctxt.Goroot = Getgoroot()
@@ -211,7 +205,7 @@ func Linknew(arch *LinkArch) *Link {
 
 	// On arm, record goarm.
 	if ctxt.Arch.Thechar == '5' {
-		p = Getgoarm()
+		p := Getgoarm()
 		if p != "" {
 			ctxt.Goarm = int32(Atoi(p))
 		} else {
@@ -223,9 +217,7 @@ func Linknew(arch *LinkArch) *Link {
 }
 
 func linknewsym(ctxt *Link, symb string, v int) *LSym {
-	var s *LSym
-
-	s = new(LSym)
+	s := new(LSym)
 	*s = LSym{}
 
 	s.Dynid = -1
@@ -246,17 +238,14 @@ func linknewsym(ctxt *Link, symb string, v int) *LSym {
 }
 
 func _lookup(ctxt *Link, symb string, v int, creat int) *LSym {
-	var s *LSym
-	var h uint32
-
-	h = uint32(v)
+	h := uint32(v)
 	for i := 0; i < len(symb); i++ {
 		c := int(symb[i])
 		h = h + h + h + uint32(c)
 	}
 	h &= 0xffffff
 	h %= LINKHASH
-	for s = ctxt.Hash[h]; s != nil; s = s.Hash {
+	for s := ctxt.Hash[h]; s != nil; s = s.Hash {
 		if int(s.Version) == v && s.Name == symb {
 			return s
 		}
@@ -265,7 +254,7 @@ func _lookup(ctxt *Link, symb string, v int, creat int) *LSym {
 		return nil
 	}
 
-	s = linknewsym(ctxt, symb, v)
+	s := linknewsym(ctxt, symb, v)
 	s.Extname = s.Name
 	s.Hash = ctxt.Hash[h]
 	ctxt.Hash[h] = s
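// Editor's sketch (not part of this CL): the hash scheme above in one
// place. tableSize stands in for this package's LINKHASH constant:
func symHash(name string, version int, tableSize uint32) uint32 {
	h := uint32(version)
	for i := 0; i < len(name); i++ {
		h = h + h + h + uint32(name[i]) // h = 3*h + c
	}
	h &= 0xffffff // keep 24 bits
	return h % tableSize
}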