diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 48b4f7d0b5138b3ebffa8c488017353a31c8ae7d..5b8590c357f5e31780ab1765ed31af3f7810680a 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -414,7 +414,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		r := v.Reg()
 		a := v.Args[0].Reg()
 		if r == a {
-			if v.AuxInt == 1 {
+			switch v.AuxInt {
+			case 1:
 				var asm obj.As
 				// Software optimization manual recommends add $1,reg.
 				// But inc/dec is 1 byte smaller. ICC always uses inc
@@ -430,8 +431,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 				p.To.Type = obj.TYPE_REG
 				p.To.Reg = r
 				return
-			}
-			if v.AuxInt == -1 {
+			case -1:
 				var asm obj.As
 				if v.Op == ssa.OpAMD64ADDQconst {
 					asm = x86.ADECQ
@@ -442,6 +442,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 				p.To.Type = obj.TYPE_REG
 				p.To.Reg = r
 				return
+			case 0x80:
+				// 'SUBQ $-0x80, r' is functionally equivalent to
+				// 'ADDQ $0x80, r' but shorter to encode: -0x80 fits
+				// in a sign-extended 8-bit immediate, while 0x80 does not.
+				asm := x86.ASUBL
+				if v.Op == ssa.OpAMD64ADDQconst {
+					asm = x86.ASUBQ
+				}
+				p := s.Prog(asm)
+				p.From.Type = obj.TYPE_CONST
+				p.From.Offset = -0x80
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = r
+				return
 			}
 			p := s.Prog(v.Op.Asm())
 			p.From.Type = obj.TYPE_CONST
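Why the two spellings are interchangeable: in two's-complement arithmetic, ADD $0x80 and SUB $-0x80 wrap to the same result for every input, but x86 only offers the short sign-extended 8-bit immediate encoding for constants in [-128, 127], so only the SUB form qualifies for it. A minimal standalone sketch of the equivalence (the program below is illustrative, not part of the patch):

package main

import "fmt"

func main() {
	// 0xFFFFFF80 is the two's-complement uint32 representation of -0x80,
	// so x - 0xFFFFFF80 computes exactly what 'SUBL $-0x80' does.
	for _, x := range []uint32{0, 1, 0x7F, 0x80, 0xFFFFFF7F, 0xFFFFFFFF} {
		add := x + 0x80
		sub := x - 0xFFFFFF80
		fmt.Printf("x=%#x  add=%#x  sub=%#x  equal=%v\n", x, add, sub, add == sub)
	}
}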
diff --git a/test/codegen/arithmetic.go b/test/codegen/arithmetic.go
index 16d7d25d3e4007ccf3e8bf3505c1dda5e2823f97..b5976be9d258e6d2be94334509635bc58f731a6f 100644
--- a/test/codegen/arithmetic.go
+++ b/test/codegen/arithmetic.go
@@ -381,3 +381,13 @@ func MULS(a, b, c uint32) (uint32, uint32, uint32) {
 	r2 := c - b*64
 	return r0, r1, r2
 }
+
+func addSpecial(a, b, c uint32) (uint32, uint32, uint32) {
+	// amd64:`INCL`
+	a++
+	// amd64:`DECL`
+	b--
+	// amd64:`SUBL.*-128`
+	c += 128
+	return a, b, c
+}
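The amd64:`...` comments above are regexps that the asmcheck driver in test/run.go matches against the disassembly of the enclosing function. For a quick manual check outside that harness, a probe file along these lines (file and function names are illustrative) can be compiled with go tool compile -S and the listing inspected; with this patch applied, add128 should compile to SUBQ $-128 rather than ADDQ $128:

// probe.go: compile with go tool compile -S probe.go
package p

func inc(x uint32) uint32 { return x + 1 } // expect INCL
func dec(x uint32) uint32 { return x - 1 } // expect DECL

// expect SUBQ $-128 (sign-extended imm8 form) instead of ADDQ $128
func add128(x uint64) uint64 { return x + 128 }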