diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..8d9a94a90680d9fc114a1b3a2b4123c233c324af
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) 2005-2008  Dustin Sallings <dustin@spy.net>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+<http://www.opensource.org/licenses/mit-license.php>
diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go
new file mode 100644
index 0000000000000000000000000000000000000000..f49dc337dcd7318c6e827b81b5020a5715c41bfd
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/big.go
@@ -0,0 +1,31 @@
+package humanize
+
+import (
+	"math/big"
+)
+
+// oomm computes the order of magnitude of n in base b (up to a max order).
+func oomm(n, b *big.Int, maxmag int) (float64, int) {
+	mag := 0
+	m := &big.Int{}
+	for n.Cmp(b) >= 0 {
+		n.DivMod(n, b, m)
+		mag++
+		if mag == maxmag && maxmag >= 0 {
+			break
+		}
+	}
+	return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
+
+// oom computes the total order of magnitude
+// (same as oomm, but with no upper limit)
+func oom(n, b *big.Int) (float64, int) {
+	mag := 0
+	m := &big.Int{}
+	for n.Cmp(b) >= 0 {
+		n.DivMod(n, b, m)
+		mag++
+	}
+	return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
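+
+// Illustrative example of how the helpers above behave: oom splits a value
+// into a scaled mantissa and a magnitude, e.g.
+//
+//	n := big.NewInt(2500000)
+//	v, mag := oom(n, big.NewInt(1000)) // v == 2.5, mag == 2
+//
+// Note that oom and oomm mutate n in place via DivMod, which is why
+// callers in this package pass in a copy.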
diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go
new file mode 100644
index 0000000000000000000000000000000000000000..1a2bf61723922c0a199af8c6134be7e801b7edad
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/bigbytes.go
@@ -0,0 +1,173 @@
+package humanize
+
+import (
+	"fmt"
+	"math/big"
+	"strings"
+	"unicode"
+)
+
+var (
+	bigIECExp = big.NewInt(1024)
+
+	// BigByte is one byte in big.Ints
+	BigByte = big.NewInt(1)
+	// BigKiByte is 1,024 bytes in big.Ints
+	BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
+	// BigMiByte is 1,024 KiB in big.Ints
+	BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
+	// BigGiByte is 1,024 MiB in big.Ints
+	BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
+	// BigTiByte is 1,024 GiB in big.Ints
+	BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
+	// BigPiByte is 1,024 TiB in big.Ints
+	BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
+	// BigEiByte is 1,024 PiB in big.Ints
+	BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
+	// BigZiByte is 1,024 EiB in big.Ints
+	BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
+	// BigYiByte is 1,024 ZiB in big.Ints
+	BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
+)
+
+var (
+	bigSIExp = big.NewInt(1000)
+
+	// BigSIByte is one SI byte in big.Ints
+	BigSIByte = big.NewInt(1)
+	// BigKByte is 1,000 SI bytes in big.Ints
+	BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
+	// BigMByte is 1,000 SI kB in big.Ints
+	BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
+	// BigGByte is 1,000 SI MB in big.Ints
+	BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
+	// BigTByte is 1,000 SI GB in big.Ints
+	BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
+	// BigPByte is 1,000 SI TB in big.Ints
+	BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
+	// BigEByte is 1,000 SI PB in big.Ints
+	BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
+	// BigZByte is 1,000 SI EB in big.Ints
+	BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
+	// BigYByte is 1,000 SI ZB in big.Ints
+	BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
+)
+
+var bigBytesSizeTable = map[string]*big.Int{
+	"b":   BigByte,
+	"kib": BigKiByte,
+	"kb":  BigKByte,
+	"mib": BigMiByte,
+	"mb":  BigMByte,
+	"gib": BigGiByte,
+	"gb":  BigGByte,
+	"tib": BigTiByte,
+	"tb":  BigTByte,
+	"pib": BigPiByte,
+	"pb":  BigPByte,
+	"eib": BigEiByte,
+	"eb":  BigEByte,
+	"zib": BigZiByte,
+	"zb":  BigZByte,
+	"yib": BigYiByte,
+	"yb":  BigYByte,
+	// Without the "B" suffix
+	"":   BigByte,
+	"ki": BigKiByte,
+	"k":  BigKByte,
+	"mi": BigMiByte,
+	"m":  BigMByte,
+	"gi": BigGiByte,
+	"g":  BigGByte,
+	"ti": BigTiByte,
+	"t":  BigTByte,
+	"pi": BigPiByte,
+	"p":  BigPByte,
+	"ei": BigEiByte,
+	"e":  BigEByte,
+	"z":  BigZByte,
+	"zi": BigZiByte,
+	"y":  BigYByte,
+	"yi": BigYiByte,
+}
+
+var ten = big.NewInt(10)
+
+func humanateBigBytes(s, base *big.Int, sizes []string) string {
+	if s.Cmp(ten) < 0 {
+		return fmt.Sprintf("%d B", s)
+	}
+	c := (&big.Int{}).Set(s)
+	val, mag := oomm(c, base, len(sizes)-1)
+	suffix := sizes[mag]
+	f := "%.0f %s"
+	if val < 10 {
+		f = "%.1f %s"
+	}
+
+	return fmt.Sprintf(f, val, suffix)
+}
+
+// BigBytes produces a human readable representation of an SI size.
+//
+// See also: ParseBigBytes.
+//
+// BigBytes(82854982) -> 83 MB
+func BigBytes(s *big.Int) string {
+	sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+	return humanateBigBytes(s, bigSIExp, sizes)
+}
+
+// BigIBytes produces a human readable representation of an IEC size.
+//
+// See also: ParseBigBytes.
+//
+// BigIBytes(82854982) -> 79 MiB
+func BigIBytes(s *big.Int) string {
+	sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+	return humanateBigBytes(s, bigIECExp, sizes)
+}
+
+// ParseBigBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See also: BigBytes, BigIBytes.
+//
+// ParseBigBytes("42 MB") -> 42000000, nil
+// ParseBigBytes("42 mib") -> 44040192, nil
+func ParseBigBytes(s string) (*big.Int, error) {
+	lastDigit := 0
+	hasComma := false
+	for _, r := range s {
+		if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+			break
+		}
+		if r == ',' {
+			hasComma = true
+		}
+		lastDigit++
+	}
+
+	num := s[:lastDigit]
+	if hasComma {
+		num = strings.Replace(num, ",", "", -1)
+	}
+
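+	// Parse the numeric part into a big.Rat so fractional quantities like
+	// "16.5 Zi" keep full precision before scaling.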
+	val := &big.Rat{}
+	_, err := fmt.Sscanf(num, "%f", val)
+	if err != nil {
+		return nil, err
+	}
+
+	extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+	if m, ok := bigBytesSizeTable[extra]; ok {
+		mv := (&big.Rat{}).SetInt(m)
+		val.Mul(val, mv)
+		rv := &big.Int{}
+		rv.Div(val.Num(), val.Denom())
+		return rv, nil
+	}
+
+	return nil, fmt.Errorf("unhandled size name: %v", extra)
+}
diff --git a/vendor/github.com/dustin/go-humanize/bigbytes_test.go b/vendor/github.com/dustin/go-humanize/bigbytes_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..236ad0806da411c154137b1b43998cd36888b112
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/bigbytes_test.go
@@ -0,0 +1,220 @@
+package humanize
+
+import (
+	"math/big"
+	"testing"
+)
+
+func TestBigByteParsing(t *testing.T) {
+	tests := []struct {
+		in  string
+		exp uint64
+	}{
+		{"42", 42},
+		{"42MB", 42000000},
+		{"42MiB", 44040192},
+		{"42mb", 42000000},
+		{"42mib", 44040192},
+		{"42MIB", 44040192},
+		{"42 MB", 42000000},
+		{"42 MiB", 44040192},
+		{"42 mb", 42000000},
+		{"42 mib", 44040192},
+		{"42 MIB", 44040192},
+		{"42.5MB", 42500000},
+		{"42.5MiB", 44564480},
+		{"42.5 MB", 42500000},
+		{"42.5 MiB", 44564480},
+		// No need to say B
+		{"42M", 42000000},
+		{"42Mi", 44040192},
+		{"42m", 42000000},
+		{"42mi", 44040192},
+		{"42MI", 44040192},
+		{"42 M", 42000000},
+		{"42 Mi", 44040192},
+		{"42 m", 42000000},
+		{"42 mi", 44040192},
+		{"42 MI", 44040192},
+		{"42.5M", 42500000},
+		{"42.5Mi", 44564480},
+		{"42.5 M", 42500000},
+		{"42.5 Mi", 44564480},
+		{"1,005.03 MB", 1005030000},
+		// Large-value tests; parsing breaks when values get
+		// much larger than this.
+		{"12.5 EB", uint64(12.5 * float64(EByte))},
+		{"12.5 E", uint64(12.5 * float64(EByte))},
+		{"12.5 EiB", uint64(12.5 * float64(EiByte))},
+	}
+
+	for _, p := range tests {
+		got, err := ParseBigBytes(p.in)
+		if err != nil {
+			t.Errorf("Couldn't parse %v: %v", p.in, err)
+		} else {
+			if got.Uint64() != p.exp {
+				t.Errorf("Expected %v for %v, got %v",
+					p.exp, p.in, got)
+			}
+		}
+	}
+}
+
+func TestBigByteErrors(t *testing.T) {
+	got, err := ParseBigBytes("84 JB")
+	if err == nil {
+		t.Errorf("Expected error, got %v", got)
+	}
+	got, err = ParseBigBytes("")
+	if err == nil {
+		t.Errorf("Expected error parsing nothing")
+	}
+}
+
+func bbyte(in uint64) string {
+	return BigBytes((&big.Int{}).SetUint64(in))
+}
+
+func bibyte(in uint64) string {
+	return BigIBytes((&big.Int{}).SetUint64(in))
+}
+
+func TestBigBytes(t *testing.T) {
+	testList{
+		{"bytes(0)", bbyte(0), "0 B"},
+		{"bytes(1)", bbyte(1), "1 B"},
+		{"bytes(803)", bbyte(803), "803 B"},
+		{"bytes(999)", bbyte(999), "999 B"},
+
+		{"bytes(1024)", bbyte(1024), "1.0 kB"},
+		{"bytes(1MB - 1)", bbyte(MByte - Byte), "1000 kB"},
+
+		{"bytes(1MB)", bbyte(1024 * 1024), "1.0 MB"},
+		{"bytes(1GB - 1K)", bbyte(GByte - KByte), "1000 MB"},
+
+		{"bytes(1GB)", bbyte(GByte), "1.0 GB"},
+		{"bytes(1TB - 1M)", bbyte(TByte - MByte), "1000 GB"},
+
+		{"bytes(1TB)", bbyte(TByte), "1.0 TB"},
+		{"bytes(1PB - 1T)", bbyte(PByte - TByte), "999 TB"},
+
+		{"bytes(1PB)", bbyte(PByte), "1.0 PB"},
+		{"bytes(1PB - 1T)", bbyte(EByte - PByte), "999 PB"},
+
+		{"bytes(1EB)", bbyte(EByte), "1.0 EB"},
+		// Overflows.
+		// {"bytes(1EB - 1P)", Bytes((KByte*EByte)-PByte), "1023EB"},
+
+		{"bytes(0)", bibyte(0), "0 B"},
+		{"bytes(1)", bibyte(1), "1 B"},
+		{"bytes(803)", bibyte(803), "803 B"},
+		{"bytes(1023)", bibyte(1023), "1023 B"},
+
+		{"bytes(1024)", bibyte(1024), "1.0 KiB"},
+		{"bytes(1MB - 1)", bibyte(MiByte - IByte), "1024 KiB"},
+
+		{"bytes(1MB)", bibyte(1024 * 1024), "1.0 MiB"},
+		{"bytes(1GB - 1K)", bibyte(GiByte - KiByte), "1024 MiB"},
+
+		{"bytes(1GB)", bibyte(GiByte), "1.0 GiB"},
+		{"bytes(1TB - 1M)", bibyte(TiByte - MiByte), "1024 GiB"},
+
+		{"bytes(1TB)", bibyte(TiByte), "1.0 TiB"},
+		{"bytes(1PB - 1T)", bibyte(PiByte - TiByte), "1023 TiB"},
+
+		{"bytes(1PB)", bibyte(PiByte), "1.0 PiB"},
+		{"bytes(1PB - 1T)", bibyte(EiByte - PiByte), "1023 PiB"},
+
+		{"bytes(1EiB)", bibyte(EiByte), "1.0 EiB"},
+		// Overflows.
+		// {"bytes(1EB - 1P)", bibyte((KIByte*EIByte)-PiByte), "1023EB"},
+
+		{"bytes(5.5GiB)", bibyte(5.5 * GiByte), "5.5 GiB"},
+
+		{"bytes(5.5GB)", bbyte(5.5 * GByte), "5.5 GB"},
+	}.validate(t)
+}
+
+func TestVeryBigBytes(t *testing.T) {
+	b, _ := (&big.Int{}).SetString("15347691069326346944512", 10)
+	s := BigBytes(b)
+	if s != "15 ZB" {
+		t.Errorf("Expected 15 ZB, got %v", s)
+	}
+	s = BigIBytes(b)
+	if s != "13 ZiB" {
+		t.Errorf("Expected 13 ZiB, got %v", s)
+	}
+
+	b, _ = (&big.Int{}).SetString("15716035654990179271180288", 10)
+	s = BigBytes(b)
+	if s != "16 YB" {
+		t.Errorf("Expected 16 YB, got %v", s)
+	}
+	s = BigIBytes(b)
+	if s != "13 YiB" {
+		t.Errorf("Expected 13 YiB, got %v", s)
+	}
+}
+
+func TestVeryVeryBigBytes(t *testing.T) {
+	b, _ := (&big.Int{}).SetString("16093220510709943573688614912", 10)
+	s := BigBytes(b)
+	if s != "16093 YB" {
+		t.Errorf("Expected 16093 YB, got %v", s)
+	}
+	s = BigIBytes(b)
+	if s != "13312 YiB" {
+		t.Errorf("Expected 13312 YiB, got %v", s)
+	}
+}
+
+func TestParseVeryBig(t *testing.T) {
+	tests := []struct {
+		in  string
+		out string
+	}{
+		{"16 ZB", "16000000000000000000000"},
+		{"16 ZiB", "18889465931478580854784"},
+		{"16.5 ZB", "16500000000000000000000"},
+		{"16.5 ZiB", "19479761741837286506496"},
+		{"16 Z", "16000000000000000000000"},
+		{"16 Zi", "18889465931478580854784"},
+		{"16.5 Z", "16500000000000000000000"},
+		{"16.5 Zi", "19479761741837286506496"},
+
+		{"16 YB", "16000000000000000000000000"},
+		{"16 YiB", "19342813113834066795298816"},
+		{"16.5 YB", "16500000000000000000000000"},
+		{"16.5 YiB", "19947276023641381382651904"},
+		{"16 Y", "16000000000000000000000000"},
+		{"16 Yi", "19342813113834066795298816"},
+		{"16.5 Y", "16500000000000000000000000"},
+		{"16.5 Yi", "19947276023641381382651904"},
+	}
+
+	for _, test := range tests {
+		x, err := ParseBigBytes(test.in)
+		if err != nil {
+			t.Errorf("Error parsing %q: %v", test.in, err)
+			continue
+		}
+
+		if x.String() != test.out {
+			t.Errorf("Expected %q for %q, got %v", test.out, test.in, x)
+		}
+	}
+}
+
+func BenchmarkParseBigBytes(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		ParseBigBytes("16.5 Z")
+	}
+}
+
+func BenchmarkBigBytes(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		bibyte(16.5 * GByte)
+	}
+}
diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go
new file mode 100644
index 0000000000000000000000000000000000000000..0b498f4885c5940951fce0befc9a0881e6224ed4
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/bytes.go
@@ -0,0 +1,143 @@
+package humanize
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+// IEC Sizes.
+// Each unit is 1,024 times the previous (KiByte = 1<<10, MiByte = 1<<20, ...).
+const (
+	Byte = 1 << (iota * 10)
+	KiByte
+	MiByte
+	GiByte
+	TiByte
+	PiByte
+	EiByte
+)
+
+// SI Sizes.
+const (
+	IByte = 1
+	KByte = IByte * 1000
+	MByte = KByte * 1000
+	GByte = MByte * 1000
+	TByte = GByte * 1000
+	PByte = TByte * 1000
+	EByte = PByte * 1000
+)
+
+var bytesSizeTable = map[string]uint64{
+	"b":   Byte,
+	"kib": KiByte,
+	"kb":  KByte,
+	"mib": MiByte,
+	"mb":  MByte,
+	"gib": GiByte,
+	"gb":  GByte,
+	"tib": TiByte,
+	"tb":  TByte,
+	"pib": PiByte,
+	"pb":  PByte,
+	"eib": EiByte,
+	"eb":  EByte,
+	// Without the "B" suffix
+	"":   Byte,
+	"ki": KiByte,
+	"k":  KByte,
+	"mi": MiByte,
+	"m":  MByte,
+	"gi": GiByte,
+	"g":  GByte,
+	"ti": TiByte,
+	"t":  TByte,
+	"pi": PiByte,
+	"p":  PByte,
+	"ei": EiByte,
+	"e":  EByte,
+}
+
+func logn(n, b float64) float64 {
+	return math.Log(n) / math.Log(b)
+}
+
+func humanateBytes(s uint64, base float64, sizes []string) string {
+	if s < 10 {
+		return fmt.Sprintf("%d B", s)
+	}
+	e := math.Floor(logn(float64(s), base))
+	suffix := sizes[int(e)]
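+	// Round to one decimal place before choosing the display precision.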
+	val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
+	f := "%.0f %s"
+	if val < 10 {
+		f = "%.1f %s"
+	}
+
+	return fmt.Sprintf(f, val, suffix)
+}
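+
+// Worked example (illustrative): humanateBytes(82854982, 1000, sizes)
+// picks e = floor(log1000(82854982)) = 2, so suffix = "MB", and rounds
+// val = floor(82.854982*10+0.5)/10 = 82.9; since val >= 10, the "%.0f %s"
+// format yields "83 MB".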
+
+// Bytes produces a human readable representation of an SI size.
+//
+// See also: ParseBytes.
+//
+// Bytes(82854982) -> 83 MB
+func Bytes(s uint64) string {
+	sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
+	return humanateBytes(s, 1000, sizes)
+}
+
+// IBytes produces a human readable representation of an IEC size.
+//
+// See also: ParseBytes.
+//
+// IBytes(82854982) -> 79 MiB
+func IBytes(s uint64) string {
+	sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
+	return humanateBytes(s, 1024, sizes)
+}
+
+// ParseBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See also: Bytes, IBytes.
+//
+// ParseBytes("42 MB") -> 42000000, nil
+// ParseBytes("42 mib") -> 44040192, nil
+func ParseBytes(s string) (uint64, error) {
+	lastDigit := 0
+	hasComma := false
+	for _, r := range s {
+		if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+			break
+		}
+		if r == ',' {
+			hasComma = true
+		}
+		lastDigit++
+	}
+
+	num := s[:lastDigit]
+	if hasComma {
+		num = strings.Replace(num, ",", "", -1)
+	}
+
+	f, err := strconv.ParseFloat(num, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+	if m, ok := bytesSizeTable[extra]; ok {
+		f *= float64(m)
+		if f >= math.MaxUint64 {
+			return 0, fmt.Errorf("too large: %v", s)
+		}
+		return uint64(f), nil
+	}
+
+	return 0, fmt.Errorf("unhandled size name: %v", extra)
+}
diff --git a/vendor/github.com/dustin/go-humanize/bytes_test.go b/vendor/github.com/dustin/go-humanize/bytes_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..0bb811c701050bdb1daa085c40956780ccbfec8b
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/bytes_test.go
@@ -0,0 +1,146 @@
+package humanize
+
+import (
+	"testing"
+)
+
+func TestByteParsing(t *testing.T) {
+	tests := []struct {
+		in  string
+		exp uint64
+	}{
+		{"42", 42},
+		{"42MB", 42000000},
+		{"42MiB", 44040192},
+		{"42mb", 42000000},
+		{"42mib", 44040192},
+		{"42MIB", 44040192},
+		{"42 MB", 42000000},
+		{"42 MiB", 44040192},
+		{"42 mb", 42000000},
+		{"42 mib", 44040192},
+		{"42 MIB", 44040192},
+		{"42.5MB", 42500000},
+		{"42.5MiB", 44564480},
+		{"42.5 MB", 42500000},
+		{"42.5 MiB", 44564480},
+		// No need to say B
+		{"42M", 42000000},
+		{"42Mi", 44040192},
+		{"42m", 42000000},
+		{"42mi", 44040192},
+		{"42MI", 44040192},
+		{"42 M", 42000000},
+		{"42 Mi", 44040192},
+		{"42 m", 42000000},
+		{"42 mi", 44040192},
+		{"42 MI", 44040192},
+		{"42.5M", 42500000},
+		{"42.5Mi", 44564480},
+		{"42.5 M", 42500000},
+		{"42.5 Mi", 44564480},
+		// Bug #42
+		{"1,005.03 MB", 1005030000},
+		// Large-value tests; parsing breaks when values get
+		// much larger than this.
+		{"12.5 EB", uint64(12.5 * float64(EByte))},
+		{"12.5 E", uint64(12.5 * float64(EByte))},
+		{"12.5 EiB", uint64(12.5 * float64(EiByte))},
+	}
+
+	for _, p := range tests {
+		got, err := ParseBytes(p.in)
+		if err != nil {
+			t.Errorf("Couldn't parse %v: %v", p.in, err)
+		}
+		if got != p.exp {
+			t.Errorf("Expected %v for %v, got %v",
+				p.exp, p.in, got)
+		}
+	}
+}
+
+func TestByteErrors(t *testing.T) {
+	got, err := ParseBytes("84 JB")
+	if err == nil {
+		t.Errorf("Expected error, got %v", got)
+	}
+	got, err = ParseBytes("")
+	if err == nil {
+		t.Errorf("Expected error parsing nothing")
+	}
+	got, err = ParseBytes("16 EiB")
+	if err == nil {
+		t.Errorf("Expected error, got %v", got)
+	}
+}
+
+func TestBytes(t *testing.T) {
+	testList{
+		{"bytes(0)", Bytes(0), "0 B"},
+		{"bytes(1)", Bytes(1), "1 B"},
+		{"bytes(803)", Bytes(803), "803 B"},
+		{"bytes(999)", Bytes(999), "999 B"},
+
+		{"bytes(1024)", Bytes(1024), "1.0 kB"},
+		{"bytes(9999)", Bytes(9999), "10 kB"},
+		{"bytes(1MB - 1)", Bytes(MByte - Byte), "1000 kB"},
+
+		{"bytes(1MB)", Bytes(1024 * 1024), "1.0 MB"},
+		{"bytes(1GB - 1K)", Bytes(GByte - KByte), "1000 MB"},
+
+		{"bytes(1GB)", Bytes(GByte), "1.0 GB"},
+		{"bytes(1TB - 1M)", Bytes(TByte - MByte), "1000 GB"},
+		{"bytes(10MB)", Bytes(9999 * 1000), "10 MB"},
+
+		{"bytes(1TB)", Bytes(TByte), "1.0 TB"},
+		{"bytes(1PB - 1T)", Bytes(PByte - TByte), "999 TB"},
+
+		{"bytes(1PB)", Bytes(PByte), "1.0 PB"},
+		{"bytes(1PB - 1T)", Bytes(EByte - PByte), "999 PB"},
+
+		{"bytes(1EB)", Bytes(EByte), "1.0 EB"},
+		// Overflows.
+		// {"bytes(1EB - 1P)", Bytes((KByte*EByte)-PByte), "1023EB"},
+
+		{"bytes(0)", IBytes(0), "0 B"},
+		{"bytes(1)", IBytes(1), "1 B"},
+		{"bytes(803)", IBytes(803), "803 B"},
+		{"bytes(1023)", IBytes(1023), "1023 B"},
+
+		{"bytes(1024)", IBytes(1024), "1.0 KiB"},
+		{"bytes(1MB - 1)", IBytes(MiByte - IByte), "1024 KiB"},
+
+		{"bytes(1MB)", IBytes(1024 * 1024), "1.0 MiB"},
+		{"bytes(1GB - 1K)", IBytes(GiByte - KiByte), "1024 MiB"},
+
+		{"bytes(1GB)", IBytes(GiByte), "1.0 GiB"},
+		{"bytes(1TB - 1M)", IBytes(TiByte - MiByte), "1024 GiB"},
+
+		{"bytes(1TB)", IBytes(TiByte), "1.0 TiB"},
+		{"bytes(1PB - 1T)", IBytes(PiByte - TiByte), "1023 TiB"},
+
+		{"bytes(1PB)", IBytes(PiByte), "1.0 PiB"},
+		{"bytes(1PB - 1T)", IBytes(EiByte - PiByte), "1023 PiB"},
+
+		{"bytes(1EiB)", IBytes(EiByte), "1.0 EiB"},
+		// Overflows.
+		// {"bytes(1EB - 1P)", IBytes((KIByte*EIByte)-PiByte), "1023EB"},
+
+		{"bytes(5.5GiB)", IBytes(5.5 * GiByte), "5.5 GiB"},
+
+		{"bytes(5.5GB)", Bytes(5.5 * GByte), "5.5 GB"},
+	}.validate(t)
+}
+
+func BenchmarkParseBytes(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		ParseBytes("16.5 GB")
+	}
+}
+
+func BenchmarkBytes(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Bytes(16.5 * GByte)
+	}
+}
diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go
new file mode 100644
index 0000000000000000000000000000000000000000..13611aaab875f32a011ad5ecf11f1116687c79c3
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/comma.go
@@ -0,0 +1,108 @@
+package humanize
+
+import (
+	"bytes"
+	"math"
+	"math/big"
+	"strconv"
+	"strings"
+)
+
+// Comma produces a string form of the given number in base 10 with
+// commas after every three orders of magnitude.
+//
+// e.g. Comma(834142) -> 834,142
+func Comma(v int64) string {
+	sign := ""
+
+	// Min int64 can't be negated to a usable value, so it has to be special cased.
+	if v == math.MinInt64 {
+		return "-9,223,372,036,854,775,808"
+	}
+
+	if v < 0 {
+		sign = "-"
+		v = 0 - v
+	}
+
+	parts := []string{"", "", "", "", "", "", ""}
+	j := len(parts) - 1
+
+	for v > 999 {
+		parts[j] = strconv.FormatInt(v%1000, 10)
+		switch len(parts[j]) {
+		case 2:
+			parts[j] = "0" + parts[j]
+		case 1:
+			parts[j] = "00" + parts[j]
+		}
+		v = v / 1000
+		j--
+	}
+	parts[j] = strconv.Itoa(int(v))
+	return sign + strings.Join(parts[j:], ",")
+}
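+
+// Illustrative walk-through of the loop above: Comma(10010000) peels off
+// the v%1000 remainders 0 and 10, zero-pads them to "000" and "010", and
+// joins the groups into "10,010,000".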
+
+// Commaf produces a string form of the given number in base 10 with
+// commas after every three orders of magnitude.
+//
+// e.g. Commaf(834142.32) -> 834,142.32
+func Commaf(v float64) string {
+	buf := &bytes.Buffer{}
+	if v < 0 {
+		buf.Write([]byte{'-'})
+		v = 0 - v
+	}
+
+	comma := []byte{','}
+
+	parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
+	pos := 0
+	if len(parts[0])%3 != 0 {
+		pos += len(parts[0]) % 3
+		buf.WriteString(parts[0][:pos])
+		buf.Write(comma)
+	}
+	for ; pos < len(parts[0]); pos += 3 {
+		buf.WriteString(parts[0][pos : pos+3])
+		buf.Write(comma)
+	}
+	buf.Truncate(buf.Len() - 1)
+
+	if len(parts) > 1 {
+		buf.Write([]byte{'.'})
+		buf.WriteString(parts[1])
+	}
+	return buf.String()
+}
+
+// BigComma produces a string form of the given big.Int in base 10
+// with commas after every three orders of magnitude.
+func BigComma(b *big.Int) string {
+	sign := ""
+	if b.Sign() < 0 {
+		sign = "-"
+		b.Abs(b)
+	}
+
+	athousand := big.NewInt(1000)
+	c := (&big.Int{}).Set(b)
+	_, m := oom(c, athousand)
+	parts := make([]string, m+1)
+	j := len(parts) - 1
+
+	mod := &big.Int{}
+	for b.Cmp(athousand) >= 0 {
+		b.DivMod(b, athousand, mod)
+		parts[j] = strconv.FormatInt(mod.Int64(), 10)
+		switch len(parts[j]) {
+		case 2:
+			parts[j] = "0" + parts[j]
+		case 1:
+			parts[j] = "00" + parts[j]
+		}
+		j--
+	}
+	parts[j] = strconv.Itoa(int(b.Int64()))
+	return sign + strings.Join(parts[j:], ",")
+}
diff --git a/vendor/github.com/dustin/go-humanize/comma_test.go b/vendor/github.com/dustin/go-humanize/comma_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..89daca5fb043c06116b83e76344965bd74be5689
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/comma_test.go
@@ -0,0 +1,136 @@
+package humanize
+
+import (
+	"math"
+	"math/big"
+	"testing"
+)
+
+func TestCommas(t *testing.T) {
+	testList{
+		{"0", Comma(0), "0"},
+		{"10", Comma(10), "10"},
+		{"100", Comma(100), "100"},
+		{"1,000", Comma(1000), "1,000"},
+		{"10,000", Comma(10000), "10,000"},
+		{"100,000", Comma(100000), "100,000"},
+		{"10,000,000", Comma(10000000), "10,000,000"},
+		{"10,100,000", Comma(10100000), "10,100,000"},
+		{"10,010,000", Comma(10010000), "10,010,000"},
+		{"10,001,000", Comma(10001000), "10,001,000"},
+		{"123,456,789", Comma(123456789), "123,456,789"},
+		{"maxint", Comma(9.223372e+18), "9,223,372,000,000,000,000"},
+		{"math.maxint", Comma(math.MaxInt64), "9,223,372,036,854,775,807"},
+		{"math.minint", Comma(math.MinInt64), "-9,223,372,036,854,775,808"},
+		{"minint", Comma(-9.223372e+18), "-9,223,372,000,000,000,000"},
+		{"-123,456,789", Comma(-123456789), "-123,456,789"},
+		{"-10,100,000", Comma(-10100000), "-10,100,000"},
+		{"-10,010,000", Comma(-10010000), "-10,010,000"},
+		{"-10,001,000", Comma(-10001000), "-10,001,000"},
+		{"-10,000,000", Comma(-10000000), "-10,000,000"},
+		{"-100,000", Comma(-100000), "-100,000"},
+		{"-10,000", Comma(-10000), "-10,000"},
+		{"-1,000", Comma(-1000), "-1,000"},
+		{"-100", Comma(-100), "-100"},
+		{"-10", Comma(-10), "-10"},
+	}.validate(t)
+}
+
+func TestCommafs(t *testing.T) {
+	testList{
+		{"0", Commaf(0), "0"},
+		{"10.11", Commaf(10.11), "10.11"},
+		{"100", Commaf(100), "100"},
+		{"1,000", Commaf(1000), "1,000"},
+		{"10,000", Commaf(10000), "10,000"},
+		{"100,000", Commaf(100000), "100,000"},
+		{"834,142.32", Commaf(834142.32), "834,142.32"},
+		{"10,000,000", Commaf(10000000), "10,000,000"},
+		{"10,100,000", Commaf(10100000), "10,100,000"},
+		{"10,010,000", Commaf(10010000), "10,010,000"},
+		{"10,001,000", Commaf(10001000), "10,001,000"},
+		{"123,456,789", Commaf(123456789), "123,456,789"},
+		{"maxf64", Commaf(math.MaxFloat64), "179,769,313,486,231,570,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000"},
+		{"minf64", Commaf(math.SmallestNonzeroFloat64), "0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005"},
+		{"-123,456,789", Commaf(-123456789), "-123,456,789"},
+		{"-10,100,000", Commaf(-10100000), "-10,100,000"},
+		{"-10,010,000", Commaf(-10010000), "-10,010,000"},
+		{"-10,001,000", Commaf(-10001000), "-10,001,000"},
+		{"-10,000,000", Commaf(-10000000), "-10,000,000"},
+		{"-100,000", Commaf(-100000), "-100,000"},
+		{"-10,000", Commaf(-10000), "-10,000"},
+		{"-1,000", Commaf(-1000), "-1,000"},
+		{"-100.11", Commaf(-100.11), "-100.11"},
+		{"-10", Commaf(-10), "-10"},
+	}.validate(t)
+}
+
+func BenchmarkCommas(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Comma(1234567890)
+	}
+}
+
+func BenchmarkCommaf(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Commaf(1234567890.83584)
+	}
+}
+
+func BenchmarkBigCommas(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		BigComma(big.NewInt(1234567890))
+	}
+}
+
+func bigComma(i int64) string {
+	return BigComma(big.NewInt(i))
+}
+
+func TestBigCommas(t *testing.T) {
+	testList{
+		{"0", bigComma(0), "0"},
+		{"10", bigComma(10), "10"},
+		{"100", bigComma(100), "100"},
+		{"1,000", bigComma(1000), "1,000"},
+		{"10,000", bigComma(10000), "10,000"},
+		{"100,000", bigComma(100000), "100,000"},
+		{"10,000,000", bigComma(10000000), "10,000,000"},
+		{"10,100,000", bigComma(10100000), "10,100,000"},
+		{"10,010,000", bigComma(10010000), "10,010,000"},
+		{"10,001,000", bigComma(10001000), "10,001,000"},
+		{"123,456,789", bigComma(123456789), "123,456,789"},
+		{"maxint", bigComma(9.223372e+18), "9,223,372,000,000,000,000"},
+		{"minint", bigComma(-9.223372e+18), "-9,223,372,000,000,000,000"},
+		{"-123,456,789", bigComma(-123456789), "-123,456,789"},
+		{"-10,100,000", bigComma(-10100000), "-10,100,000"},
+		{"-10,010,000", bigComma(-10010000), "-10,010,000"},
+		{"-10,001,000", bigComma(-10001000), "-10,001,000"},
+		{"-10,000,000", bigComma(-10000000), "-10,000,000"},
+		{"-100,000", bigComma(-100000), "-100,000"},
+		{"-10,000", bigComma(-10000), "-10,000"},
+		{"-1,000", bigComma(-1000), "-1,000"},
+		{"-100", bigComma(-100), "-100"},
+		{"-10", bigComma(-10), "-10"},
+	}.validate(t)
+}
+
+func TestVeryBigCommas(t *testing.T) {
+	tests := []struct{ in, exp string }{
+		{
+			"84889279597249724975972597249849757294578485",
+			"84,889,279,597,249,724,975,972,597,249,849,757,294,578,485",
+		},
+		{
+			"-84889279597249724975972597249849757294578485",
+			"-84,889,279,597,249,724,975,972,597,249,849,757,294,578,485",
+		},
+	}
+	for _, test := range tests {
+		n, _ := (&big.Int{}).SetString(test.in, 10)
+		got := BigComma(n)
+		if test.exp != got {
+			t.Errorf("Expected %q, got %q", test.exp, got)
+		}
+	}
+}
diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go
new file mode 100644
index 0000000000000000000000000000000000000000..620690dec7dd63b58e03dbc8b4ae23d6cf793a3d
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/commaf.go
@@ -0,0 +1,40 @@
+// +build go1.6
+
+package humanize
+
+import (
+	"bytes"
+	"math/big"
+	"strings"
+)
+
+// BigCommaf produces a string form of the given big.Float in base 10
+// with commas after every three orders of magnitude.
+func BigCommaf(v *big.Float) string {
+	buf := &bytes.Buffer{}
+	if v.Sign() < 0 {
+		buf.Write([]byte{'-'})
+		v.Abs(v)
+	}
+
+	comma := []byte{','}
+
+	parts := strings.Split(v.Text('f', -1), ".")
+	pos := 0
+	if len(parts[0])%3 != 0 {
+		pos += len(parts[0]) % 3
+		buf.WriteString(parts[0][:pos])
+		buf.Write(comma)
+	}
+	for ; pos < len(parts[0]); pos += 3 {
+		buf.WriteString(parts[0][pos : pos+3])
+		buf.Write(comma)
+	}
+	buf.Truncate(buf.Len() - 1)
+
+	if len(parts) > 1 {
+		buf.Write([]byte{'.'})
+		buf.WriteString(parts[1])
+	}
+	return buf.String()
+}
diff --git a/vendor/github.com/dustin/go-humanize/commaf_test.go b/vendor/github.com/dustin/go-humanize/commaf_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..21f7f9e5f799662cf5e6f1e80c1d061b2d88a810
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/commaf_test.go
@@ -0,0 +1,44 @@
+// +build go1.6
+
+package humanize
+
+import (
+	"math"
+	"math/big"
+	"testing"
+)
+
+func BenchmarkBigCommaf(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		BigCommaf(big.NewFloat(1234567890.83584))
+	}
+}
+
+func TestBigCommafs(t *testing.T) {
+	testList{
+		{"0", BigCommaf(big.NewFloat(0)), "0"},
+		{"10.11", BigCommaf(big.NewFloat(10.11)), "10.11"},
+		{"100", BigCommaf(big.NewFloat(100)), "100"},
+		{"1,000", BigCommaf(big.NewFloat(1000)), "1,000"},
+		{"10,000", BigCommaf(big.NewFloat(10000)), "10,000"},
+		{"100,000", BigCommaf(big.NewFloat(100000)), "100,000"},
+		{"834,142.32", BigCommaf(big.NewFloat(834142.32)), "834,142.32"},
+		{"10,000,000", BigCommaf(big.NewFloat(10000000)), "10,000,000"},
+		{"10,100,000", BigCommaf(big.NewFloat(10100000)), "10,100,000"},
+		{"10,010,000", BigCommaf(big.NewFloat(10010000)), "10,010,000"},
+		{"10,001,000", BigCommaf(big.NewFloat(10001000)), "10,001,000"},
+		{"123,456,789", BigCommaf(big.NewFloat(123456789)), "123,456,789"},
+		{"maxf64", BigCommaf(big.NewFloat(math.MaxFloat64)), "179,769,313,486,231,570,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000"},
+		{"minf64", BigCommaf(big.NewFloat(math.SmallestNonzeroFloat64)), "0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004940656458412465"},
+		{"-123,456,789", BigCommaf(big.NewFloat(-123456789)), "-123,456,789"},
+		{"-10,100,000", BigCommaf(big.NewFloat(-10100000)), "-10,100,000"},
+		{"-10,010,000", BigCommaf(big.NewFloat(-10010000)), "-10,010,000"},
+		{"-10,001,000", BigCommaf(big.NewFloat(-10001000)), "-10,001,000"},
+		{"-10,000,000", BigCommaf(big.NewFloat(-10000000)), "-10,000,000"},
+		{"-100,000", BigCommaf(big.NewFloat(-100000)), "-100,000"},
+		{"-10,000", BigCommaf(big.NewFloat(-10000)), "-10,000"},
+		{"-1,000", BigCommaf(big.NewFloat(-1000)), "-1,000"},
+		{"-100.11", BigCommaf(big.NewFloat(-100.11)), "-100.11"},
+		{"-10", BigCommaf(big.NewFloat(-10)), "-10"},
+	}.validate(t)
+}
diff --git a/vendor/github.com/dustin/go-humanize/common_test.go b/vendor/github.com/dustin/go-humanize/common_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..fc7db151640401c8b56bce644e294076bdff9fe0
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/common_test.go
@@ -0,0 +1,18 @@
+package humanize
+
+import (
+	"testing"
+)
+
+type testList []struct {
+	name, got, exp string
+}
+
+func (tl testList) validate(t *testing.T) {
+	for _, test := range tl {
+		if test.got != test.exp {
+			t.Errorf("On %v, expected '%v', but got '%v'",
+				test.name, test.exp, test.got)
+		}
+	}
+}
diff --git a/vendor/github.com/dustin/go-humanize/english/words.go b/vendor/github.com/dustin/go-humanize/english/words.go
new file mode 100644
index 0000000000000000000000000000000000000000..26e9918bdd390c528fe0b072694db1c6ddb5619b
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/english/words.go
@@ -0,0 +1,96 @@
+// Package english provides utilities to generate more user-friendly English output.
+package english
+
+import (
+	"fmt"
+	"strings"
+)
+
+// These are included because they are common technical terms.
+var specialPlurals = map[string]string{
+	"index":  "indices",
+	"matrix": "matrices",
+	"vertex": "vertices",
+}
+
+var sibilantEndings = []string{"s", "sh", "tch", "x"}
+
+var isVowel = map[byte]bool{
+	'A': true, 'E': true, 'I': true, 'O': true, 'U': true,
+	'a': true, 'e': true, 'i': true, 'o': true, 'u': true,
+}
+
+// PluralWord builds the plural form of an English word.
+// The simple English rules of regular pluralization will be used
+// if the plural form is an empty string (i.e. not explicitly given).
+// The special cases are not guaranteed to work for strings outside ASCII.
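+//
+// e.g. PluralWord(2, "bus", "") -> "buses" (sibilant ending) and
+// PluralWord(2, "lady", "") -> "ladies" ('y' after a consonant).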
+func PluralWord(quantity int, singular, plural string) string {
+	if quantity == 1 {
+		return singular
+	}
+	if plural != "" {
+		return plural
+	}
+	if plural = specialPlurals[singular]; plural != "" {
+		return plural
+	}
+
+	// We need to guess what the English plural might be.  Keep this
+	// function simple!  It doesn't need to know about every possibility;
+	// only regular rules and the most common special cases.
+	//
+	// Reference: http://en.wikipedia.org/wiki/English_plural
+
+	for _, ending := range sibilantEndings {
+		if strings.HasSuffix(singular, ending) {
+			return singular + "es"
+		}
+	}
+	l := len(singular)
+	if l >= 2 && singular[l-1] == 'o' && !isVowel[singular[l-2]] {
+		return singular + "es"
+	}
+	if l >= 2 && singular[l-1] == 'y' && !isVowel[singular[l-2]] {
+		return singular[:l-1] + "ies"
+	}
+
+	return singular + "s"
+}
+
+// Plural formats an integer and a string into a single pluralized string.
+// The simple English rules of regular pluralization will be used
+// if the plural form is an empty string (i.e. not explicitly given).
+func Plural(quantity int, singular, plural string) string {
+	return fmt.Sprintf("%d %s", quantity, PluralWord(quantity, singular, plural))
+}
+
+// WordSeries converts a list of words into a word series in English.
+// It returns a string containing all the given words separated by commas
+// and the coordinating conjunction, without a serial comma (see
+// OxfordWordSeries for the serial-comma variant).
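+//
+// e.g. WordSeries([]string{"foo", "bar", "baz"}, "and") -> "foo, bar and baz"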
+func WordSeries(words []string, conjunction string) string {
+	switch len(words) {
+	case 0:
+		return ""
+	case 1:
+		return words[0]
+	default:
+		return fmt.Sprintf("%s %s %s", strings.Join(words[:len(words)-1], ", "), conjunction, words[len(words)-1])
+	}
+}
+
+// OxfordWordSeries converts a list of words into a word series in English,
+// using an Oxford comma (https://en.wikipedia.org/wiki/Serial_comma). It
+// returns a string containing all the given words separated by commas, the
+// coordinating conjunction, and a serial comma, as appropriate.
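+//
+// e.g. OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") -> "foo, bar, and baz"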
+func OxfordWordSeries(words []string, conjunction string) string {
+	switch len(words) {
+	case 0:
+		return ""
+	case 1:
+		return words[0]
+	case 2:
+		return strings.Join(words, fmt.Sprintf(" %s ", conjunction))
+	default:
+		return fmt.Sprintf("%s, %s %s", strings.Join(words[:len(words)-1], ", "), conjunction, words[len(words)-1])
+	}
+}
diff --git a/vendor/github.com/dustin/go-humanize/english/words_test.go b/vendor/github.com/dustin/go-humanize/english/words_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..3c1d7bea6053154ebeab2f6be29fee0c5c148fb1
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/english/words_test.go
@@ -0,0 +1,94 @@
+package english
+
+import (
+	"testing"
+)
+
+func TestPluralWord(t *testing.T) {
+	tests := []struct {
+		n                int
+		singular, plural string
+		want             string
+	}{
+		{0, "object", "", "objects"},
+		{1, "object", "", "object"},
+		{-1, "object", "", "objects"},
+		{42, "object", "", "objects"},
+		{2, "vax", "vaxen", "vaxen"},
+
+		// special cases
+		{2, "index", "", "indices"},
+
+		// ending in a sibilant sound
+		{2, "bus", "", "buses"},
+		{2, "bush", "", "bushes"},
+		{2, "watch", "", "watches"},
+		{2, "box", "", "boxes"},
+
+		// ending with 'o' preceded by a consonant
+		{2, "hero", "", "heroes"},
+
+		// ending with 'y' preceded by a consonant
+		{2, "lady", "", "ladies"},
+		{2, "day", "", "days"},
+	}
+	for _, tt := range tests {
+		if got := PluralWord(tt.n, tt.singular, tt.plural); got != tt.want {
+			t.Errorf("PluralWord(%d, %q, %q)=%q; want: %q", tt.n, tt.singular, tt.plural, got, tt.want)
+		}
+	}
+}
+
+func TestPlural(t *testing.T) {
+	tests := []struct {
+		n                int
+		singular, plural string
+		want             string
+	}{
+		{1, "object", "", "1 object"},
+		{42, "object", "", "42 objects"},
+	}
+	for _, tt := range tests {
+		if got := Plural(tt.n, tt.singular, tt.plural); got != tt.want {
+			t.Errorf("Plural(%d, %q, %q)=%q; want: %q", tt.n, tt.singular, tt.plural, got, tt.want)
+		}
+	}
+}
+
+func TestWordSeries(t *testing.T) {
+	tests := []struct {
+		words       []string
+		conjunction string
+		want        string
+	}{
+		{[]string{}, "and", ""},
+		{[]string{"foo"}, "and", "foo"},
+		{[]string{"foo", "bar"}, "and", "foo and bar"},
+		{[]string{"foo", "bar", "baz"}, "and", "foo, bar and baz"},
+		{[]string{"foo", "bar", "baz"}, "or", "foo, bar or baz"},
+	}
+	for _, tt := range tests {
+		if got := WordSeries(tt.words, tt.conjunction); got != tt.want {
+			t.Errorf("WordSeries(%q, %q)=%q; want: %q", tt.words, tt.conjunction, got, tt.want)
+		}
+	}
+}
+
+func TestOxfordWordSeries(t *testing.T) {
+	tests := []struct {
+		words       []string
+		conjunction string
+		want        string
+	}{
+		{[]string{}, "and", ""},
+		{[]string{"foo"}, "and", "foo"},
+		{[]string{"foo", "bar"}, "and", "foo and bar"},
+		{[]string{"foo", "bar", "baz"}, "and", "foo, bar, and baz"},
+		{[]string{"foo", "bar", "baz"}, "or", "foo, bar, or baz"},
+	}
+	for _, tt := range tests {
+		if got := OxfordWordSeries(tt.words, tt.conjunction); got != tt.want {
+			t.Errorf("OxfordWordSeries(%q, %q)=%q; want: %q", tt.words, tt.conjunction, got, tt.want)
+		}
+	}
+}
diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go
new file mode 100644
index 0000000000000000000000000000000000000000..c76190b106716add410d2b5a5f66d0ed0719f102
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/ftoa.go
@@ -0,0 +1,23 @@
+package humanize
+
+import "strconv"
+
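+// stripTrailingZeros removes trailing zeros after the decimal point, and
+// the point itself if only zeros follow it. It assumes the input contains
+// a decimal separator, as produced by FormatFloat with a fixed precision.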
+func stripTrailingZeros(s string) string {
+	offset := len(s) - 1
+	for offset > 0 {
+		if s[offset] == '.' {
+			offset--
+			break
+		}
+		if s[offset] != '0' {
+			break
+		}
+		offset--
+	}
+	return s[:offset+1]
+}
+
+// Ftoa converts a float to a string with no trailing zeros.
+func Ftoa(num float64) string {
+	return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
+}
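+
+// Illustrative examples (drawn from the tests): Ftoa(200) -> "200" and
+// Ftoa(2.02) -> "2.02"; FormatFloat first renders "200.000000" and
+// "2.020000", then stripTrailingZeros trims the zero tail.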
diff --git a/vendor/github.com/dustin/go-humanize/ftoa_test.go b/vendor/github.com/dustin/go-humanize/ftoa_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..276d411b769cbc73935bf234a188ee3e6f8cf57c
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/ftoa_test.go
@@ -0,0 +1,55 @@
+package humanize
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"testing"
+)
+
+func TestFtoa(t *testing.T) {
+	testList{
+		{"200", Ftoa(200), "200"},
+		{"2", Ftoa(2), "2"},
+		{"2.2", Ftoa(2.2), "2.2"},
+		{"2.02", Ftoa(2.02), "2.02"},
+		{"200.02", Ftoa(200.02), "200.02"},
+	}.validate(t)
+}
+
+func BenchmarkFtoaRegexTrailing(b *testing.B) {
+	trailingZerosRegex := regexp.MustCompile(`\.?0+$`)
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		trailingZerosRegex.ReplaceAllString("2.00000", "")
+		trailingZerosRegex.ReplaceAllString("2.0000", "")
+		trailingZerosRegex.ReplaceAllString("2.000", "")
+		trailingZerosRegex.ReplaceAllString("2.00", "")
+		trailingZerosRegex.ReplaceAllString("2.0", "")
+		trailingZerosRegex.ReplaceAllString("2", "")
+	}
+}
+
+func BenchmarkFtoaFunc(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		stripTrailingZeros("2.00000")
+		stripTrailingZeros("2.0000")
+		stripTrailingZeros("2.000")
+		stripTrailingZeros("2.00")
+		stripTrailingZeros("2.0")
+		stripTrailingZeros("2")
+	}
+}
+
+func BenchmarkFmtF(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		_ = fmt.Sprintf("%f", 2.03584)
+	}
+}
+
+func BenchmarkStrconvF(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		strconv.FormatFloat(2.03584, 'f', 6, 64)
+	}
+}
diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go
new file mode 100644
index 0000000000000000000000000000000000000000..a2c2da31ef1ad80b899607322395893bce9365e1
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/humanize.go
@@ -0,0 +1,8 @@
+/*
+Package humanize converts boring ugly numbers to human-friendly strings and back.
+
+Durations can be turned into strings such as "3 days ago", and numbers
+representing sizes like 82854982 into useful strings like "83 MB" or
+"79 MiB" (whichever you prefer).
+*/
+package humanize
diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go
new file mode 100644
index 0000000000000000000000000000000000000000..dec618659969c41ac575428218b033d37d191997
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/number.go
@@ -0,0 +1,192 @@
+package humanize
+
+/*
+Slightly adapted from the source to fit go-humanize.
+
+Author: https://github.com/gorhill
+Source: https://gist.github.com/gorhill/5285193
+*/
+
+import (
+	"math"
+	"strconv"
+)
+
+var (
+	renderFloatPrecisionMultipliers = [...]float64{
+		1,
+		10,
+		100,
+		1000,
+		10000,
+		100000,
+		1000000,
+		10000000,
+		100000000,
+		1000000000,
+	}
+
+	renderFloatPrecisionRounders = [...]float64{
+		0.5,
+		0.05,
+		0.005,
+		0.0005,
+		0.00005,
+		0.000005,
+		0.0000005,
+		0.00000005,
+		0.000000005,
+		0.0000000005,
+	}
+)
+
+// FormatFloat produces a formatted number as a string based on the following user-specified criteria:
+// * thousands separator
+// * decimal separator
+// * decimal precision
+//
+// Usage: s := FormatFloat(format, n)
+// The format parameter tells how to render the number n.
+//
+// See examples: http://play.golang.org/p/LXc1Ddm1lJ
+//
+// Examples of format strings, given n = 12345.6789:
+// "#,###.##" => "12,345.68"
+// "#,###." => "12,346"
+// "#,###" => "12345,679"
+// "#\u202F###,##" => "12 345,68"
+// "#.###,######" => "12.345,678900"
+// "" (aka default format) => "12,345.68"
+//
+// The highest precision allowed is 9 digits after the decimal symbol.
+// There is also a version for integer numbers, FormatInteger(),
+// which is convenient for calls within templates.
+func FormatFloat(format string, n float64) string {
+	// Special cases:
+	//   NaN = "NaN"
+	//   +Inf = "+Infinity"
+	//   -Inf = "-Infinity"
+	if math.IsNaN(n) {
+		return "NaN"
+	}
+	if n > math.MaxFloat64 {
+		return "Infinity"
+	}
+	if n < -math.MaxFloat64 {
+		return "-Infinity"
+	}
+
+	// default format
+	precision := 2
+	decimalStr := "."
+	thousandStr := ","
+	positiveStr := ""
+	negativeStr := "-"
+
+	if len(format) > 0 {
+		format := []rune(format)
+
+		// If there is an explicit format directive,
+		// then default values are these:
+		precision = 9
+		thousandStr = ""
+
+		// collect indices of meaningful formatting directives
+		formatIndx := []int{}
+		for i, char := range format {
+			if char != '#' && char != '0' {
+				formatIndx = append(formatIndx, i)
+			}
+		}
+
+		if len(formatIndx) > 0 {
+			// Directive at index 0:
+			//   Must be a '+'
+			//   Raise an error if not the case
+			// index: 0123456789
+			//        +0.000,000
+			//        +000,000.0
+			//        +0000.00
+			//        +0000
+			if formatIndx[0] == 0 {
+				if format[formatIndx[0]] != '+' {
+					panic("RenderFloat(): invalid positive sign directive")
+				}
+				positiveStr = "+"
+				formatIndx = formatIndx[1:]
+			}
+
+			// Two directives:
+			//   First is thousands separator
+			//   Raise an error if not followed by 3-digit
+			// 0123456789
+			// 0.000,000
+			// 000,000.00
+			if len(formatIndx) == 2 {
+				if (formatIndx[1] - formatIndx[0]) != 4 {
+					panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
+				}
+				thousandStr = string(format[formatIndx[0]])
+				formatIndx = formatIndx[1:]
+			}
+
+			// One directive:
+			//   Directive is decimal separator
+			//   The number of digit-specifier following the separator indicates wanted precision
+			// 0123456789
+			// 0.00
+			// 000,0000
+			if len(formatIndx) == 1 {
+				decimalStr = string(format[formatIndx[0]])
+				precision = len(format) - formatIndx[0] - 1
+			}
+		}
+	}
+
+	// generate sign part
+	var signStr string
+	if n >= 0.000000001 {
+		signStr = positiveStr
+	} else if n <= -0.000000001 {
+		signStr = negativeStr
+		n = -n
+	} else {
+		signStr = ""
+		n = 0.0
+	}
+
+	// split number into integer and fractional parts
+	intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
+
+	// generate integer part string
+	intStr := strconv.FormatInt(int64(intf), 10)
+
+	// add thousand separator if required
+	if len(thousandStr) > 0 {
+		for i := len(intStr); i > 3; {
+			i -= 3
+			intStr = intStr[:i] + thousandStr + intStr[i:]
+		}
+	}
+
+	// no fractional part, we can leave now
+	if precision == 0 {
+		return signStr + intStr
+	}
+
+	// generate fractional part
+	fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
+	// may need padding
+	if len(fracStr) < precision {
+		fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
+	}
+
+	return signStr + intStr + decimalStr + fracStr
+}
+
+// FormatInteger produces a formatted number as string.
+// See FormatFloat.
+func FormatInteger(format string, n int) string {
+	return FormatFloat(format, float64(n))
+}
diff --git a/vendor/github.com/dustin/go-humanize/number_test.go b/vendor/github.com/dustin/go-humanize/number_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..516f3378d63cb87ac8cc72434186c96d54f5bb48
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/number_test.go
@@ -0,0 +1,79 @@
+package humanize
+
+import (
+	"math"
+	"testing"
+)
+
+type TestStruct struct {
+	name      string
+	format    string
+	num       float64
+	formatted string
+}
+
+func TestFormatFloat(t *testing.T) {
+	tests := []TestStruct{
+		{"default", "", 12345.6789, "12,345.68"},
+		{"#", "#", 12345.6789, "12345.678900000"},
+		{"#.", "#.", 12345.6789, "12346"},
+		{"#,#", "#,#", 12345.6789, "12345,7"},
+		{"#,##", "#,##", 12345.6789, "12345,68"},
+		{"#,###", "#,###", 12345.6789, "12345,679"},
+		{"#,###.", "#,###.", 12345.6789, "12,346"},
+		{"#,###.##", "#,###.##", 12345.6789, "12,345.68"},
+		{"#,###.###", "#,###.###", 12345.6789, "12,345.679"},
+		{"#,###.####", "#,###.####", 12345.6789, "12,345.6789"},
+		{"#.###,######", "#.###,######", 12345.6789, "12.345,678900"},
+		{"bug46", "#,###.##", 52746220055.92342, "52,746,220,055.92"},
+		{"#\u202f###,##", "#\u202f###,##", 12345.6789, "12 345,68"},
+
+		// special cases
+		{"NaN", "#", math.NaN(), "NaN"},
+		{"+Inf", "#", math.Inf(1), "Infinity"},
+		{"-Inf", "#", math.Inf(-1), "-Infinity"},
+		{"signStr <= -0.000000001", "", -0.000000002, "-0.00"},
+		{"signStr = 0", "", 0, "0.00"},
+		{"Format directive must start with +", "+000", 12345.6789, "+12345.678900000"},
+	}
+
+	for _, test := range tests {
+		got := FormatFloat(test.format, test.num)
+		if got != test.formatted {
+			t.Errorf("On %v (%v, %v), got %v, wanted %v",
+				test.name, test.format, test.num, got, test.formatted)
+		}
+	}
+	// Test a single integer
+	got := FormatInteger("#", 12345)
+	if got != "12345.000000000" {
+		t.Errorf("On %v (%v, %v), got %v, wanted %v",
+			"integerTest", "#", 12345, got, "12345.000000000")
+	}
+	// Test the things that could panic
+	panictests := []TestStruct{
+		{"RenderFloat(): invalid positive sign directive", "-", 12345.6789, "12,345.68"},
+		{"RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers", "0.01", 12345.6789, "12,345.68"},
+	}
+	for _, test := range panictests {
+		didPanic := false
+		var message interface{}
+		func() {
+
+			defer func() {
+				if message = recover(); message != nil {
+					didPanic = true
+				}
+			}()
+
+			// call the target function
+			_ = FormatFloat(test.format, test.num)
+
+		}()
+		if !didPanic {
+			t.Errorf("On %v, should have panicked but did not.",
+				test.name)
+		}
+	}
+}
diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go
new file mode 100644
index 0000000000000000000000000000000000000000..43d88a861950eac85b0f742a59621f92345d7109
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/ordinals.go
@@ -0,0 +1,25 @@
+package humanize
+
+import "strconv"
+
+// Ordinal gives you the input number in a rank/ordinal format.
+//
+// Ordinal(3) -> 3rd
+func Ordinal(x int) string {
+	suffix := "th"
+	switch x % 10 {
+	case 1:
+		if x%100 != 11 {
+			suffix = "st"
+		}
+	case 2:
+		if x%100 != 12 {
+			suffix = "nd"
+		}
+	case 3:
+		if x%100 != 13 {
+			suffix = "rd"
+		}
+	}
+	return strconv.Itoa(x) + suffix
+}
diff --git a/vendor/github.com/dustin/go-humanize/ordinals_test.go b/vendor/github.com/dustin/go-humanize/ordinals_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..51d85ee7a0b4c8d7876cbccce63a24f027b47970
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/ordinals_test.go
@@ -0,0 +1,22 @@
+package humanize
+
+import (
+	"testing"
+)
+
+func TestOrdinals(t *testing.T) {
+	testList{
+		{"0", Ordinal(0), "0th"},
+		{"1", Ordinal(1), "1st"},
+		{"2", Ordinal(2), "2nd"},
+		{"3", Ordinal(3), "3rd"},
+		{"4", Ordinal(4), "4th"},
+		{"10", Ordinal(10), "10th"},
+		{"11", Ordinal(11), "11th"},
+		{"12", Ordinal(12), "12th"},
+		{"13", Ordinal(13), "13th"},
+		{"101", Ordinal(101), "101st"},
+		{"102", Ordinal(102), "102nd"},
+		{"103", Ordinal(103), "103rd"},
+	}.validate(t)
+}
diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go
new file mode 100644
index 0000000000000000000000000000000000000000..b24e48169f4f9f042d8d7e01c8d3410f5aa56b33
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/si.go
@@ -0,0 +1,113 @@
+package humanize
+
+import (
+	"errors"
+	"math"
+	"regexp"
+	"strconv"
+)
+
+var siPrefixTable = map[float64]string{
+	-24: "y", // yocto
+	-21: "z", // zepto
+	-18: "a", // atto
+	-15: "f", // femto
+	-12: "p", // pico
+	-9:  "n", // nano
+	-6:  "µ", // micro
+	-3:  "m", // milli
+	0:   "",
+	3:   "k", // kilo
+	6:   "M", // mega
+	9:   "G", // giga
+	12:  "T", // tera
+	15:  "P", // peta
+	18:  "E", // exa
+	21:  "Z", // zetta
+	24:  "Y", // yotta
+}
+
+var revSIPrefixTable = revfmap(siPrefixTable)
+
+// revfmap reverses the map and precomputes the power multiplier
+func revfmap(in map[float64]string) map[string]float64 {
+	rv := map[string]float64{}
+	for k, v := range in {
+		rv[v] = math.Pow(10, k)
+	}
+	return rv
+}
+
+var riParseRegex *regexp.Regexp
+
+func init() {
+	ri := `^([\-0-9.]+)\s?([`
+	for _, v := range siPrefixTable {
+		ri += v
+	}
+	ri += `]?)(.*)`
+
+	riParseRegex = regexp.MustCompile(ri)
+}
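+
+// The compiled pattern is, illustratively,
+// ^([\-0-9.]+)\s?([yzafpnµmkMGTPEZY]?)(.*): a number, an optional SI
+// prefix character (class order varies with map iteration), and the
+// trailing unit.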
+
+// ComputeSI finds the most appropriate SI prefix for the given number
+// and returns the prefix along with the value adjusted to be within
+// that prefix.
+//
+// See also: SI, ParseSI.
+//
+// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
+func ComputeSI(input float64) (float64, string) {
+	if input == 0 {
+		return 0, ""
+	}
+	mag := math.Abs(input)
+	exponent := math.Floor(logn(mag, 10))
+	exponent = math.Floor(exponent/3) * 3
+
+	value := mag / math.Pow(10, exponent)
+
+	// Handle special case where value is exactly 1000.0
+	// Should return 1 M instead of 1000 k
+	if value == 1000.0 {
+		exponent += 3
+		value = mag / math.Pow(10, exponent)
+	}
+
+	value = math.Copysign(value, input)
+
+	prefix := siPrefixTable[exponent]
+	return value, prefix
+}
+
+// SI returns a string with default formatting.
+//
+// SI uses Ftoa to format float value, removing trailing zeros.
+//
+// See also: ComputeSI, ParseSI.
+//
+// e.g. SI(1000000, "B") -> 1 MB
+// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
+func SI(input float64, unit string) string {
+	value, prefix := ComputeSI(input)
+	return Ftoa(value) + " " + prefix + unit
+}
+
+var errInvalid = errors.New("invalid input")
+
+// ParseSI parses an SI string back into the number and unit.
+//
+// See also: SI, ComputeSI.
+//
+// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
+func ParseSI(input string) (float64, string, error) {
+	found := riParseRegex.FindStringSubmatch(input)
+	if len(found) != 4 {
+		return 0, "", errInvalid
+	}
+	mag := revSIPrefixTable[found[2]]
+	unit := found[3]
+
+	base, err := strconv.ParseFloat(found[1], 64)
+	return base * mag, unit, err
+}
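+
+// Illustrative round trip (values from the doc examples above):
+//
+//	s := SI(2.2345e-12, "F")  // "2.2345 pF"
+//	v, u, err := ParseSI(s)   // 2.2345e-12, "F", nil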
diff --git a/vendor/github.com/dustin/go-humanize/si_test.go b/vendor/github.com/dustin/go-humanize/si_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..bc5bac66984671c418490fd863e58424fae84d25
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/si_test.go
@@ -0,0 +1,101 @@
+package humanize
+
+import (
+	"math"
+	"testing"
+)
+
+func TestSI(t *testing.T) {
+	tests := []struct {
+		name      string
+		num       float64
+		formatted string
+	}{
+		{"e-24", 1e-24, "1 yF"},
+		{"e-21", 1e-21, "1 zF"},
+		{"e-18", 1e-18, "1 aF"},
+		{"e-15", 1e-15, "1 fF"},
+		{"e-12", 1e-12, "1 pF"},
+		{"e-12", 2.2345e-12, "2.2345 pF"},
+		{"e-12", 2.23e-12, "2.23 pF"},
+		{"e-11", 2.23e-11, "22.3 pF"},
+		{"e-10", 2.2e-10, "220 pF"},
+		{"e-9", 2.2e-9, "2.2 nF"},
+		{"e-8", 2.2e-8, "22 nF"},
+		{"e-7", 2.2e-7, "220 nF"},
+		{"e-6", 2.2e-6, "2.2 µF"},
+		{"e-6", 1e-6, "1 µF"},
+		{"e-5", 2.2e-5, "22 µF"},
+		{"e-4", 2.2e-4, "220 µF"},
+		{"e-3", 2.2e-3, "2.2 mF"},
+		{"e-2", 2.2e-2, "22 mF"},
+		{"e-1", 2.2e-1, "220 mF"},
+		{"e+0", 2.2e-0, "2.2 F"},
+		{"e+0", 2.2, "2.2 F"},
+		{"e+1", 2.2e+1, "22 F"},
+		{"0", 0, "0 F"},
+		{"e+1", 22, "22 F"},
+		{"e+2", 2.2e+2, "220 F"},
+		{"e+2", 220, "220 F"},
+		{"e+3", 2.2e+3, "2.2 kF"},
+		{"e+3", 2200, "2.2 kF"},
+		{"e+4", 2.2e+4, "22 kF"},
+		{"e+4", 22000, "22 kF"},
+		{"e+5", 2.2e+5, "220 kF"},
+		{"e+6", 2.2e+6, "2.2 MF"},
+		{"e+6", 1e+6, "1 MF"},
+		{"e+7", 2.2e+7, "22 MF"},
+		{"e+8", 2.2e+8, "220 MF"},
+		{"e+9", 2.2e+9, "2.2 GF"},
+		{"e+10", 2.2e+10, "22 GF"},
+		{"e+11", 2.2e+11, "220 GF"},
+		{"e+12", 2.2e+12, "2.2 TF"},
+		{"e+15", 2.2e+15, "2.2 PF"},
+		{"e+18", 2.2e+18, "2.2 EF"},
+		{"e+21", 2.2e+21, "2.2 ZF"},
+		{"e+24", 2.2e+24, "2.2 YF"},
+
+		// special case
+		{"1F", 1000 * 1000, "1 MF"},
+		{"1F", 1e6, "1 MF"},
+
+		// negative number
+		{"-100 F", -100, "-100 F"},
+	}
+
+	for _, test := range tests {
+		got := SI(test.num, "F")
+		if got != test.formatted {
+			t.Errorf("On %v (%v), got %v, wanted %v",
+				test.name, test.num, got, test.formatted)
+		}
+
+		gotf, gotu, err := ParseSI(test.formatted)
+		if err != nil {
+			t.Errorf("Error parsing %v (%v): %v", test.name, test.formatted, err)
+			continue
+		}
+
+		if math.Abs(1-(gotf/test.num)) > 0.01 {
+			t.Errorf("On %v (%v), got %v, wanted %v (±%v)",
+				test.name, test.formatted, gotf, test.num,
+				math.Abs(1-(gotf/test.num)))
+		}
+		if gotu != "F" {
+			t.Errorf("On %v (%v), expected unit F, got %v",
+				test.name, test.formatted, gotu)
+		}
+	}
+
+	// Parse error
+	gotf, gotu, err := ParseSI("x1.21JW") // 1.21 jigga whats
+	if err == nil {
+		t.Errorf("Expected error on x1.21JW, got %v %v", gotf, gotu)
+	}
+}
+
+func BenchmarkParseSI(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		ParseSI("2.2346ZB")
+	}
+}
diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd3fbf5efc0cc20d715748c4958b03d55f74d525
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/times.go
@@ -0,0 +1,117 @@
+package humanize
+
+import (
+	"fmt"
+	"math"
+	"sort"
+	"time"
+)
+
+// Time units used for relative formatting. Month and Year are
+// approximations: a 30-day month and a 12-month (360-day) year.
+const (
+	Day      = 24 * time.Hour
+	Week     = 7 * Day
+	Month    = 30 * Day
+	Year     = 12 * Month
+	LongTime = 37 * Year
+)
+
+// Time formats a time into a relative string.
+//
+// Time(someT) -> "3 weeks ago"
+func Time(then time.Time) string {
+	return RelTime(then, time.Now(), "ago", "from now")
+}
+
+// A RelTimeMagnitude struct contains a relative time point at which
+// the relative format of time will switch to a new format string.  A
+// slice of these in ascending order by their "D" field is passed to
+// CustomRelTime to format durations.
+//
+// The Format field is a string that may contain a "%s" which will be
+// replaced with the appropriate signed label (e.g. "ago" or "from
+// now") and a "%d" that will be replaced by the quantity.
+//
+// The DivBy field is the amount of time the time difference must be
+// divided by in order to display correctly.
+//
+// For example, if D is 2*time.Minute and you want to display
+// "%d minutes %s", DivBy should be time.Minute, so that the duration
+// is expressed in minutes.
+type RelTimeMagnitude struct {
+	D      time.Duration
+	Format string
+	DivBy  time.Duration
+}
+
+var defaultMagnitudes = []RelTimeMagnitude{
+	{time.Second, "now", time.Second},
+	{2 * time.Second, "1 second %s", 1},
+	{time.Minute, "%d seconds %s", time.Second},
+	{2 * time.Minute, "1 minute %s", 1},
+	{time.Hour, "%d minutes %s", time.Minute},
+	{2 * time.Hour, "1 hour %s", 1},
+	{Day, "%d hours %s", time.Hour},
+	{2 * Day, "1 day %s", 1},
+	{Week, "%d days %s", Day},
+	{2 * Week, "1 week %s", 1},
+	{Month, "%d weeks %s", Week},
+	{2 * Month, "1 month %s", 1},
+	{Year, "%d months %s", Month},
+	{18 * Month, "1 year %s", 1},
+	{2 * Year, "2 years %s", 1},
+	{LongTime, "%d years %s", Year},
+	{math.MaxInt64, "a long while %s", 1},
+}
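+
+// For instance (editorial note), a 90-second difference selects the first
+// entry whose D exceeds it, {2 * time.Minute, "1 minute %s", 1}, and is
+// rendered as "1 minute ago".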
+
+// RelTime formats a time into a relative string.
+//
+// It takes two times and two labels.  In addition to the generic time
+// delta string (e.g. 5 minutes), the label corresponding to the smaller
+// time is applied.
+//
+// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
+func RelTime(a, b time.Time, albl, blbl string) string {
+	return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
+}
+
+// CustomRelTime formats a time into a relative string.
+//
+// It takes two times, two labels, and a table of relative time formats.
+// In addition to the generic time delta string (e.g. 5 minutes), the
+// label corresponding to the smaller time is applied.
+func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
+	lbl := albl
+	diff := b.Sub(a)
+
+	if a.After(b) {
+		lbl = blbl
+		diff = a.Sub(b)
+	}
+
+	n := sort.Search(len(magnitudes), func(i int) bool {
+		return magnitudes[i].D > diff
+	})
+
+	if n >= len(magnitudes) {
+		n = len(magnitudes) - 1
+	}
+	mag := magnitudes[n]
+	args := []interface{}{}
+	escaped := false
+	for _, ch := range mag.Format {
+		if escaped {
+			switch ch {
+			case 's':
+				args = append(args, lbl)
+			case 'd':
+				args = append(args, diff/mag.DivBy)
+			}
+			escaped = false
+		} else {
+			escaped = ch == '%'
+		}
+	}
+	return fmt.Sprintf(mag.Format, args...)
+}
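+
+// A minimal sketch (editorial, using only the API defined above) of supplying
+// a custom magnitude table; CustomRelTime's format scanner substitutes %d and
+// %s for custom tables exactly as it does for the default one:
+//
+//	mags := []RelTimeMagnitude{
+//		{time.Minute, "moments %s", time.Second},
+//		{time.Hour, "%d minutes %s", time.Minute},
+//		{math.MaxInt64, "ages %s", 1},
+//	}
+//	_ = CustomRelTime(then, time.Now(), "ago", "from now", mags)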
diff --git a/vendor/github.com/dustin/go-humanize/times_test.go b/vendor/github.com/dustin/go-humanize/times_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b1ab8bf8288de6478b82e3013a922bb0c68fbb77
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/times_test.go
@@ -0,0 +1,124 @@
+package humanize
+
+import (
+	"math"
+	"testing"
+	"time"
+)
+
+func TestPast(t *testing.T) {
+	now := time.Now()
+	testList{
+		{"now", Time(now), "now"},
+		{"1 second ago", Time(now.Add(-1 * time.Second)), "1 second ago"},
+		{"12 seconds ago", Time(now.Add(-12 * time.Second)), "12 seconds ago"},
+		{"30 seconds ago", Time(now.Add(-30 * time.Second)), "30 seconds ago"},
+		{"45 seconds ago", Time(now.Add(-45 * time.Second)), "45 seconds ago"},
+		{"1 minute ago", Time(now.Add(-63 * time.Second)), "1 minute ago"},
+		{"15 minutes ago", Time(now.Add(-15 * time.Minute)), "15 minutes ago"},
+		{"1 hour ago", Time(now.Add(-63 * time.Minute)), "1 hour ago"},
+		{"2 hours ago", Time(now.Add(-2 * time.Hour)), "2 hours ago"},
+		{"21 hours ago", Time(now.Add(-21 * time.Hour)), "21 hours ago"},
+		{"1 day ago", Time(now.Add(-26 * time.Hour)), "1 day ago"},
+		{"2 days ago", Time(now.Add(-49 * time.Hour)), "2 days ago"},
+		{"3 days ago", Time(now.Add(-3 * Day)), "3 days ago"},
+		{"1 week ago (1)", Time(now.Add(-7 * Day)), "1 week ago"},
+		{"1 week ago (2)", Time(now.Add(-12 * Day)), "1 week ago"},
+		{"2 weeks ago", Time(now.Add(-15 * Day)), "2 weeks ago"},
+		{"1 month ago", Time(now.Add(-39 * Day)), "1 month ago"},
+		{"3 months ago", Time(now.Add(-99 * Day)), "3 months ago"},
+		{"1 year ago (1)", Time(now.Add(-365 * Day)), "1 year ago"},
+		{"1 year ago (1)", Time(now.Add(-400 * Day)), "1 year ago"},
+		{"2 years ago (1)", Time(now.Add(-548 * Day)), "2 years ago"},
+		{"2 years ago (2)", Time(now.Add(-725 * Day)), "2 years ago"},
+		{"2 years ago (3)", Time(now.Add(-800 * Day)), "2 years ago"},
+		{"3 years ago", Time(now.Add(-3 * Year)), "3 years ago"},
+		{"long ago", Time(now.Add(-LongTime)), "a long while ago"},
+	}.validate(t)
+}
+
+func TestReltimeOffbyone(t *testing.T) {
+	testList{
+		{"1w-1", RelTime(time.Unix(0, 0), time.Unix(7*24*60*60, -1), "ago", ""), "6 days ago"},
+		{"1w±0", RelTime(time.Unix(0, 0), time.Unix(7*24*60*60, 0), "ago", ""), "1 week ago"},
+		{"1w+1", RelTime(time.Unix(0, 0), time.Unix(7*24*60*60, 1), "ago", ""), "1 week ago"},
+		{"2w-1", RelTime(time.Unix(0, 0), time.Unix(14*24*60*60, -1), "ago", ""), "1 week ago"},
+		{"2w±0", RelTime(time.Unix(0, 0), time.Unix(14*24*60*60, 0), "ago", ""), "2 weeks ago"},
+		{"2w+1", RelTime(time.Unix(0, 0), time.Unix(14*24*60*60, 1), "ago", ""), "2 weeks ago"},
+	}.validate(t)
+}
+
+func TestFuture(t *testing.T) {
+	// Add a little time so that these things properly line up in
+	// the future.
+	now := time.Now().Add(time.Millisecond * 250)
+	testList{
+		{"now", Time(now), "now"},
+		{"1 second from now", Time(now.Add(+1 * time.Second)), "1 second from now"},
+		{"12 seconds from now", Time(now.Add(+12 * time.Second)), "12 seconds from now"},
+		{"30 seconds from now", Time(now.Add(+30 * time.Second)), "30 seconds from now"},
+		{"45 seconds from now", Time(now.Add(+45 * time.Second)), "45 seconds from now"},
+		{"15 minutes from now", Time(now.Add(+15 * time.Minute)), "15 minutes from now"},
+		{"2 hours from now", Time(now.Add(+2 * time.Hour)), "2 hours from now"},
+		{"21 hours from now", Time(now.Add(+21 * time.Hour)), "21 hours from now"},
+		{"1 day from now", Time(now.Add(+26 * time.Hour)), "1 day from now"},
+		{"2 days from now", Time(now.Add(+49 * time.Hour)), "2 days from now"},
+		{"3 days from now", Time(now.Add(+3 * Day)), "3 days from now"},
+		{"1 week from now (1)", Time(now.Add(+7 * Day)), "1 week from now"},
+		{"1 week from now (2)", Time(now.Add(+12 * Day)), "1 week from now"},
+		{"2 weeks from now", Time(now.Add(+15 * Day)), "2 weeks from now"},
+		{"1 month from now", Time(now.Add(+30 * Day)), "1 month from now"},
+		{"1 year from now", Time(now.Add(+365 * Day)), "1 year from now"},
+		{"2 years from now", Time(now.Add(+2 * Year)), "2 years from now"},
+		{"a while from now", Time(now.Add(+LongTime)), "a long while from now"},
+	}.validate(t)
+}
+
+func TestRange(t *testing.T) {
+	start := time.Time{}
+	end := time.Unix(math.MaxInt64, math.MaxInt64)
+	x := RelTime(start, end, "ago", "from now")
+	if x != "a long while from now" {
+		t.Errorf("Expected a long while from now, got %q", x)
+	}
+}
+
+func TestCustomRelTime(t *testing.T) {
+	now := time.Now().Add(time.Millisecond * 250)
+	magnitudes := []RelTimeMagnitude{
+		{time.Second, "now", time.Second},
+		{2 * time.Second, "1 second %s", 1},
+		{time.Minute, "%d seconds %s", time.Second},
+		{Day - time.Second, "%d minutes %s", time.Minute},
+		{Day, "%d hours %s", time.Hour},
+		{2 * Day, "1 day %s", 1},
+		{Week, "%d days %s", Day},
+		{2 * Week, "1 week %s", 1},
+		{6 * Month, "%d weeks %s", Week},
+		{Year, "%d months %s", Month},
+	}
+	customRelTime := func(then time.Time) string {
+		return CustomRelTime(then, time.Now(), "ago", "from now", magnitudes)
+	}
+	testList{
+		{"now", customRelTime(now), "now"},
+		{"1 second from now", customRelTime(now.Add(+1 * time.Second)), "1 second from now"},
+		{"12 seconds from now", customRelTime(now.Add(+12 * time.Second)), "12 seconds from now"},
+		{"30 seconds from now", customRelTime(now.Add(+30 * time.Second)), "30 seconds from now"},
+		{"45 seconds from now", customRelTime(now.Add(+45 * time.Second)), "45 seconds from now"},
+		{"15 minutes from now", customRelTime(now.Add(+15 * time.Minute)), "15 minutes from now"},
+		{"2 hours from now", customRelTime(now.Add(+2 * time.Hour)), "120 minutes from now"},
+		{"21 hours from now", customRelTime(now.Add(+21 * time.Hour)), "1260 minutes from now"},
+		{"1 day from now", customRelTime(now.Add(+26 * time.Hour)), "1 day from now"},
+		{"2 days from now", customRelTime(now.Add(+49 * time.Hour)), "2 days from now"},
+		{"3 days from now", customRelTime(now.Add(+3 * Day)), "3 days from now"},
+		{"1 week from now (1)", customRelTime(now.Add(+7 * Day)), "1 week from now"},
+		{"1 week from now (2)", customRelTime(now.Add(+12 * Day)), "1 week from now"},
+		{"2 weeks from now", customRelTime(now.Add(+15 * Day)), "2 weeks from now"},
+		{"1 month from now", customRelTime(now.Add(+30 * Day)), "4 weeks from now"},
+		{"6 months from now", customRelTime(now.Add(+6*Month - time.Second)), "25 weeks from now"},
+		{"1 year from now", customRelTime(now.Add(+365 * Day)), "12 months from now"},
+		{"2 years from now", customRelTime(now.Add(+2 * Year)), "24 months from now"},
+		{"a while from now", customRelTime(now.Add(+LongTime)), "444 months from now"},
+	}.validate(t)
+}
diff --git a/vendor/github.com/go-ini/ini/section_test.go b/vendor/github.com/go-ini/ini/section_test.go
index 1f16cf2afeef386cd89a40389f71ce223b2c0fe7..80282c1979ff47a4a821203df71a3185933bc2a8 100644
--- a/vendor/github.com/go-ini/ini/section_test.go
+++ b/vendor/github.com/go-ini/ini/section_test.go
@@ -72,4 +72,4 @@ func Test_SectionRaw(t *testing.T) {
 111111111111111111100000000000111000000000 – end my lesson state data`)
 		})
 	})
-}
+}
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/functional_tests.go b/vendor/github.com/minio/minio-go/functional_tests.go
new file mode 100644
index 0000000000000000000000000000000000000000..ec554e4fe9ae585d930179d0a1a666f482cc3577
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/functional_tests.go
@@ -0,0 +1,4912 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+	"bytes"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net/http"
+	"net/url"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	minio "github.com/minio/minio-go"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/dustin/go-humanize"
+	"github.com/minio/minio-go/pkg/encrypt"
+	"github.com/minio/minio-go/pkg/policy"
+)
+
+const (
+	sixtyFiveMiB   = 65 * humanize.MiByte // 65MiB
+	thirtyThreeKiB = 33 * humanize.KiByte // 33KiB
+)
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyz0123456789"
+const (
+	letterIdxBits = 6                    // 6 bits to represent a letter index
+	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
+)
+const (
+	serverEndpoint = "SERVER_ENDPOINT"
+	accessKey      = "ACCESS_KEY"
+	secretKey      = "SECRET_KEY"
+	enableHTTPS    = "ENABLE_HTTPS"
+)
+
+type mintJSONFormatter struct {
+}
+
+func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
+	data := make(log.Fields, len(entry.Data))
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+
+	serialized, err := json.Marshal(data)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+	}
+	return append(serialized, '\n'), nil
+}
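+
+// A serialized entry looks roughly like this (editorial illustration; the
+// keys appear in sorted order because encoding/json sorts map keys):
+//
+//	{"duration":42,"function":"MakeBucket(bucketName, region)","name":"minio-go","status":"pass"}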
+
+// log successful test runs
+func successLogger(function string, args map[string]interface{}, startTime time.Time) *log.Entry {
+	// calculate the test case duration
+	duration := time.Since(startTime)
+	// log with the fields as per mint
+	fields := log.Fields{"name": "minio-go", "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "pass"}
+	return log.WithFields(fields)
+}
+
+// log failed test runs
+func failureLog(function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
+	// calculate the test case duration
+	duration := time.Since(startTime)
+	var fields log.Fields
+	// log with the fields as per mint
+	if err != nil {
+		fields = log.Fields{"name": "minio-go", "function": function, "args": args,
+			"duration": duration.Nanoseconds() / 1000000, "status": "fail", "alert": alert, "message": message, "error": err}
+	} else {
+		fields = log.Fields{"name": "minio-go", "function": function, "args": args,
+			"duration": duration.Nanoseconds() / 1000000, "status": "fail", "alert": alert, "message": message}
+	}
+	return log.WithFields(fields)
+}
+
+// log not applicable test runs
+func ignoredLog(function string, args map[string]interface{}, startTime time.Time, message string) *log.Entry {
+	// calculate the test case duration
+	duration := time.Since(startTime)
+	// log with the fields as per mint
+	fields := log.Fields{"name": "minio-go", "function": function, "args": args,
+		"duration": duration.Nanoseconds() / 1000000, "status": "na", "message": message}
+	return log.WithFields(fields)
+}
+
+func init() {
+	// If server endpoint is not set, all tests default to
+	// using https://play.minio.io:9000
+	if os.Getenv(serverEndpoint) == "" {
+		os.Setenv(serverEndpoint, "play.minio.io:9000")
+		os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F")
+		os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG")
+		os.Setenv(enableHTTPS, "1")
+	}
+}
+
+func getDataDir() (dir string) {
+	dir = os.Getenv("MINT_DATA_DIR")
+	if dir == "" {
+		dir = "/mint/data"
+	}
+	return
+}
+
+func getFilePath(filename string) (filepath string) {
+	if getDataDir() != "" {
+		filepath = getDataDir() + "/" + filename
+	}
+	return
+}
+
+type sizedReader struct {
+	io.Reader
+	size int
+}
+
+func (l *sizedReader) Size() int {
+	return l.size
+}
+
+func (l *sizedReader) Close() error {
+	return nil
+}
+
+type randomReader struct{ seed []byte }
+
+func (r *randomReader) Read(b []byte) (int, error) {
+	return copy(b, bytes.Repeat(r.seed, len(b))), nil
+}
+
+// getDataReader returns a reader for the named file if it exists, or a generated buffer of the given size otherwise.
+func getDataReader(fileName string, size int) io.ReadCloser {
+	if _, err := os.Stat(getFilePath(fileName)); os.IsNotExist(err) {
+		return &sizedReader{
+			Reader: io.LimitReader(&randomReader{seed: []byte("a")}, int64(size)),
+			size:   size,
+		}
+	}
+	reader, _ := os.Open(getFilePath(fileName))
+	return reader
+}
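+
+// Usage sketch (editorial): tests pass a well-known mint data file name plus
+// a fallback size; without MINT_DATA_DIR, the randomReader above supplies an
+// endless stream of 'a' bytes, capped by io.LimitReader:
+//
+//	r := getDataReader("datafile-65-MB", sixtyFiveMiB)
+//	defer r.Close()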
+
+// randString generates random names and prepends them with a known prefix.
+func randString(n int, src rand.Source, prefix string) string {
+	b := make([]byte, n)
+	// A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
+	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
+		if remain == 0 {
+			cache, remain = src.Int63(), letterIdxMax
+		}
+		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+			b[i] = letterBytes[idx]
+			i--
+		}
+		cache >>= letterIdxBits
+		remain--
+	}
+	return prefix + string(b[0:30-len(prefix)])
+}
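+
+// Editorial note on randString: each rand.Int63() call provides 63 random
+// bits, which the loop slices into up to letterIdxMax (63/6 = 10) six-bit
+// indices before drawing fresh bits; the result is always truncated to 30
+// characters including the prefix, regardless of n.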
+
+func isQuickMode() bool {
+	return os.Getenv("MODE") == "quick"
+}
+
+// Tests bucket re-create errors.
+func testMakeBucketError() {
+	region := "eu-central-1"
+
+	// initialize logging params
+	startTime := time.Now()
+	function := "MakeBucket(bucketName, region)"
+	// initialize logging params
+	args := map[string]interface{}{
+		"bucketName": "",
+		"region":     region,
+	}
+
+	// skipping region functional tests for non s3 runs
+	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
+		ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+		return
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket in 'eu-central-1'.
+	if err = c.MakeBucket(bucketName, region); err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket Failed", err).Fatal()
+	}
+	if err = c.MakeBucket(bucketName, region); err == nil {
+		failureLog(function, args, startTime, "", "Bucket already exists", err).Fatal()
+	}
+	// Verify valid error response from server.
+	if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
+		minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
+		failureLog(function, args, startTime, "", "Invalid error returned by server", err).Fatal()
+	}
+	if err = c.RemoveBucket(bucketName); err != nil {
+		failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal()
+	}
+
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests various bucket supported formats.
+func testMakeBucketRegions() {
+	region := "eu-central-1"
+	// initialize logging params
+	startTime := time.Now()
+	function := "MakeBucket(bucketName, region)"
+	// initialize logging params
+	args := map[string]interface{}{
+		"bucketName": "",
+		"region":     region,
+	}
+
+	// skipping region functional tests for non s3 runs
+	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
+		ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+		return
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket in 'eu-central-1'.
+	if err = c.MakeBucket(bucketName, region); err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	if err = c.RemoveBucket(bucketName); err != nil {
+		failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal()
+	}
+
+	// Make a new bucket with '.' in its name, in 'us-west-2'. This
+	// request is internally handled using path style rather than
+	// virtual host style.
+	region = "us-west-2"
+	args["region"] = region
+	if err = c.MakeBucket(bucketName+".withperiod", region); err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Remove the newly created bucket.
+	if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil {
+		failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal()
+	}
+
+	successLogger(function, args, startTime).Info()
+}
+
+// Test PutObject using large data to trigger a multipart ReadAt upload.
+func testPutObjectReadAt() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "PutObject(bucketName, objectName, reader, objectContentType)"
+	args := map[string]interface{}{
+		"bucketName":        "",
+		"objectName":        "",
+		"objectContentType": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "Make bucket failed", err).Fatal()
+	}
+
+	// Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover.
+	// Use different data for each part for multipart tests to ensure part order at the end.
+	var reader = getDataReader("datafile-65-MB", sixtyFiveMiB)
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Object content type
+	objectContentType := "binary/octet-stream"
+	args["objectContentType"] = objectContentType
+
+	n, err := c.PutObject(bucketName, objectName, reader, objectContentType)
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(sixtyFiveMiB) {
+		failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal()
+	}
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Get Object failed", err).Fatal()
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat Object failed", err).Fatal()
+	}
+	if st.Size != int64(sixtyFiveMiB) {
+		failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(sixtyFiveMiB)+" got "+string(st.Size), err).Fatal()
+	}
+	if st.ContentType != objectContentType {
+		failureLog(function, args, startTime, "", "Content types don't match", err).Fatal()
+	}
+	if err := r.Close(); err != nil {
+		failureLog(function, args, startTime, "", "Object Close failed", err).Fatal()
+	}
+	if err := r.Close(); err == nil {
+		failureLog(function, args, startTime, "", "Object is already closed, didn't return error on Close", err).Fatal()
+	}
+
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+	err = c.RemoveBucket(bucketName)
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+
+	successLogger(function, args, startTime).Info()
+}
+
+// Test PutObjectWithMetadata using large data to trigger a multipart upload.
+func testPutObjectWithMetadata() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "PutObjectWithMetadata(bucketName, objectName, reader, metadata, progress)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"metadata":   "",
+	}
+
+	if isQuickMode() {
+		ignoredLog(function, args, startTime, "Skipping functional tests for short runs").Info()
+		return
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "Make bucket failed", err).Fatal()
+	}
+
+	// Generate data using 2 parts
+	// Use different data in each part for multipart tests to ensure part order at the end.
+	var reader = getDataReader("datafile-65-MB", sixtyFiveMiB)
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Object custom metadata
+	customContentType := "custom/contenttype"
+
+	n, err := c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{
+		"Content-Type": {customContentType},
+	}, nil)
+	args["metadata"] = map[string][]string{
+		"Content-Type": {customContentType},
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(sixtyFiveMiB) {
+		failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal()
+	}
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+	}
+	if st.Size != int64(sixtyFiveMiB) {
+		failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(sixtyFiveMiB)+" got "+string(st.Size), err).Fatal()
+	}
+	if st.ContentType != customContentType {
+		failureLog(function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err).Fatal()
+	}
+	if err := r.Close(); err != nil {
+		failureLog(function, args, startTime, "", "Object Close failed", err).Fatal()
+	}
+	if err := r.Close(); err == nil {
+		failureLog(function, args, startTime, "", "Object already closed, should respond with error", err).Fatal()
+	}
+
+	if err = c.RemoveObject(bucketName, objectName); err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	if err = c.RemoveBucket(bucketName); err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+
+	successLogger(function, args, startTime).Info()
+}
+
+// Test put object with streaming signature.
+func testPutObjectStreaming() {
+	// initialize logging params
+	objectName := "test-object"
+	startTime := time.Now()
+	function := "PutObjectStreaming(bucketName, objectName, reader)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": objectName,
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV4(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
+		"minio-go-test")
+	args["bucketName"] = bucketName
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Upload an object.
+	sizes := []int64{0, 64*1024 - 1, 64 * 1024}
+
+	for _, size := range sizes {
+		data := bytes.Repeat([]byte("a"), int(size))
+		n, err := c.PutObjectStreaming(bucketName, objectName, bytes.NewReader(data))
+		if err != nil {
+			failureLog(function, args, startTime, "", "PutObjectStreaming failed", err).Fatal()
+		}
+
+		if n != size {
+			failureLog(function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err).Fatal()
+		}
+	}
+
+	// Remove the object.
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	// Remove the bucket.
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Test listing partially uploaded objects.
+func testListPartiallyUploaded() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
+	args := map[string]interface{}{
+		"bucketName":  "",
+		"objectName":  "",
+		"isRecursive": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Enable tracing, write to stdout.
+	// c.TraceOn(os.Stderr)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	r := bytes.NewReader(bytes.Repeat([]byte("0"), sixtyFiveMiB*2))
+
+	reader, writer := io.Pipe()
+	go func() {
+		i := 0
+		for i < 25 {
+			_, cerr := io.CopyN(writer, r, (sixtyFiveMiB*2)/25)
+			if cerr != nil {
+				failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+			}
+			i++
+			r.Seek(0, 0)
+		}
+		writer.CloseWithError(errors.New("proactively closed to be verified later"))
+	}()
+
+	objectName := bucketName + "-resumable"
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
+	if err == nil {
+		failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal()
+	}
+	if !strings.Contains(err.Error(), "proactively closed to be verified later") {
+		failureLog(function, args, startTime, "", "String not found in PutObject output", err).Fatal()
+	}
+
+	doneCh := make(chan struct{})
+	defer close(doneCh)
+	isRecursive := true
+	args["isRecursive"] = isRecursive
+
+	multiPartObjectCh := c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)
+	for multiPartObject := range multiPartObjectCh {
+		if multiPartObject.Err != nil {
+			failureLog(function, args, startTime, "", "Multipart object error", multiPartObject.Err).Fatal()
+		}
+	}
+
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Test get object seeker from the end, using whence set to '2'.
+func testGetObjectSeekEnd() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Generate 33K of data.
+	var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+	}
+
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+	}
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+	}
+
+	if st.Size != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal()
+	}
+
+	pos, err := r.Seek(-100, 2)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Object Seek failed", err).Fatal()
+	}
+	if pos != st.Size-100 {
+		failureLog(function, args, startTime, "", "Incorrect position", err).Fatal()
+	}
+	buf2 := make([]byte, 100)
+	m, err := io.ReadFull(r, buf2)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Error reading through io.ReadFull", err).Fatal()
+	}
+	if m != len(buf2) {
+		failureLog(function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err).Fatal()
+	}
+	hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
+	hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
+	if hexBuf1 != hexBuf2 {
+		failureLog(function, args, startTime, "", "Values at same index dont match", err).Fatal()
+	}
+	pos, err = r.Seek(-100, 2)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Object Seek failed", err).Fatal()
+	}
+	if pos != st.Size-100 {
+		failureLog(function, args, startTime, "", "Incorrect position", err).Fatal()
+	}
+	if err = r.Close(); err != nil {
+		failureLog(function, args, startTime, "", "ObjectClose failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Test get object reader to not throw error on being closed twice.
+func testGetObjectClosedTwice() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Generate 33K of data.
+	var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "PutObject response doesn't match sent bytes, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+	}
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+	}
+	if st.Size != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal()
+	}
+	if err := r.Close(); err != nil {
+		failureLog(function, args, startTime, "", "Object Close failed", err).Fatal()
+	}
+	if err := r.Close(); err == nil {
+		failureLog(function, args, startTime, "", "Already closed object. No error returned", err).Fatal()
+	}
+
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Test removing multiple objects with Remove API
+func testRemoveMultipleObjects() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "RemoveObjects(bucketName, objectsCh)"
+	args := map[string]interface{}{
+		"bucketName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Enable tracing, write to stdout.
+	// c.TraceOn(os.Stderr)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+	// Multi remove of 1100 objects
+	nrObjects := 1100
+
+	objectsCh := make(chan string)
+
+	go func() {
+		defer close(objectsCh)
+		// Upload objects and send them to objectsCh
+		for i := 0; i < nrObjects; i++ {
+			objectName := "sample" + strconv.Itoa(i) + ".txt"
+			_, err = c.PutObject(bucketName, objectName, r, "application/octet-stream")
+			if err != nil {
+				failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+				continue
+			}
+			objectsCh <- objectName
+		}
+	}()
+
+	// Call RemoveObjects API
+	errorCh := c.RemoveObjects(bucketName, objectsCh)
+
+	// Check if errorCh doesn't receive any error
+	if removeErr, more := <-errorCh; more {
+		failureLog(function, args, startTime, "", "Unexpected error", removeErr.Err).Fatal()
+	}
+
+	// Clean the bucket created by the test
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests removing partially uploaded objects.
+func testRemovePartiallyUploaded() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "RemoveIncompleteUpload(bucketName, objectName)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Enable tracing, write to stdout.
+	// c.TraceOn(os.Stderr)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
+
+	reader, writer := io.Pipe()
+	go func() {
+		i := 0
+		for i < 25 {
+			_, cerr := io.CopyN(writer, r, 128*1024)
+			if cerr != nil {
+				failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+			}
+			i++
+			r.Seek(0, 0)
+		}
+		writer.CloseWithError(errors.New("proactively closed to be verified later"))
+	}()
+
+	objectName := bucketName + "-resumable"
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
+	if err == nil {
+		failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal()
+	}
+	if !strings.Contains(err.Error(), "proactively closed to be verified later") {
+		failureLog(function, args, startTime, "", "String not found", err).Fatal()
+	}
+	err = c.RemoveIncompleteUpload(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveIncompleteUpload failed", err).Fatal()
+	}
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests FPutObject of a big file to trigger multipart
+func testFPutObjectMultipart() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "FPutObject(bucketName, objectName, fileName, objectContentType)"
+	args := map[string]interface{}{
+		"bucketName":        "",
+		"objectName":        "",
+		"fileName":          "",
+		"objectContentType": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+	var fileName = getFilePath("datafile-65-MB")
+	if os.Getenv("MINT_DATA_DIR") == "" {
+		// Make a temp file with minPartSize bytes of data.
+		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+		if err != nil {
+			failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal()
+		}
+		// Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+		_, err = io.Copy(file, getDataReader("non-existent", sixtyFiveMiB))
+		if err != nil {
+			failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+		}
+		err = file.Close()
+		if err != nil {
+			failureLog(function, args, startTime, "", "File Close failed", err).Fatal()
+		}
+		fileName = file.Name()
+		args["fileName"] = fileName
+	}
+	totalSize := sixtyFiveMiB * 1
+	// Set base object name
+	objectName := bucketName + "FPutObject" + "-standard"
+	args["objectName"] = objectName
+
+	objectContentType := "testapplication/octet-stream"
+	args["objectContentType"] = objectContentType
+
+	// Perform standard FPutObject with a custom contentType provided (expecting it to be preserved).
+	n, err := c.FPutObject(bucketName, objectName, fileName, objectContentType)
+	if err != nil {
+		failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+	}
+	if n != int64(totalSize) {
+		failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+	}
+
+	r, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+	objInfo, err := r.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Unexpected error", err).Fatal()
+	}
+	if objInfo.Size != int64(totalSize) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err).Fatal()
+	}
+	if objInfo.ContentType != objectContentType {
+		failureLog(function, args, startTime, "", "ContentType doesn't match", err).Fatal()
+	}
+
+	// Remove all objects and bucket and temp file
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests FPutObject with null contentType (default = application/octet-stream)
+func testFPutObject() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "FPutObject(bucketName, objectName, fileName, objectContentType)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Upload enough data to use all 3 multipart 'workers' and still have an extra part.
+	// Use different data in each part for multipart tests to check parts are uploaded in the correct order.
+	var fName = getFilePath("datafile-65-MB")
+	if os.Getenv("MINT_DATA_DIR") == "" {
+		// Make a temp file with minPartSize bytes of data.
+		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+		if err != nil {
+			failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal()
+		}
+
+		// Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+		var buffer = bytes.Repeat([]byte{'a'}, sixtyFiveMiB)
+		if _, err = file.Write(buffer); err != nil {
+			failureLog(function, args, startTime, "", "File write failed", err).Fatal()
+		}
+		// Close the file pro-actively for windows.
+		err = file.Close()
+		if err != nil {
+			failureLog(function, args, startTime, "", "File close failed", err).Fatal()
+		}
+		fName = file.Name()
+	}
+	var totalSize = sixtyFiveMiB * 1
+
+	// Set base object name
+	objectName := bucketName + "FPutObject"
+	args["objectName"] = objectName
+
+	// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+	n, err := c.FPutObject(bucketName, objectName+"-standard", fName, "application/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+	}
+	if n != int64(totalSize) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal()
+	}
+
+	// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+	n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, "")
+	if err != nil {
+		failureLog(function, args, startTime, "", "File close failed", err).Fatal()
+	}
+	if n != int64(totalSize) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal()
+	}
+	srcFile, err := os.Open(fName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "File open failed", err).Fatal()
+	}
+	defer srcFile.Close()
+	// Add extension to temp file name
+	tmpFile, err := os.Create(fName + ".gtar")
+	if err != nil {
+		failureLog(function, args, startTime, "", "File create failed", err).Fatal()
+	}
+	defer tmpFile.Close()
+	_, err = io.Copy(tmpFile, srcFile)
+	if err != nil {
+		failureLog(function, args, startTime, "", "File copy failed", err).Fatal()
+	}
+
+	// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+	n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", "")
+	if err != nil {
+		failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+	}
+	if n != int64(totalSize) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal()
+	}
+
+	// Check headers
+	rStandard, err := c.StatObject(bucketName, objectName+"-standard")
+	if err != nil {
+		failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+	}
+	if rStandard.ContentType != "application/octet-stream" {
+		failureLog(function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err).Fatal()
+	}
+
+	rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
+	if err != nil {
+		failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+	}
+	if rOctet.ContentType != "application/octet-stream" {
+		failureLog(function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err).Fatal()
+	}
+
+	rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
+	if err != nil {
+		failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+	}
+	if rGTar.ContentType != "application/x-gtar" {
+		failureLog(function, args, startTime, "", "ContentType does not match, expected application/x-gtar, got "+rStandard.ContentType, err).Fatal()
+	}
+
+	// Remove all objects and bucket and temp file
+	err = c.RemoveObject(bucketName, objectName+"-standard")
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	err = c.RemoveObject(bucketName, objectName+"-Octet")
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	err = c.RemoveObject(bucketName, objectName+"-GTar")
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+
+	err = os.Remove(fName + ".gtar")
+	if err != nil {
+		failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests get object ReaderSeeker interface methods.
+func testGetObjectReadSeekFunctional() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Generate 33K of data.
+	var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+	}
+
+	// Save the data
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal()
+	}
+
+	defer func() {
+		err = c.RemoveObject(bucketName, objectName)
+		if err != nil {
+			failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+		}
+		err = c.RemoveBucket(bucketName)
+		if err != nil {
+			failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+		}
+	}()
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat object failed", err).Fatal()
+	}
+
+	if st.Size != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(st.Size), err).Fatal()
+	}
+
+	// The following function helps us compare data read from the reader after
+	// a seek with the corresponding range of the original buffer.
+	cmpData := func(r io.Reader, start, end int) {
+		if end-start == 0 {
+			return
+		}
+		buffer := bytes.NewBuffer([]byte{})
+		if _, err := io.CopyN(buffer, r, int64(thirtyThreeKiB)); err != nil {
+			if err != io.EOF {
+				failureLog(function, args, startTime, "", "CopyN failed", err).Fatal()
+			}
+		}
+		if !bytes.Equal(buf[start:end], buffer.Bytes()) {
+			failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal()
+		}
+	}
+
+	// Generic seek error for errors other than io.EOF
+	seekErr := errors.New("seek error")
+
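+	// Note: whence follows io.Seeker semantics: 0 = seek relative to the
+	// start, 1 = relative to the current offset, 2 = relative to the end.
+	// When shouldCmp is set, buf[start:end] is compared with what the reader
+	// returns after the seek.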
+	testCases := []struct {
+		offset    int64
+		whence    int
+		pos       int64
+		err       error
+		shouldCmp bool
+		start     int
+		end       int
+	}{
+		// Start from offset 0, fetch data and compare
+		{0, 0, 0, nil, true, 0, 0},
+		// Start from offset 2048, fetch data and compare
+		{2048, 0, 2048, nil, true, 2048, thirtyThreeKiB},
+		// Start from offset larger than possible
+		{int64(thirtyThreeKiB) + 1024, 0, 0, seekErr, false, 0, 0},
+		// Move to offset 0 without comparing
+		{0, 0, 0, nil, false, 0, 0},
+		// Move one step forward and compare
+		{1, 1, 1, nil, true, 1, thirtyThreeKiB},
+		// Move larger than possible
+		{int64(thirtyThreeKiB), 1, 0, seekErr, false, 0, 0},
+		// Provide negative offset with CUR_SEEK
+		{int64(-1), 1, 0, seekErr, false, 0, 0},
+		// Test with whence SEEK_END and with positive offset
+		{1024, 2, int64(thirtyThreeKiB) - 1024, io.EOF, true, 0, 0},
+		// Test with whence SEEK_END and with negative offset
+		{-1024, 2, int64(thirtyThreeKiB) - 1024, nil, true, thirtyThreeKiB - 1024, thirtyThreeKiB},
+		// Test with whence SEEK_END and with large negative offset
+		{-int64(thirtyThreeKiB) * 2, 2, 0, seekErr, true, 0, 0},
+	}
+
+	for i, testCase := range testCases {
+		// Perform seek operation
+		n, err := r.Seek(testCase.offset, testCase.whence)
+		// We expect an error, but none occurred
+		if testCase.err == seekErr && err == nil {
+			failureLog(function, args, startTime, "", fmt.Sprintf("Test %d, expected error: %v, found: nil", i+1, testCase.err), err).Fatal()
+		}
+		// We expect a specific error value
+		if testCase.err != seekErr && testCase.err != err {
+			failureLog(function, args, startTime, "", fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err).Fatal()
+		}
+		// If we expect an error go to the next loop
+		if testCase.err != nil {
+			continue
+		}
+		// Check the returned seek pos
+		if n != testCase.pos {
+			failureLog(function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err).Fatal()
+		}
+		// Compare only if shouldCmp is activated
+		if testCase.shouldCmp {
+			cmpData(r, testCase.start, testCase.end)
+		}
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests get object ReaderAt interface methods.
+func testGetObjectReadAtFunctional() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Generate 33K of data.
+	var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+	}
+
+	// Save the data
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal()
+	}
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+	offset := int64(2048)
+
+	// read directly
+	buf1 := make([]byte, 512)
+	buf2 := make([]byte, 512)
+	buf3 := make([]byte, 512)
+	buf4 := make([]byte, 512)
+
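+	// ReadAt reads at an absolute offset without moving any internal cursor;
+	// the test advances `offset` manually between calls.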
+	// Test readAt before stat is called.
+	m, err := r.ReadAt(buf1, offset)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+	}
+	if m != len(buf1) {
+		failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err).Fatal()
+	}
+	if !bytes.Equal(buf1, buf[offset:offset+512]) {
+		failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+	}
+	offset += 512
+
+	st, err := r.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+	}
+
+	if st.Size != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(st.Size), err).Fatal()
+	}
+
+	m, err = r.ReadAt(buf2, offset)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+	}
+	if m != len(buf2) {
+		failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err).Fatal()
+	}
+	if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+	}
+	offset += 512
+	m, err = r.ReadAt(buf3, offset)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+	}
+	if m != len(buf3) {
+		failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err).Fatal()
+	}
+	if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+	}
+	offset += 512
+	m, err = r.ReadAt(buf4, offset)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+	}
+	if m != len(buf4) {
+		failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err).Fatal()
+	}
+	if !bytes.Equal(buf4, buf[offset:offset+512]) {
+		failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+	}
+
+	buf5 := make([]byte, n)
+	// Read the whole object.
+	m, err = r.ReadAt(buf5, 0)
+	if err != nil {
+		if err != io.EOF {
+			failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+		}
+	}
+	if m != len(buf5) {
+		failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err).Fatal()
+	}
+	if !bytes.Equal(buf, buf5) {
+		failureLog(function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err).Fatal()
+	}
+
+	buf6 := make([]byte, n+1)
+	// Read the whole object and beyond.
+	_, err = r.ReadAt(buf6, 0)
+	if err != nil {
+		if err != io.EOF {
+			failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+		}
+	}
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Test Presigned Post Policy
+func testPresignedPostPolicy() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "PresignedPostPolicy(policy)"
+	args := map[string]interface{}{
+		"policy": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object
+	c, err := minio.NewV4(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Generate 33K of data.
+	var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+	}
+
+	// Save the data
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+	}
+
+	policy := minio.NewPostPolicy()
+
+	if err := policy.SetBucket(""); err == nil {
+		failureLog(function, args, startTime, "", "SetBucket did not fail for invalid conditions", err).Fatal()
+	}
+	if err := policy.SetKey(""); err == nil {
+		failureLog(function, args, startTime, "", "SetKey did not fail for invalid conditions", err).Fatal()
+	}
+	if err := policy.SetKeyStartsWith(""); err == nil {
+		failureLog(function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err).Fatal()
+	}
+	if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
+		failureLog(function, args, startTime, "", "SetExpires did not fail for invalid conditions", err).Fatal()
+	}
+	if err := policy.SetContentType(""); err == nil {
+		failureLog(function, args, startTime, "", "SetContentType did not fail for invalid conditions", err).Fatal()
+	}
+	if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
+		failureLog(function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err).Fatal()
+	}
+
+	policy.SetBucket(bucketName)
+	policy.SetKey(objectName)
+	policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
+	policy.SetContentType("image/png")
+	policy.SetContentLengthRange(1024, 1024*1024)
+	args["policy"] = policy
+
+	_, _, err = c.PresignedPostPolicy(policy)
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedPostPolicy failed", err).Fatal()
+	}
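+
+	// The call above returns a URL and form fields that an untrusted client
+	// could use to upload directly. A minimal sketch (field names and
+	// pngBytes are illustrative assumptions, not part of this test):
+	//
+	//	u, formData, _ := c.PresignedPostPolicy(policy)
+	//	body := &bytes.Buffer{}
+	//	w := multipart.NewWriter(body)
+	//	for k, v := range formData {
+	//		w.WriteField(k, v)
+	//	}
+	//	fw, _ := w.CreateFormFile("file", "test.png")
+	//	fw.Write(pngBytes)
+	//	w.Close()
+	//	http.Post(u.String(), w.FormDataContentType(), body)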
+
+	policy = minio.NewPostPolicy()
+
+	// Remove all objects and buckets
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests copy object
+func testCopyObject() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "CopyObject(dst, src)"
+	args := map[string]interface{}{
+		"dst": "",
+		"src": "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object
+	c, err := minio.NewV4(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Make a new bucket in 'us-east-1' (destination bucket).
+	err = c.MakeBucket(bucketName+"-copy", "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Generate 33K of data.
+	var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal()
+	}
+
+	r, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+	// Stat the source object to fetch its metadata (ETag) for the copy conditions below.
+	objInfo, err := r.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+	}
+
+	// Copy Source
+	src := minio.NewSourceInfo(bucketName, objectName, nil)
+
+	// Set copy conditions.
+
+	// All invalid conditions first.
+	err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+	if err == nil {
+		failureLog(function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err).Fatal()
+	}
+	err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+	if err == nil {
+		failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err).Fatal()
+	}
+	err = src.SetMatchETagCond("")
+	if err == nil {
+		failureLog(function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err).Fatal()
+	}
+	err = src.SetMatchETagExceptCond("")
+	if err == nil {
+		failureLog(function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err).Fatal()
+	}
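+
+	// The copy conditions above translate to x-amz-copy-source-if-* headers
+	// on the server-side CopyObject request.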
+
+	err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+	if err != nil {
+		failureLog(function, args, startTime, "", "SetModifiedSinceCond failed", err).Fatal()
+	}
+	err = src.SetMatchETagCond(objInfo.ETag)
+	if err != nil {
+		failureLog(function, args, startTime, "", "SetMatchETagCond failed", err).Fatal()
+	}
+	args["src"] = src
+
+	dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
+	args["dst"] = dst
+	if err != nil {
+		failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+	}
+
+	// Perform the Copy
+	err = c.CopyObject(dst, src)
+	if err != nil {
+		failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+	}
+
+	// Source object
+	r, err = c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+
+	// Destination object
+	readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+	// Check the various fields of source object against destination object.
+	objInfo, err = r.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+	}
+	objInfoCopy, err := readerCopy.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+	}
+	if objInfo.Size != objInfoCopy.Size {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err).Fatal()
+	}
+
+	// CopyObject again but with wrong conditions
+	src = minio.NewSourceInfo(bucketName, objectName, nil)
+	err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+	if err != nil {
+		failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond failed", err).Fatal()
+	}
+	err = src.SetMatchETagExceptCond(objInfo.ETag)
+	if err != nil {
+		failureLog(function, args, startTime, "", "SetMatchETagExceptCond failed", err).Fatal()
+	}
+
+	// Perform the Copy which should fail
+	err = c.CopyObject(dst, src)
+	if err == nil {
+		failureLog(function, args, startTime, "", "CopyObject did not fail for invalid conditions", err).Fatal()
+	}
+
+	// Remove all objects and buckets
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+
+	err = c.RemoveBucket(bucketName + "-copy")
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// testEncryptionPutGet tests client-side encryption.
+func testEncryptionPutGet() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "PutEncryptedObject(bucketName, objectName, reader, cbcMaterials, metadata, progress)"
+	args := map[string]interface{}{
+		"bucketName":   "",
+		"objectName":   "",
+		"cbcMaterials": "",
+		"metadata":     "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object
+	c, err := minio.NewV4(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Generate a symmetric key
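+	// (16 bytes here, i.e. an AES-128-sized key; the CBC materials below
+	// derive their cipher from it).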
+	symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+	// Generate an asymmetric key from predefined public and private keys
+	privateKey, err := hex.DecodeString(
+		"30820277020100300d06092a864886f70d0101010500048202613082025d" +
+			"0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" +
+			"bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" +
+			"5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" +
+			"cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" +
+			"15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" +
+			"c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" +
+			"57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" +
+			"5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" +
+			"bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" +
+			"41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" +
+			"0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" +
+			"d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" +
+			"f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" +
+			"27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" +
+			"6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" +
+			"d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" +
+			"bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" +
+			"bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" +
+			"0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" +
+			"47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" +
+			"9945cb5c7d")
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err).Fatal()
+	}
+
+	publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" +
+		"b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" +
+		"97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" +
+		"5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" +
+		"c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" +
+		"80a89e43f29b570203010001")
+	if err != nil {
+		failureLog(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err).Fatal()
+	}
+
+	// Generate an asymmetric key
+	asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+	if err != nil {
+		failureLog(function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err).Fatal()
+	}
+
+	// Custom content type, passed along as object metadata
+	customContentType := "custom/contenttype"
+	args["metadata"] = customContentType
+
+	testCases := []struct {
+		buf    []byte
+		encKey encrypt.Key
+	}{
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
+
+		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)},
+		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)},
+		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)},
+		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)},
+		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)},
+		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
+	}
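+
+	// The sizes straddle the 16-byte AES-CBC block boundary (15/16/17 and
+	// 31/32/33) to exercise padding edge cases.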
+
+	for i, testCase := range testCases {
+		// Generate a random object name
+		objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+		args["objectName"] = objectName
+
+		// Secured object
+		cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey)
+		args["cbcMaterials"] = cbcMaterials
+
+		if err != nil {
+			failureLog(function, args, startTime, "", "NewCBCSecureMaterials failed", err).Fatal()
+		}
+
+		// Put encrypted data
+		_, err = c.PutEncryptedObject(bucketName, objectName, bytes.NewReader(testCase.buf), cbcMaterials, map[string][]string{"Content-Type": {customContentType}}, nil)
+		if err != nil {
+			failureLog(function, args, startTime, "", "PutEncryptedObject failed", err).Fatal()
+		}
+
+		// Read the data back
+		r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials)
+		if err != nil {
+			failureLog(function, args, startTime, "", "GetEncryptedObject failed", err).Fatal()
+		}
+		defer r.Close()
+
+		// Compare the sent object with the received one
+		recvBuffer := bytes.NewBuffer([]byte{})
+		if _, err = io.Copy(recvBuffer, r); err != nil {
+			failureLog(function, args, startTime, "", fmt.Sprintf("Test %d, error: %v", i+1, err), err).Fatal()
+		}
+		if recvBuffer.Len() != len(testCase.buf) {
+			failureLog(function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err).Fatal()
+		}
+		if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+			failureLog(function, args, startTime, "", fmt.Sprintf("Test %d, decrypted data does not match the original buffer", i+1), err).Fatal()
+		}
+
+		// Remove test object
+		err = c.RemoveObject(bucketName, objectName)
+		if err != nil {
+			failureLog(function, args, startTime, "", "Test "+string(i+1)+", RemoveObject failed with: "+err.Error(), err).Fatal()
+		}
+
+	}
+
+	// Remove test bucket
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+func testBucketNotification() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "SetBucketNotification(bucketName)"
+	args := map[string]interface{}{
+		"bucketName": "",
+	}
+
+	if os.Getenv("NOTIFY_BUCKET") == "" ||
+		os.Getenv("NOTIFY_SERVICE") == "" ||
+		os.Getenv("NOTIFY_REGION") == "" ||
+		os.Getenv("NOTIFY_ACCOUNTID") == "" ||
+		os.Getenv("NOTIFY_RESOURCE") == "" {
+		ignoredLog(function, args, startTime, "Skipped notification test as it is not configured").Info()
+		return
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable to debug
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	bucketName := os.Getenv("NOTIFY_BUCKET")
+	args["bucketName"] = bucketName
+
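+	// NewArn composes "arn:{partition}:{service}:{region}:{accountID}:{resource}".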
+	topicArn := minio.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE"))
+	queueArn := minio.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource")
+
+	topicConfig := minio.NewNotificationConfig(topicArn)
+
+	topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
+	topicConfig.AddFilterSuffix("jpg")
+
+	queueConfig := minio.NewNotificationConfig(queueArn)
+	queueConfig.AddEvents(minio.ObjectCreatedAll)
+	queueConfig.AddFilterPrefix("photos/")
+
+	bNotification := minio.BucketNotification{}
+	bNotification.AddTopic(topicConfig)
+
+	// Add the same topicConfig again, should have no effect
+	// because it is duplicated
+	bNotification.AddTopic(topicConfig)
+	if len(bNotification.TopicConfigs) != 1 {
+		failureLog(function, args, startTime, "", "Duplicate entry added", err).Fatal()
+	}
+
+	// Add and remove a queue config
+	bNotification.AddQueue(queueConfig)
+	bNotification.RemoveQueueByArn(queueArn)
+
+	err = c.SetBucketNotification(bucketName, bNotification)
+	if err != nil {
+		failureLog(function, args, startTime, "", "SetBucketNotification failed", err).Fatal()
+	}
+
+	bNotification, err = c.GetBucketNotification(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetBucketNotification failed", err).Fatal()
+	}
+
+	if len(bNotification.TopicConfigs) != 1 {
+		failureLog(function, args, startTime, "", "Topic config is empty", err).Fatal()
+	}
+
+	if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" {
+		failureLog(function, args, startTime, "", "Couldn't get the suffix", err).Fatal()
+	}
+
+	err = c.RemoveAllBucketNotification(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveAllBucketNotification failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests comprehensive list of all methods.
+func testFunctional() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "testFunctional()"
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, nil, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable to debug
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	function = "MakeBucket(bucketName, region)"
+	args := map[string]interface{}{
+		"bucketName": bucketName,
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Generate a random file name.
+	fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	file, err := os.Create(fileName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "File creation failed", err).Fatal()
+	}
+	for i := 0; i < 3; i++ {
+		buf := make([]byte, rand.Intn(1<<19))
+		_, err = file.Write(buf)
+		if err != nil {
+			failureLog(function, args, startTime, "", "File write failed", err).Fatal()
+		}
+	}
+	file.Close()
+
+	// Verify the bucket exists and you have access.
+	var exists bool
+	exists, err = c.BucketExists(bucketName)
+	function = "BucketExists(bucketName)"
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "BucketExists failed", err).Fatal()
+	}
+	if !exists {
+		failureLog(function, args, startTime, "", "Could not find the bucket", err).Fatal()
+	}
+
+	// Asserting the default bucket policy.
+	policyAccess, err := c.GetBucketPolicy(bucketName, "")
+	function = "GetBucketPolicy(bucketName, objectPrefix)"
+	args = map[string]interface{}{
+		"bucketName":   bucketName,
+		"objectPrefix": "",
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
+	}
+	if policyAccess != "none" {
+		failureLog(function, args, startTime, "", "policy should be set to none", err).Fatal()
+	}
+	// Set the bucket policy to 'public readonly'.
+	err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadOnly)
+	function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+	args = map[string]interface{}{
+		"bucketName":   bucketName,
+		"objectPrefix": "",
+		"bucketPolicy": policy.BucketPolicyReadOnly,
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
+	}
+	// should return policy `readonly`.
+	policyAccess, err = c.GetBucketPolicy(bucketName, "")
+	function = "GetBucketPolicy(bucketName, objectPrefix)"
+	args = map[string]interface{}{
+		"bucketName":   bucketName,
+		"objectPrefix": "",
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
+	}
+	if policyAccess != "readonly" {
+		failureLog(function, args, startTime, "", "policy should be set to readonly", err).Fatal()
+	}
+
+	// Make the bucket 'public writeonly'.
+	err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyWriteOnly)
+	function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+	args = map[string]interface{}{
+		"bucketName":   bucketName,
+		"objectPrefix": "",
+		"bucketPolicy": policy.BucketPolicyWriteOnly,
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
+	}
+	// should return policy `writeonly`.
+	policyAccess, err = c.GetBucketPolicy(bucketName, "")
+	function = "GetBucketPolicy(bucketName, objectPrefix)"
+	args = map[string]interface{}{
+		"bucketName":   bucketName,
+		"objectPrefix": "",
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
+	}
+	if policyAccess != "writeonly" {
+		failureLog(function, args, startTime, "", "policy should be set to writeonly", err).Fatal()
+	}
+	// Make the bucket 'public read/write'.
+	err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
+	function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+	args = map[string]interface{}{
+		"bucketName":   bucketName,
+		"objectPrefix": "",
+		"bucketPolicy": policy.BucketPolicyReadWrite,
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
+	}
+	// should return policy `readwrite`.
+	policyAccess, err = c.GetBucketPolicy(bucketName, "")
+	function = "GetBucketPolicy(bucketName, objectPrefix)"
+	args = map[string]interface{}{
+		"bucketName":   bucketName,
+		"objectPrefix": "",
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
+	}
+	if policyAccess != "readwrite" {
+		failureLog(function, args, startTime, "", "policy should be set to readwrite", err).Fatal()
+	}
+	// List all buckets.
+	buckets, err := c.ListBuckets()
+	function = "ListBuckets()"
+	args = nil
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "ListBuckets failed", err).Fatal()
+	}
+	if len(buckets) == 0 {
+		failureLog(function, args, startTime, "", "Found bucket list to be empty", err).Fatal()
+	}
+
+	// Verify if previously created bucket is listed in list buckets.
+	bucketFound := false
+	for _, bucket := range buckets {
+		if bucket.Name == bucketName {
+			bucketFound = true
+		}
+	}
+
+	// If bucket not found error out.
+	if !bucketFound {
+		failureLog(function, args, startTime, "", "Bucket: "+bucketName+" not found", err).Fatal()
+	}
+
+	objectName := bucketName + "unique"
+
+	// Generate data
+	buf := bytes.Repeat([]byte("f"), 1<<19)
+
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
+	function = "PutObject(bucketName, objectName, reader, contentType)"
+	args = map[string]interface{}{
+		"bucketName":  bucketName,
+		"objectName":  objectName,
+		"contentType": "",
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(len(buf)) {
+		failureLog(function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err).Fatal()
+	}
+
+	n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
+	args = map[string]interface{}{
+		"bucketName":  bucketName,
+		"objectName":  objectName + "-nolength",
+		"contentType": "binary/octet-stream",
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(len(buf)) {
+		failureLog(function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err).Fatal()
+	}
+
+	// Instantiate a done channel to close all listing.
+	doneCh := make(chan struct{})
+	defer close(doneCh)
+
+	objFound := false
+	isRecursive := true // Recursive is true.
+
+	function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
+	args = map[string]interface{}{
+		"bucketName":  bucketName,
+		"objectName":  objectName,
+		"isRecursive": isRecursive,
+	}
+
+	for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
+		if obj.Key == objectName {
+			objFound = true
+			break
+		}
+	}
+	if !objFound {
+		failureLog(function, args, startTime, "", "Object "+objectName+" not found", err).Fatal()
+	}
+
+	objFound = false
+	isRecursive = true // Recursive is true.
+	function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)"
+	args = map[string]interface{}{
+		"bucketName":  bucketName,
+		"objectName":  objectName,
+		"isRecursive": isRecursive,
+	}
+
+	for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) {
+		if obj.Key == objectName {
+			objFound = true
+			break
+		}
+	}
+	if !objFound {
+		failureLog(function, args, startTime, "", "Object "+objectName+" not found", err).Fatal()
+	}
+
+	incompObjNotFound := true
+
+	function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
+	args = map[string]interface{}{
+		"bucketName":  bucketName,
+		"objectName":  objectName,
+		"isRecursive": isRecursive,
+	}
+
+	for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
+		if objIncompl.Key != "" {
+			incompObjNotFound = false
+			break
+		}
+	}
+	if !incompObjNotFound {
+		failureLog(function, args, startTime, "", "Unexpected dangling incomplete upload found", err).Fatal()
+	}
+
+	newReader, err := c.GetObject(bucketName, objectName)
+	function = "GetObject(bucketName, objectName)"
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": objectName,
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+
+	newReadBytes, err := ioutil.ReadAll(newReader)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+	}
+
+	if !bytes.Equal(newReadBytes, buf) {
+		failureLog(function, args, startTime, "", "GetObject bytes mismatch", err).Fatal()
+	}
+
+	err = c.FGetObject(bucketName, objectName, fileName+"-f")
+	function = "FGetObject(bucketName, objectName, fileName)"
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": objectName,
+		"fileName":   fileName + "-f",
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "FGetObject failed", err).Fatal()
+	}
+
+	// Generate presigned HEAD object url.
+	presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
+	function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": objectName,
+		"expires":    3600 * time.Second,
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedHeadObject failed", err).Fatal()
+	}
+	// Verify if presigned url works.
+	resp, err := http.Head(presignedHeadURL.String())
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect", err).Fatal()
+	}
+	if resp.StatusCode != http.StatusOK {
+		failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err).Fatal()
+	}
+	if resp.Header.Get("ETag") == "" {
+		failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect", err).Fatal()
+	}
+	resp.Body.Close()
+
+	// Generate presigned GET object url.
+	presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
+	function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": objectName,
+		"expires":    3600 * time.Second,
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
+	}
+
+	// Verify if presigned url works.
+	resp, err = http.Get(presignedGetURL.String())
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+	}
+	if resp.StatusCode != http.StatusOK {
+		failureLog(function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err).Fatal()
+	}
+	newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+	}
+	resp.Body.Close()
+	if !bytes.Equal(newPresignedBytes, buf) {
+		failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+	}
+
+	// Set request parameters.
+	reqParams := make(url.Values)
+	reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+	presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": objectName,
+		"expires":    3600 * time.Second,
+		"reqParams":  reqParams,
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
+	}
+	// Verify if presigned url works.
+	resp, err = http.Get(presignedGetURL.String())
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+	}
+	if resp.StatusCode != http.StatusOK {
+		failureLog(function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err).Fatal()
+	}
+	newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+	}
+	if !bytes.Equal(newPresignedBytes, buf) {
+		failureLog(function, args, startTime, "", "Bytes mismatch for presigned GET URL", err).Fatal()
+	}
+	if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+		failureLog(function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err).Fatal()
+	}
+
+	presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
+
+	function = "PresignedPutObject(bucketName, objectName, expires)"
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": objectName,
+		"expires":    3600 * time.Second,
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal()
+	}
+
+	buf = bytes.Repeat([]byte("g"), 1<<19)
+
+	req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
+	if err != nil {
+		failureLog(function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err).Fatal()
+	}
+	httpClient := &http.Client{
+		// Setting a sensible time out of 30secs to wait for response
+		// headers. Request is pro-actively cancelled after 30secs
+		// with no response.
+		Timeout:   30 * time.Second,
+		Transport: http.DefaultTransport,
+	}
+	resp, err = httpClient.Do(req)
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal()
+	}
+	// Close the response body to avoid leaking the connection.
+	resp.Body.Close()
+
+	newReader, err = c.GetObject(bucketName, objectName+"-presigned")
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject after PresignedPutObject failed", err).Fatal()
+	}
+
+	newReadBytes, err = ioutil.ReadAll(newReader)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAll after GetObject failed", err).Fatal()
+	}
+
+	if !bytes.Equal(newReadBytes, buf) {
+		failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+	}
+
+	err = c.RemoveObject(bucketName, objectName)
+	function = "RemoveObject(bucketName, objectName)"
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": objectName,
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+	err = c.RemoveObject(bucketName, objectName+"-f")
+	args["objectName"] = objectName + "-f"
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	err = c.RemoveObject(bucketName, objectName+"-nolength")
+	args["objectName"] = objectName + "-nolength"
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	err = c.RemoveObject(bucketName, objectName+"-presigned")
+	args["objectName"] = objectName + "-presigned"
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	err = c.RemoveBucket(bucketName)
+	function = "RemoveBucket(bucketName)"
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+	}
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	err = c.RemoveBucket(bucketName)
+	if err == nil {
+		failureLog(function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err).Fatal()
+	}
+	if err.Error() != "The specified bucket does not exist" {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	if err = os.Remove(fileName); err != nil {
+		failureLog(function, args, startTime, "", "File Remove failed", err).Fatal()
+	}
+	if err = os.Remove(fileName + "-f"); err != nil {
+		failureLog(function, args, startTime, "", "File Remove failed", err).Fatal()
+	}
+	function = "testFunctional()"
+	successLogger(function, args, startTime).Info()
+}
+
+// Test for validating GetObject Reader* methods functioning when the
+// object is modified in the object store.
+func testGetObjectObjectModified() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+	}
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV4(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Make a new bucket.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+	defer c.RemoveBucket(bucketName)
+
+	// Upload an object.
+	objectName := "myobject"
+	content := "helloworld"
+	_, err = c.PutObject(bucketName, objectName, strings.NewReader(content), "application/text")
+	if err != nil {
+		failureLog(function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err).Fatal()
+	}
+
+	defer c.RemoveObject(bucketName, objectName)
+
+	reader, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err).Fatal()
+	}
+	defer reader.Close()
+
+	// Read a few bytes of the object.
+	b := make([]byte, 5)
+	n, err := reader.ReadAt(b, 0)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err).Fatal()
+	}
+
+	// Upload different contents to the same object while object is being read.
+	newContent := "goodbyeworld"
+	_, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), "application/text")
+	if err != nil {
+		failureLog(function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err).Fatal()
+	}
+
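+	// The reader caches the ETag from its first request and sends it as an
+	// If-Match style precondition on subsequent requests, so the overwrite
+	// above is expected to surface as a pre-condition failure.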
+	// Confirm that a Stat() call in between doesn't change the Object's cached etag.
+	_, err = reader.Stat()
+	expectedError := "At least one of the pre-conditions you specified did not hold"
+	if err == nil || err.Error() != expectedError {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Expected Stat to fail with error %q, but received %v", expectedError, err), err).Fatal()
+	}
+
+	// Read again only to find object contents have been modified since last read.
+	_, err = reader.ReadAt(b, int64(n))
+	if err == nil || err.Error() != expectedError {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Expected ReadAt to fail with error %q, but received %v", expectedError, err), err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Test validates PutObject when uploading a file seeked to a given offset.
+func testPutObjectUploadSeekedObject() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "PutObject(bucketName, objectName, fileToUpload, contentType)"
+	args := map[string]interface{}{
+		"bucketName":   "",
+		"objectName":   "",
+		"fileToUpload": "",
+		"contentType":  "binary/octet-stream",
+	}
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV4(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Make a new bucket.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+	defer c.RemoveBucket(bucketName)
+
+	tempfile, err := ioutil.TempFile("", "minio-go-upload-test-")
+	args["fileToUpload"] = tempfile
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "TempFile create failed", err).Fatal()
+	}
+
+	var data []byte
+	if fileName := getFilePath("datafile-100-kB"); fileName != "" {
+		data, _ = ioutil.ReadFile(fileName)
+	} else {
+		// Generate data more than 32K
+		data = bytes.Repeat([]byte("1"), 120000)
+	}
+	var length = len(data)
+	if _, err = tempfile.Write(data); err != nil {
+		failureLog(function, args, startTime, "", "TempFile write failed", err).Fatal()
+	}
+
+	objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+	args["objectName"] = objectName
+
+	offset := length / 2
+	if _, err := tempfile.Seek(int64(offset), 0); err != nil {
+		failureLog(function, args, startTime, "", "TempFile seek failed", err).Fatal()
+	}
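+
+	// PutObject consumes the reader from its current offset to EOF, so the
+	// uploaded size should be length-offset.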
+
+	n, err := c.PutObject(bucketName, objectName, tempfile, "binary/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+	if n != int64(length-offset) {
+		failureLog(function, args, startTime, "", "Invalid length returned, expected "+string(int64(length-offset))+" got "+string(n), err).Fatal()
+	}
+	tempfile.Close()
+	if err = os.Remove(tempfile.Name()); err != nil {
+		failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
+	}
+
+	length = int(n)
+
+	obj, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+
+	n, err = obj.Seek(int64(offset), 0)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+	}
+	if n != int64(offset) {
+		failureLog(function, args, startTime, "", "Invalid offset returned, expected "+string(int64(offset))+" got "+string(n), err).Fatal()
+	}
+
+	n, err = c.PutObject(bucketName, objectName+"getobject", obj, "binary/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+	if n != int64(length-offset) {
+		failureLog(function, args, startTime, "", "Invalid offset returned, expected "+string(int64(length-offset))+" got "+string(n), err).Fatal()
+	}
+
+	if err = c.RemoveObject(bucketName, objectName); err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	if err = c.RemoveObject(bucketName, objectName+"getobject"); err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	if err = c.RemoveBucket(bucketName); err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests bucket re-create errors.
+func testMakeBucketErrorV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "MakeBucket(bucketName, region)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"region":     "eu-west-1",
+	}
+
+	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
+		ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+		return
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket in 'eu-west-1'.
+	if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+	if err = c.MakeBucket(bucketName, "eu-west-1"); err == nil {
+		failureLog(function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err).Fatal()
+	}
+	// Verify valid error response from server.
+	if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
+		minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
+		failureLog(function, args, startTime, "", "Invalid error returned by server", err).Fatal()
+	}
+	if err = c.RemoveBucket(bucketName); err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests that the get object reader does not return an error when closed twice.
+func testGetObjectClosedTwiceV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "MakeBucket(bucketName, region)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"region":     "eu-west-1",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Generate 33K of data.
+	var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(n), err).Fatal()
+	}
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+	}
+
+	if st.Size != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(st.Size), err).Fatal()
+	}
+	if err := r.Close(); err != nil {
+		failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+	}
+	if err := r.Close(); err == nil {
+		failureLog(function, args, startTime, "", "Object is already closed, should return error", err).Fatal()
+	}
+
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests removing partially uploaded objects.
+func testRemovePartiallyUploadedV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "RemoveIncompleteUpload(bucketName, objectName)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+	}
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Enable tracing, write to stdout.
+	// c.TraceOn(os.Stderr)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
+
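+	// Stream data through a pipe and proactively close the write end with
+	// an error partway through, so that PutObject fails mid-upload; the
+	// test then verifies the returned error and cleans up the dangling
+	// upload with RemoveIncompleteUpload.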
+	reader, writer := io.Pipe()
+	go func() {
+		i := 0
+		for i < 25 {
+			_, cerr := io.CopyN(writer, r, 128*1024)
+			if cerr != nil {
+				failureLog(function, args, startTime, "", "Copy failed", cerr).Fatal()
+			}
+			i++
+			r.Seek(0, 0)
+		}
+		writer.CloseWithError(errors.New("proactively closed to be verified later"))
+	}()
+
+	objectName := bucketName + "-resumable"
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
+	if err == nil {
+		failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal()
+	}
+	if err.Error() != "proactively closed to be verified later" {
+		failureLog(function, args, startTime, "", "Unexpected error, expected : proactively closed to be verified later", err).Fatal()
+	}
+	err = c.RemoveIncompleteUpload(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveIncompleteUpload failed", err).Fatal()
+	}
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests FPutObject with explicit and auto-detected content types.
+func testFPutObjectV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "FPutObject(bucketName, objectName, fileName, contentType)"
+	args := map[string]interface{}{
+		"bucketName":  "",
+		"objectName":  "",
+		"fileName":    "",
+		"contentType": "application/octet-stream",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Make a temp file with 11*1024*1024 bytes of data.
+	file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+	if err != nil {
+		failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal()
+	}
+
+	r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
+	n, err := io.CopyN(file, r, 11*1024*1024)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+	}
+	if n != int64(11*1024*1024) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal()
+	}
+
+	// Close the file proactively for Windows.
+	err = file.Close()
+	if err != nil {
+		failureLog(function, args, startTime, "", "File close failed", err).Fatal()
+	}
+
+	// Set base object name
+	objectName := bucketName + "FPutObject"
+	args["objectName"] = objectName
+	args["fileName"] = file.Name()
+
+	// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+	n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+	}
+	if n != int64(11*1024*1024) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal()
+	}
+
+	// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+	n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "")
+	args["objectName"] = objectName + "-Octet"
+	args["contentType"] = ""
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+	}
+	if n != int64(11*1024*1024) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal()
+	}
+
+	// Add extension to temp file name
+	fileName := file.Name()
+	err = os.Rename(file.Name(), fileName+".gtar")
+	if err != nil {
+		failureLog(function, args, startTime, "", "Rename failed", err).Fatal()
+	}
+
+	// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+	n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "")
+	args["objectName"] = objectName + "-Octet"
+	args["contentType"] = ""
+	args["fileName"] = fileName + ".gtar"
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+	}
+	if n != int64(11*1024*1024) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal()
+	}
+
+	// Check headers
+	rStandard, err := c.StatObject(bucketName, objectName+"-standard")
+	if err != nil {
+		failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+	}
+	if rStandard.ContentType != "application/octet-stream" {
+		failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err).Fatal()
+	}
+
+	rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
+	if err != nil {
+		failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+	}
+	if rOctet.ContentType != "application/octet-stream" {
+		failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err).Fatal()
+	}
+
+	rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
+	if err != nil {
+		failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+	}
+	if rGTar.ContentType != "application/x-gtar" {
+		failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err).Fatal()
+	}
+
+	// Remove all objects, the bucket, and the temp file.
+	err = c.RemoveObject(bucketName, objectName+"-standard")
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	err = c.RemoveObject(bucketName, objectName+"-Octet")
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	err = c.RemoveObject(bucketName, objectName+"-GTar")
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+
+	err = os.Remove(fileName + ".gtar")
+	if err != nil {
+		failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests MakeBucket with different regions and bucket name formats.
+func testMakeBucketRegionsV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "MakeBucket(bucketName, region)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"region":     "eu-west-1",
+	}
+
+	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
+		ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+		return
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket in 'eu-west-1'.
+	if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	if err = c.RemoveBucket(bucketName); err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+
+	// Make a new bucket with '.' in its name, in 'us-west-2'. This
+	// request is internally sent as a path-style request instead of
+	// a virtual-host-style request.
+	if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
+		args["bucketName"] = bucketName + ".withperiod"
+		args["region"] = "us-west-2"
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Remove the newly created bucket.
+	if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests get object ReadSeeker interface methods.
+func testGetObjectReadSeekFunctionalV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Generate 33K of data.
+	var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+	}
+
+	// Save the data.
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+	}
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+	}
+
+	if st.Size != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal()
+	}
+
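+	// Exercise the io.Seeker contract: whence 0 seeks relative to the
+	// start of the object, 1 relative to the current position, and 2
+	// relative to the end.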
+	offset := int64(2048)
+	n, err = r.Seek(offset, 0)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+	}
+	if n != offset {
+		failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err).Fatal()
+	}
+	n, err = r.Seek(0, 1)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+	}
+	if n != offset {
+		failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err).Fatal()
+	}
+	_, err = r.Seek(offset, 2)
+	if err == nil {
+		failureLog(function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err).Fatal()
+	}
+	n, err = r.Seek(-offset, 2)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+	}
+	if n != st.Size-offset {
+		failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err).Fatal()
+	}
+
+	var buffer1 bytes.Buffer
+	if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
+		if err != io.EOF {
+			failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+		}
+	}
+	if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
+		failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal()
+	}
+
+	// Seek again and read again.
+	n, err = r.Seek(offset-1, 0)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+	}
+	if n != (offset - 1) {
+		failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err).Fatal()
+	}
+
+	var buffer2 bytes.Buffer
+	if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
+		if err != io.EOF {
+			failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+		}
+	}
+	// Verify the read data against the original buffer from offset 2047 onward.
+	if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
+		failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal()
+	}
+
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests get object ReaderAt interface methods.
+func testGetObjectReadAtFunctionalV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Generate 33K of data.
+	var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+	}
+
+	// Save the data
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(n), err).Fatal()
+	}
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+	}
+
+	if st.Size != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(st.Size), err).Fatal()
+	}
+
+	offset := int64(2048)
+
+	// Read directly at absolute offsets; ReadAt does not move the read position.
+	buf2 := make([]byte, 512)
+	buf3 := make([]byte, 512)
+	buf4 := make([]byte, 512)
+
+	m, err := r.ReadAt(buf2, offset)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+	}
+	if m != len(buf2) {
+		failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err).Fatal()
+	}
+	if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+	}
+	offset += 512
+	m, err = r.ReadAt(buf3, offset)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+	}
+	if m != len(buf3) {
+		failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err).Fatal()
+	}
+	if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+	}
+	offset += 512
+	m, err = r.ReadAt(buf4, offset)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+	}
+	if m != len(buf4) {
+		failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err).Fatal()
+	}
+	if !bytes.Equal(buf4, buf[offset:offset+512]) {
+		failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+	}
+
+	buf5 := make([]byte, n)
+	// Read the whole object.
+	m, err = r.ReadAt(buf5, 0)
+	if err != nil {
+		if err != io.EOF {
+			failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+		}
+	}
+	if m != len(buf5) {
+		failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err).Fatal()
+	}
+	if !bytes.Equal(buf, buf5) {
+		failureLog(function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err).Fatal()
+	}
+
+	buf6 := make([]byte, n+1)
+	// Read the whole object and beyond.
+	_, err = r.ReadAt(buf6, 0)
+	if err != nil {
+		if err != io.EOF {
+			failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+		}
+	}
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Tests copy object
+func testCopyObjectV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "CopyObject(destination, source)"
+	args := map[string]interface{}{
+		"destination": "",
+		"source":      "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Make a new bucket in 'us-east-1' (destination bucket).
+	err = c.MakeBucket(bucketName+"-copy", "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Generate 33K of data.
+	var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+	}
+
+	r, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+	// Stat the source object to fetch its ETag for the copy conditions below.
+	objInfo, err := r.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+	}
+
+	// Copy Source
+	src := minio.NewSourceInfo(bucketName, objectName, nil)
+
+	// Set copy conditions.
+
+	// All invalid conditions first.
+	err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+	if err == nil {
+		failureLog(function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err).Fatal()
+	}
+	err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+	if err == nil {
+		failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err).Fatal()
+	}
+	err = src.SetMatchETagCond("")
+	if err == nil {
+		failureLog(function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err).Fatal()
+	}
+	err = src.SetMatchETagExceptCond("")
+	if err == nil {
+		failureLog(function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err).Fatal()
+	}
+
+	err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+	if err != nil {
+		failureLog(function, args, startTime, "", "SetModifiedSinceCond failed", err).Fatal()
+	}
+	err = src.SetMatchETagCond(objInfo.ETag)
+	if err != nil {
+		failureLog(function, args, startTime, "", "SetMatchETagCond failed", err).Fatal()
+	}
+	args["source"] = src
+
+	dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
+	if err != nil {
+		failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+	}
+	args["destination"] = dst
+
+	// Perform the Copy
+	err = c.CopyObject(dst, src)
+	if err != nil {
+		failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+	}
+
+	// Source object
+	r, err = c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+	// Destination object
+	readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+	// Check the various fields of source object against destination object.
+	objInfo, err = r.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+	}
+	objInfoCopy, err := readerCopy.Stat()
+	if err != nil {
+		failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+	}
+	if objInfo.Size != objInfoCopy.Size {
+		failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err).Fatal()
+	}
+
+	// CopyObject again but with wrong conditions
+	src = minio.NewSourceInfo(bucketName, objectName, nil)
+	err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+	if err != nil {
+		failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond failed", err).Fatal()
+	}
+	err = src.SetMatchETagExceptCond(objInfo.ETag)
+	if err != nil {
+		failureLog(function, args, startTime, "", "SetMatchETagExceptCond failed", err).Fatal()
+	}
+
+	// Perform the Copy which should fail
+	err = c.CopyObject(dst, src)
+	if err == nil {
+		failureLog(function, args, startTime, "", "CopyObject did not fail for invalid conditions", err).Fatal()
+	}
+
+	// Remove all objects and buckets
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+
+	err = c.RemoveBucket(bucketName + "-copy")
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+func testComposeObjectErrorCasesWrapper(c *minio.Client) {
+	// initialize logging params
+	startTime := time.Now()
+	function := "testComposeObjectErrorCasesWrapper(minioClient)"
+	args := map[string]interface{}{}
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err := c.MakeBucket(bucketName, "us-east-1")
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Test that more than 10K source objects cannot be
+	// concatenated.
+	srcArr := [10001]minio.SourceInfo{}
+	srcSlice := srcArr[:]
+	dst, err := minio.NewDestinationInfo(bucketName, "object", nil, nil)
+	if err != nil {
+		failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+	}
+
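+	// The expected error text below must match the minio-go client's
+	// validation message verbatim (its "as least" wording included).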
+	if err := c.ComposeObject(dst, srcSlice); err == nil {
+		failureLog(function, args, startTime, "", "Expected error in ComposeObject", err).Fatal()
+	} else if err.Error() != "There must be as least one and up to 10000 source objects." {
+		failureLog(function, args, startTime, "", "Got unexpected error", err).Fatal()
+	}
+
+	// Create a source with invalid offset spec and check that
+	// error is returned:
+	// 1. Create the source object.
+	const badSrcSize = 5 * 1024 * 1024
+	buf := bytes.Repeat([]byte("1"), badSrcSize)
+	_, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), "")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+	// 2. Set invalid range spec on the object (going beyond
+	// object size)
+	badSrc := minio.NewSourceInfo(bucketName, "badObject", nil)
+	err = badSrc.SetRange(1, badSrcSize)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Setting NewSourceInfo failed", err).Fatal()
+	}
+	// 3. ComposeObject call should fail.
+	if err := c.ComposeObject(dst, []minio.SourceInfo{badSrc}); err == nil {
+		failureLog(function, args, startTime, "", "ComposeObject expected to fail", err).Fatal()
+	} else if !strings.Contains(err.Error(), "has invalid segment-to-copy") {
+		failureLog(function, args, startTime, "", "Got invalid error", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Test expected error cases
+func testComposeObjectErrorCasesV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "testComposeObjectErrorCasesV2()"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+	}
+
+	testComposeObjectErrorCasesWrapper(c)
+}
+
+func testComposeMultipleSources(c *minio.Client) {
+	// initialize logging params
+	startTime := time.Now()
+	function := "ComposeObject(destination, sources)"
+	args := map[string]interface{}{
+		"destination": "",
+		"sources":     "",
+	}
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err := c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Upload a small source object
+	const srcSize = 1024 * 1024 * 5
+	buf := bytes.Repeat([]byte("1"), srcSize)
+	_, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), "binary/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	// We will append 10 copies of the object.
+	srcs := []minio.SourceInfo{}
+	for i := 0; i < 10; i++ {
+		srcs = append(srcs, minio.NewSourceInfo(bucketName, "srcObject", nil))
+	}
+	// Make the last part very small: a one-byte range.
+	err = srcs[9].SetRange(0, 0)
+	if err != nil {
+		failureLog(function, args, startTime, "", "SetRange failed", err).Fatal()
+	}
+	args["sources"] = srcs
+
+	dst, err := minio.NewDestinationInfo(bucketName, "dstObject", nil, nil)
+	args["destination"] = dst
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+	}
+	err = c.ComposeObject(dst, srcs)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal()
+	}
+
+	objProps, err := c.StatObject(bucketName, "dstObject")
+	if err != nil {
+		failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+	}
+
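+	// Expected size: nine full copies of srcSize plus a single byte,
+	// since the last source's range (0, 0) covers exactly one byte.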
+	if objProps.Size != 9*srcSize+1 {
+		failureLog(function, args, startTime, "", "Size mismatched! Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Test concatenating multiple source objects.
+func testCompose10KSourcesV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "testCompose10KSourcesV2(minioClient)"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+	}
+
+	testComposeMultipleSources(c)
+}
+
+func testEncryptedCopyObjectWrapper(c *minio.Client) {
+	// initialize logging params
+	startTime := time.Now()
+	function := "testEncryptedCopyObjectWrapper(minioClient)"
+	args := map[string]interface{}{}
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err := c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
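+	// SSE-C (server-side encryption with customer-provided keys) requires
+	// exactly 32-byte keys for AES256.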
+	key1 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven1"), "AES256")
+	key2 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven2"), "AES256")
+
+	// 1. create an sse-c encrypted object to copy by uploading
+	const srcSize = 1024 * 1024
+	buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+	metadata := make(map[string][]string)
+	for k, v := range key1.GetSSEHeaders() {
+		metadata[k] = append(metadata[k], v)
+	}
+	_, err = c.PutObjectWithSize(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), metadata, nil)
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal()
+	}
+
+	// 2. copy object and change encryption key
+	src := minio.NewSourceInfo(bucketName, "srcObject", &key1)
+	dst, err := minio.NewDestinationInfo(bucketName, "dstObject", &key2, nil)
+	if err != nil {
+		failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+	}
+
+	err = c.CopyObject(dst, src)
+	if err != nil {
+		failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+	}
+
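+	// Reading back an SSE-C object requires presenting the same customer
+	// key on the GET request, so key2's SSE headers are attached below.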
+	// 3. get copied object and check if content is equal
+	reqH := minio.NewGetReqHeaders()
+	for k, v := range key2.GetSSEHeaders() {
+		reqH.Set(k, v)
+	}
+	coreClient := minio.Core{c}
+	reader, _, err := coreClient.GetObject(bucketName, "dstObject", reqH)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+	defer reader.Close()
+
+	decBytes, err := ioutil.ReadAll(reader)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+	}
+	if !bytes.Equal(decBytes, buf) {
+		failureLog(function, args, startTime, "", "Downloaded object mismatched for encrypted object", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Test encrypted copy object
+func testEncryptedCopyObject() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "testEncryptedCopyObject()"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.NewV4(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+	}
+
+	// c.TraceOn(os.Stderr)
+	testEncryptedCopyObjectWrapper(c)
+}
+
+// Test encrypted copy object
+func testEncryptedCopyObjectV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "testEncryptedCopyObjectV2()"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+	}
+
+	testEncryptedCopyObjectWrapper(c)
+}
+
+func testUserMetadataCopying() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "testUserMetadataCopying()"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.NewV4(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	// c.TraceOn(os.Stderr)
+	testUserMetadataCopyingWrapper(c)
+}
+
+func testUserMetadataCopyingWrapper(c *minio.Client) {
+	// initialize logging params
+	startTime := time.Now()
+	function := "CopyObject(destination, source)"
+	args := map[string]interface{}{
+		"destination": "",
+		"source":      "",
+	}
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err := c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
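+	// fetchMeta returns only the user-defined metadata of an object,
+	// i.e. the headers prefixed with "x-amz-meta-".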
+	fetchMeta := func(object string) (h http.Header) {
+		objInfo, err := c.StatObject(bucketName, object)
+		if err != nil {
+			failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+		}
+		h = make(http.Header)
+		for k, vs := range objInfo.Metadata {
+			if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+				for _, v := range vs {
+					h.Add(k, v)
+				}
+			}
+		}
+		return h
+	}
+
+	// 1. Create a source object with user metadata set.
+	const srcSize = 1024 * 1024
+	buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+	metadata := make(http.Header)
+	metadata.Set("x-amz-meta-myheader", "myvalue")
+	_, err = c.PutObjectWithMetadata(bucketName, "srcObject",
+		bytes.NewReader(buf), metadata, nil)
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObjectWithMetadata failed", err).Fatal()
+	}
+	if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) {
+		failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+	}
+
+	// 2. create source
+	src := minio.NewSourceInfo(bucketName, "srcObject", nil)
+	// 2.1 create destination with metadata set
+	dst1, err := minio.NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"})
+	if err != nil {
+		failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+	}
+
+	// 3. Check that copying to an object with metadata set resets
+	// the headers on the copy.
+	err = c.CopyObject(dst1, src)
+	args["destination"] = dst1
+	args["source"] = src
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+	}
+
+	expectedHeaders := make(http.Header)
+	expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
+	if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) {
+		failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+	}
+
+	// 4. create destination with no metadata set and same source
+	dst2, err := minio.NewDestinationInfo(bucketName, "dstObject-2", nil, nil)
+	if err != nil {
+		failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+
+	}
+	src = minio.NewSourceInfo(bucketName, "srcObject", nil)
+
+	// 5. Check that copying to an object with no metadata set,
+	// copies metadata.
+	err = c.CopyObject(dst2, src)
+	args["destination"] = dst2
+	args["source"] = src
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+	}
+
+	expectedHeaders = metadata
+	if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) {
+		failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+	}
+
+	// 6. Compose a pair of sources.
+	srcs := []minio.SourceInfo{
+		minio.NewSourceInfo(bucketName, "srcObject", nil),
+		minio.NewSourceInfo(bucketName, "srcObject", nil),
+	}
+	dst3, err := minio.NewDestinationInfo(bucketName, "dstObject-3", nil, nil)
+	if err != nil {
+		failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+	}
+
+	err = c.ComposeObject(dst3, srcs)
+	function = "ComposeObject(destination, sources)"
+	args["destination"] = dst3
+	args["source"] = srcs
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal()
+	}
+
+	// Check that no headers are copied in this case
+	if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) {
+		failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+	}
+
+	// 7. Compose a pair of sources with dest user metadata set.
+	srcs = []minio.SourceInfo{
+		minio.NewSourceInfo(bucketName, "srcObject", nil),
+		minio.NewSourceInfo(bucketName, "srcObject", nil),
+	}
+	dst4, err := minio.NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"})
+	if err != nil {
+		failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+	}
+
+	err = c.ComposeObject(dst4, srcs)
+	function = "ComposeObject(destination, sources)"
+	args["destination"] = dst4
+	args["source"] = srcs
+
+	if err != nil {
+		failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal()
+	}
+
+	// Check that only the destination's user metadata is set in this case
+	expectedHeaders = make(http.Header)
+	expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
+	if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) {
+		failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+func testUserMetadataCopyingV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "testUserMetadataCopyingV2()"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+	}
+
+	// c.TraceOn(os.Stderr)
+	testUserMetadataCopyingWrapper(c)
+}
+
+// Test put object with unknown size (-1).
+func testPutObjectNoLengthV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "PutObjectWithSize(bucketName, objectName, reader, size, metadata, progress)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"size":       -1,
+		"metadata":   nil,
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
+		"minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	objectName := bucketName + "unique"
+	args["objectName"] = objectName
+
+	// Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover.
+	// Use different data for each part for multipart tests to ensure part order at the end.
+	var reader = getDataReader("datafile-65-MB", sixtyFiveMiB)
+	defer reader.Close()
+
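+	// A size of -1 indicates that the object length is unknown; the
+	// client then streams the reader in parts until EOF.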
+	// Upload an object.
+	n, err := c.PutObjectWithSize(bucketName, objectName, reader, -1, nil, nil)
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal()
+	}
+	if n != int64(sixtyFiveMiB) {
+		failureLog(function, args, startTime, "", "Expected upload object size "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal()
+	}
+
+	// Remove the object.
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	// Remove the bucket.
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Test put objects of unknown size.
+func testPutObjectsUnknownV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "PutObjectStreaming(bucketName, objectName, reader)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
+		"minio-go-test")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Uploading multiple objects of unknown size sequentially has been
+	// known to reveal memory issues (on machines with ~4 GB of RAM).
+	for i := 1; i <= 4; i++ {
+		// Simulate that we could be receiving byte slices of data that we want
+		// to upload as a file
+		rpipe, wpipe := io.Pipe()
+		defer rpipe.Close()
+		go func() {
+			b := []byte("test")
+			wpipe.Write(b)
+			wpipe.Close()
+		}()
+
+		// Upload the object.
+		objectName := fmt.Sprintf("%sunique%d", bucketName, i)
+		args["objectName"] = objectName
+
+		n, err := c.PutObjectStreaming(bucketName, objectName, rpipe)
+		if err != nil {
+			failureLog(function, args, startTime, "", "PutObjectStreaming failed", err).Fatal()
+		}
+		if n != int64(4) {
+			failureLog(function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(n), err).Fatal()
+		}
+
+		// Remove the object.
+		err = c.RemoveObject(bucketName, objectName)
+		if err != nil {
+			failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+		}
+	}
+
+	// Remove the bucket.
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Test put object with a zero-byte object.
+func testPutObject0ByteV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "PutObjectWithSize(bucketName, objectName, reader, size, metadata, progress)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"size":       0,
+		"metadata":   nil,
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
+		"minio-go-test")
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	objectName := bucketName + "unique"
+
+	// Upload an object.
+	n, err := c.PutObjectWithSize(bucketName, objectName, bytes.NewReader([]byte("")), 0, nil, nil)
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal()
+	}
+	if n != 0 {
+		failureLog(function, args, startTime, "", "Expected upload object size 0 but got "+string(n), err).Fatal()
+	}
+
+	// Remove the object.
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+
+	// Remove the bucket.
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// Test expected error cases
+func testComposeObjectErrorCases() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "testComposeObjectErrorCases()"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.NewV4(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	testComposeObjectErrorCasesWrapper(c)
+}
+
+// Test concatenating 10K objects
+func testCompose10KSources() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "testCompose10KSources()"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.NewV4(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+	}
+
+	testComposeMultipleSources(c)
+}
+
+// Tests a comprehensive list of all methods.
+func testFunctionalV2() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "testFunctionalV2()"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+	}
+
+	// Enable to debug
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+	}
+
+	// Generate a random file name.
+	fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	file, err := os.Create(fileName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "file create failed", err).Fatal()
+	}
+	for i := 0; i < 3; i++ {
+		buf := make([]byte, rand.Intn(1<<19))
+		_, err = file.Write(buf)
+		if err != nil {
+			failureLog(function, args, startTime, "", "file write failed", err).Fatal()
+		}
+	}
+	file.Close()
+
+	// Verify that the bucket exists and you have access.
+	var exists bool
+	exists, err = c.BucketExists(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "BucketExists failed", err).Fatal()
+	}
+	if !exists {
+		failureLog(function, args, startTime, "", "Could not find existing bucket "+bucketName, err).Fatal()
+	}
+
+	// Make the bucket 'public read/write'.
+	err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
+	if err != nil {
+		failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
+	}
+
+	// List all buckets.
+	buckets, err := c.ListBuckets()
+	if err != nil {
+		failureLog(function, args, startTime, "", "ListBuckets failed", err).Fatal()
+	}
+	if len(buckets) == 0 {
+		failureLog(function, args, startTime, "", "List buckets cannot be empty", err).Fatal()
+	}
+
+	// Verify if previously created bucket is listed in list buckets.
+	bucketFound := false
+	for _, bucket := range buckets {
+		if bucket.Name == bucketName {
+			bucketFound = true
+		}
+	}
+
+	// If bucket not found error out.
+	if !bucketFound {
+		failureLog(function, args, startTime, "", "Bucket "+bucketName+"not found", err).Fatal()
+	}
+
+	objectName := bucketName + "unique"
+
+	// Generate data
+	buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
+
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+	if n != int64(len(buf)) {
+		failureLog(function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err).Fatal()
+	}
+
+	n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
+	if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+	}
+
+	if n != int64(len(buf)) {
+		failureLog(function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err).Fatal()
+	}
+
+	// Instantiate a done channel to close all listing.
+	doneCh := make(chan struct{})
+	defer close(doneCh)
+
+	objFound := false
+	isRecursive := true // Recursive is true.
+	for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
+		if obj.Key == objectName {
+			objFound = true
+			break
+		}
+	}
+	if !objFound {
+		failureLog(function, args, startTime, "", "Could not find existing object "+objectName, err).Fatal()
+	}
+
+	incompObjNotFound := true
+	for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
+		if objIncompl.Key != "" {
+			incompObjNotFound = false
+			break
+		}
+	}
+	if !incompObjNotFound {
+		failureLog(function, args, startTime, "", "Unexpected dangling incomplete upload found", err).Fatal()
+	}
+
+	newReader, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+
+	newReadBytes, err := ioutil.ReadAll(newReader)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+	}
+
+	if !bytes.Equal(newReadBytes, buf) {
+		failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+	}
+
+	err = c.FGetObject(bucketName, objectName, fileName+"-f")
+	if err != nil {
+		failureLog(function, args, startTime, "", "FgetObject failed", err).Fatal()
+	}
+
+	// Generate presigned HEAD object url.
+	presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedHeadObject failed", err).Fatal()
+	}
+	// Verify if presigned url works.
+	resp, err := http.Head(presignedHeadURL.String())
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedHeadObject URL head request failed", err).Fatal()
+	}
+	if resp.StatusCode != http.StatusOK {
+		failureLog(function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err).Fatal()
+	}
+	if resp.Header.Get("ETag") == "" {
+		failureLog(function, args, startTime, "", "Got empty ETag", err).Fatal()
+	}
+	resp.Body.Close()
+
+	// Generate presigned GET object url.
+	presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
+	}
+	// Verify if presigned url works.
+	resp, err = http.Get(presignedGetURL.String())
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedGetObject URL GET request failed", err).Fatal()
+	}
+	if resp.StatusCode != http.StatusOK {
+		failureLog(function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err).Fatal()
+	}
+	newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+	}
+	resp.Body.Close()
+	if !bytes.Equal(newPresignedBytes, buf) {
+		failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+	}
+
+	// Set request parameters.
+	reqParams := make(url.Values)
+	reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+	// Generate presigned GET object url.
+	presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
+	}
+	// Verify if presigned url works.
+	resp, err = http.Get(presignedGetURL.String())
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedGetObject URL GET request failed", err).Fatal()
+	}
+	if resp.StatusCode != http.StatusOK {
+		failureLog(function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err).Fatal()
+	}
+	newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+	}
+	resp.Body.Close()
+	if !bytes.Equal(newPresignedBytes, buf) {
+		failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+	}
+	// Verify content disposition.
+	if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+		failureLog(function, args, startTime, "", "wrong Content-Disposition received ", err).Fatal()
+	}
+
+	presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal()
+	}
+	// Generate data of more than 32 KiB.
+	buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
+
+	req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
+	if err != nil {
+		failureLog(function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err).Fatal()
+	}
+	httpClient := &http.Client{
+		// Set a sensible timeout of 30s to wait for response
+		// headers; the request is proactively canceled if no
+		// response arrives within that window.
+		Timeout:   30 * time.Second,
+		Transport: http.DefaultTransport,
+	}
+	resp, err = httpClient.Do(req)
+	if err != nil {
+		failureLog(function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err).Fatal()
+	}
+	resp.Body.Close()
+
+	newReader, err = c.GetObject(bucketName, objectName+"-presigned")
+	if err != nil {
+		failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+	}
+
+	newReadBytes, err = ioutil.ReadAll(newReader)
+	if err != nil {
+		failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+	}
+
+	if !bytes.Equal(newReadBytes, buf) {
+		failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+	}
+
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+	err = c.RemoveObject(bucketName, objectName+"-f")
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+	err = c.RemoveObject(bucketName, objectName+"-nolength")
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+	err = c.RemoveObject(bucketName, objectName+"-presigned")
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+	}
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+	}
+	err = c.RemoveBucket(bucketName)
+	if err == nil {
+		failureLog(function, args, startTime, "", "RemoveBucket should fail as bucket does not exist", err).Fatal()
+	}
+	if err.Error() != "The specified bucket does not exist" {
+		failureLog(function, args, startTime, "", "RemoveBucket failed with wrong error message", err).Fatal()
+	}
+	if err = os.Remove(fileName); err != nil {
+		failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
+	}
+	if err = os.Remove(fileName + "-f"); err != nil {
+		failureLog(function, args, startTime, "", "File removes failed", err).Fatal()
+	}
+	successLogger(function, args, startTime).Info()
+}
+
+// mustParseBool converts a string to a bool, returning false on any parse error.
+func mustParseBool(str string) bool {
+	b, err := strconv.ParseBool(str)
+	if err != nil {
+		return false
+	}
+	return b
+}
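+
+// For example (illustrative; "SOME_FLAG" is a made-up variable name):
+// mustParseBool(os.Getenv("SOME_FLAG")) returns false when the variable is
+// unset or holds anything strconv.ParseBool rejects.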
+
+func main() {
+	// Output to stdout instead of the default stderr
+	log.SetOutput(os.Stdout)
+	// create custom formatter
+	mintFormatter := mintJSONFormatter{}
+	// set custom formatter
+	log.SetFormatter(&mintFormatter)
+	// log Info or above -- success cases are Info level, failures are Fatal level
+	log.SetLevel(log.InfoLevel)
+	// execute tests
+	if !isQuickMode() {
+		testMakeBucketErrorV2()
+		testGetObjectClosedTwiceV2()
+		testRemovePartiallyUploadedV2()
+		testFPutObjectV2()
+		testMakeBucketRegionsV2()
+		testGetObjectReadSeekFunctionalV2()
+		testGetObjectReadAtFunctionalV2()
+		testCopyObjectV2()
+		testFunctionalV2()
+		testComposeObjectErrorCasesV2()
+		testCompose10KSourcesV2()
+		testEncryptedCopyObjectV2()
+		testUserMetadataCopyingV2()
+		testPutObject0ByteV2()
+		testPutObjectNoLengthV2()
+		testPutObjectsUnknownV2()
+		testMakeBucketError()
+		testMakeBucketRegions()
+		testPutObjectWithMetadata()
+		testPutObjectReadAt()
+		testPutObjectStreaming()
+		testListPartiallyUploaded()
+		testGetObjectSeekEnd()
+		testGetObjectClosedTwice()
+		testRemoveMultipleObjects()
+		testRemovePartiallyUploaded()
+		testFPutObjectMultipart()
+		testFPutObject()
+		testGetObjectReadSeekFunctional()
+		testGetObjectReadAtFunctional()
+		testPresignedPostPolicy()
+		testCopyObject()
+		testEncryptionPutGet()
+		testComposeObjectErrorCases()
+		testCompose10KSources()
+		testUserMetadataCopying()
+		testEncryptedCopyObject()
+		testBucketNotification()
+		testFunctional()
+		testGetObjectObjectModified()
+		testPutObjectUploadSeekedObject()
+	} else {
+		testFunctional()
+		testFunctionalV2()
+	}
+}
diff --git a/vendor/github.com/prometheus/common/AUTHORS.md b/vendor/github.com/prometheus/common/AUTHORS.md
deleted file mode 100644
index c63f4d39511614f0b3401f3077681032f6d70a23..0000000000000000000000000000000000000000
--- a/vendor/github.com/prometheus/common/AUTHORS.md
+++ /dev/null
@@ -1,11 +0,0 @@
-Maintainers of this repository:
-
-* Fabian Reinartz <fabian@soundcloud.com>
-
-The following individuals have contributed code to this repository
-(listed in alphabetical order):
-
-* Björn Rabenstein <beorn@soundcloud.com>
-* Fabian Reinartz <fabian@soundcloud.com>
-* Julius Volz <julius.volz@gmail.com>
-* Miguel Molina <hi@mvader.me>
diff --git a/vendor/github.com/prometheus/common/config/config.go b/vendor/github.com/prometheus/common/config/config.go
index 33eb922ce82b41d460d7f8e382265df2a21b6b85..9195c34bfd591e486bcb34ce4adc83abc0073093 100644
--- a/vendor/github.com/prometheus/common/config/config.go
+++ b/vendor/github.com/prometheus/common/config/config.go
@@ -28,3 +28,20 @@ func checkOverflow(m map[string]interface{}, ctx string) error {
 	}
 	return nil
 }
+
+// Secret is a special type for storing secrets.
+type Secret string
+
+// MarshalYAML implements the yaml.Marshaler interface for Secrets.
+func (s Secret) MarshalYAML() (interface{}, error) {
+	if s != "" {
+		return "<secret>", nil
+	}
+	return nil, nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets.
+func (s *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	type plain Secret
+	return unmarshal((*plain)(s))
+}
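+
+// Illustrative example (not upstream code): marshaling a struct that
+// contains a Secret masks the value, e.g.
+//
+//	type conf struct {
+//		Password Secret `yaml:"password"`
+//	}
+//	out, _ := yaml.Marshal(conf{Password: "hunter2"})
+//	// string(out) == "password: <secret>\n"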
diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go
new file mode 100644
index 0000000000000000000000000000000000000000..ea231bf8d2c7e9e6a06191a3203162f865c66709
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/http_config.go
@@ -0,0 +1,281 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"gopkg.in/yaml.v2"
+)
+
+// BasicAuth contains basic HTTP authentication credentials.
+type BasicAuth struct {
+	Username string `yaml:"username"`
+	Password Secret `yaml:"password"`
+
+	// Catches all undefined fields and must be empty after parsing.
+	XXX map[string]interface{} `yaml:",inline"`
+}
+
+// URL is a custom URL type that allows validation at configuration load time.
+type URL struct {
+	*url.URL
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface for URLs.
+func (u *URL) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+
+	urlp, err := url.Parse(s)
+	if err != nil {
+		return err
+	}
+	u.URL = urlp
+	return nil
+}
+
+// MarshalYAML implements the yaml.Marshaler interface for URLs.
+func (u URL) MarshalYAML() (interface{}, error) {
+	if u.URL != nil {
+		return u.String(), nil
+	}
+	return nil, nil
+}
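+
+// Illustrative example (not upstream code): a URL round-trips through YAML
+// as its string form, e.g.
+//
+//	var u URL
+//	_ = yaml.Unmarshal([]byte(`http://proxy.local:3128`), &u)
+//	// u.Host == "proxy.local:3128"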
+
+// HTTPClientConfig configures an HTTP client.
+type HTTPClientConfig struct {
+	// The HTTP basic authentication credentials for the targets.
+	BasicAuth *BasicAuth `yaml:"basic_auth,omitempty"`
+	// The bearer token for the targets.
+	BearerToken Secret `yaml:"bearer_token,omitempty"`
+	// The bearer token file for the targets.
+	BearerTokenFile string `yaml:"bearer_token_file,omitempty"`
+	// HTTP proxy server to use to connect to the targets.
+	ProxyURL URL `yaml:"proxy_url,omitempty"`
+	// TLSConfig to use to connect to the targets.
+	TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
+
+	// Catches all undefined fields and must be empty after parsing.
+	XXX map[string]interface{} `yaml:",inline"`
+}
+
+// Validate validates the HTTPClientConfig to check only one of BearerToken,
+// BasicAuth and BearerTokenFile is configured.
+func (c *HTTPClientConfig) Validate() error {
+	if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 {
+		return fmt.Errorf("at most one of bearer_token & bearer_token_file must be configured")
+	}
+	if c.BasicAuth != nil && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) {
+		return fmt.Errorf("at most one of basic_auth, bearer_token & bearer_token_file must be configured")
+	}
+	return nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *HTTPClientConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	type plain HTTPClientConfig
+	err := unmarshal((*plain)(c))
+	if err != nil {
+		return err
+	}
+	err = c.Validate()
+	if err != nil {
+		return err
+	}
+	return checkOverflow(c.XXX, "http_client_config")
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (a *BasicAuth) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	type plain BasicAuth
+	err := unmarshal((*plain)(a))
+	if err != nil {
+		return err
+	}
+	return checkOverflow(a.XXX, "basic_auth")
+}
+
+// NewHTTPClientFromConfig returns a new HTTP client configured for the
+// given config.HTTPClientConfig.
+func NewHTTPClientFromConfig(cfg *HTTPClientConfig) (*http.Client, error) {
+	tlsConfig, err := NewTLSConfig(&cfg.TLSConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	// It's the caller's job to handle timeouts
+	var rt http.RoundTripper = &http.Transport{
+		Proxy:             http.ProxyURL(cfg.ProxyURL.URL),
+		DisableKeepAlives: true,
+		TLSClientConfig:   tlsConfig,
+	}
+
+	// If a bearer token is provided, create a round tripper that will set the
+	// Authorization header correctly on each request.
+	bearerToken := cfg.BearerToken
+	if len(bearerToken) == 0 && len(cfg.BearerTokenFile) > 0 {
+		b, err := ioutil.ReadFile(cfg.BearerTokenFile)
+		if err != nil {
+			return nil, fmt.Errorf("unable to read bearer token file %s: %s", cfg.BearerTokenFile, err)
+		}
+		bearerToken = Secret(strings.TrimSpace(string(b)))
+	}
+
+	if len(bearerToken) > 0 {
+		rt = NewBearerAuthRoundTripper(bearerToken, rt)
+	}
+
+	if cfg.BasicAuth != nil {
+		rt = NewBasicAuthRoundTripper(cfg.BasicAuth.Username, Secret(cfg.BasicAuth.Password), rt)
+	}
+
+	// Return a new client with the configured round tripper.
+	return &http.Client{Transport: rt}, nil
+}
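+
+// Illustrative usage (not upstream code; the file paths are made up):
+//
+//	cfg := &HTTPClientConfig{
+//		BearerTokenFile: "/var/run/secrets/token",
+//		TLSConfig:       TLSConfig{CAFile: "/etc/ssl/ca.pem"},
+//	}
+//	client, err := NewHTTPClientFromConfig(cfg)
+//	// client now attaches "Authorization: Bearer <token>" to each request.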
+
+type bearerAuthRoundTripper struct {
+	bearerToken Secret
+	rt          http.RoundTripper
+}
+
+type basicAuthRoundTripper struct {
+	username string
+	password Secret
+	rt       http.RoundTripper
+}
+
+// NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a request unless it has
+// already been set.
+func NewBasicAuthRoundTripper(username string, password Secret, rt http.RoundTripper) http.RoundTripper {
+	return &basicAuthRoundTripper{username, password, rt}
+}
+
+func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	if len(req.Header.Get("Authorization")) == 0 {
+		req = cloneRequest(req)
+		req.Header.Set("Authorization", "Bearer "+string(rt.bearerToken))
+	}
+
+	return rt.rt.RoundTrip(req)
+}
+
+// NewBearerAuthRoundTripper adds the provided bearer token to a request unless the authorization
+// header has already been set.
+func NewBearerAuthRoundTripper(bearer Secret, rt http.RoundTripper) http.RoundTripper {
+	return &bearerAuthRoundTripper{bearer, rt}
+}
+
+func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	if len(req.Header.Get("Authorization")) != 0 {
+		return rt.rt.RoundTrip(req)
+	}
+	req = cloneRequest(req)
+	req.SetBasicAuth(rt.username, string(rt.password))
+	return rt.rt.RoundTrip(req)
+}
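+
+// Illustrative usage (not upstream code): wrap any base transport so that
+// requests gain credentials unless an Authorization header is already set:
+//
+//	rt := NewBasicAuthRoundTripper("user", Secret("pass"), http.DefaultTransport)
+//	client := &http.Client{Transport: rt}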
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// Shallow copy of the struct.
+	r2 := new(http.Request)
+	*r2 = *r
+	// Deep copy of the Header.
+	r2.Header = make(http.Header)
+	for k, s := range r.Header {
+		r2.Header[k] = s
+	}
+	return r2
+}
+
+// NewTLSConfig creates a new tls.Config from the given config.TLSConfig.
+func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) {
+	tlsConfig := &tls.Config{InsecureSkipVerify: cfg.InsecureSkipVerify}
+
+	// If a CA cert is provided then let's read it in so we can validate the
+	// scrape target's certificate properly.
+	if len(cfg.CAFile) > 0 {
+		caCertPool := x509.NewCertPool()
+		// Load CA cert.
+		caCert, err := ioutil.ReadFile(cfg.CAFile)
+		if err != nil {
+			return nil, fmt.Errorf("unable to use specified CA cert %s: %s", cfg.CAFile, err)
+		}
+		caCertPool.AppendCertsFromPEM(caCert)
+		tlsConfig.RootCAs = caCertPool
+	}
+
+	if len(cfg.ServerName) > 0 {
+		tlsConfig.ServerName = cfg.ServerName
+	}
+
+	// If a client cert & key is provided then configure TLS config accordingly.
+	if len(cfg.CertFile) > 0 && len(cfg.KeyFile) == 0 {
+		return nil, fmt.Errorf("client cert file %q specified without client key file", cfg.CertFile)
+	} else if len(cfg.KeyFile) > 0 && len(cfg.CertFile) == 0 {
+		return nil, fmt.Errorf("client key file %q specified without client cert file", cfg.KeyFile)
+	} else if len(cfg.CertFile) > 0 && len(cfg.KeyFile) > 0 {
+		cert, err := tls.LoadX509KeyPair(cfg.CertFile, cfg.KeyFile)
+		if err != nil {
+			return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", cfg.CertFile, cfg.KeyFile, err)
+		}
+		tlsConfig.Certificates = []tls.Certificate{cert}
+	}
+	tlsConfig.BuildNameToCertificate()
+
+	return tlsConfig, nil
+}
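+
+// Illustrative usage (not upstream code; the file paths are made up):
+//
+//	tlsCfg, err := NewTLSConfig(&TLSConfig{
+//		CAFile:     "/etc/ssl/ca.pem",
+//		ServerName: "metrics.internal",
+//	})
+//	// tlsCfg can then be used as an http.Transport's TLSClientConfig.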
+
+// TLSConfig configures the options for TLS connections.
+type TLSConfig struct {
+	// The CA cert to use for the targets.
+	CAFile string `yaml:"ca_file,omitempty"`
+	// The client cert file for the targets.
+	CertFile string `yaml:"cert_file,omitempty"`
+	// The client key file for the targets.
+	KeyFile string `yaml:"key_file,omitempty"`
+	// Used to verify the hostname for the targets.
+	ServerName string `yaml:"server_name,omitempty"`
+	// Disable target certificate validation.
+	InsecureSkipVerify bool `yaml:"insecure_skip_verify"`
+
+	// Catches all undefined fields and must be empty after parsing.
+	XXX map[string]interface{} `yaml:",inline"`
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	type plain TLSConfig
+	if err := unmarshal((*plain)(c)); err != nil {
+		return err
+	}
+	return checkOverflow(c.XXX, "TLS config")
+}
+
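+// String implements fmt.Stringer by rendering the config as YAML; Secret
+// fields are masked as "<secret>" by their MarshalYAML implementation.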
+func (c HTTPClientConfig) String() string {
+	b, err := yaml.Marshal(c)
+	if err != nil {
+		return fmt.Sprintf("<error creating http client config string: %s>", err)
+	}
+	return string(b)
+}
diff --git a/vendor/github.com/prometheus/common/config/http_config_test.go b/vendor/github.com/prometheus/common/config/http_config_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..4b13e101b99834f7ab7258c48bff04b79dbf3e42
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/http_config_test.go
@@ -0,0 +1,157 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"testing"
+
+	yaml "gopkg.in/yaml.v2"
+)
+
+var invalidHTTPClientConfigs = []struct {
+	httpClientConfigFile string
+	errMsg               string
+}{
+	{
+		httpClientConfigFile: "testdata/http.conf.bearer-token-and-file-set.bad.yml",
+		errMsg:               "at most one of bearer_token & bearer_token_file must be configured",
+	},
+	{
+		httpClientConfigFile: "testdata/http.conf.empty.bad.yml",
+		errMsg:               "at most one of basic_auth, bearer_token & bearer_token_file must be configured",
+	},
+}
+
+func TestAuthRoundTrippers(t *testing.T) {
+
+	cfg, _, err := LoadHTTPConfigFile("testdata/http.conf.good.yml")
+	if err != nil {
+		t.Errorf("Error loading HTTP client config: %v", err)
+	}
+
+	tlsConfig, err := NewTLSConfig(&cfg.TLSConfig)
+	if err != nil {
+		t.Errorf("Error creating new TLS config: %v", err)
+	}
+
+	rt := &http.Transport{
+		Proxy:             http.ProxyURL(cfg.ProxyURL.URL),
+		DisableKeepAlives: true,
+		TLSClientConfig:   tlsConfig,
+	}
+	req := new(http.Request)
+
+	bearerAuthRoundTripper := NewBearerAuthRoundTripper("mysecret", rt)
+	bearerAuthRoundTripper.RoundTrip(req)
+
+	basicAuthRoundTripper := NewBasicAuthRoundTripper("username", "password", rt)
+	basicAuthRoundTripper.RoundTrip(req)
+}
+
+func TestHideHTTPClientConfigSecrets(t *testing.T) {
+	c, _, err := LoadHTTPConfigFile("testdata/http.conf.good.yml")
+	if err != nil {
+		t.Errorf("Error parsing %s: %s", "testdata/http.conf.good.yml", err)
+	}
+
+	// String method must not reveal authentication credentials.
+	s := c.String()
+	if strings.Contains(s, "mysecret") {
+		t.Fatal("http client config's String method reveals authentication credentials.")
+	}
+}
+
+func mustParseURL(u string) *URL {
+	parsed, err := url.Parse(u)
+	if err != nil {
+		panic(err)
+	}
+	return &URL{URL: parsed}
+}
+
+func TestNewClientFromConfig(t *testing.T) {
+	cfg, _, err := LoadHTTPConfigFile("testdata/http.conf.good.yml")
+	if err != nil {
+		t.Errorf("Error loading HTTP client config: %v", err)
+	}
+	_, err = NewHTTPClientFromConfig(cfg)
+	if err != nil {
+		t.Errorf("Error creating new client from config: %v", err)
+	}
+}
+
+func TestNewClientFromInvalidConfig(t *testing.T) {
+	cfg, _, err := LoadHTTPConfigFile("testdata/http.conf.invalid-bearer-token-file.bad.yml")
+	if err != nil {
+		t.Errorf("Error loading HTTP client config: %v", err)
+	}
+	_, err = NewHTTPClientFromConfig(cfg)
+	if err == nil {
+		t.Error("Expected error creating new client from invalid config but got none")
+	}
+	if !strings.Contains(err.Error(), "unable to read bearer token file file: open file: no such file or directory") {
+		t.Errorf("Expected error with config but got: %s", err.Error())
+	}
+}
+
+func TestValidateHTTPConfig(t *testing.T) {
+	cfg, _, err := LoadHTTPConfigFile("testdata/http.conf.good.yml")
+	if err != nil {
+		t.Errorf("Error loading HTTP client config: %v", err)
+	}
+	err = cfg.Validate()
+	if err != nil {
+		t.Fatalf("Error validating %s: %s", "testdata/http.conf.good.yml", err)
+	}
+}
+
+func TestInvalidHTTPConfigs(t *testing.T) {
+	for _, ee := range invalidHTTPClientConfigs {
+		_, _, err := LoadHTTPConfigFile(ee.httpClientConfigFile)
+		if err == nil {
+			t.Error("Expected error with config but got none")
+			continue
+		}
+		if !strings.Contains(err.Error(), ee.errMsg) {
+			t.Errorf("Expected error for invalid HTTP client configuration to contain %q but got: %s", ee.errMsg, err)
+		}
+	}
+}
+
+// LoadHTTPConfig parses the YAML input s into a HTTPClientConfig.
+func LoadHTTPConfig(s string) (*HTTPClientConfig, error) {
+	cfg := &HTTPClientConfig{}
+	err := yaml.Unmarshal([]byte(s), cfg)
+	if err != nil {
+		return nil, err
+	}
+	return cfg, nil
+}
+
+// LoadHTTPConfigFile parses the given YAML file into a HTTPClientConfig.
+func LoadHTTPConfigFile(filename string) (*HTTPClientConfig, []byte, error) {
+	content, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, nil, err
+	}
+	cfg, err := LoadHTTPConfig(string(content))
+	if err != nil {
+		return nil, nil, err
+	}
+	return cfg, content, nil
+}
diff --git a/vendor/github.com/prometheus/common/config/tls_config.go b/vendor/github.com/prometheus/common/config/tls_config.go
deleted file mode 100644
index 7c7e7cb02ace1ad7f58c1bb738295d68d6b0fc54..0000000000000000000000000000000000000000
--- a/vendor/github.com/prometheus/common/config/tls_config.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package config
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"fmt"
-	"io/ioutil"
-)
-
-// TLSConfig configures the options for TLS connections.
-type TLSConfig struct {
-	// The CA cert to use for the targets.
-	CAFile string `yaml:"ca_file,omitempty"`
-	// The client cert file for the targets.
-	CertFile string `yaml:"cert_file,omitempty"`
-	// The client key file for the targets.
-	KeyFile string `yaml:"key_file,omitempty"`
-	// Disable target certificate validation.
-	InsecureSkipVerify bool `yaml:"insecure_skip_verify"`
-
-	// Catches all undefined fields and must be empty after parsing.
-	XXX map[string]interface{} `yaml:",inline"`
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	type plain TLSConfig
-	if err := unmarshal((*plain)(c)); err != nil {
-		return err
-	}
-	return checkOverflow(c.XXX, "TLS config")
-}
-
-// GenerateConfig produces a tls.Config based on TLS connection options.
-// It loads certificate files from disk if they are defined.
-func (c *TLSConfig) GenerateConfig() (*tls.Config, error) {
-	tlsConfig := &tls.Config{InsecureSkipVerify: c.InsecureSkipVerify}
-
-	// If a CA cert is provided then let's read it in so we can validate the
-	// scrape target's certificate properly.
-	if len(c.CAFile) > 0 {
-		caCertPool := x509.NewCertPool()
-		// Load CA cert.
-		caCert, err := ioutil.ReadFile(c.CAFile)
-		if err != nil {
-			return nil, fmt.Errorf("unable to use specified CA cert %s: %s", c.CAFile, err)
-		}
-		caCertPool.AppendCertsFromPEM(caCert)
-		tlsConfig.RootCAs = caCertPool
-	}
-
-	if len(c.CertFile) > 0 && len(c.KeyFile) == 0 {
-		return nil, fmt.Errorf("client cert file %q specified without client key file", c.CertFile)
-	} else if len(c.KeyFile) > 0 && len(c.CertFile) == 0 {
-		return nil, fmt.Errorf("client key file %q specified without client cert file", c.KeyFile)
-	} else if len(c.CertFile) > 0 && len(c.KeyFile) > 0 {
-		cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile)
-		if err != nil {
-			return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err)
-		}
-		tlsConfig.Certificates = []tls.Certificate{cert}
-	}
-	tlsConfig.BuildNameToCertificate()
-
-	return tlsConfig, nil
-}
diff --git a/vendor/github.com/prometheus/common/config/tls_config_test.go b/vendor/github.com/prometheus/common/config/tls_config_test.go
index 444303532e3bd057a9c481d1c300ebea51091851..e2bd68edbc630719fa27efee9a7c04b47ad662f8 100644
--- a/vendor/github.com/prometheus/common/config/tls_config_test.go
+++ b/vendor/github.com/prometheus/common/config/tls_config_test.go
@@ -33,7 +33,7 @@ func LoadTLSConfig(filename string) (*tls.Config, error) {
 	if err = yaml.Unmarshal(content, cfg); err != nil {
 		return nil, err
 	}
-	return cfg.GenerateConfig()
+	return NewTLSConfig(cfg)
 }
 
 var expectedTLSConfigs = []struct {
@@ -57,7 +57,7 @@ func TestValidTLSConfig(t *testing.T) {
 			t.Errorf("Error parsing %s: %s", cfg.filename, err)
 		}
 		if !reflect.DeepEqual(*got, *cfg.config) {
-			t.Fatalf("%s: unexpected config result: \n\n%s\n expected\n\n%s", cfg.filename, got, cfg.config)
+			t.Fatalf("%v: unexpected config result: \n\n%v\n expected\n\n%v", cfg.filename, got, cfg.config)
 		}
 	}
 }
diff --git a/vendor/github.com/prometheus/common/expfmt/bench_test.go b/vendor/github.com/prometheus/common/expfmt/bench_test.go
index 92b16a028aad9d47918209a0099ce37ef937b49f..e539bfc13a343023e46da0e1251ecfbcb095644f 100644
--- a/vendor/github.com/prometheus/common/expfmt/bench_test.go
+++ b/vendor/github.com/prometheus/common/expfmt/bench_test.go
@@ -42,10 +42,6 @@ var parser TextParser
 // the difference becomes less relevant, only ~4x.
 //
 // The test data contains 248 samples.
-//
-// BenchmarkProcessor002ParseOnly in the extraction package is not quite
-// comparable to the benchmarks here, but it gives an idea: JSON parsing is even
-// slower than text parsing and needs a comparable amount of allocs.
 
 // BenchmarkParseText benchmarks the parsing of a text-format scrape into metric
 // family DTOs.
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 487fdc6cca9e6c8cac075d060e0e9bbfa4d4498a..a7a42d5ef4130ae516dadea69e633942b148a3e6 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -31,6 +31,7 @@ type Decoder interface {
 	Decode(*dto.MetricFamily) error
 }
 
+// DecodeOptions contains options used by the Decoder and in sample extraction.
 type DecodeOptions struct {
 	// Timestamp is added to each value from the stream that has no explicit timestamp set.
 	Timestamp model.Time
@@ -142,6 +143,8 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error {
 	return nil
 }
 
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
 type SampleDecoder struct {
 	Dec  Decoder
 	Opts *DecodeOptions
@@ -149,37 +152,51 @@ type SampleDecoder struct {
 	f dto.MetricFamily
 }
 
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
 func (sd *SampleDecoder) Decode(s *model.Vector) error {
-	if err := sd.Dec.Decode(&sd.f); err != nil {
+	err := sd.Dec.Decode(&sd.f)
+	if err != nil {
 		return err
 	}
-	*s = extractSamples(&sd.f, sd.Opts)
-	return nil
+	*s, err = extractSamples(&sd.f, sd.Opts)
+	return err
 }
 
-// Extract samples builds a slice of samples from the provided metric families.
-func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
-	var all model.Vector
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+	var (
+		all     model.Vector
+		lastErr error
+	)
 	for _, f := range fams {
-		all = append(all, extractSamples(f, o)...)
+		some, err := extractSamples(f, o)
+		if err != nil {
+			lastErr = err
+			continue
+		}
+		all = append(all, some...)
 	}
-	return all
+	return all, lastErr
 }
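+
+// Illustrative usage (not upstream code):
+//
+//	vec, err := ExtractSamples(&DecodeOptions{Timestamp: model.Now()}, fams...)
+//	// vec holds every sample that could be extracted; err is the last error
+//	// encountered, if any.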
 
-func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
 	switch f.GetType() {
 	case dto.MetricType_COUNTER:
-		return extractCounter(o, f)
+		return extractCounter(o, f), nil
 	case dto.MetricType_GAUGE:
-		return extractGauge(o, f)
+		return extractGauge(o, f), nil
 	case dto.MetricType_SUMMARY:
-		return extractSummary(o, f)
+		return extractSummary(o, f), nil
 	case dto.MetricType_UNTYPED:
-		return extractUntyped(o, f)
+		return extractUntyped(o, f), nil
 	case dto.MetricType_HISTOGRAM:
-		return extractHistogram(o, f)
+		return extractHistogram(o, f), nil
 	}
-	panic("expfmt.extractSamples: unknown metric family type")
+	return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
 }
 
 func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
diff --git a/vendor/github.com/prometheus/common/expfmt/decode_test.go b/vendor/github.com/prometheus/common/expfmt/decode_test.go
index c27325a9d448b5210aa044adfa401a1c370daecd..82c1130c9dd63152a8f64eecc2e709945dec57f5 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode_test.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode_test.go
@@ -21,6 +21,9 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/golang/protobuf/proto"
+	dto "github.com/prometheus/client_model/go"
+
 	"github.com/prometheus/common/model"
 )
 
@@ -365,3 +368,68 @@ func BenchmarkDiscriminatorHTTPHeader(b *testing.B) {
 		testDiscriminatorHTTPHeader(b)
 	}
 }
+
+func TestExtractSamples(t *testing.T) {
+	var (
+		goodMetricFamily1 = &dto.MetricFamily{
+			Name: proto.String("foo"),
+			Help: proto.String("Help for foo."),
+			Type: dto.MetricType_COUNTER.Enum(),
+			Metric: []*dto.Metric{
+				&dto.Metric{
+					Counter: &dto.Counter{
+						Value: proto.Float64(4711),
+					},
+				},
+			},
+		}
+		goodMetricFamily2 = &dto.MetricFamily{
+			Name: proto.String("bar"),
+			Help: proto.String("Help for bar."),
+			Type: dto.MetricType_GAUGE.Enum(),
+			Metric: []*dto.Metric{
+				&dto.Metric{
+					Gauge: &dto.Gauge{
+						Value: proto.Float64(3.14),
+					},
+				},
+			},
+		}
+		badMetricFamily = &dto.MetricFamily{
+			Name: proto.String("bad"),
+			Help: proto.String("Help for bad."),
+			Type: dto.MetricType(42).Enum(),
+			Metric: []*dto.Metric{
+				&dto.Metric{
+					Gauge: &dto.Gauge{
+						Value: proto.Float64(2.7),
+					},
+				},
+			},
+		}
+
+		opts = &DecodeOptions{
+			Timestamp: 42,
+		}
+	)
+
+	got, err := ExtractSamples(opts, goodMetricFamily1, goodMetricFamily2)
+	if err != nil {
+		t.Error("Unexpected error from ExtractSamples:", err)
+	}
+	want := model.Vector{
+		&model.Sample{Metric: model.Metric{model.MetricNameLabel: "foo"}, Value: 4711, Timestamp: 42},
+		&model.Sample{Metric: model.Metric{model.MetricNameLabel: "bar"}, Value: 3.14, Timestamp: 42},
+	}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("unexpected samples extracted, got: %v, want: %v", got, want)
+	}
+
+	got, err = ExtractSamples(opts, goodMetricFamily1, badMetricFamily, goodMetricFamily2)
+	if err == nil {
+		t.Error("Expected error from ExtractSamples")
+	}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("unexpected samples extracted, got: %v, want: %v", got, want)
+	}
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index 366fbde98a154461cdf958950e35153c3cf6d27d..371ac75037bccbe64b25a88ae4eedc9c78a08bc8 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -11,14 +11,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// A package for reading and writing Prometheus metrics.
+// Package expfmt contains tools for reading and writing Prometheus metrics.
 package expfmt
 
+// Format specifies the HTTP content type of the different wire protocols.
 type Format string
 
+// Constants to assemble the Content-Type values for the different wire protocols.
 const (
-	TextVersion = "0.0.4"
-
+	TextVersion   = "0.0.4"
 	ProtoType     = `application/vnd.google.protobuf`
 	ProtoProtocol = `io.prometheus.client.MetricFamily`
 	ProtoFmt      = ProtoType + "; proto=" + ProtoProtocol + ";"
@@ -29,9 +30,6 @@ const (
 	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
 	FmtProtoText    Format = ProtoFmt + ` encoding=text`
 	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
-
-	// fmtJSON2 is hidden as it is deprecated.
-	fmtJSON2 Format = `application/json; version=0.0.2`
 )
 
 const (
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index 1e060879e1d3008fbd84fab5450f186cd96b0c30..f11321cd0c726939ac1297d445bec4535572c0b8 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -25,9 +25,12 @@ import (
 
 // MetricFamilyToText converts a MetricFamily proto message into text format and
 // writes the resulting lines to 'out'. It returns the number of bytes written
-// and any error encountered.  This function does not perform checks on the
-// content of the metric and label names, i.e. invalid metric or label names
+// and any error encountered. The output will have the same order as the input,
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
 // will result in invalid text format output.
+//
 // This method fulfills the type 'prometheus.encoder'.
 func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
 	var written int
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index bd170b1675f897770af7aa622df29f94f90c8634..54bcfde2946a26accff2252a39300c136db685af 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -47,7 +47,7 @@ func (e ParseError) Error() string {
 }
 
 // TextParser is used to parse the simple and flat text-based exchange format. Its
-// nil value is ready to use.
+// zero value is ready to use.
 type TextParser struct {
 	metricFamiliesByName map[string]*dto.MetricFamily
 	buf                  *bufio.Reader // Where the parsed input is read through.
@@ -315,6 +315,10 @@ func (p *TextParser) startLabelValue() stateFn {
 	if p.readTokenAsLabelValue(); p.err != nil {
 		return nil
 	}
+	if !model.LabelValue(p.currentToken.String()).IsValid() {
+		p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
+		return nil
+	}
 	p.currentLabelPair.Value = proto.String(p.currentToken.String())
 	// Special treatment of summaries:
 	// - Quantile labels are special, will result in dto.Quantile later.
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse_test.go b/vendor/github.com/prometheus/common/expfmt/text_parse_test.go
index 7e7388ce964245272daabe2ecd122d0cde0b1044..76c9511853a7b01fe6646697ad310cecf32c553a 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse_test.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse_test.go
@@ -559,6 +559,11 @@ metric_bucket{le="bla"} 3.14
 `,
 			err: "text format parsing error in line 3: expected float as value for 'le' label",
 		},
+		// 19: Invalid UTF-8 in label value.
+		{
+			in:  "metric{l=\"\xbd\"} 3.14\n",
+			err: "text format parsing error in line 1: invalid label value \"\\xbd\"",
+		},
 	}
 
 	for i, scenario := range scenarios {
diff --git a/vendor/github.com/prometheus/common/log/eventlog_formatter.go b/vendor/github.com/prometheus/common/log/eventlog_formatter.go
new file mode 100644
index 0000000000000000000000000000000000000000..bcf68e6f24e6ab296ad97516703fff027fb0b836
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/eventlog_formatter.go
@@ -0,0 +1,89 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package log
+
+import (
+	"fmt"
+	"os"
+
+	"golang.org/x/sys/windows/svc/eventlog"
+
+	"github.com/sirupsen/logrus"
+)
+
+func init() {
+	setEventlogFormatter = func(l logger, name string, debugAsInfo bool) error {
+		if name == "" {
+			return fmt.Errorf("missing name parameter")
+		}
+
+		fmter, err := newEventlogger(name, debugAsInfo, l.entry.Logger.Formatter)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err)
+			l.Errorf("can't connect logger to eventlog: %v", err)
+			return err
+		}
+		l.entry.Logger.Formatter = fmter
+		return nil
+	}
+}
+
+type eventlogger struct {
+	log         *eventlog.Log
+	debugAsInfo bool
+	wrap        logrus.Formatter
+}
+
+func newEventlogger(name string, debugAsInfo bool, fmter logrus.Formatter) (*eventlogger, error) {
+	logHandle, err := eventlog.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	return &eventlogger{log: logHandle, debugAsInfo: debugAsInfo, wrap: fmter}, nil
+}
+
+func (s *eventlogger) Format(e *logrus.Entry) ([]byte, error) {
+	data, err := s.wrap.Format(e)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "eventlogger: can't format entry: %v\n", err)
+		return data, err
+	}
+
+	switch e.Level {
+	case logrus.PanicLevel:
+		fallthrough
+	case logrus.FatalLevel:
+		fallthrough
+	case logrus.ErrorLevel:
+		err = s.log.Error(102, e.Message)
+	case logrus.WarnLevel:
+		err = s.log.Warning(101, e.Message)
+	case logrus.InfoLevel:
+		err = s.log.Info(100, e.Message)
+	case logrus.DebugLevel:
+		if s.debugAsInfo {
+			err = s.log.Info(100, e.Message)
+		}
+	default:
+		err = s.log.Info(100, e.Message)
+	}
+
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "eventlogger: can't send log to eventlog: %v\n", err)
+	}
+
+	return data, err
+}
diff --git a/vendor/github.com/prometheus/common/log/log.go b/vendor/github.com/prometheus/common/log/log.go
index 1477cd891b8d559f4ae060b3a6aeac0f3ab2b14e..108830255db07ee792613fded638c1f3a90cdcc7 100644
--- a/vendor/github.com/prometheus/common/log/log.go
+++ b/vendor/github.com/prometheus/common/log/log.go
@@ -14,95 +14,59 @@
 package log
 
 import (
-	"flag"
 	"fmt"
+	"io"
+	"io/ioutil"
 	"log"
 	"net/url"
 	"os"
 	"runtime"
+	"strconv"
 	"strings"
 
 	"github.com/sirupsen/logrus"
+	"gopkg.in/alecthomas/kingpin.v2"
 )
 
-type levelFlag struct{}
-
-// String implements flag.Value.
-func (f levelFlag) String() string {
-	return origLogger.Level.String()
-}
-
-// Set implements flag.Value.
-func (f levelFlag) Set(level string) error {
-	l, err := logrus.ParseLevel(level)
-	if err != nil {
-		return err
-	}
-	origLogger.Level = l
-	return nil
-}
-
 // setSyslogFormatter is nil if the target architecture does not support syslog.
-var setSyslogFormatter func(string, string) error
+var setSyslogFormatter func(logger, string, string) error
+
+// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).
+var setEventlogFormatter func(logger, string, bool) error
 
 func setJSONFormatter() {
 	origLogger.Formatter = &logrus.JSONFormatter{}
 }
 
-type logFormatFlag struct{ uri string }
-
-// String implements flag.Value.
-func (f logFormatFlag) String() string {
-	return f.uri
+type loggerSettings struct {
+	level  string
+	format string
 }
 
-// Set implements flag.Value.
-func (f logFormatFlag) Set(format string) error {
-	f.uri = format
-	u, err := url.Parse(format)
+func (s *loggerSettings) apply(ctx *kingpin.ParseContext) error {
+	err := baseLogger.SetLevel(s.level)
 	if err != nil {
 		return err
 	}
-	if u.Scheme != "logger" {
-		return fmt.Errorf("invalid scheme %s", u.Scheme)
-	}
-	jsonq := u.Query().Get("json")
-	if jsonq == "true" {
-		setJSONFormatter()
-	}
-
-	switch u.Opaque {
-	case "syslog":
-		if setSyslogFormatter == nil {
-			return fmt.Errorf("system does not support syslog")
-		}
-		appname := u.Query().Get("appname")
-		facility := u.Query().Get("local")
-		return setSyslogFormatter(appname, facility)
-	case "stdout":
-		origLogger.Out = os.Stdout
-	case "stderr":
-		origLogger.Out = os.Stderr
-
-	default:
-		return fmt.Errorf("unsupported logger %s", u.Opaque)
-	}
-	return nil
+	err = baseLogger.SetFormat(s.format)
+	return err
 }
 
-func init() {
-	AddFlags(flag.CommandLine)
-}
-
-// AddFlags adds the flags used by this package to the given FlagSet. That's
-// useful if working with a custom FlagSet. The init function of this package
-// adds the flags to flag.CommandLine anyway. Thus, it's usually enough to call
-// flag.Parse() to make the logging flags take effect.
-func AddFlags(fs *flag.FlagSet) {
-	fs.Var(levelFlag{}, "log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal].")
-	fs.Var(logFormatFlag{}, "log.format", "If set use a syslog logger or JSON logging. Example: logger:syslog?appname=bob&local=7 or logger:stdout?json=true. Defaults to stderr.")
+// AddFlags adds the flags used by this package to the Kingpin application.
+// To use the default Kingpin application, call AddFlags(kingpin.CommandLine)
+func AddFlags(a *kingpin.Application) {
+	s := loggerSettings{}
+	a.Flag("log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]").
+		Default(origLogger.Level.String()).
+		StringVar(&s.level)
+	defaultFormat := url.URL{Scheme: "logger", Opaque: "stderr"}
+	a.Flag("log.format", `Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`).
+		Default(defaultFormat.String()).
+		StringVar(&s.format)
+	a.Action(s.apply)
 }
 
+// Logger is the interface for loggers used in the Prometheus components.
 type Logger interface {
 	Debug(...interface{})
 	Debugln(...interface{})
@@ -125,6 +89,9 @@ type Logger interface {
 	Fatalf(string, ...interface{})
 
 	With(key string, value interface{}) Logger
+
+	SetFormat(string) error
+	SetLevel(string) error
 }
 
 type logger struct {
@@ -210,6 +177,58 @@ func (l logger) Fatalf(format string, args ...interface{}) {
 	l.sourced().Fatalf(format, args...)
 }
 
+func (l logger) SetLevel(level string) error {
+	lvl, err := logrus.ParseLevel(level)
+	if err != nil {
+		return err
+	}
+
+	l.entry.Logger.Level = lvl
+	return nil
+}
+
+func (l logger) SetFormat(format string) error {
+	u, err := url.Parse(format)
+	if err != nil {
+		return err
+	}
+	if u.Scheme != "logger" {
+		return fmt.Errorf("invalid scheme %s", u.Scheme)
+	}
+	jsonq := u.Query().Get("json")
+	if jsonq == "true" {
+		setJSONFormatter()
+	}
+
+	switch u.Opaque {
+	case "syslog":
+		if setSyslogFormatter == nil {
+			return fmt.Errorf("system does not support syslog")
+		}
+		appname := u.Query().Get("appname")
+		facility := u.Query().Get("local")
+		return setSyslogFormatter(l, appname, facility)
+	case "eventlog":
+		if setEventlogFormatter == nil {
+			return fmt.Errorf("system does not support eventlog")
+		}
+		name := u.Query().Get("name")
+		debugAsInfo := false
+		debugAsInfoRaw := u.Query().Get("debugAsInfo")
+		if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
+			debugAsInfo = parsedDebugAsInfo
+		}
+		return setEventlogFormatter(l, name, debugAsInfo)
+	case "stdout":
+		l.entry.Logger.Out = os.Stdout
+	case "stderr":
+		l.entry.Logger.Out = os.Stderr
+	default:
+		return fmt.Errorf("unsupported logger %q", u.Opaque)
+	}
+	return nil
+}
+
 // sourced adds a source field to the logger that contains
 // the file name and line where the logging happened.
 func (l logger) sourced() *logrus.Entry {
@@ -227,10 +246,26 @@ func (l logger) sourced() *logrus.Entry {
 var origLogger = logrus.New()
 var baseLogger = logger{entry: logrus.NewEntry(origLogger)}
 
+// Base returns the default Logger logging to standard error.
 func Base() Logger {
 	return baseLogger
 }
 
+// NewLogger returns a new Logger logging to out.
+func NewLogger(w io.Writer) Logger {
+	l := logrus.New()
+	l.Out = w
+	return logger{entry: logrus.NewEntry(l)}
+}
+
+// NewNopLogger returns a logger that discards all log messages.
+func NewNopLogger() Logger {
+	l := logrus.New()
+	l.Out = ioutil.Discard
+	return logger{entry: logrus.NewEntry(l)}
+}
+
+// With adds a field to the logger.
 func With(key string, value interface{}) Logger {
 	return baseLogger.With(key, value)
 }
@@ -240,7 +275,7 @@ func Debug(args ...interface{}) {
 	baseLogger.sourced().Debug(args...)
 }
 
-// Debug logs a message at level Debug on the standard logger.
+// Debugln logs a message at level Debug on the standard logger.
 func Debugln(args ...interface{}) {
 	baseLogger.sourced().Debugln(args...)
 }
@@ -255,7 +290,7 @@ func Info(args ...interface{}) {
 	baseLogger.sourced().Info(args...)
 }
 
-// Info logs a message at level Info on the standard logger.
+// Infoln logs a message at level Info on the standard logger.
 func Infoln(args ...interface{}) {
 	baseLogger.sourced().Infoln(args...)
 }
@@ -270,7 +305,7 @@ func Warn(args ...interface{}) {
 	baseLogger.sourced().Warn(args...)
 }
 
-// Warn logs a message at level Warn on the standard logger.
+// Warnln logs a message at level Warn on the standard logger.
 func Warnln(args ...interface{}) {
 	baseLogger.sourced().Warnln(args...)
 }
@@ -285,7 +320,7 @@ func Error(args ...interface{}) {
 	baseLogger.sourced().Error(args...)
 }
 
-// Error logs a message at level Error on the standard logger.
+// Errorln logs a message at level Error on the standard logger.
 func Errorln(args ...interface{}) {
 	baseLogger.sourced().Errorln(args...)
 }
@@ -300,7 +335,7 @@ func Fatal(args ...interface{}) {
 	baseLogger.sourced().Fatal(args...)
 }
 
-// Fatal logs a message at level Fatal on the standard logger.
+// Fatalln logs a message at level Fatal on the standard logger.
 func Fatalln(args ...interface{}) {
 	baseLogger.sourced().Fatalln(args...)
 }
@@ -310,6 +345,11 @@ func Fatalf(format string, args ...interface{}) {
 	baseLogger.sourced().Fatalf(format, args...)
 }
 
+// AddHook adds a hook to Prometheus' original logger.
+func AddHook(hook logrus.Hook) {
+	origLogger.Hooks.Add(hook)
+}
+
 type errorLogWriter struct{}
 
 func (errorLogWriter) Write(b []byte) (int, error) {
diff --git a/vendor/github.com/prometheus/common/log/log_test.go b/vendor/github.com/prometheus/common/log/log_test.go
index 2cd2b18e21371c97e9ae597e354ebe4b6c792333..f63b4417f2976daadebb838b0be33e97d647f4d0 100644
--- a/vendor/github.com/prometheus/common/log/log_test.go
+++ b/vendor/github.com/prometheus/common/log/log_test.go
@@ -32,7 +32,7 @@ func TestFileLineLogging(t *testing.T) {
 	Debug("This debug-level line should not show up in the output.")
 	Infof("This %s-level line should show up in the output.", "info")
 
-	re := `^time=".*" level=info msg="This info-level line should show up in the output." source="log_test.go:33" \n$`
+	re := `^time=".*" level=info msg="This info-level line should show up in the output." source="log_test.go:33"\n$`
 	if !regexp.MustCompile(re).Match(buf.Bytes()) {
 		t.Fatalf("%q did not match expected regex %q", buf.String(), re)
 	}
diff --git a/vendor/github.com/prometheus/common/log/syslog_formatter.go b/vendor/github.com/prometheus/common/log/syslog_formatter.go
index 8db715526651c21daf105ff2c1982d8f45bc29eb..f882f2f8485e929f21065a48a3958ab8adabbc74 100644
--- a/vendor/github.com/prometheus/common/log/syslog_formatter.go
+++ b/vendor/github.com/prometheus/common/log/syslog_formatter.go
@@ -23,8 +23,10 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
+var _ logrus.Formatter = (*syslogger)(nil)
+
 func init() {
-	setSyslogFormatter = func(appname, local string) error {
+	setSyslogFormatter = func(l logger, appname, local string) error {
 		if appname == "" {
 			return fmt.Errorf("missing appname parameter")
 		}
@@ -32,18 +34,18 @@ func init() {
 			return fmt.Errorf("missing local parameter")
 		}
 
-		fmter, err := newSyslogger(appname, local, origLogger.Formatter)
+		fmter, err := newSyslogger(appname, local, l.entry.Logger.Formatter)
 		if err != nil {
 			fmt.Fprintf(os.Stderr, "error creating syslog formatter: %v\n", err)
-			origLogger.Errorf("can't connect logger to syslog: %v", err)
+			l.entry.Errorf("can't connect logger to syslog: %v", err)
 			return err
 		}
-		origLogger.Formatter = fmter
+		l.entry.Logger.Formatter = fmter
 		return nil
 	}
 }
 
-var ceeTag = []byte("@cee:")
+var prefixTag []byte
 
 type syslogger struct {
 	wrap logrus.Formatter
@@ -56,6 +58,11 @@ func newSyslogger(appname string, facility string, fmter logrus.Formatter) (*sys
 		return nil, err
 	}
 	out, err := syslog.New(priority, appname)
+	_, isJSON := fmter.(*logrus.JSONFormatter)
+	if isJSON {
+		// add cee tag to json formatted syslogs
+		prefixTag = []byte("@cee:")
+	}
 	return &syslogger{
 		out:  out,
 		wrap: fmter,
@@ -92,7 +99,7 @@ func (s *syslogger) Format(e *logrus.Entry) ([]byte, error) {
 	}
 	// only append tag to data sent to syslog (line), not to what
 	// is returned
-	line := string(append(ceeTag, data...))
+	line := string(append(prefixTag, data...))
 
 	switch e.Level {
 	case logrus.PanicLevel:
diff --git a/vendor/github.com/prometheus/common/log/syslog_formatter_test.go b/vendor/github.com/prometheus/common/log/syslog_formatter_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b7e68848f1099d050ff7dd5f36740dbe47d9c64c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/syslog_formatter_test.go
@@ -0,0 +1,52 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows,!nacl,!plan9
+
+package log
+
+import (
+	"errors"
+	"log/syslog"
+	"testing"
+)
+
+func TestGetFacility(t *testing.T) {
+	testCases := []struct {
+		facility         string
+		expectedPriority syslog.Priority
+		expectedErr      error
+	}{
+		{"0", syslog.LOG_LOCAL0, nil},
+		{"1", syslog.LOG_LOCAL1, nil},
+		{"2", syslog.LOG_LOCAL2, nil},
+		{"3", syslog.LOG_LOCAL3, nil},
+		{"4", syslog.LOG_LOCAL4, nil},
+		{"5", syslog.LOG_LOCAL5, nil},
+		{"6", syslog.LOG_LOCAL6, nil},
+		{"7", syslog.LOG_LOCAL7, nil},
+		{"8", syslog.LOG_LOCAL0, errors.New("invalid local(8) for syslog")},
+	}
+	for _, tc := range testCases {
+		priority, err := getFacility(tc.facility)
+		if err != tc.expectedErr {
+			if err == nil || tc.expectedErr == nil || err.Error() != tc.expectedErr.Error() {
+				t.Errorf("want %v, got %v", tc.expectedErr, err)
+			}
+		}
+
+		if priority != tc.expectedPriority {
+			t.Errorf("want %q, got %q", tc.expectedPriority, priority)
+		}
+	}
+}
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index 3b72e7ff8f6cf751e84beec863e4f47dd86d9c91..41051a01a36d49c5b85be61dcdaadb6030f6bf98 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -80,14 +80,18 @@ const (
 	QuantileLabel = "quantile"
 )
 
-// LabelNameRE is a regular expression matching valid label names.
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
 var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
 
 // A LabelName is a key for a LabelSet or Metric.  It has a value associated
 // therewith.
 type LabelName string
 
-// IsValid is true iff the label name matches the pattern of LabelNameRE.
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
 func (ln LabelName) IsValid() bool {
 	if len(ln) == 0 {
 		return false
@@ -106,7 +110,7 @@ func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if err := unmarshal(&s); err != nil {
 		return err
 	}
-	if !LabelNameRE.MatchString(s) {
+	if !LabelName(s).IsValid() {
 		return fmt.Errorf("%q is not a valid label name", s)
 	}
 	*ln = LabelName(s)
@@ -119,7 +123,7 @@ func (ln *LabelName) UnmarshalJSON(b []byte) error {
 	if err := json.Unmarshal(b, &s); err != nil {
 		return err
 	}
-	if !LabelNameRE.MatchString(s) {
+	if !LabelName(s).IsValid() {
 		return fmt.Errorf("%q is not a valid label name", s)
 	}
 	*ln = LabelName(s)
diff --git a/vendor/github.com/prometheus/common/model/labels_test.go b/vendor/github.com/prometheus/common/model/labels_test.go
index 3e80144168494ab10b5d76f95b3a54e26f1da982..e8df28ffacd241bb1c36be2ce178e53b02732ebd 100644
--- a/vendor/github.com/prometheus/common/model/labels_test.go
+++ b/vendor/github.com/prometheus/common/model/labels_test.go
@@ -119,11 +119,22 @@ func TestLabelNameIsValid(t *testing.T) {
 			ln:    "a lid_23name",
 			valid: false,
 		},
+		{
+			ln:    ":leading_colon",
+			valid: false,
+		},
+		{
+			ln:    "colon:in:the:middle",
+			valid: false,
+		},
 	}
 
 	for _, s := range scenarios {
 		if s.ln.IsValid() != s.valid {
-			t.Errorf("Expected %v for %q", s.valid, s.ln)
+			t.Errorf("Expected %v for %q using IsValid method", s.valid, s.ln)
+		}
+		if LabelNameRE.MatchString(string(s.ln)) != s.valid {
+			t.Errorf("Expected %v for %q using regexp match", s.valid, s.ln)
 		}
 	}
 }
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
index 5f931cdb9b3ab63415a7b7ed60bb7765f48d46ac..6eda08a7395a1b96c9d762f816343ee069feae56 100644
--- a/vendor/github.com/prometheus/common/model/labelset.go
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -160,7 +160,7 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error {
 	// LabelName as a string and does not call its UnmarshalJSON method.
 	// Thus, we have to replicate the behavior here.
 	for ln := range m {
-		if !LabelNameRE.MatchString(string(ln)) {
+		if !ln.IsValid() {
 			return fmt.Errorf("%q is not a valid label name", ln)
 		}
 	}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index a5da59a5055e519d3c588edebe4e3b502af5aab2..f7250909b9fd3e2dd78740ad9180a0c4980ab6a1 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -21,8 +21,11 @@ import (
 )
 
 var (
-	separator    = []byte{0}
-	MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+	separator = []byte{0}
+	// MetricNameRE is a regular expression matching valid metric
+	// names. Note that the IsValidMetricName function performs the same
+	// check but faster than a match with this regular expression.
+	MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
 )
 
 // A Metric is similar to a LabelSet, but the key difference is that a Metric is
@@ -41,7 +44,7 @@ func (m Metric) Before(o Metric) bool {
 
 // Clone returns a copy of the Metric.
 func (m Metric) Clone() Metric {
-	clone := Metric{}
+	clone := make(Metric, len(m))
 	for k, v := range m {
 		clone[k] = v
 	}
@@ -85,6 +88,8 @@ func (m Metric) FastFingerprint() Fingerprint {
 }
 
 // IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
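+//
+// For illustration (hypothetical calls), metric names, unlike label names,
+// may contain colons:
+//
+//  IsValidMetricName("node:cpu:rate5m") // true
+//  IsValidMetricName("0_bad_name")      // false: must not start with a digit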
 func IsValidMetricName(n LabelValue) bool {
 	if len(n) == 0 {
 		return false
diff --git a/vendor/github.com/prometheus/common/model/metric_test.go b/vendor/github.com/prometheus/common/model/metric_test.go
index e26d54fe58df656dccd3ad636bddc041f6a7ffda..06f9de525a99690dd54dc159332fb0c20358888c 100644
--- a/vendor/github.com/prometheus/common/model/metric_test.go
+++ b/vendor/github.com/prometheus/common/model/metric_test.go
@@ -111,11 +111,22 @@ func TestMetricNameIsValid(t *testing.T) {
 			mn:    "a lid_23name",
 			valid: false,
 		},
+		{
+			mn:    ":leading_colon",
+			valid: true,
+		},
+		{
+			mn:    "colon:in:the:middle",
+			valid: true,
+		},
 	}
 
 	for _, s := range scenarios {
 		if IsValidMetricName(s.mn) != s.valid {
-			t.Errorf("Expected %v for %q", s.valid, s.mn)
+			t.Errorf("Expected %v for %q using IsValidMetricName function", s.valid, s.mn)
+		}
+		if MetricNameRE.MatchString(string(s.mn)) != s.valid {
+			t.Errorf("Expected %v for %q using regexp matching", s.valid, s.mn)
 		}
 	}
 }
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
index 548968aebe6d48ad1a523da64ce776399121d829..74ed5a9f7e9cd466d54eba8f3ebce870a7b30d4d 100644
--- a/vendor/github.com/prometheus/common/model/time.go
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -163,9 +163,21 @@ func (t *Time) UnmarshalJSON(b []byte) error {
 // This type should not propagate beyond the scope of input/output processing.
 type Duration time.Duration
 
+// Set implements pflag/flag.Value
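+//
+// A minimal sketch of registering a Duration flag (fs is a hypothetical
+// *pflag.FlagSet; any flag.Value is accepted):
+//
+//  var d Duration
+//  fs.Var(&d, "timeout", "request timeout, e.g. 30s or 2h")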
+func (d *Duration) Set(s string) error {
+	var err error
+	*d, err = ParseDuration(s)
+	return err
+}
+
+// Type implements pflag.Value
+func (d *Duration) Type() string {
+	return "duration"
+}
+
 var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
 
-// StringToDuration parses a string into a time.Duration, assuming that a year
+// ParseDuration parses a string into a time.Duration, assuming that a year
 // always has 365d, a week always has 7d, and a day always has 24h.
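+//
+// For example (values follow from the fixed factors above):
+//
+//  d, _ := ParseDuration("2d") // 48h, since a day is always 24h
+//  d, _ = ParseDuration("1y")  // 8760h, since a year is always 365d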
 func ParseDuration(durationStr string) (Duration, error) {
 	matches := durationRE.FindStringSubmatch(durationStr)
@@ -202,6 +214,9 @@ func (d Duration) String() string {
 		ms   = int64(time.Duration(d) / time.Millisecond)
 		unit = "ms"
 	)
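+	// Special-case zero up front: it has no largest factor below, so it is
+	// rendered as "0s" by convention.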
+	if ms == 0 {
+		return "0s"
+	}
 	factors := map[string]int64{
 		"y":  1000 * 60 * 60 * 24 * 365,
 		"w":  1000 * 60 * 60 * 24 * 7,
diff --git a/vendor/github.com/prometheus/common/model/time_test.go b/vendor/github.com/prometheus/common/model/time_test.go
index 45ffd872d3afe9c8f6a195d6e9769e46fb4b9a5b..3efdd65ff30cc7ab4695ee19da774f635129c666 100644
--- a/vendor/github.com/prometheus/common/model/time_test.go
+++ b/vendor/github.com/prometheus/common/model/time_test.go
@@ -91,6 +91,9 @@ func TestParseDuration(t *testing.T) {
 		out time.Duration
 	}{
 		{
+			in:  "0s",
+			out: 0,
+		}, {
 			in:  "324ms",
 			out: 324 * time.Millisecond,
 		}, {
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
index dbf5d10e4317dc7ad073e26070d9ca6d8bf8bfe1..c9ed3ffd82aeafb5a37fc6f3321278dc789fc858 100644
--- a/vendor/github.com/prometheus/common/model/value.go
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -22,6 +22,22 @@ import (
 	"strings"
 )
 
+var (
+	// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+	// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+	// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+	// of 0, which is possible to appear in a real SamplePair and thus not
+	// suitable to signal a non-existing SamplePair.
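+	//
+	// A sketch of the intended check (the lookup call is hypothetical):
+	//
+	//  if sp := lookup(fp, ts); sp == ZeroSamplePair { /* no sample found */ }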
+	ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+	// ZeroSample is the pseudo zero-value of Sample used to signal a
+	// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+	// and metric nil. Note that the natural zero value of Sample has a timestamp
+	// of 0, which is possible to appear in a real Sample and thus not suitable
+	// to signal a non-existing Sample.
+	ZeroSample = Sample{Timestamp: Earliest}
+)
+
 // A SampleValue is a representation of a value for a given sample at a given
 // time.
 type SampleValue float64
@@ -113,11 +129,8 @@ func (s *Sample) Equal(o *Sample) bool {
 	if !s.Timestamp.Equal(o.Timestamp) {
 		return false
 	}
-	if s.Value.Equal(o.Value) {
-		return false
-	}
 
-	return true
+	return s.Value.Equal(o.Value)
 }
 
 func (s Sample) String() string {
diff --git a/vendor/github.com/prometheus/common/model/value_test.go b/vendor/github.com/prometheus/common/model/value_test.go
index 8d2b69ea14ec1015521d1b997538e5f5f1ff1899..b97dcf84cf108c3066f62d6af3252b3a1ff3b05f 100644
--- a/vendor/github.com/prometheus/common/model/value_test.go
+++ b/vendor/github.com/prometheus/common/model/value_test.go
@@ -21,7 +21,7 @@ import (
 	"testing"
 )
 
-func TestEqual(t *testing.T) {
+func TestEqualValues(t *testing.T) {
 	tests := map[string]struct {
 		in1, in2 SampleValue
 		want     bool
@@ -76,6 +76,57 @@ func TestEqual(t *testing.T) {
 	}
 }
 
+func TestEqualSamples(t *testing.T) {
+	testSample := &Sample{}
+
+	tests := map[string]struct {
+		in1, in2 *Sample
+		want     bool
+	}{
+		"equal pointers": {
+			in1:  testSample,
+			in2:  testSample,
+			want: true,
+		},
+		"different metrics": {
+			in1:  &Sample{Metric: Metric{"foo": "bar"}},
+			in2:  &Sample{Metric: Metric{"foo": "biz"}},
+			want: false,
+		},
+		"different timestamp": {
+			in1:  &Sample{Timestamp: 0},
+			in2:  &Sample{Timestamp: 1},
+			want: false,
+		},
+		"different value": {
+			in1:  &Sample{Value: 0},
+			in2:  &Sample{Value: 1},
+			want: false,
+		},
+		"equal samples": {
+			in1: &Sample{
+				Metric:    Metric{"foo": "bar"},
+				Timestamp: 0,
+				Value:     1,
+			},
+			in2: &Sample{
+				Metric:    Metric{"foo": "bar"},
+				Timestamp: 0,
+				Value:     1,
+			},
+			want: true,
+		},
+	}
+
+	for name, test := range tests {
+		got := test.in1.Equal(test.in2)
+		if got != test.want {
+			t.Errorf("Comparing %s, %v and %v: got %t, want %t", name, test.in1, test.in2, got, test.want)
+		}
+}
+}
+
 func TestSamplePairJSON(t *testing.T) {
 	input := []struct {
 		plain string
diff --git a/vendor/github.com/prometheus/common/promlog/flag/flag.go b/vendor/github.com/prometheus/common/promlog/flag/flag.go
new file mode 100644
index 0000000000000000000000000000000000000000..b9d361e43c118b25f10b93cd322465af6a9d441f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/promlog/flag/flag.go
@@ -0,0 +1,33 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flag
+
+import (
+	"github.com/prometheus/common/promlog"
+	kingpin "gopkg.in/alecthomas/kingpin.v2"
+)
+
+// LevelFlagName is the canonical flag name to configure the allowed log level
+// within Prometheus projects.
+const LevelFlagName = "log.level"
+
+// LevelFlagHelp is the help description for the log.level flag.
+const LevelFlagHelp = "Only log messages with the given severity or above. One of: [debug, info, warn, error]"
+
+// AddFlags adds the flags used by this package to the Kingpin application.
+// To use the default Kingpin application, call AddFlags(kingpin.CommandLine)
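+//
+// A minimal sketch of wiring this up in a main package (names assumed):
+//
+//  var logLevel promlog.AllowedLevel
+//  flag.AddFlags(kingpin.CommandLine, &logLevel)
+//  kingpin.Parse()
+//  logger := promlog.New(logLevel)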
+func AddFlags(a *kingpin.Application, logLevel *promlog.AllowedLevel) {
+	a.Flag(LevelFlagName, LevelFlagHelp).
+		Default("info").SetValue(logLevel)
+}
diff --git a/vendor/github.com/prometheus/common/promlog/log.go b/vendor/github.com/prometheus/common/promlog/log.go
new file mode 100644
index 0000000000000000000000000000000000000000..cf8307ad285cac4cf172287ee7f441e236136e41
--- /dev/null
+++ b/vendor/github.com/prometheus/common/promlog/log.go
@@ -0,0 +1,63 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package promlog defines standardised ways to initialize Go kit loggers
+// across Prometheus components.
+// It should typically only ever be imported by main packages.
+package promlog
+
+import (
+	"os"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/errors"
+)
+
+// AllowedLevel is a settable identifier for the minimum level a log entry
+// must have.
+type AllowedLevel struct {
+	s string
+	o level.Option
+}
+
+func (l *AllowedLevel) String() string {
+	return l.s
+}
+
+// Set updates the value of the allowed level.
+func (l *AllowedLevel) Set(s string) error {
+	switch s {
+	case "debug":
+		l.o = level.AllowDebug()
+	case "info":
+		l.o = level.AllowInfo()
+	case "warn":
+		l.o = level.AllowWarn()
+	case "error":
+		l.o = level.AllowError()
+	default:
+		return errors.Errorf("unrecognized log level %q", s)
+	}
+	l.s = s
+	return nil
+}
+
+// New returns a new leveled go-kit logger in the logfmt format. Each logged line will be annotated
+// with a timestamp. The output always goes to stderr.
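+//
+// A typical resulting line looks roughly like this (timestamp and caller
+// values are assumed):
+//
+//  ts=2017-11-01T12:00:00.000Z caller=main.go:42 level=info msg="server started"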
+func New(al AllowedLevel) log.Logger {
+	l := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
+	l = level.NewFilter(l, al.o)
+	l = log.With(l, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+	return l
+}
diff --git a/vendor/github.com/prometheus/common/route/route.go b/vendor/github.com/prometheus/common/route/route.go
index fb337077a51021ecf972f24624e0cc53e3e705d0..bb468817363929425c0ac17787eb94f3aec89ccf 100644
--- a/vendor/github.com/prometheus/common/route/route.go
+++ b/vendor/github.com/prometheus/common/route/route.go
@@ -2,24 +2,11 @@ package route
 
 import (
 	"net/http"
-	"sync"
 
 	"github.com/julienschmidt/httprouter"
 	"golang.org/x/net/context"
 )
 
-var (
-	mtx   = sync.RWMutex{}
-	ctxts = map[*http.Request]context.Context{}
-)
-
-// Context returns the context for the request.
-func Context(r *http.Request) context.Context {
-	mtx.RLock()
-	defer mtx.RUnlock()
-	return ctxts[r]
-}
-
 type param string
 
 // Param returns param p for the context.
@@ -32,29 +19,8 @@ func WithParam(ctx context.Context, p, v string) context.Context {
 	return context.WithValue(ctx, param(p), v)
 }
 
-// handle turns a Handle into httprouter.Handle
-func handle(h http.HandlerFunc) httprouter.Handle {
-	return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
-		ctx, cancel := context.WithCancel(context.Background())
-		defer cancel()
-
-		for _, p := range params {
-			ctx = context.WithValue(ctx, param(p.Key), p.Value)
-		}
-
-		mtx.Lock()
-		ctxts[r] = ctx
-		mtx.Unlock()
-
-		h(w, r)
-
-		mtx.Lock()
-		delete(ctxts, r)
-		mtx.Unlock()
-	}
-}
-
-// Router wraps httprouter.Router and adds support for prefixed sub-routers.
+// Router wraps httprouter.Router and adds support for prefixed sub-routers
+// and per-request context injections.
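+//
+// A sketch of typical use (path and handler are illustrative):
+//
+//  r := New().WithPrefix("/api")
+//  r.Get("/items/:id", func(w http.ResponseWriter, req *http.Request) {
+//  	id := Param(req.Context(), "id")
+//  	// ...
+//  })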
 type Router struct {
 	rtr    *httprouter.Router
 	prefix string
@@ -62,7 +28,9 @@ type Router struct {
 
 // New returns a new Router.
 func New() *Router {
-	return &Router{rtr: httprouter.New()}
+	return &Router{
+		rtr: httprouter.New(),
+	}
 }
 
 // WithPrefix returns a router that prefixes all registered routes with prefix.
@@ -70,29 +38,42 @@ func (r *Router) WithPrefix(prefix string) *Router {
 	return &Router{rtr: r.rtr, prefix: r.prefix + prefix}
 }
 
+// handle turns a HandlerFunc into an httprouter.Handle.
+func (r *Router) handle(h http.HandlerFunc) httprouter.Handle {
+	return func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+
+		for _, p := range params {
+			ctx = context.WithValue(ctx, param(p.Key), p.Value)
+		}
+		h(w, req.WithContext(ctx))
+	}
+}
+
 // Get registers a new GET route.
 func (r *Router) Get(path string, h http.HandlerFunc) {
-	r.rtr.GET(r.prefix+path, handle(h))
+	r.rtr.GET(r.prefix+path, r.handle(h))
 }
 
 // Options registers a new OPTIONS route.
 func (r *Router) Options(path string, h http.HandlerFunc) {
-	r.rtr.OPTIONS(r.prefix+path, handle(h))
+	r.rtr.OPTIONS(r.prefix+path, r.handle(h))
 }
 
 // Del registers a new DELETE route.
 func (r *Router) Del(path string, h http.HandlerFunc) {
-	r.rtr.DELETE(r.prefix+path, handle(h))
+	r.rtr.DELETE(r.prefix+path, r.handle(h))
 }
 
 // Put registers a new PUT route.
 func (r *Router) Put(path string, h http.HandlerFunc) {
-	r.rtr.PUT(r.prefix+path, handle(h))
+	r.rtr.PUT(r.prefix+path, r.handle(h))
 }
 
 // Post registers a new POST route.
 func (r *Router) Post(path string, h http.HandlerFunc) {
-	r.rtr.POST(r.prefix+path, handle(h))
+	r.rtr.POST(r.prefix+path, r.handle(h))
 }
 
 // Redirect takes an absolute path and sends an internal HTTP redirect for it,
@@ -113,7 +94,7 @@ func FileServe(dir string) http.HandlerFunc {
 	fs := http.FileServer(http.Dir(dir))
 
 	return func(w http.ResponseWriter, r *http.Request) {
-		r.URL.Path = Param(Context(r), "filepath")
+		r.URL.Path = Param(r.Context(), "filepath")
 		fs.ServeHTTP(w, r)
 	}
 }
diff --git a/vendor/github.com/prometheus/common/route/route_test.go b/vendor/github.com/prometheus/common/route/route_test.go
index 734682e98e5a988e6326adc11c610f0ec561d950..a9bb209964e117929481f082b5c8ff60637d8cab 100644
--- a/vendor/github.com/prometheus/common/route/route_test.go
+++ b/vendor/github.com/prometheus/common/route/route_test.go
@@ -25,3 +25,20 @@ func TestRedirect(t *testing.T) {
 		t.Fatalf("Unexpected redirect location: got %s, want %s", got, want)
 	}
 }
+
+func TestContext(t *testing.T) {
+	router := New()
+	router.Get("/test/:foo/", func(w http.ResponseWriter, r *http.Request) {
+		want := "bar"
+		got := Param(r.Context(), "foo")
+		if want != got {
+			t.Fatalf("Unexpected context value: want %q, got %q", want, got)
+		}
+	})
+
+	r, err := http.NewRequest("GET", "http://localhost:9090/test/bar/", nil)
+	if err != nil {
+		t.Fatalf("Error building test request: %s", err)
+	}
+	router.ServeHTTP(nil, r)
+}
diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f090cb42f370bda9e7f4f58d9b8b8ee2750c115f
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000000000000000000000000000000000000..8af90637a99e61a5f55df746e1f916b4b0ffab06
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://github.com/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
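+// runHandler invokes a single exit handler, recovering from any panic it
+// raises so that the remaining handlers still run.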
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. Example use cases include
+// closing database connections or sending an alert that the application is
+// closing.
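+//
+// A sketch (db is a hypothetical *sql.DB):
+//
+//  logrus.RegisterExitHandler(func() { db.Close() })
+//  // ...
+//  logrus.Fatal("cannot continue") // runs the handler, then os.Exit(1)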
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit_test.go b/vendor/github.com/sirupsen/logrus/alt_exit_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..a08b1a898f665878347a19952ab3037afc66ea62
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/alt_exit_test.go
@@ -0,0 +1,83 @@
+package logrus
+
+import (
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"testing"
+	"time"
+)
+
+func TestRegister(t *testing.T) {
+	current := len(handlers)
+	RegisterExitHandler(func() {})
+	if len(handlers) != current+1 {
+		t.Fatalf("expected %d handlers, got %d", current+1, len(handlers))
+	}
+}
+
+func TestHandler(t *testing.T) {
+	tempDir, err := ioutil.TempDir("", "test_handler")
+	if err != nil {
+		log.Fatalf("can't create temp dir. %q", err)
+	}
+	defer os.RemoveAll(tempDir)
+
+	gofile := filepath.Join(tempDir, "gofile.go")
+	if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil {
+		t.Fatalf("can't create go file. %q", err)
+	}
+
+	outfile := filepath.Join(tempDir, "outfile.out")
+	arg := time.Now().UTC().String()
+	err = exec.Command("go", "run", gofile, outfile, arg).Run()
+	if err == nil {
+		t.Fatalf("completed normally, should have failed")
+	}
+
+	data, err := ioutil.ReadFile(outfile)
+	if err != nil {
+		t.Fatalf("can't read output file %s. %q", outfile, err)
+	}
+
+	if string(data) != arg {
+		t.Fatalf("bad data. Expected %q, got %q", data, arg)
+	}
+}
+
+var testprog = []byte(`
+// Test program for atexit, gets output file and data as arguments and writes
+// data to output file in atexit handler.
+package main
+
+import (
+	"github.com/sirupsen/logrus"
+	"flag"
+	"fmt"
+	"io/ioutil"
+)
+
+var outfile = ""
+var data = ""
+
+func handler() {
+	ioutil.WriteFile(outfile, []byte(data), 0666)
+}
+
+func badHandler() {
+	n := 0
+	fmt.Println(1/n)
+}
+
+func main() {
+	flag.Parse()
+	outfile = flag.Arg(0)
+	data = flag.Arg(1)
+
+	logrus.RegisterExitHandler(handler)
+	logrus.RegisterExitHandler(badHandler)
+	logrus.Fatal("Bye bye")
+}
+`)
diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..da67aba06debf6721a2d97604145aa09ac257e19
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+  package main
+
+  import (
+    log "github.com/sirupsen/logrus"
+  )
+
+  func main() {
+    log.WithFields(log.Fields{
+      "animal": "walrus",
+      "number": 1,
+      "size":   10,
+    }).Info("A walrus appears")
+  }
+
+Output:
+  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
new file mode 100644
index 0000000000000000000000000000000000000000..778f4c9f0d34776736d555fe6ebabb627e333b96
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -0,0 +1,288 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"sync"
+	"time"
+)
+
+var bufferPool *sync.Pool
+
+func init() {
+	bufferPool = &sync.Pool{
+		New: func() interface{} {
+			return new(bytes.Buffer)
+		},
+	}
+}
+
+// ErrorKey defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An Entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
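+//
+// A sketch of such reuse (field names are illustrative):
+//
+//  requestLogger := logger.WithFields(Fields{"request_id": id, "user_ip": ip})
+//  requestLogger.Info("request started")
+//  requestLogger.Warn("request is slow")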
+type Entry struct {
+	Logger *Logger
+
+	// Contains all the fields set by the user.
+	Data Fields
+
+	// Time at which the log entry was created
+	Time time.Time
+
+	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+	// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
+	Level Level
+
+	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
+	Message string
+
+	// When the formatter is called in entry.log(), a Buffer may be set on the entry.
+	Buffer *bytes.Buffer
+}
+
+func NewEntry(logger *Logger) *Entry {
+	return &Entry{
+		Logger: logger,
+		// Default is three fields, give a little extra room
+		Data: make(Fields, 5),
+	}
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		return "", err
+	}
+	str := string(serialized)
+	return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+	return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+	return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+	data := make(Fields, len(entry.Data)+len(fields))
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	for k, v := range fields {
+		data[k] = v
+	}
+	return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+	var buffer *bytes.Buffer
+	entry.Time = time.Now()
+	entry.Level = level
+	entry.Message = msg
+
+	entry.fireHooks()
+
+	buffer = bufferPool.Get().(*bytes.Buffer)
+	buffer.Reset()
+	defer bufferPool.Put(buffer)
+	entry.Buffer = buffer
+
+	entry.write()
+
+	entry.Buffer = nil
+
+	// To avoid Entry#log() returning a value that only would make sense for
+	// panic() to use in Entry#Panic(), we avoid the allocation by checking
+	// directly here.
+	if level <= PanicLevel {
+		panic(&entry)
+	}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) fireHooks() {
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
+	err := entry.Logger.Hooks.Fire(entry.Level, &entry)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+	}
+}
+
+func (entry *Entry) write() {
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+	} else {
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+	}
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+	if entry.Logger.level() >= DebugLevel {
+		entry.log(DebugLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+	entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+	if entry.Logger.level() >= InfoLevel {
+		entry.log(InfoLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+	if entry.Logger.level() >= WarnLevel {
+		entry.log(WarnLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+	entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+	if entry.Logger.level() >= ErrorLevel {
+		entry.log(ErrorLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+	if entry.Logger.level() >= FatalLevel {
+		entry.log(FatalLevel, fmt.Sprint(args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+	if entry.Logger.level() >= PanicLevel {
+		entry.log(PanicLevel, fmt.Sprint(args...))
+	}
+	panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+	if entry.Logger.level() >= DebugLevel {
+		entry.Debug(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+	if entry.Logger.level() >= InfoLevel {
+		entry.Info(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+	entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+	if entry.Logger.level() >= WarnLevel {
+		entry.Warn(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+	entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+	if entry.Logger.level() >= ErrorLevel {
+		entry.Error(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+	if entry.Logger.level() >= FatalLevel {
+		entry.Fatal(fmt.Sprintf(format, args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+	if entry.Logger.level() >= PanicLevel {
+		entry.Panic(fmt.Sprintf(format, args...))
+	}
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+	if entry.Logger.level() >= DebugLevel {
+		entry.Debug(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+	if entry.Logger.level() >= InfoLevel {
+		entry.Info(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+	entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+	if entry.Logger.level() >= WarnLevel {
+		entry.Warn(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+	entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+	if entry.Logger.level() >= ErrorLevel {
+		entry.Error(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+	if entry.Logger.level() >= FatalLevel {
+		entry.Fatal(entry.sprintlnn(args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+	if entry.Logger.level() >= PanicLevel {
+		entry.Panic(entry.sprintlnn(args...))
+	}
+}
+
+// sprintlnn => Sprint with no trailing newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+	msg := fmt.Sprintln(args...)
+	return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/sirupsen/logrus/entry_test.go b/vendor/github.com/sirupsen/logrus/entry_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..a81e2b3834cfe89b77bd99f5d2bfba8b14d6e4ac
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/entry_test.go
@@ -0,0 +1,115 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestEntryWithError(t *testing.T) {
+
+	assert := assert.New(t)
+
+	defer func() {
+		ErrorKey = "error"
+	}()
+
+	err := fmt.Errorf("kaboom at layer %d", 4711)
+
+	assert.Equal(err, WithError(err).Data["error"])
+
+	logger := New()
+	logger.Out = &bytes.Buffer{}
+	entry := NewEntry(logger)
+
+	assert.Equal(err, entry.WithError(err).Data["error"])
+
+	ErrorKey = "err"
+
+	assert.Equal(err, entry.WithError(err).Data["err"])
+
+}
+
+func TestEntryPanicln(t *testing.T) {
+	errBoom := fmt.Errorf("boom time")
+
+	defer func() {
+		p := recover()
+		assert.NotNil(t, p)
+
+		switch pVal := p.(type) {
+		case *Entry:
+			assert.Equal(t, "kaboom", pVal.Message)
+			assert.Equal(t, errBoom, pVal.Data["err"])
+		default:
+			t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
+		}
+	}()
+
+	logger := New()
+	logger.Out = &bytes.Buffer{}
+	entry := NewEntry(logger)
+	entry.WithField("err", errBoom).Panicln("kaboom")
+}
+
+func TestEntryPanicf(t *testing.T) {
+	errBoom := fmt.Errorf("boom again")
+
+	defer func() {
+		p := recover()
+		assert.NotNil(t, p)
+
+		switch pVal := p.(type) {
+		case *Entry:
+			assert.Equal(t, "kaboom true", pVal.Message)
+			assert.Equal(t, errBoom, pVal.Data["err"])
+		default:
+			t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
+		}
+	}()
+
+	logger := New()
+	logger.Out = &bytes.Buffer{}
+	entry := NewEntry(logger)
+	entry.WithField("err", errBoom).Panicf("kaboom %v", true)
+}
+
+const (
+	badMessage   = "this is going to panic"
+	panicMessage = "this is broken"
+)
+
+type panickyHook struct{}
+
+func (p *panickyHook) Levels() []Level {
+	return []Level{InfoLevel}
+}
+
+func (p *panickyHook) Fire(entry *Entry) error {
+	if entry.Message == badMessage {
+		panic(panicMessage)
+	}
+
+	return nil
+}
+
+func TestEntryHooksPanic(t *testing.T) {
+	logger := New()
+	logger.Out = &bytes.Buffer{}
+	logger.Level = InfoLevel
+	logger.Hooks.Add(&panickyHook{})
+
+	defer func() {
+		p := recover()
+		assert.NotNil(t, p)
+		assert.Equal(t, panicMessage, p)
+
+		entry := NewEntry(logger)
+		entry.Info("another message")
+	}()
+
+	entry := NewEntry(logger)
+	entry.Info(badMessage)
+}
diff --git a/vendor/github.com/sirupsen/logrus/example_basic_test.go b/vendor/github.com/sirupsen/logrus/example_basic_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..a2acf550c95dfecb11132ec8578d2b6ef8d4c560
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/example_basic_test.go
@@ -0,0 +1,69 @@
+package logrus_test
+
+import (
+	"github.com/sirupsen/logrus"
+	"os"
+)
+
+func Example_basic() {
+	var log = logrus.New()
+	log.Formatter = new(logrus.JSONFormatter)
+	log.Formatter = new(logrus.TextFormatter)                     //default
+	log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output
+	log.Level = logrus.DebugLevel
+	log.Out = os.Stdout
+
+	// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
+	// if err == nil {
+	// 	log.Out = file
+	// } else {
+	// 	log.Info("Failed to log to file, using default stderr")
+	// }
+
+	defer func() {
+		err := recover()
+		if err != nil {
+			entry := err.(*logrus.Entry)
+			log.WithFields(logrus.Fields{
+				"omg":         true,
+				"err_animal":  entry.Data["animal"],
+				"err_size":    entry.Data["size"],
+				"err_level":   entry.Level,
+				"err_message": entry.Message,
+				"number":      100,
+			}).Error("The ice breaks!") // or use Fatal() to force the process to exit with a nonzero code
+		}
+	}()
+
+	log.WithFields(logrus.Fields{
+		"animal": "walrus",
+		"number": 8,
+	}).Debug("Started observing beach")
+
+	log.WithFields(logrus.Fields{
+		"animal": "walrus",
+		"size":   10,
+	}).Info("A group of walrus emerges from the ocean")
+
+	log.WithFields(logrus.Fields{
+		"omg":    true,
+		"number": 122,
+	}).Warn("The group's number increased tremendously!")
+
+	log.WithFields(logrus.Fields{
+		"temperature": -4,
+	}).Debug("Temperature changes")
+
+	log.WithFields(logrus.Fields{
+		"animal": "orca",
+		"size":   9009,
+	}).Panic("It's over 9000!")
+
+	// Output:
+	// level=debug msg="Started observing beach" animal=walrus number=8
+	// level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+	// level=warning msg="The group's number increased tremendously!" number=122 omg=true
+	// level=debug msg="Temperature changes" temperature=-4
+	// level=panic msg="It's over 9000!" animal=orca size=9009
+	// level=error msg="The ice breaks!" err_animal=orca err_level=panic err_message="It's over 9000!" err_size=9009 number=100 omg=true
+}
diff --git a/vendor/github.com/sirupsen/logrus/example_hook_test.go b/vendor/github.com/sirupsen/logrus/example_hook_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d4ddffca37ffe59a15a670d50a9c285a108c2e32
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/example_hook_test.go
@@ -0,0 +1,35 @@
+package logrus_test
+
+import (
+	"github.com/sirupsen/logrus"
+	"gopkg.in/gemnasium/logrus-airbrake-hook.v2"
+	"os"
+)
+
+func Example_hook() {
+	var log = logrus.New()
+	log.Formatter = new(logrus.TextFormatter)                     // default
+	log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output
+	log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
+	log.Out = os.Stdout
+
+	log.WithFields(logrus.Fields{
+		"animal": "walrus",
+		"size":   10,
+	}).Info("A group of walrus emerges from the ocean")
+
+	log.WithFields(logrus.Fields{
+		"omg":    true,
+		"number": 122,
+	}).Warn("The group's number increased tremendously!")
+
+	log.WithFields(logrus.Fields{
+		"omg":    true,
+		"number": 100,
+	}).Error("The ice breaks!")
+
+	// Output:
+	// level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+	// level=warning msg="The group's number increased tremendously!" number=122 omg=true
+	// level=error msg="The ice breaks!" number=100 omg=true
+}
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
new file mode 100644
index 0000000000000000000000000000000000000000..013183edabff82d9b5050371f5cac21ee2bc6c7c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+	"io"
+)
+
+var (
+	// std is the name of the standard logger in stdlib `log`
+	std = New()
+)
+
+func StandardLogger() *Logger {
+	return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.SetLevel(level)
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	return std.level()
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+	return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+	return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+	return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+	std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+	std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+	std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+	std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+	std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+	std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+	std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+	std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+	std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+	std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+	std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+	std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+	std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+	std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+	std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+	std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+	std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+	std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+	std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+	std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+	std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+	std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+	std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+	std.Fatalln(args...)
+}
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
new file mode 100644
index 0000000000000000000000000000000000000000..b183ff5b1dbecf7ce6d477ad2552aed455e89a47
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -0,0 +1,45 @@
+package logrus
+
+import "time"
+
+const defaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
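+//
+// A minimal custom Formatter might look like this (sketch only):
+//
+//  type PlainFormatter struct{}
+//
+//  func (f *PlainFormatter) Format(entry *Entry) ([]byte, error) {
+//  	return []byte(entry.Message + "\n"), nil
+//  }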
+type Formatter interface {
+	Format(*Entry) ([]byte, error)
+}
+
+// This is here to not silently overwrite the `time`, `msg` and `level` fields
+// when dumping an entry. If this code weren't there, then
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code,
+// it'll be logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+	if t, ok := data["time"]; ok {
+		data["fields.time"] = t
+	}
+
+	if m, ok := data["msg"]; ok {
+		data["fields.msg"] = m
+	}
+
+	if l, ok := data["level"]; ok {
+		data["fields.level"] = l
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/sirupsen/logrus/formatter_bench_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d9481589f5f0e3bce680d81a4766a6f47fae16ed
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/formatter_bench_test.go
@@ -0,0 +1,101 @@
+package logrus
+
+import (
+	"fmt"
+	"testing"
+	"time"
+)
+
+// smallFields is a small size data set for benchmarking
+var smallFields = Fields{
+	"foo":   "bar",
+	"baz":   "qux",
+	"one":   "two",
+	"three": "four",
+}
+
+// largeFields is a large size data set for benchmarking
+var largeFields = Fields{
+	"foo":       "bar",
+	"baz":       "qux",
+	"one":       "two",
+	"three":     "four",
+	"five":      "six",
+	"seven":     "eight",
+	"nine":      "ten",
+	"eleven":    "twelve",
+	"thirteen":  "fourteen",
+	"fifteen":   "sixteen",
+	"seventeen": "eighteen",
+	"nineteen":  "twenty",
+	"a":         "b",
+	"c":         "d",
+	"e":         "f",
+	"g":         "h",
+	"i":         "j",
+	"k":         "l",
+	"m":         "n",
+	"o":         "p",
+	"q":         "r",
+	"s":         "t",
+	"u":         "v",
+	"w":         "x",
+	"y":         "z",
+	"this":      "will",
+	"make":      "thirty",
+	"entries":   "yeah",
+}
+
+var errorFields = Fields{
+	"foo": fmt.Errorf("bar"),
+	"baz": fmt.Errorf("qux"),
+}
+
+func BenchmarkErrorTextFormatter(b *testing.B) {
+	doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
+}
+
+func BenchmarkSmallTextFormatter(b *testing.B) {
+	doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func BenchmarkLargeTextFormatter(b *testing.B) {
+	doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
+}
+
+func BenchmarkSmallColoredTextFormatter(b *testing.B) {
+	doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
+}
+
+func BenchmarkLargeColoredTextFormatter(b *testing.B) {
+	doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
+}
+
+func BenchmarkSmallJSONFormatter(b *testing.B) {
+	doBenchmark(b, &JSONFormatter{}, smallFields)
+}
+
+func BenchmarkLargeJSONFormatter(b *testing.B) {
+	doBenchmark(b, &JSONFormatter{}, largeFields)
+}
+
+func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
+	logger := New()
+
+	entry := &Entry{
+		Time:    time.Time{},
+		Level:   InfoLevel,
+		Message: "message",
+		Data:    fields,
+		Logger:  logger,
+	}
+	var d []byte
+	var err error
+	for i := 0; i < b.N; i++ {
+		d, err = formatter.Format(entry)
+		if err != nil {
+			b.Fatal(err)
+		}
+		b.SetBytes(int64(len(d)))
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/hook_test.go b/vendor/github.com/sirupsen/logrus/hook_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..4fea7514e1b7bfa9ee0d09c51b2495ec463a30cd
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hook_test.go
@@ -0,0 +1,144 @@
+package logrus
+
+import (
+	"sync"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+type TestHook struct {
+	Fired bool
+}
+
+func (hook *TestHook) Fire(entry *Entry) error {
+	hook.Fired = true
+	return nil
+}
+
+func (hook *TestHook) Levels() []Level {
+	return []Level{
+		DebugLevel,
+		InfoLevel,
+		WarnLevel,
+		ErrorLevel,
+		FatalLevel,
+		PanicLevel,
+	}
+}
+
+func TestHookFires(t *testing.T) {
+	hook := new(TestHook)
+
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.Hooks.Add(hook)
+		assert.Equal(t, hook.Fired, false)
+
+		log.Print("test")
+	}, func(fields Fields) {
+		assert.Equal(t, hook.Fired, true)
+	})
+}
+
+type ModifyHook struct {
+}
+
+func (hook *ModifyHook) Fire(entry *Entry) error {
+	entry.Data["wow"] = "whale"
+	return nil
+}
+
+func (hook *ModifyHook) Levels() []Level {
+	return []Level{
+		DebugLevel,
+		InfoLevel,
+		WarnLevel,
+		ErrorLevel,
+		FatalLevel,
+		PanicLevel,
+	}
+}
+
+func TestHookCanModifyEntry(t *testing.T) {
+	hook := new(ModifyHook)
+
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.Hooks.Add(hook)
+		log.WithField("wow", "elephant").Print("test")
+	}, func(fields Fields) {
+		assert.Equal(t, fields["wow"], "whale")
+	})
+}
+
+func TestCanFireMultipleHooks(t *testing.T) {
+	hook1 := new(ModifyHook)
+	hook2 := new(TestHook)
+
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.Hooks.Add(hook1)
+		log.Hooks.Add(hook2)
+
+		log.WithField("wow", "elephant").Print("test")
+	}, func(fields Fields) {
+		assert.Equal(t, fields["wow"], "whale")
+		assert.Equal(t, hook2.Fired, true)
+	})
+}
+
+type ErrorHook struct {
+	Fired bool
+}
+
+func (hook *ErrorHook) Fire(entry *Entry) error {
+	hook.Fired = true
+	return nil
+}
+
+func (hook *ErrorHook) Levels() []Level {
+	return []Level{
+		ErrorLevel,
+	}
+}
+
+func TestErrorHookShouldntFireOnInfo(t *testing.T) {
+	hook := new(ErrorHook)
+
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.Hooks.Add(hook)
+		log.Info("test")
+	}, func(fields Fields) {
+		assert.Equal(t, hook.Fired, false)
+	})
+}
+
+func TestErrorHookShouldFireOnError(t *testing.T) {
+	hook := new(ErrorHook)
+
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.Hooks.Add(hook)
+		log.Error("test")
+	}, func(fields Fields) {
+		assert.Equal(t, hook.Fired, true)
+	})
+}
+
+func TestAddHookRace(t *testing.T) {
+	var wg sync.WaitGroup
+	wg.Add(2)
+	hook := new(ErrorHook)
+	LogAndAssertJSON(t, func(log *Logger) {
+		go func() {
+			defer wg.Done()
+			log.AddHook(hook)
+		}()
+		go func() {
+			defer wg.Done()
+			log.Error("test")
+		}()
+		wg.Wait()
+	}, func(fields Fields) {
+		// the line may have been logged
+		// before the hook was added, so we can't
+		// actually assert on the hook
+	})
+}
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
new file mode 100644
index 0000000000000000000000000000000000000000..3f151cdc39275a003d6a6e7059b0801071c001a8
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook is fired when logging on the levels returned from `Levels()` by your
+// implementation of the interface. Note that hooks are not fired in a
+// goroutine or a channel with workers; you should handle such functionality
+// yourself if you want the hook call to be non-blocking and don't wish for
+// the logging calls for levels returned from `Levels()` to block.
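+//
+// A sketch of a minimal implementation (names are illustrative):
+//
+//  type StderrHook struct{}
+//
+//  func (h *StderrHook) Levels() []Level { return []Level{ErrorLevel} }
+//  func (h *StderrHook) Fire(e *Entry) error {
+//  	_, err := fmt.Fprintln(os.Stderr, e.Message)
+//  	return err
+//  }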
+type Hook interface {
+	Levels() []Level
+	Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+	for _, level := range hook.Levels() {
+		hooks[level] = append(hooks[level], hook)
+	}
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+	for _, hook := range hooks[level] {
+		if err := hook.Fire(entry); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go
new file mode 100644
index 0000000000000000000000000000000000000000..329ce0d60cb1f6bb413261bd615f8152fc257da8
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go
@@ -0,0 +1,55 @@
+// +build !windows,!nacl,!plan9
+
+package syslog
+
+import (
+	"fmt"
+	"log/syslog"
+	"os"
+
+	"github.com/sirupsen/logrus"
+)
+
+// SyslogHook to send logs via syslog.
+type SyslogHook struct {
+	Writer        *syslog.Writer
+	SyslogNetwork string
+	SyslogRaddr   string
+}
+
+// Creates a hook to be added to an instance of logger. This is called with
+// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
+// `if err == nil { log.Hooks.Add(hook) }`
+func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
+	w, err := syslog.Dial(network, raddr, priority, tag)
+	return &SyslogHook{w, network, raddr}, err
+}
+
+func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
+	line, err := entry.String()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
+		return err
+	}
+
+	switch entry.Level {
+	case logrus.PanicLevel:
+		return hook.Writer.Crit(line)
+	case logrus.FatalLevel:
+		return hook.Writer.Crit(line)
+	case logrus.ErrorLevel:
+		return hook.Writer.Err(line)
+	case logrus.WarnLevel:
+		return hook.Writer.Warning(line)
+	case logrus.InfoLevel:
+		return hook.Writer.Info(line)
+	case logrus.DebugLevel:
+		return hook.Writer.Debug(line)
+	default:
+		return nil
+	}
+}
+
+func (hook *SyslogHook) Levels() []logrus.Level {
+	return logrus.AllLevels
+}
diff --git a/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..5ec3a44454883f1ba1c0edac541c80737b918987
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog_test.go
@@ -0,0 +1,27 @@
+package syslog
+
+import (
+	"log/syslog"
+	"testing"
+
+	"github.com/sirupsen/logrus"
+)
+
+func TestLocalhostAddAndPrint(t *testing.T) {
+	log := logrus.New()
+	hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+
+	if err != nil {
+		t.Errorf("Unable to connect to local syslog.")
+	}
+
+	log.Hooks.Add(hook)
+
+	for _, level := range hook.Levels() {
+		if len(log.Hooks[level]) != 1 {
+			t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
+		}
+	}
+
+	log.Info("Congratulations!")
+}
diff --git a/vendor/github.com/sirupsen/logrus/hooks/test/test.go b/vendor/github.com/sirupsen/logrus/hooks/test/test.go
new file mode 100644
index 0000000000000000000000000000000000000000..62c4845df7d095d01b6b8a8a679082678a9aa63d
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks/test/test.go
@@ -0,0 +1,95 @@
+// Package test is used for testing logrus. It is here for backwards
+// compatibility from when logrus' organization was upper-case. Please use
+// lower-case logrus and the `null` package instead of this one.
+package test
+
+import (
+	"io/ioutil"
+	"sync"
+
+	"github.com/sirupsen/logrus"
+)
+
+// Hook is a hook designed for dealing with logs in test scenarios.
+type Hook struct {
+	// Entries is an array of all entries that have been received by this hook.
+	// For safe access, use the AllEntries() method, rather than reading this
+	// value directly.
+	Entries []*logrus.Entry
+	mu      sync.RWMutex
+}
+
+// NewGlobal installs a test hook for the global logger.
+func NewGlobal() *Hook {
+
+	hook := new(Hook)
+	logrus.AddHook(hook)
+
+	return hook
+
+}
+
+// NewLocal installs a test hook for a given local logger.
+func NewLocal(logger *logrus.Logger) *Hook {
+
+	hook := new(Hook)
+	logger.Hooks.Add(hook)
+
+	return hook
+
+}
+
+// NewNullLogger creates a discarding logger and installs the test hook.
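+//
+// Typical test usage (sketch):
+//
+//  logger, hook := NewNullLogger()
+//  logger.Error("boom")
+//  // hook.LastEntry().Message == "boom"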
+func NewNullLogger() (*logrus.Logger, *Hook) {
+
+	logger := logrus.New()
+	logger.Out = ioutil.Discard
+
+	return logger, NewLocal(logger)
+
+}
+
+func (t *Hook) Fire(e *logrus.Entry) error {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.Entries = append(t.Entries, e)
+	return nil
+}
+
+func (t *Hook) Levels() []logrus.Level {
+	return logrus.AllLevels
+}
+
+// LastEntry returns the last entry that was logged or nil.
+func (t *Hook) LastEntry() *logrus.Entry {
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+	i := len(t.Entries) - 1
+	if i < 0 {
+		return nil
+	}
+	// Make a copy, for safety
+	e := *t.Entries[i]
+	return &e
+}
+
+// AllEntries returns all entries that were logged.
+func (t *Hook) AllEntries() []*logrus.Entry {
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+	// Make a copy so the returned value won't race with future log requests
+	entries := make([]*logrus.Entry, len(t.Entries))
+	for i, entry := range t.Entries {
+		// Make a copy, for safety
+		e := *entry
+		entries[i] = &e
+	}
+	return entries
+}
+
+// Reset removes all Entries from this test hook.
+func (t *Hook) Reset() {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.Entries = make([]*logrus.Entry, 0)
+}
diff --git a/vendor/github.com/sirupsen/logrus/hooks/test/test_test.go b/vendor/github.com/sirupsen/logrus/hooks/test/test_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..dea768e6c5cff226826312da675feff535422d4e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks/test/test_test.go
@@ -0,0 +1,61 @@
+package test
+
+import (
+	"sync"
+	"testing"
+
+	"github.com/sirupsen/logrus"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestAllHooks(t *testing.T) {
+	assert := assert.New(t)
+
+	logger, hook := NewNullLogger()
+	assert.Nil(hook.LastEntry())
+	assert.Equal(0, len(hook.Entries))
+
+	logger.Error("Hello error")
+	assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+	assert.Equal("Hello error", hook.LastEntry().Message)
+	assert.Equal(1, len(hook.Entries))
+
+	logger.Warn("Hello warning")
+	assert.Equal(logrus.WarnLevel, hook.LastEntry().Level)
+	assert.Equal("Hello warning", hook.LastEntry().Message)
+	assert.Equal(2, len(hook.Entries))
+
+	hook.Reset()
+	assert.Nil(hook.LastEntry())
+	assert.Equal(0, len(hook.Entries))
+
+	hook = NewGlobal()
+
+	logrus.Error("Hello error")
+	assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+	assert.Equal("Hello error", hook.LastEntry().Message)
+	assert.Equal(1, len(hook.Entries))
+}
+
+func TestLoggingWithHooksRace(t *testing.T) {
+	assert := assert.New(t)
+	logger, hook := NewNullLogger()
+
+	var wg sync.WaitGroup
+	wg.Add(100)
+
+	for i := 0; i < 100; i++ {
+		go func() {
+			logger.Info("info")
+			wg.Done()
+		}()
+	}
+
+	// Wait for every goroutine to log before asserting; otherwise LastEntry
+	// may still be nil and the reads would race the writers.
+	wg.Wait()
+
+	assert.Equal(logrus.InfoLevel, hook.LastEntry().Level)
+	assert.Equal("info", hook.LastEntry().Message)
+
+	entries := hook.AllEntries()
+	assert.Equal(100, len(entries))
+}
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb01c1b1040400da7a8138a0e3f89d413acd3eaf
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -0,0 +1,79 @@
+package logrus
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+type fieldKey string
+
+// FieldMap allows customization of the key names for default fields.
+type FieldMap map[fieldKey]string
+
+// Default key names for the default fields
+const (
+	FieldKeyMsg   = "msg"
+	FieldKeyLevel = "level"
+	FieldKeyTime  = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+	if k, ok := f[key]; ok {
+		return k
+	}
+
+	return string(key)
+}
+
+// JSONFormatter formats logs into parsable JSON
+type JSONFormatter struct {
+	// TimestampFormat sets the format used for marshaling timestamps.
+	TimestampFormat string
+
+	// DisableTimestamp allows disabling automatic timestamps in output
+	DisableTimestamp bool
+
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	//   formatter := &JSONFormatter{
+	//     FieldMap: FieldMap{
+	//       FieldKeyTime:  "@timestamp",
+	//       FieldKeyLevel: "@level",
+	//       FieldKeyMsg:   "@message",
+	//     },
+	//   }
+	FieldMap FieldMap
+}
+
+// Format renders a single log entry
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields, len(entry.Data)+3)
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+	prefixFieldClashes(data)
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = defaultTimestampFormat
+	}
+
+	if !f.DisableTimestamp {
+		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+	}
+	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+
+	serialized, err := json.Marshal(data)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal fields to JSON: %v", err)
+	}
+	return append(serialized, '\n'), nil
+}
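
As a hedged illustration of the FieldMap customization documented in the struct above, a runnable sketch (the replacement key names are examples only):

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.Out = os.Stdout
	log.Formatter = &logrus.JSONFormatter{
		FieldMap: logrus.FieldMap{
			logrus.FieldKeyTime:  "@timestamp",
			logrus.FieldKeyLevel: "@level",
			logrus.FieldKeyMsg:   "@message",
		},
	}

	// Emits a line shaped roughly like:
	// {"@level":"info","@message":"hello","@timestamp":"..."}
	log.Info("hello")
}
```
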
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter_test.go b/vendor/github.com/sirupsen/logrus/json_formatter_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..51093a79bad3c8be2d934eb1beb54d7210af7431
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/json_formatter_test.go
@@ -0,0 +1,199 @@
+package logrus
+
+import (
+	"encoding/json"
+	"errors"
+	"strings"
+	"testing"
+)
+
+func TestErrorNotLost(t *testing.T) {
+	formatter := &JSONFormatter{}
+
+	b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
+	if err != nil {
+		t.Fatal("Unable to format entry: ", err)
+	}
+
+	entry := make(map[string]interface{})
+	err = json.Unmarshal(b, &entry)
+	if err != nil {
+		t.Fatal("Unable to unmarshal formatted entry: ", err)
+	}
+
+	if entry["error"] != "wild walrus" {
+		t.Fatal("Error field not set")
+	}
+}
+
+func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
+	formatter := &JSONFormatter{}
+
+	b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
+	if err != nil {
+		t.Fatal("Unable to format entry: ", err)
+	}
+
+	entry := make(map[string]interface{})
+	err = json.Unmarshal(b, &entry)
+	if err != nil {
+		t.Fatal("Unable to unmarshal formatted entry: ", err)
+	}
+
+	if entry["omg"] != "wild walrus" {
+		t.Fatal("Error field not set")
+	}
+}
+
+func TestFieldClashWithTime(t *testing.T) {
+	formatter := &JSONFormatter{}
+
+	b, err := formatter.Format(WithField("time", "right now!"))
+	if err != nil {
+		t.Fatal("Unable to format entry: ", err)
+	}
+
+	entry := make(map[string]interface{})
+	err = json.Unmarshal(b, &entry)
+	if err != nil {
+		t.Fatal("Unable to unmarshal formatted entry: ", err)
+	}
+
+	if entry["fields.time"] != "right now!" {
+		t.Fatal("fields.time not set to original time field")
+	}
+
+	if entry["time"] != "0001-01-01T00:00:00Z" {
+		t.Fatal("time field not set to current time, was: ", entry["time"])
+	}
+}
+
+func TestFieldClashWithMsg(t *testing.T) {
+	formatter := &JSONFormatter{}
+
+	b, err := formatter.Format(WithField("msg", "something"))
+	if err != nil {
+		t.Fatal("Unable to format entry: ", err)
+	}
+
+	entry := make(map[string]interface{})
+	err = json.Unmarshal(b, &entry)
+	if err != nil {
+		t.Fatal("Unable to unmarshal formatted entry: ", err)
+	}
+
+	if entry["fields.msg"] != "something" {
+		t.Fatal("fields.msg not set to original msg field")
+	}
+}
+
+func TestFieldClashWithLevel(t *testing.T) {
+	formatter := &JSONFormatter{}
+
+	b, err := formatter.Format(WithField("level", "something"))
+	if err != nil {
+		t.Fatal("Unable to format entry: ", err)
+	}
+
+	entry := make(map[string]interface{})
+	err = json.Unmarshal(b, &entry)
+	if err != nil {
+		t.Fatal("Unable to unmarshal formatted entry: ", err)
+	}
+
+	if entry["fields.level"] != "something" {
+		t.Fatal("fields.level not set to original level field")
+	}
+}
+
+func TestJSONEntryEndsWithNewline(t *testing.T) {
+	formatter := &JSONFormatter{}
+
+	b, err := formatter.Format(WithField("level", "something"))
+	if err != nil {
+		t.Fatal("Unable to format entry: ", err)
+	}
+
+	if b[len(b)-1] != '\n' {
+		t.Fatal("Expected JSON log entry to end with a newline")
+	}
+}
+
+func TestJSONMessageKey(t *testing.T) {
+	formatter := &JSONFormatter{
+		FieldMap: FieldMap{
+			FieldKeyMsg: "message",
+		},
+	}
+
+	b, err := formatter.Format(&Entry{Message: "oh hai"})
+	if err != nil {
+		t.Fatal("Unable to format entry: ", err)
+	}
+	s := string(b)
+	if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) {
+		t.Fatal("Expected JSON to format message key")
+	}
+}
+
+func TestJSONLevelKey(t *testing.T) {
+	formatter := &JSONFormatter{
+		FieldMap: FieldMap{
+			FieldKeyLevel: "somelevel",
+		},
+	}
+
+	b, err := formatter.Format(WithField("level", "something"))
+	if err != nil {
+		t.Fatal("Unable to format entry: ", err)
+	}
+	s := string(b)
+	if !strings.Contains(s, "somelevel") {
+		t.Fatal("Expected JSON to format level key")
+	}
+}
+
+func TestJSONTimeKey(t *testing.T) {
+	formatter := &JSONFormatter{
+		FieldMap: FieldMap{
+			FieldKeyTime: "timeywimey",
+		},
+	}
+
+	b, err := formatter.Format(WithField("level", "something"))
+	if err != nil {
+		t.Fatal("Unable to format entry: ", err)
+	}
+	s := string(b)
+	if !strings.Contains(s, "timeywimey") {
+		t.Fatal("Expected JSON to format time key")
+	}
+}
+
+func TestJSONDisableTimestamp(t *testing.T) {
+	formatter := &JSONFormatter{
+		DisableTimestamp: true,
+	}
+
+	b, err := formatter.Format(WithField("level", "something"))
+	if err != nil {
+		t.Fatal("Unable to format entry: ", err)
+	}
+	s := string(b)
+	if strings.Contains(s, FieldKeyTime) {
+		t.Error("Did not prevent timestamp", s)
+	}
+}
+
+func TestJSONEnableTimestamp(t *testing.T) {
+	formatter := &JSONFormatter{}
+
+	b, err := formatter.Format(WithField("level", "something"))
+	if err != nil {
+		t.Fatal("Unable to format entry: ", err)
+	}
+	s := string(b)
+	if !strings.Contains(s, FieldKeyTime) {
+		t.Error("Timestamp not present", s)
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
new file mode 100644
index 0000000000000000000000000000000000000000..fdaf8a653414953533cb9ad369e0c031cc5a793e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -0,0 +1,323 @@
+package logrus
+
+import (
+	"io"
+	"os"
+	"sync"
+	"sync/atomic"
+)
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it as the default, which is `os.Stderr`. You can also set
+	// this to something more adventurous, such as logging to Kafka.
+	Out io.Writer
+	// Hooks for the logger instance. These allow firing events based on logging
+	// levels and log entries. For example, to send errors to an error tracking
+	// service, log to StatsD or dump the core on fatal errors.
+	Hooks LevelHooks
+	// All log entries pass through the formatter before being logged to Out.
+	// The included formatters are `TextFormatter` and `JSONFormatter`, of which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but not when writing to a file. You can easily implement
+	// your own formatter by satisfying the `Formatter` interface; see the
+	// `README` or the included formatters for examples.
+	Formatter Formatter
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.InfoLevel`, which allows Info(), Warn(), Error() and Fatal() to
+	// be logged.
+	Level Level
+	// Used to sync writing to the log. Locking is enabled by default.
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+}
+
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
+}
+
+// New creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+//    var log = &Logger{
+//      Out: os.Stderr,
+//      Formatter: new(JSONFormatter),
+//      Hooks: make(LevelHooks),
+//      Level: logrus.DebugLevel,
+//    }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+	return &Logger{
+		Out:       os.Stderr,
+		Formatter: new(TextFormatter),
+		Hooks:     make(LevelHooks),
+		Level:     InfoLevel,
+	}
+}
+
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	logger.entryPool.Put(entry)
+}
+
+// WithField adds a field to the log entry. Note that it doesn't log until you
+// call Debug, Print, Info, Warn, Fatal or Panic; it only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithField(key, value)
+}
+
+// WithFields adds a map of fields to the log entry. All it does is call
+// `WithField` for each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithFields(fields)
+}
+
+// WithError adds an error as a single field to the log entry. All it does is
+// call `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+	if logger.level() >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+	if logger.level() >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infof(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Printf(format, args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+	if logger.level() >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+	if logger.level() >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+	if logger.level() >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Errorf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+	if logger.level() >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatalf(format, args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+	if logger.level() >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panicf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+	if logger.level() >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debug(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+	if logger.level() >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Info(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Info(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+	if logger.level() >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+	if logger.level() >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+	if logger.level() >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Error(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+	if logger.level() >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatal(args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+	if logger.level() >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panic(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+	if logger.level() >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+	if logger.level() >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infoln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Println(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+	if logger.level() >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+	if logger.level() >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+	if logger.level() >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Errorln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+	if logger.level() >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatalln(args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+	if logger.level() >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panicln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+// SetNoLock disables locking around writes to Out. When a file is opened with
+// the append flag, it is safe to write to it concurrently (for messages under
+// 4k on Linux). In those cases the user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+	logger.mu.Disable()
+}
+
+func (logger *Logger) level() Level {
+	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+func (logger *Logger) SetLevel(level Level) {
+	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
+}
+
+func (logger *Logger) AddHook(hook Hook) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Hooks.Add(hook)
+}
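
To make the SetNoLock escape hatch concrete, a minimal sketch assuming an append-mode file on Linux, the only case the comment above describes as safe (the file name is illustrative):

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	// O_APPEND writes of moderate size are atomic on Linux, so the
	// logger's own mutex can be skipped for this output.
	f, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	log := logrus.New()
	log.Out = f
	log.SetNoLock()
	log.SetLevel(logrus.DebugLevel)

	log.Debug("lock-free logging")
}
```
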
diff --git a/vendor/github.com/sirupsen/logrus/logger_bench_test.go b/vendor/github.com/sirupsen/logrus/logger_bench_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd23a3535ec9626a00e590bf53f221017fc99f39
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logger_bench_test.go
@@ -0,0 +1,61 @@
+package logrus
+
+import (
+	"os"
+	"testing"
+)
+
+// loggerFields is a small data set for benchmarking
+var loggerFields = Fields{
+	"foo":   "bar",
+	"baz":   "qux",
+	"one":   "two",
+	"three": "four",
+}
+
+func BenchmarkDummyLogger(b *testing.B) {
+	nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666)
+	if err != nil {
+		b.Fatalf("%v", err)
+	}
+	defer nullf.Close()
+	doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func BenchmarkDummyLoggerNoLock(b *testing.B) {
+	nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666)
+	if err != nil {
+		b.Fatalf("%v", err)
+	}
+	defer nullf.Close()
+	doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
+	logger := Logger{
+		Out:       out,
+		Level:     InfoLevel,
+		Formatter: formatter,
+	}
+	entry := logger.WithFields(fields)
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			entry.Info("aaa")
+		}
+	})
+}
+
+func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
+	logger := Logger{
+		Out:       out,
+		Level:     InfoLevel,
+		Formatter: formatter,
+	}
+	logger.SetNoLock()
+	entry := logger.WithFields(fields)
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			entry.Info("aaa")
+		}
+	})
+}
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd38999741ed0802ad3d28892abb3c0b60a56c16
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+	"fmt"
+	"log"
+	"strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint32
+
+// String converts the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+	switch level {
+	case DebugLevel:
+		return "debug"
+	case InfoLevel:
+		return "info"
+	case WarnLevel:
+		return "warning"
+	case ErrorLevel:
+		return "error"
+	case FatalLevel:
+		return "fatal"
+	case PanicLevel:
+		return "panic"
+	}
+
+	return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+	switch strings.ToLower(lvl) {
+	case "panic":
+		return PanicLevel, nil
+	case "fatal":
+		return FatalLevel, nil
+	case "error":
+		return ErrorLevel, nil
+	case "warn", "warning":
+		return WarnLevel, nil
+	case "info":
+		return InfoLevel, nil
+	case "debug":
+		return DebugLevel, nil
+	}
+
+	var l Level
+	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// AllLevels is a variable exposing all logging levels
+var AllLevels = []Level{
+	PanicLevel,
+	FatalLevel,
+	ErrorLevel,
+	WarnLevel,
+	InfoLevel,
+	DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+	// PanicLevel level, highest level of severity. Logs and then calls panic with the
+	// message passed to Debug, Info, ...
+	PanicLevel Level = iota
+	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+	// logging level is set to Panic.
+	FatalLevel
+	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel
+	// InfoLevel level. General operational entries about what's going on inside the
+	// application.
+	InfoLevel
+	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+	DebugLevel
+)
+
+// These won't compile if StdLogger can't be satisfied by log.Logger, Entry and Logger
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way it can
+// accept both a stdlib logger and a logrus logger. There's no standard
+// interface, so this is the closest we get, unfortunately.
+type StdLogger interface {
+	Print(...interface{})
+	Printf(string, ...interface{})
+	Println(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalln(...interface{})
+
+	Panic(...interface{})
+	Panicf(string, ...interface{})
+	Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+}
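
A sketch of the pattern the FieldLogger interface enables: a library function (the name Process is hypothetical) that callers can hand either a *logrus.Logger or a pre-seeded *logrus.Entry:

```go
package mylib

import "github.com/sirupsen/logrus"

// Process is a hypothetical library entry point. Accepting a FieldLogger
// means callers can pass a *logrus.Logger, or a *logrus.Entry already
// carrying their own fields.
func Process(log logrus.FieldLogger, id string) {
	// WithField returns a *logrus.Entry, which itself satisfies FieldLogger.
	log = log.WithField("id", id)
	log.Debug("starting")
	// ... real work would happen here ...
	log.Info("done")
}
```
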
diff --git a/vendor/github.com/sirupsen/logrus/logrus_test.go b/vendor/github.com/sirupsen/logrus/logrus_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..78cbc282595e3f33ddca6aa45dc745c8c7ab231c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logrus_test.go
@@ -0,0 +1,386 @@
+package logrus
+
+import (
+	"bytes"
+	"encoding/json"
+	"strconv"
+	"strings"
+	"sync"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
+	var buffer bytes.Buffer
+	var fields Fields
+
+	logger := New()
+	logger.Out = &buffer
+	logger.Formatter = new(JSONFormatter)
+
+	log(logger)
+
+	err := json.Unmarshal(buffer.Bytes(), &fields)
+	assert.Nil(t, err)
+
+	assertions(fields)
+}
+
+func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
+	var buffer bytes.Buffer
+
+	logger := New()
+	logger.Out = &buffer
+	logger.Formatter = &TextFormatter{
+		DisableColors: true,
+	}
+
+	log(logger)
+
+	fields := make(map[string]string)
+	for _, kv := range strings.Split(buffer.String(), " ") {
+		if !strings.Contains(kv, "=") {
+			continue
+		}
+		kvArr := strings.Split(kv, "=")
+		key := strings.TrimSpace(kvArr[0])
+		val := kvArr[1]
+		if kvArr[1][0] == '"' {
+			var err error
+			val, err = strconv.Unquote(val)
+			assert.NoError(t, err)
+		}
+		fields[key] = val
+	}
+	assertions(fields)
+}
+
+func TestPrint(t *testing.T) {
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.Print("test")
+	}, func(fields Fields) {
+		assert.Equal(t, fields["msg"], "test")
+		assert.Equal(t, fields["level"], "info")
+	})
+}
+
+func TestInfo(t *testing.T) {
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.Info("test")
+	}, func(fields Fields) {
+		assert.Equal(t, fields["msg"], "test")
+		assert.Equal(t, fields["level"], "info")
+	})
+}
+
+func TestWarn(t *testing.T) {
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.Warn("test")
+	}, func(fields Fields) {
+		assert.Equal(t, fields["msg"], "test")
+		assert.Equal(t, fields["level"], "warning")
+	})
+}
+
+func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.Infoln("test", "test")
+	}, func(fields Fields) {
+		assert.Equal(t, fields["msg"], "test test")
+	})
+}
+
+func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.Infoln("test", 10)
+	}, func(fields Fields) {
+		assert.Equal(t, fields["msg"], "test 10")
+	})
+}
+
+func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.Infoln(10, 10)
+	}, func(fields Fields) {
+		assert.Equal(t, fields["msg"], "10 10")
+	})
+}
+
+func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.Info(10, 10)
+	}, func(fields Fields) {
+		assert.Equal(t, fields["msg"], "10 10")
+	})
+}
+
+func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.Info("test", 10)
+	}, func(fields Fields) {
+		assert.Equal(t, fields["msg"], "test10")
+	})
+}
+
+func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.Info("test", "test")
+	}, func(fields Fields) {
+		assert.Equal(t, fields["msg"], "testtest")
+	})
+}
+
+func TestWithFieldsShouldAllowAssignments(t *testing.T) {
+	var buffer bytes.Buffer
+	var fields Fields
+
+	logger := New()
+	logger.Out = &buffer
+	logger.Formatter = new(JSONFormatter)
+
+	localLog := logger.WithFields(Fields{
+		"key1": "value1",
+	})
+
+	localLog.WithField("key2", "value2").Info("test")
+	err := json.Unmarshal(buffer.Bytes(), &fields)
+	assert.Nil(t, err)
+
+	assert.Equal(t, "value2", fields["key2"])
+	assert.Equal(t, "value1", fields["key1"])
+
+	buffer = bytes.Buffer{}
+	fields = Fields{}
+	localLog.Info("test")
+	err = json.Unmarshal(buffer.Bytes(), &fields)
+	assert.Nil(t, err)
+
+	_, ok := fields["key2"]
+	assert.Equal(t, false, ok)
+	assert.Equal(t, "value1", fields["key1"])
+}
+
+func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.WithField("msg", "hello").Info("test")
+	}, func(fields Fields) {
+		assert.Equal(t, fields["msg"], "test")
+	})
+}
+
+func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.WithField("msg", "hello").Info("test")
+	}, func(fields Fields) {
+		assert.Equal(t, fields["msg"], "test")
+		assert.Equal(t, fields["fields.msg"], "hello")
+	})
+}
+
+func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.WithField("time", "hello").Info("test")
+	}, func(fields Fields) {
+		assert.Equal(t, fields["fields.time"], "hello")
+	})
+}
+
+func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
+	LogAndAssertJSON(t, func(log *Logger) {
+		log.WithField("level", 1).Info("test")
+	}, func(fields Fields) {
+		assert.Equal(t, fields["level"], "info")
+		assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
+	})
+}
+
+func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
+	LogAndAssertText(t, func(log *Logger) {
+		ll := log.WithField("herp", "derp")
+		ll.Info("hello")
+		ll.Info("bye")
+	}, func(fields map[string]string) {
+		for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
+			if _, ok := fields[fieldName]; ok {
+				t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
+			}
+		}
+	})
+}
+
+func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
+
+	var buffer bytes.Buffer
+	var fields Fields
+
+	logger := New()
+	logger.Out = &buffer
+	logger.Formatter = new(JSONFormatter)
+
+	llog := logger.WithField("context", "eating raw fish")
+
+	llog.Info("looks delicious")
+
+	err := json.Unmarshal(buffer.Bytes(), &fields)
+	assert.NoError(t, err, "should have decoded first message")
+	assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
+	assert.Equal(t, fields["msg"], "looks delicious")
+	assert.Equal(t, fields["context"], "eating raw fish")
+
+	buffer.Reset()
+
+	llog.Warn("omg it is!")
+
+	err = json.Unmarshal(buffer.Bytes(), &fields)
+	assert.NoError(t, err, "should have decoded second message")
+	assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
+	assert.Equal(t, fields["msg"], "omg it is!")
+	assert.Equal(t, fields["context"], "eating raw fish")
+	assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
+
+}
+
+func TestConvertLevelToString(t *testing.T) {
+	assert.Equal(t, "debug", DebugLevel.String())
+	assert.Equal(t, "info", InfoLevel.String())
+	assert.Equal(t, "warning", WarnLevel.String())
+	assert.Equal(t, "error", ErrorLevel.String())
+	assert.Equal(t, "fatal", FatalLevel.String())
+	assert.Equal(t, "panic", PanicLevel.String())
+}
+
+func TestParseLevel(t *testing.T) {
+	l, err := ParseLevel("panic")
+	assert.Nil(t, err)
+	assert.Equal(t, PanicLevel, l)
+
+	l, err = ParseLevel("PANIC")
+	assert.Nil(t, err)
+	assert.Equal(t, PanicLevel, l)
+
+	l, err = ParseLevel("fatal")
+	assert.Nil(t, err)
+	assert.Equal(t, FatalLevel, l)
+
+	l, err = ParseLevel("FATAL")
+	assert.Nil(t, err)
+	assert.Equal(t, FatalLevel, l)
+
+	l, err = ParseLevel("error")
+	assert.Nil(t, err)
+	assert.Equal(t, ErrorLevel, l)
+
+	l, err = ParseLevel("ERROR")
+	assert.Nil(t, err)
+	assert.Equal(t, ErrorLevel, l)
+
+	l, err = ParseLevel("warn")
+	assert.Nil(t, err)
+	assert.Equal(t, WarnLevel, l)
+
+	l, err = ParseLevel("WARN")
+	assert.Nil(t, err)
+	assert.Equal(t, WarnLevel, l)
+
+	l, err = ParseLevel("warning")
+	assert.Nil(t, err)
+	assert.Equal(t, WarnLevel, l)
+
+	l, err = ParseLevel("WARNING")
+	assert.Nil(t, err)
+	assert.Equal(t, WarnLevel, l)
+
+	l, err = ParseLevel("info")
+	assert.Nil(t, err)
+	assert.Equal(t, InfoLevel, l)
+
+	l, err = ParseLevel("INFO")
+	assert.Nil(t, err)
+	assert.Equal(t, InfoLevel, l)
+
+	l, err = ParseLevel("debug")
+	assert.Nil(t, err)
+	assert.Equal(t, DebugLevel, l)
+
+	l, err = ParseLevel("DEBUG")
+	assert.Nil(t, err)
+	assert.Equal(t, DebugLevel, l)
+
+	_, err = ParseLevel("invalid")
+	assert.NotNil(t, err)
+	assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
+}
+
+func TestGetSetLevelRace(t *testing.T) {
+	wg := sync.WaitGroup{}
+	for i := 0; i < 100; i++ {
+		wg.Add(1)
+		go func(i int) {
+			defer wg.Done()
+			if i%2 == 0 {
+				SetLevel(InfoLevel)
+			} else {
+				GetLevel()
+			}
+		}(i)
+
+	}
+	wg.Wait()
+}
+
+func TestLoggingRace(t *testing.T) {
+	logger := New()
+
+	var wg sync.WaitGroup
+	wg.Add(100)
+
+	for i := 0; i < 100; i++ {
+		go func() {
+			logger.Info("info")
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+}
+
+// Compile test
+func TestLogrusInterface(t *testing.T) {
+	var buffer bytes.Buffer
+	fn := func(l FieldLogger) {
+		b := l.WithField("key", "value")
+		b.Debug("Test")
+	}
+	// test logger
+	logger := New()
+	logger.Out = &buffer
+	fn(logger)
+
+	// test Entry
+	e := logger.WithField("another", "value")
+	fn(e)
+}
+
+// channelWriter implements io.Writer using a channel for synchronization, so
+// we can wait on the Entry.Writer goroutine to write in a non-racy way. This
+// does assume that there is a single Write call to Logger.Out for each message.
+type channelWriter chan []byte
+
+func (cw channelWriter) Write(p []byte) (int, error) {
+	cw <- p
+	return len(p), nil
+}
+
+func TestEntryWriter(t *testing.T) {
+	cw := channelWriter(make(chan []byte, 1))
+	log := New()
+	log.Out = cw
+	log.Formatter = new(JSONFormatter)
+	log.WithField("foo", "bar").WriterLevel(WarnLevel).Write([]byte("hello\n"))
+
+	bs := <-cw
+	var fields Fields
+	err := json.Unmarshal(bs, &fields)
+	assert.Nil(t, err)
+	assert.Equal(t, fields["foo"], "bar")
+	assert.Equal(t, fields["level"], "warning")
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..d7b3893f3fe327a425b557ff07af09714e7e302a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TIOCGETA
+
+type Termios unix.Termios
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
new file mode 100644
index 0000000000000000000000000000000000000000..2403de981929f6af51da8c875d09252e920601cb
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return true
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
new file mode 100644
index 0000000000000000000000000000000000000000..116bcb4e3393d1d6ef688e8e23eddd9960cf9392
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
@@ -0,0 +1,19 @@
+// +build !appengine
+
+package logrus
+
+import (
+	"io"
+	"os"
+
+	"golang.org/x/crypto/ssh/terminal"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		return terminal.IsTerminal(int(v.Fd()))
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..88d7298e24f663d17ed1d06b7b971162121ad3ff
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+
+type Termios unix.Termios
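
logrus's TTY detection in this vintage goes through golang.org/x/crypto/ssh/terminal (see terminal_check_notappengine.go above), but the per-platform constants here support the same ioctl-based check; a Linux-only sketch:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// isTerminal reports whether fd refers to a terminal, using the same
// ioctl request that the per-platform files above select (TCGETS on
// Linux, TIOCGETA on the BSDs).
func isTerminal(fd int) bool {
	_, err := unix.IoctlGetTermios(fd, unix.TCGETS)
	return err == nil
}

func main() {
	fmt.Println("stdout is a terminal:", isTerminal(int(os.Stdout.Fd())))
}
```
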
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000000000000000000000000000000000000..61b21caea4503acdb474a4c4ab7b82818fae2e4f
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -0,0 +1,178 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	nocolor = 0
+	red     = 31
+	green   = 32
+	yellow  = 33
+	blue    = 36
+	gray    = 37
+)
+
+var (
+	baseTimestamp time.Time
+)
+
+func init() {
+	baseTimestamp = time.Now()
+}
+
+// TextFormatter formats logs into text
+type TextFormatter struct {
+	// Set to true to bypass checking for a TTY before outputting colors.
+	ForceColors bool
+
+	// Force disabling colors.
+	DisableColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached instead of just
+	// the time elapsed since the beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
+	DisableSorting bool
+
+	// QuoteEmptyFields will wrap empty fields in quotes if true
+	QuoteEmptyFields bool
+
+	// Whether the logger's Out is a terminal
+	isTerminal bool
+
+	sync.Once
+}
+
+func (f *TextFormatter) init(entry *Entry) {
+	if entry.Logger != nil {
+		f.isTerminal = checkIfTerminal(entry.Logger.Out)
+	}
+}
+
+// Format renders a single log entry
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+	var b *bytes.Buffer
+	keys := make([]string, 0, len(entry.Data))
+	for k := range entry.Data {
+		keys = append(keys, k)
+	}
+
+	if !f.DisableSorting {
+		sort.Strings(keys)
+	}
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	prefixFieldClashes(entry.Data)
+
+	f.Do(func() { f.init(entry) })
+
+	isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = defaultTimestampFormat
+	}
+	if isColored {
+		f.printColored(b, entry, keys, timestampFormat)
+	} else {
+		if !f.DisableTimestamp {
+			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+		}
+		f.appendKeyValue(b, "level", entry.Level.String())
+		if entry.Message != "" {
+			f.appendKeyValue(b, "msg", entry.Message)
+		}
+		for _, key := range keys {
+			f.appendKeyValue(b, key, entry.Data[key])
+		}
+	}
+
+	b.WriteByte('\n')
+	return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+	var levelColor int
+	switch entry.Level {
+	case DebugLevel:
+		levelColor = gray
+	case WarnLevel:
+		levelColor = yellow
+	case ErrorLevel, FatalLevel, PanicLevel:
+		levelColor = red
+	default:
+		levelColor = blue
+	}
+
+	levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+	if f.DisableTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+	} else if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+	} else {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+	}
+	for _, k := range keys {
+		v := entry.Data[k]
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+		f.appendValue(b, v)
+	}
+}
+
+func (f *TextFormatter) needsQuoting(text string) bool {
+	if f.QuoteEmptyFields && len(text) == 0 {
+		return true
+	}
+	for _, ch := range text {
+		if !((ch >= 'a' && ch <= 'z') ||
+			(ch >= 'A' && ch <= 'Z') ||
+			(ch >= '0' && ch <= '9') ||
+			ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+	if b.Len() > 0 {
+		b.WriteByte(' ')
+	}
+	b.WriteString(key)
+	b.WriteByte('=')
+	f.appendValue(b, value)
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+	stringVal, ok := value.(string)
+	if !ok {
+		stringVal = fmt.Sprint(value)
+	}
+
+	if !f.needsQuoting(stringVal) {
+		b.WriteString(stringVal)
+	} else {
+		b.WriteString(fmt.Sprintf("%q", stringVal))
+	}
+}
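
A short sketch tying the TextFormatter options above together (the timestamp format and field values are illustrative):

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.Out = os.Stdout
	log.Formatter = &logrus.TextFormatter{
		// Print the full timestamp rather than the seconds elapsed
		// since program start.
		FullTimestamp:   true,
		TimestampFormat: "2006-01-02 15:04:05",
		// Force plain key=value output even on a TTY.
		DisableColors: true,
	}

	// Emits a line shaped roughly like:
	// time="2024-01-02 15:04:05" level=info msg=listening port=8080
	log.WithField("port", 8080).Info("listening")
}
```
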
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter_test.go b/vendor/github.com/sirupsen/logrus/text_formatter_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d93b931e518ec793449127d788b6c8f3d6e49652
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/text_formatter_test.go
@@ -0,0 +1,141 @@
+package logrus
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+)
+
+func TestFormatting(t *testing.T) {
+	tf := &TextFormatter{DisableColors: true}
+
+	testCases := []struct {
+		value    string
+		expected string
+	}{
+		{`foo`, "time=\"0001-01-01T00:00:00Z\" level=panic test=foo\n"},
+	}
+
+	for _, tc := range testCases {
+		b, _ := tf.Format(WithField("test", tc.value))
+
+		if string(b) != tc.expected {
+			t.Errorf("formatting expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
+		}
+	}
+}
+
+func TestQuoting(t *testing.T) {
+	tf := &TextFormatter{DisableColors: true}
+
+	checkQuoting := func(q bool, value interface{}) {
+		b, _ := tf.Format(WithField("test", value))
+		idx := bytes.Index(b, ([]byte)("test="))
+		cont := bytes.Contains(b[idx+5:], []byte("\""))
+		if cont != q {
+			if q {
+				t.Errorf("quoting expected for: %#v", value)
+			} else {
+				t.Errorf("quoting not expected for: %#v", value)
+			}
+		}
+	}
+
+	checkQuoting(false, "")
+	checkQuoting(false, "abcd")
+	checkQuoting(false, "v1.0")
+	checkQuoting(false, "1234567890")
+	checkQuoting(false, "/foobar")
+	checkQuoting(false, "foo_bar")
+	checkQuoting(false, "foo@bar")
+	checkQuoting(false, "foobar^")
+	checkQuoting(false, "+/-_^@f.oobar")
+	checkQuoting(true, "foobar$")
+	checkQuoting(true, "&foobar")
+	checkQuoting(true, "x y")
+	checkQuoting(true, "x,y")
+	checkQuoting(false, errors.New("invalid"))
+	checkQuoting(true, errors.New("invalid argument"))
+
+	// Test for quoting empty fields.
+	tf.QuoteEmptyFields = true
+	checkQuoting(true, "")
+	checkQuoting(false, "abcd")
+	checkQuoting(true, errors.New("invalid argument"))
+}
+
+func TestEscaping(t *testing.T) {
+	tf := &TextFormatter{DisableColors: true}
+
+	testCases := []struct {
+		value    string
+		expected string
+	}{
+		{`ba"r`, `ba\"r`},
+		{`ba'r`, `ba'r`},
+	}
+
+	for _, tc := range testCases {
+		b, _ := tf.Format(WithField("test", tc.value))
+		if !bytes.Contains(b, []byte(tc.expected)) {
+			t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
+		}
+	}
+}
+
+func TestEscaping_Interface(t *testing.T) {
+	tf := &TextFormatter{DisableColors: true}
+
+	ts := time.Now()
+
+	testCases := []struct {
+		value    interface{}
+		expected string
+	}{
+		{ts, fmt.Sprintf("\"%s\"", ts.String())},
+		{errors.New("error: something went wrong"), "\"error: something went wrong\""},
+	}
+
+	for _, tc := range testCases {
+		b, _ := tf.Format(WithField("test", tc.value))
+		if !bytes.Contains(b, []byte(tc.expected)) {
+			t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
+		}
+	}
+}
+
+func TestTimestampFormat(t *testing.T) {
+	checkTimeStr := func(format string) {
+		customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
+		customStr, _ := customFormatter.Format(WithField("test", "test"))
+		timeStart := bytes.Index(customStr, ([]byte)("time="))
+		timeEnd := bytes.Index(customStr, ([]byte)("level="))
+		timeStr := customStr[timeStart+5+len("\"") : timeEnd-1-len("\"")]
+		if format == "" {
+			format = time.RFC3339
+		}
+		_, e := time.Parse(format, (string)(timeStr))
+		if e != nil {
+			t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
+		}
+	}
+
+	checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
+	checkTimeStr("Mon Jan _2 15:04:05 2006")
+	checkTimeStr("")
+}
+
+func TestDisableTimestampWithColoredOutput(t *testing.T) {
+	tf := &TextFormatter{DisableTimestamp: true, ForceColors: true}
+
+	b, _ := tf.Format(WithField("test", "test"))
+	if strings.Contains(string(b), "[0000]") {
+		t.Error("timestamp not expected when DisableTimestamp is true")
+	}
+}
+
+// TODO add tests for sorting etc., this requires a parser for the text
+// formatter output.
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..7bdebedc60bb91b7c24ec373cace3c6d3e9365a7
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -0,0 +1,62 @@
+package logrus
+
+import (
+	"bufio"
+	"io"
+	"runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+	return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+	return NewEntry(logger).WriterLevel(level)
+}
+
+func (entry *Entry) Writer() *io.PipeWriter {
+	return entry.WriterLevel(InfoLevel)
+}
+
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
+	reader, writer := io.Pipe()
+
+	var printFunc func(args ...interface{})
+
+	switch level {
+	case DebugLevel:
+		printFunc = entry.Debug
+	case InfoLevel:
+		printFunc = entry.Info
+	case WarnLevel:
+		printFunc = entry.Warn
+	case ErrorLevel:
+		printFunc = entry.Error
+	case FatalLevel:
+		printFunc = entry.Fatal
+	case PanicLevel:
+		printFunc = entry.Panic
+	default:
+		printFunc = entry.Print
+	}
+
+	go entry.writerScanner(reader, printFunc)
+	runtime.SetFinalizer(writer, writerFinalizer)
+
+	return writer
+}
+
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		printFunc(scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		entry.Errorf("Error while reading from Writer: %s", err)
+	}
+	reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+	writer.Close()
+}
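
A common use of the pipe writer above is to turn a subprocess's output into log entries, one per line, via the scanner goroutine started in WriterLevel; a minimal sketch:

```go
package main

import (
	"os/exec"

	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New()

	cmd := exec.Command("echo", "hello from a subprocess")

	// Each line the subprocess writes becomes one log entry at the
	// chosen level, scanned by the goroutine behind WriterLevel.
	stdout := log.WriterLevel(logrus.InfoLevel)
	stderr := log.WriterLevel(logrus.ErrorLevel)
	defer stdout.Close()
	defer stderr.Close()

	cmd.Stdout = stdout
	cmd.Stderr = stderr
	if err := cmd.Run(); err != nil {
		log.WithError(err).Error("command failed")
	}
}
```
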
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/tar_test.go b/vendor/github.com/vbatts/tar-split/archive/tar/tar_test.go
index d63c072eb9aaa489c5ec76552db060fa50f2a3ef..9ef319a314c1d24a3506bc2489aae20b4d3a082d 100644
--- a/vendor/github.com/vbatts/tar-split/archive/tar/tar_test.go
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/tar_test.go
@@ -94,13 +94,12 @@ func TestRoundTrip(t *testing.T) {
 	var b bytes.Buffer
 	tw := NewWriter(&b)
 	hdr := &Header{
-		Name:    "file.txt",
-		Uid:     1 << 21, // too big for 8 octal digits
-		Size:    int64(len(data)),
-		ModTime: time.Now(),
+		Name: "file.txt",
+		Uid:  1 << 21, // too big for 8 octal digits
+		Size: int64(len(data)),
+		// https://github.com/golang/go/commit/0e3355903d2ebcf5ee9e76096f51ac9a116a9dbb#diff-d7bf2a98d7b57b6ff754ca406f1b7581R105
+		ModTime: time.Now().AddDate(0, 0, 0).Round(1 * time.Second),
 	}
-	// tar only supports second precision.
-	hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
 	if err := tw.WriteHeader(hdr); err != nil {
 		t.Fatalf("tw.WriteHeader: %v", err)
 	}
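
The replacement above leans on time.Round, which differs from the deleted nanosecond-stripping code in two ways: it rounds to the nearest second (half away from zero) rather than truncating, and, like AddDate, it strips Go 1.9's monotonic clock reading, which is what the linked upstream commit addresses. For example:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2017, 11, 1, 12, 0, 0, 600000000, time.UTC) // 600ms past the second
	fmt.Println(t.Round(time.Second))    // 2017-11-01 12:00:01 +0000 UTC (rounded up)
	fmt.Println(t.Truncate(time.Second)) // 2017-11-01 12:00:00 +0000 UTC (what the old code did)
}
```
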
diff --git a/vendor/github.com/vbatts/tar-split/tar_benchmark_test.go b/vendor/github.com/vbatts/tar-split/cmd/tar-split/tar_benchmark_test.go
similarity index 94%
rename from vendor/github.com/vbatts/tar-split/tar_benchmark_test.go
rename to vendor/github.com/vbatts/tar-split/cmd/tar-split/tar_benchmark_test.go
index d946f2af92e77c45ef1bf8e17c5531520b73b4db..f318645398ca2b0d26c2a78ce25cd5e2fdaf36a3 100644
--- a/vendor/github.com/vbatts/tar-split/tar_benchmark_test.go
+++ b/vendor/github.com/vbatts/tar-split/cmd/tar-split/tar_benchmark_test.go
@@ -1,4 +1,4 @@
-package tartest
+package main
 
 import (
 	"io"
@@ -11,7 +11,7 @@ import (
 	ourTar "github.com/vbatts/tar-split/archive/tar"
 )
 
-var testfile = "./archive/tar/testdata/sparse-formats.tar"
+var testfile = "../../archive/tar/testdata/sparse-formats.tar"
 
 func BenchmarkUpstreamTar(b *testing.B) {
 	for n := 0; n < b.N; n++ {
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
index 54ef23aed3660d2b00befd1caff27d33b2ad6a9b..009b3f5d8124e2255b4803ea9007dd8517c38907 100644
--- a/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
@@ -2,7 +2,6 @@ package asm
 
 import (
 	"io"
-	"io/ioutil"
 
 	"github.com/vbatts/tar-split/archive/tar"
 	"github.com/vbatts/tar-split/tar/storage"
@@ -119,20 +118,34 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io
 			}
 		}
 
-		// it is allowable, and not uncommon that there is further padding on the
-		// end of an archive, apart from the expected 1024 null bytes.
-		remainder, err := ioutil.ReadAll(outputRdr)
-		if err != nil && err != io.EOF {
-			pW.CloseWithError(err)
-			return
-		}
-		_, err = p.AddEntry(storage.Entry{
-			Type:    storage.SegmentType,
-			Payload: remainder,
-		})
-		if err != nil {
-			pW.CloseWithError(err)
-			return
+		// It is allowable, and not uncommon that there is further padding on
+		// the end of an archive, apart from the expected 1024 null bytes. We
+		// do this in chunks rather than in one go to avoid cases where a
+		// maliciously crafted tar file tries to trick us into reading many GBs
+		// into memory.
+		const paddingChunkSize = 1024 * 1024
+		var paddingChunk [paddingChunkSize]byte
+		for {
+			var isEOF bool
+			n, err := outputRdr.Read(paddingChunk[:])
+			if err != nil {
+				if err != io.EOF {
+					pW.CloseWithError(err)
+					return
+				}
+				isEOF = true
+			}
+			_, err = p.AddEntry(storage.Entry{
+				Type:    storage.SegmentType,
+				Payload: paddingChunk[:n],
+			})
+			if err != nil {
+				pW.CloseWithError(err)
+				return
+			}
+			if isEOF {
+				break
+			}
 		}
 		pW.Close()
 	}()
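
The chunked-read loop above is the heart of the CVE-2017-14992 fix: trailing padding is consumed in bounded pieces instead of one unbounded ioutil.ReadAll. In isolation the pattern looks like this (process is a hypothetical callback standing in for storage.Packer.AddEntry; unlike the loop above, this sketch skips empty final chunks):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// consumeInChunks reads r to EOF in fixed-size chunks, handing each chunk
// to process, so unbounded input never has to fit in memory at once.
func consumeInChunks(r io.Reader, process func([]byte) error) error {
	const chunkSize = 1024 * 1024
	buf := make([]byte, chunkSize)
	for {
		n, err := r.Read(buf)
		if n > 0 {
			if perr := process(buf[:n]); perr != nil {
				return perr
			}
		}
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
	}
}

func main() {
	src := bytes.NewReader(make([]byte, 3*1024*1024+17))
	total := 0
	err := consumeInChunks(src, func(p []byte) error {
		total += len(p)
		return nil
	})
	fmt.Println(total, err) // 3145745 <nil>
}
```
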
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/disassemble_test.go b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..4e45714c4905af08aaf864ab244e8c1a92d5e5aa
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble_test.go
@@ -0,0 +1,72 @@
+package asm
+
+import (
+	"archive/tar"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// If this test fails, it crashes the test binary due to memory overcommitment.
+func TestLargeJunkPadding(t *testing.T) {
+	pR, pW := io.Pipe()
+
+	// Write a normal tar file into the pipe, then pad it with junk bytes. We
+	// have to do this in a goroutine because we can't store 20GB of junk
+	// in-memory.
+	go func() {
+		// Empty archive.
+		tw := tar.NewWriter(pW)
+		if err := tw.Close(); err != nil {
+			pW.CloseWithError(err)
+			t.Error(err) // Fatal must only be called from the test goroutine
+			return
+		}
+
+		// Write junk.
+		const (
+			junkChunkSize = 64 * 1024 * 1024
+			junkChunkNum  = 20 * 16
+		)
+		devZero, err := os.Open("/dev/zero")
+		if err != nil {
+			pW.CloseWithError(err)
+			t.Error(err)
+			return
+		}
+		defer devZero.Close()
+		for i := 0; i < junkChunkNum; i++ {
+			if i%32 == 0 {
+				fmt.Fprintf(os.Stderr, "[TestLargeJunkPadding] junk chunk #%d/#%d\n", i, junkChunkNum)
+			}
+			if _, err := io.CopyN(pW, devZero, junkChunkSize); err != nil {
+				pW.CloseWithError(err)
+				t.Error(err)
+				return
+			}
+		}
+
+		fmt.Fprintln(os.Stderr, "[TestLargeJunkPadding] junk chunk finished")
+		pW.Close()
+	}()
+
+	// Disassemble our junk file.
+	nilPacker := storage.NewJSONPacker(ioutil.Discard)
+	rdr, err := NewInputTarStream(pR, nilPacker, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Copy the entire rdr.
+	_, err = io.Copy(ioutil.Discard, rdr)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// At this point, if we haven't crashed then we are not vulnerable to
+	// CVE-2017-14992.
+}