diff --git a/.golangci.yaml b/.golangci.yaml index 822693aca..d20b2bbb4 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -10,7 +10,14 @@ linters: - govet - ineffassign - staticcheck + - testifylint - unused linters-settings: # please keep this alphabetized goimports: local-prefixes: go.etcd.io # Put imports beginning with prefix after 3rd-party packages. + testifylint: + disable: + - go-require + enable-all: true + formatter: + require-f-funcs: true diff --git a/allocate_test.go b/allocate_test.go index d8dedfb7b..387f12f89 100644 --- a/allocate_test.go +++ b/allocate_test.go @@ -3,6 +3,9 @@ package bbolt import ( "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.etcd.io/bbolt/internal/common" "go.etcd.io/bbolt/internal/freelist" ) @@ -26,14 +29,11 @@ func TestTx_allocatePageStats(t *testing.T) { prePageCnt := txStats.GetPageCount() allocateCnt := f.FreeCount() - if _, err := tx.allocate(allocateCnt); err != nil { - t.Fatal(err) - } + _, err := tx.allocate(allocateCnt) + require.NoError(t, err) txStats = tx.Stats() - if txStats.GetPageCount() != prePageCnt+int64(allocateCnt) { - t.Errorf("Allocated %d but got %d page in stats", allocateCnt, txStats.GetPageCount()) - } + assert.Equalf(t, prePageCnt+int64(allocateCnt), txStats.GetPageCount(), "Allocated %d but got %d page in stats", allocateCnt, txStats.GetPageCount()) }) } } diff --git a/bucket_test.go b/bucket_test.go index 493d133a7..cd28d65b1 100644 --- a/bucket_test.go +++ b/bucket_test.go @@ -25,61 +25,44 @@ import ( func TestBucket_Get_NonExistent(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if v := b.Get([]byte("foo")); v != nil { - t.Fatal("expected nil value") - } + require.NoError(t, err) + require.Nilf(t, b.Get([]byte("foo")), "expected nil value") return nil - }); err != nil { - 
t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a bucket can read a value that is not flushed yet. func TestBucket_Get_FromNode(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if v := b.Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) { - t.Fatalf("unexpected value: %v", v) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) + v := b.Get([]byte("foo")) + require.Truef(t, bytes.Equal(v, []byte("bar")), "unexpected value: %v", v) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a bucket retrieved via Get() returns a nil. func TestBucket_Get_IncompatibleValue(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } + _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) + require.NoError(t, err) - if tx.Bucket([]byte("widgets")).Get([]byte("foo")) != nil { - t.Fatal("expected nil value") - } + require.Nilf(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")), "expected nil value") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a slice returned from a bucket has a capacity equal to its length. @@ -90,26 +73,22 @@ func TestBucket_Get_Capacity(t *testing.T) { db := btesting.MustCreateDB(t) // Write key to a bucket. 
- if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("bucket")) if err != nil { return err } return b.Put([]byte("key"), []byte("val")) - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Retrieve value and attempt to append to it. - if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { k, v := tx.Bucket([]byte("bucket")).Cursor().First() // Verify capacity. - if len(k) != cap(k) { - t.Fatalf("unexpected key slice capacity: %d", cap(k)) - } else if len(v) != cap(v) { - t.Fatalf("unexpected value slice capacity: %d", cap(v)) - } + require.Equalf(t, len(k), cap(k), "unexpected key slice capacity: %d", cap(k)) + require.Equalf(t, len(v), cap(v), "unexpected value slice capacity: %d", cap(v)) // Ensure slice can be appended to without a segfault. k = append(k, []byte("123")...) @@ -117,56 +96,39 @@ func TestBucket_Get_Capacity(t *testing.T) { _, _ = k, v // to pass ineffassign return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a bucket can write a key/value. func TestBucket_Put(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) v := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - if !bytes.Equal([]byte("bar"), v) { - t.Fatalf("unexpected value: %v", v) - } + require.Truef(t, bytes.Equal([]byte("bar"), v), "unexpected value: %v", v) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a bucket can rewrite a key in the same transaction. 
func TestBucket_Put_Repeat(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("baz")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) + require.NoError(t, b.Put([]byte("foo"), []byte("baz"))) value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - if !bytes.Equal([]byte("baz"), value) { - t.Fatalf("unexpected value: %v", value) - } + require.Truef(t, bytes.Equal([]byte("baz"), value), "unexpected value: %v", value) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a bucket can write a bunch of large values. @@ -174,33 +136,25 @@ func TestBucket_Put_Large(t *testing.T) { db := btesting.MustCreateDB(t) count, factor := 100, 200 - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 1; i < count; i++ { - if err := b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor)))) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for i := 1; i < count; i++ { value := b.Get([]byte(strings.Repeat("0", i*factor))) - if !bytes.Equal(value, []byte(strings.Repeat("X", (count-i)*factor))) { - t.Fatalf("unexpected value: %v", value) - } + require.Truef(t, bytes.Equal(value, []byte(strings.Repeat("X", 
(count-i)*factor))), "unexpected value: %v", value) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a database can perform multiple large appends safely. @@ -215,22 +169,17 @@ func TestDB_Put_VeryLarge(t *testing.T) { db := btesting.MustCreateDB(t) for i := 0; i < n; i += batchN { - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucketIfNotExists([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for j := 0; j < batchN; j++ { k, v := make([]byte, ksize), make([]byte, vsize) binary.BigEndian.PutUint32(k, uint32(i+j)) - if err := b.Put(k, v); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put(k, v)) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } } @@ -238,138 +187,100 @@ func TestDB_Put_VeryLarge(t *testing.T) { func TestBucket_Put_IncompatibleValue(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b0, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - if err := b0.Put([]byte("foo"), []byte("bar")); err != berrors.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } + _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) + require.NoError(t, err) + require.Equalf(t, b0.Put([]byte("foo"), []byte("bar")), berrors.ErrIncompatibleValue, "unexpected error") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a setting a value while the transaction is closed returns an error. 
func TestBucket_Put_Closed(t *testing.T) { db := btesting.MustCreateDB(t) tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Rollback()) - if err := b.Put([]byte("foo"), []byte("bar")); err != berrors.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } + require.Equalf(t, b.Put([]byte("foo"), []byte("bar")), berrors.ErrTxClosed, "unexpected error") } // Ensure that setting a value on a read-only bucket returns an error. func TestBucket_Put_ReadOnly(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) - if err := b.Put([]byte("foo"), []byte("bar")); err != berrors.ErrTxNotWritable { - t.Fatalf("unexpected error: %s", err) - } + require.Equalf(t, b.Put([]byte("foo"), []byte("bar")), berrors.ErrTxNotWritable, "unexpected error") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a bucket can delete an existing key. 
func TestBucket_Delete(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Delete([]byte("foo")); err != nil { - t.Fatal(err) - } - if v := b.Get([]byte("foo")); v != nil { - t.Fatalf("unexpected value: %v", v) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) + require.NoError(t, b.Delete([]byte("foo"))) + require.Nilf(t, b.Get([]byte("foo")), "expected nil value") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that deleting a large set of keys will work correctly. func TestBucket_Delete_Large(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 0; i < 100; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024))); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024)))) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for i := 0; i < 100; i++ { - if err := b.Delete([]byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Delete([]byte(strconv.Itoa(i)))) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for i := 0; i < 100; i++ { - if v := b.Get([]byte(strconv.Itoa(i))); v != nil { - t.Fatalf("unexpected 
value: %v, i=%d", v, i) - } + v := b.Get([]byte(strconv.Itoa(i))) + require.Nilf(t, v, "unexpected value: %v, i=%d", v, i) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Deleting a very large list of keys will cause the freelist to use overflow. @@ -383,217 +294,165 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) { k := make([]byte, 16) // The bigger the pages - the more values we need to write. for i := uint64(0); i < 2*uint64(db.Info().PageSize); i++ { - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucketIfNotExists([]byte("0")) - if err != nil { - t.Fatalf("bucket error: %s", err) - } + require.NoErrorf(t, err, "bucket error: %s", err) for j := uint64(0); j < 1000; j++ { binary.BigEndian.PutUint64(k[:8], i) binary.BigEndian.PutUint64(k[8:], j) - if err := b.Put(k, nil); err != nil { - t.Fatalf("put error: %s", err) - } + require.NoErrorf(t, b.Put(k, nil), "put error") } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Delete all of them in one large transaction - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("0")) c := b.Cursor() for k, _ := c.First(); k != nil; k, _ = c.Next() { - if err := c.Delete(); err != nil { - t.Fatal(err) - } + require.NoError(t, c.Delete()) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Check more than an overflow's worth of pages are freed. stats := db.Stats() freePages := stats.FreePageN + stats.PendingPageN - if freePages <= 0xFFFF { - t.Fatalf("expected more than 0xFFFF free pages, got %v", freePages) - } + require.Greaterf(t, freePages, 0xFFFF, "expected more than 0xFFFF free pages, got %v", freePages) // Free page count should be preserved on reopen. 
db.MustClose() db.MustReopen() - if reopenFreePages := db.Stats().FreePageN; freePages != reopenFreePages { - t.Fatalf("expected %d free pages, got %+v", freePages, db.Stats()) - } - if reopenPendingPages := db.Stats().PendingPageN; reopenPendingPages != 0 { - t.Fatalf("expected no pending pages, got %+v", db.Stats()) - } + reopenFreePages := db.Stats().FreePageN + require.Equalf(t, freePages, reopenFreePages, "expected %d free pages, got %+v", freePages, db.Stats()) + reopenPendingPages := db.Stats().PendingPageN + require.Equalf(t, 0, reopenPendingPages, "expected no pending pages, got %+v", db.Stats()) } // Ensure that deleting of non-existing key is a no-op. func TestBucket_Delete_NonExisting(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if _, err = b.CreateBucket([]byte("nested")); err != nil { - t.Fatal(err) - } + _, err = b.CreateBucket([]byte("nested")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) - if err := b.Delete([]byte("foo")); err != nil { - t.Fatal(err) - } - if b.Bucket([]byte("nested")) == nil { - t.Fatal("nested bucket has been deleted") - } + require.NoError(t, b.Delete([]byte("foo"))) + require.NotNilf(t, b.Bucket([]byte("nested")), "nested bucket has been deleted") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that accessing and updating nested buckets is ok across transactions. func TestBucket_Nested(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { // Create a widgets bucket. 
b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Create a widgets/foo bucket. _, err = b.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Create a widgets/bar key. - if err := b.Put([]byte("bar"), []byte("0000")); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put([]byte("bar"), []byte("0000"))) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) db.MustCheck() // Update widgets/bar. - if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) - if err := b.Put([]byte("bar"), []byte("xxxx")); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put([]byte("bar"), []byte("xxxx"))) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) db.MustCheck() // Cause a split. - if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { var b = tx.Bucket([]byte("widgets")) for i := 0; i < 10000; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) db.MustCheck() // Insert into widgets/foo/baz. - if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { var b = tx.Bucket([]byte("widgets")) - if err := b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy"))) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) db.MustCheck() // Verify. 
- if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { var b = tx.Bucket([]byte("widgets")) - if v := b.Bucket([]byte("foo")).Get([]byte("baz")); !bytes.Equal(v, []byte("yyyy")) { - t.Fatalf("unexpected value: %v", v) - } - if v := b.Get([]byte("bar")); !bytes.Equal(v, []byte("xxxx")) { - t.Fatalf("unexpected value: %v", v) - } + v := b.Bucket([]byte("foo")).Get([]byte("baz")) + require.Truef(t, bytes.Equal(v, []byte("yyyy")), "unexpected value: %v", v) + v = b.Get([]byte("bar")) + require.Truef(t, bytes.Equal(v, []byte("xxxx")), "unexpected value: %v", v) for i := 0; i < 10000; i++ { - if v := b.Get([]byte(strconv.Itoa(i))); !bytes.Equal(v, []byte(strconv.Itoa(i))) { - t.Fatalf("unexpected value: %v", v) - } + v := b.Get([]byte(strconv.Itoa(i))) + require.Truef(t, bytes.Equal(v, []byte(strconv.Itoa(i))), "unexpected value: %v", v) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that deleting a bucket using Delete() returns an error. func TestBucket_Delete_Bucket(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - if err := b.Delete([]byte("foo")); err != berrors.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } + require.NoError(t, err) + _, err = b.CreateBucket([]byte("foo")) + require.NoError(t, err) + require.Equalf(t, b.Delete([]byte("foo")), berrors.ErrIncompatibleValue, "unexpected error") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that deleting a key on a read-only bucket returns an error. 
func TestBucket_Delete_ReadOnly(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.View(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); err != berrors.ErrTxNotWritable { - t.Fatalf("unexpected error: %s", err) - } + err = db.View(func(tx *bolt.Tx) error { + require.Equalf(t, tx.Bucket([]byte("widgets")).Delete([]byte("foo")), berrors.ErrTxNotWritable, "unexpected error") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a deleting value while the transaction is closed returns an error. @@ -601,117 +460,76 @@ func TestBucket_Delete_Closed(t *testing.T) { db := btesting.MustCreateDB(t) tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - if err := b.Delete([]byte("foo")); err != berrors.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } + require.NoError(t, tx.Rollback()) + require.Equalf(t, b.Delete([]byte("foo")), berrors.ErrTxClosed, "unexpected error") } // Ensure that deleting a bucket causes nested buckets to be deleted. 
func TestBucket_DeleteBucket_Nested(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) foo, err := widgets.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) bar, err := foo.CreateBucket([]byte("bar")) - if err != nil { - t.Fatal(err) - } - if err := bar.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } - if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, bar.Put([]byte("baz"), []byte("bat"))) + require.NoError(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed. 
func TestBucket_DeleteBucket_Nested2(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) foo, err := widgets.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) bar, err := foo.CreateBucket([]byte("bar")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := bar.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } + require.NoError(t, bar.Put([]byte("baz"), []byte("bat"))) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { widgets := tx.Bucket([]byte("widgets")) - if widgets == nil { - t.Fatal("expected widgets bucket") - } + require.NotNilf(t, widgets, "expected widgets bucket") foo := widgets.Bucket([]byte("foo")) - if foo == nil { - t.Fatal("expected foo bucket") - } + require.NotNilf(t, foo, "expected foo bucket") bar := foo.Bucket([]byte("bar")) - if bar == nil { - t.Fatal("expected bar bucket") - } + require.NotNilf(t, bar, "expected bar bucket") - if v := bar.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { - t.Fatalf("unexpected value: %v", v) - } - if err := tx.DeleteBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + v := bar.Get([]byte("baz")) + require.Truef(t, bytes.Equal(v, []byte("bat")), "unexpected value: %v", v) + require.NoError(t, tx.DeleteBucket([]byte("widgets"))) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) != nil { - t.Fatal("expected bucket to be deleted") - } + err = db.View(func(tx *bolt.Tx) error { + require.Nilf(t, tx.Bucket([]byte("widgets")), "expected bucket to be deleted") return nil - }); err != nil { - t.Fatal(err) 
- } + }) + require.NoError(t, err) } // Ensure that deleting a child bucket with multiple pages causes all pages to get collected. @@ -719,180 +537,129 @@ func TestBucket_DeleteBucket_Nested2(t *testing.T) { func TestBucket_DeleteBucket_Large(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) foo, err := widgets.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 0; i < 1000; i++ { - if err := foo.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))); err != nil { - t.Fatal(err) - } + require.NoError(t, foo.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i)))) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + err = db.Update(func(tx *bolt.Tx) error { + require.NoError(t, tx.DeleteBucket([]byte("widgets"))) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a simple value retrieved via Bucket() returns a nil. 
func TestBucket_Bucket_IncompatibleValue(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")); b != nil { - t.Fatal("expected nil bucket") - } + require.NoError(t, widgets.Put([]byte("foo"), []byte("bar"))) + require.Nilf(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")), "expected nil bucket") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that creating a bucket on an existing non-bucket key returns an error. func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if _, err := widgets.CreateBucket([]byte("foo")); err != berrors.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } + require.NoError(t, widgets.Put([]byte("foo"), []byte("bar"))) + _, err = widgets.CreateBucket([]byte("foo")) + require.Equalf(t, err, berrors.ErrIncompatibleValue, "unexpected error: %s", err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that deleting a bucket on an existing non-bucket key returns an error. 
func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != berrors.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } + require.NoError(t, err) + require.NoError(t, widgets.Put([]byte("foo"), []byte("bar"))) + require.Equalf(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")), berrors.ErrIncompatibleValue, "unexpected error") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure bucket can set and update its sequence number. func TestBucket_Sequence(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { bkt, err := tx.CreateBucket([]byte("0")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Retrieve sequence. - if v := bkt.Sequence(); v != 0 { - t.Fatalf("unexpected sequence: %d", v) - } + require.Equalf(t, uint64(0), bkt.Sequence(), "unexpected sequence") // Update sequence. - if err := bkt.SetSequence(1000); err != nil { - t.Fatal(err) - } + err = bkt.SetSequence(1000) + require.NoError(t, err) // Read sequence again. - if v := bkt.Sequence(); v != 1000 { - t.Fatalf("unexpected sequence: %d", v) - } + require.Equalf(t, uint64(1000), bkt.Sequence(), "unexpected sequence") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - // Verify sequence in separate transaction. - if err := db.View(func(tx *bolt.Tx) error { - if v := tx.Bucket([]byte("0")).Sequence(); v != 1000 { - t.Fatalf("unexpected sequence: %d", v) - } + // Verify sequence in separate transaction. 
+ err = db.View(func(tx *bolt.Tx) error { + v := tx.Bucket([]byte("0")).Sequence() + require.Equalf(t, uint64(1000), v, "unexpected sequence: %d", v) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a bucket can return an autoincrementing sequence. func TestBucket_NextSequence(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) woojits, err := tx.CreateBucket([]byte("woojits")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Make sure sequence increments. - if seq, err := widgets.NextSequence(); err != nil { - t.Fatal(err) - } else if seq != 1 { - t.Fatalf("unexpecte sequence: %d", seq) - } + seq, err := widgets.NextSequence() + require.NoError(t, err) + require.Equalf(t, uint64(1), seq, "unexpected sequence: %d", seq) - if seq, err := widgets.NextSequence(); err != nil { - t.Fatal(err) - } else if seq != 2 { - t.Fatalf("unexpected sequence: %d", seq) - } + seq, err = widgets.NextSequence() + require.NoError(t, err) + require.Equalf(t, uint64(2), seq, "unexpected sequence: %d", seq) // Buckets should be separate. 
- if seq, err := woojits.NextSequence(); err != nil { - t.Fatal(err) - } else if seq != 1 { - t.Fatalf("unexpected sequence: %d", 1) - } + seq, err = woojits.NextSequence() + require.NoError(t, err) + require.Equalf(t, uint64(1), seq, "unexpected sequence: %d", seq) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a bucket will persist an autoincrementing sequence even if its @@ -901,78 +668,58 @@ func TestBucket_NextSequence(t *testing.T) { func TestBucket_NextSequence_Persist(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.Bucket([]byte("widgets")).NextSequence(); err != nil { - t.Fatal(err) - } + err = db.Update(func(tx *bolt.Tx) error { + _, err := tx.Bucket([]byte("widgets")).NextSequence() + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { seq, err := tx.Bucket([]byte("widgets")).NextSequence() - if err != nil { - t.Fatalf("unexpected error: %s", err) - } else if seq != 2 { - t.Fatalf("unexpected sequence: %d", seq) - } + require.NoErrorf(t, err, "unexpected error") + require.Equalf(t, uint64(2), seq, "unexpected sequence: %d", seq) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that retrieving the next sequence on a read-only bucket returns an error. 
func TestBucket_NextSequence_ReadOnly(t *testing.T) {
 	db := btesting.MustCreateDB(t)
 
-	if err := db.Update(func(tx *bolt.Tx) error {
-		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
-			t.Fatal(err)
-		}
+	err := db.Update(func(tx *bolt.Tx) error {
+		_, err := tx.CreateBucket([]byte("widgets"))
+		require.NoError(t, err)
 		return nil
-	}); err != nil {
-		t.Fatal(err)
-	}
+	})
+	require.NoError(t, err)
 
-	if err := db.View(func(tx *bolt.Tx) error {
+	err = db.View(func(tx *bolt.Tx) error {
 		_, err := tx.Bucket([]byte("widgets")).NextSequence()
-		if err != berrors.ErrTxNotWritable {
-			t.Fatalf("unexpected error: %s", err)
-		}
+		require.ErrorIsf(t, err, berrors.ErrTxNotWritable, "unexpected error: %s", err)
 		return nil
-	}); err != nil {
-		t.Fatal(err)
-	}
+	})
+	require.NoError(t, err)
 }
 
 // Ensure that retrieving the next sequence for a bucket on a closed database return an error.
 func TestBucket_NextSequence_Closed(t *testing.T) {
 	db := btesting.MustCreateDB(t)
 	tx, err := db.Begin(true)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	b, err := tx.CreateBucket([]byte("widgets"))
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := tx.Rollback(); err != nil {
-		t.Fatal(err)
-	}
-	if _, err := b.NextSequence(); err != berrors.ErrTxClosed {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
+	require.NoError(t, tx.Rollback())
+	_, err = b.NextSequence()
+	require.ErrorIs(t, err, berrors.ErrTxClosed)
 }
 
 // Ensure a user can loop over all key/value pairs in a bucket.
@@ -997,13 +744,13 @@ func TestBucket_ForEach(t *testing.T) { items = append(items, kv{k: k, v: v}) return nil }) - assert.NoErrorf(t, err, "b.ForEach failed") - assert.Equal(t, expectedItems, items, "what we iterated (ForEach) is not what we put") + require.NoErrorf(t, err, "b.ForEach failed") + assert.Equalf(t, expectedItems, items, "what we iterated (ForEach) is not what we put") } err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - require.NoError(t, err, "bucket creation failed") + require.NoErrorf(t, err, "bucket creation failed") require.NoErrorf(t, b.Put([]byte("foo"), []byte("0000")), "put 'foo' failed") require.NoErrorf(t, b.Put([]byte("baz"), []byte("0001")), "put 'baz' failed") @@ -1018,7 +765,7 @@ func TestBucket_ForEach(t *testing.T) { require.NoErrorf(t, err, "db.Update failed") err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) - require.NotNil(t, b, "bucket opening failed") + require.NotNilf(t, b, "bucket opening failed") verifyReads(b) return nil }) @@ -1039,13 +786,13 @@ func TestBucket_ForEachBucket(t *testing.T) { items = append(items, k) return nil }) - assert.NoErrorf(t, err, "b.ForEach failed") - assert.Equal(t, expectedItems, items, "what we iterated (ForEach) is not what we put") + require.NoErrorf(t, err, "b.ForEach failed") + assert.Equalf(t, expectedItems, items, "what we iterated (ForEach) is not what we put") } err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - require.NoError(t, err, "bucket creation failed") + require.NoErrorf(t, err, "bucket creation failed") require.NoErrorf(t, b.Put([]byte("foo"), []byte("0000")), "put 'foo' failed") _, err = b.CreateBucket([]byte("zsubbucket")) @@ -1059,10 +806,10 @@ func TestBucket_ForEachBucket(t *testing.T) { return nil }) - assert.NoErrorf(t, err, "db.Update failed") + require.NoErrorf(t, err, "db.Update failed") err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) - 
require.NotNil(t, b, "bucket opening failed") + require.NotNilf(t, b, "bucket opening failed") verifyReads(b) return nil }) @@ -1078,13 +825,13 @@ func TestBucket_ForEachBucket_NoBuckets(t *testing.T) { items = append(items, k) return nil }) - assert.NoErrorf(t, err, "b.ForEach failed") + require.NoErrorf(t, err, "b.ForEach failed") assert.Emptyf(t, items, "what we iterated (ForEach) is not what we put") } err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - require.NoError(t, err, "bucket creation failed") + require.NoErrorf(t, err, "bucket creation failed") require.NoErrorf(t, b.Put([]byte("foo"), []byte("0000")), "put 'foo' failed") require.NoErrorf(t, err, "creation of subbucket failed") @@ -1099,7 +846,7 @@ func TestBucket_ForEachBucket_NoBuckets(t *testing.T) { err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) - require.NotNil(t, b, "bucket opening failed") + require.NotNilf(t, b, "bucket opening failed") verifyReads(b) return nil }) @@ -1109,39 +856,27 @@ func TestBucket_ForEachBucket_NoBuckets(t *testing.T) { // Ensure a database can stop iteration early. 
func TestBucket_ForEach_ShortCircuit(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte("0000")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("0000")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("0000")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("bar"), []byte("0000"))) + require.NoError(t, b.Put([]byte("baz"), []byte("0000"))) + require.NoError(t, b.Put([]byte("foo"), []byte("0000"))) var index int - if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + err = tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { index++ if bytes.Equal(k, []byte("baz")) { return errors.New("marker") } return nil - }); err == nil || err.Error() != "marker" { - t.Fatalf("unexpected error: %s", err) - } - if index != 2 { - t.Fatalf("unexpected index: %d", index) - } + }) + require.EqualErrorf(t, err, "marker", "unexpected error: %s", err) + require.Equalf(t, 2, index, "unexpected index: %d", index) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that looping over a bucket on a closed database returns an error. 
@@ -1149,60 +884,40 @@ func TestBucket_ForEach_Closed(t *testing.T) {
 	db := btesting.MustCreateDB(t)
 
 	tx, err := db.Begin(true)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	b, err := tx.CreateBucket([]byte("widgets"))
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
-	if err := tx.Rollback(); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, tx.Rollback())
 
-	if err := b.ForEach(func(k, v []byte) error { return nil }); err != berrors.ErrTxClosed {
-		t.Fatalf("unexpected error: %s", err)
-	}
+	require.ErrorIsf(t, b.ForEach(func(k, v []byte) error { return nil }), berrors.ErrTxClosed, "unexpected error")
 }
 
 // Ensure that an error is returned when inserting with an empty key.
 func TestBucket_Put_EmptyKey(t *testing.T) {
 	db := btesting.MustCreateDB(t)
 
-	if err := db.Update(func(tx *bolt.Tx) error {
+	err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
-		if err != nil {
-			t.Fatal(err)
-		}
-		if err := b.Put([]byte(""), []byte("bar")); err != berrors.ErrKeyRequired {
-			t.Fatalf("unexpected error: %s", err)
-		}
-		if err := b.Put(nil, []byte("bar")); err != berrors.ErrKeyRequired {
-			t.Fatalf("unexpected error: %s", err)
-		}
+		require.NoError(t, err)
+		require.ErrorIsf(t, b.Put([]byte(""), []byte("bar")), berrors.ErrKeyRequired, "unexpected error")
+		require.ErrorIsf(t, b.Put(nil, []byte("bar")), berrors.ErrKeyRequired, "unexpected error")
 		return nil
-	}); err != nil {
-		t.Fatal(err)
-	}
+	})
+	require.NoError(t, err)
 }
 
 // Ensure that an error is returned when inserting with a key that's too large.
func TestBucket_Put_KeyTooLarge(t *testing.T) {
 	db := btesting.MustCreateDB(t)
-	if err := db.Update(func(tx *bolt.Tx) error {
+	err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
-		if err != nil {
-			t.Fatal(err)
-		}
-		if err := b.Put(make([]byte, 32769), []byte("bar")); err != berrors.ErrKeyTooLarge {
-			t.Fatalf("unexpected error: %s", err)
-		}
+		require.NoError(t, err)
+		require.ErrorIsf(t, b.Put(make([]byte, 32769), []byte("bar")), berrors.ErrKeyTooLarge, "unexpected error")
 		return nil
-	}); err != nil {
-		t.Fatal(err)
-	}
+	})
+	require.NoError(t, err)
 }
 
 // Ensure that an error is returned when inserting a value that's too large.
@@ -1214,18 +929,13 @@ func TestBucket_Put_ValueTooLarge(t *testing.T) {
 
 	db := btesting.MustCreateDB(t)
 
-	if err := db.Update(func(tx *bolt.Tx) error {
+	err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
-		if err != nil {
-			t.Fatal(err)
-		}
-		if err := b.Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)); err != berrors.ErrValueTooLarge {
-			t.Fatalf("unexpected error: %s", err)
-		}
+		require.NoError(t, err)
+		require.ErrorIsf(t, b.Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)), berrors.ErrValueTooLarge, "unexpected error")
 		return nil
-	}); err != nil {
-		t.Fatal(err)
-	}
+	})
+	require.NoError(t, err)
 }
 
 // Ensure a bucket can calculate stats.
@@ -1239,29 +949,21 @@ func TestBucket_Stats(t *testing.T) {
 	// Add bucket with fewer keys but one big value.
bigKey := []byte("really-big-value") for i := 0; i < 500; i++ { - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucketIfNotExists([]byte("woojits")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i)))) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } longKeyLength := 10*db.Info().PageSize + 17 - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("woojits")).Put(bigKey, []byte(strings.Repeat("*", longKeyLength))); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + require.NoError(t, tx.Bucket([]byte("woojits")).Put(bigKey, []byte(strings.Repeat("*", longKeyLength)))) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) db.MustCheck() @@ -1322,18 +1024,17 @@ func TestBucket_Stats(t *testing.T) { InlineBucketInuse: 0}, } - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { stats := tx.Bucket([]byte("woojits")).Stats() t.Logf("Stats: %#v", stats) if expected, ok := pageSize2stats[db.Info().PageSize]; ok { - assert.EqualValues(t, expected, stats, "stats differs from expectations") + assert.Equalf(t, expected, stats, "stats differs from expectations") } else { t.Skipf("No expectations for page size: %d", db.Info().PageSize) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure a bucket with random insertion utilizes fill percentage correctly. 
@@ -1351,240 +1052,160 @@ func TestBucket_Stats_RandomFill(t *testing.T) { var count int rand := rand.New(rand.NewSource(42)) for _, i := range rand.Perm(1000) { - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucketIfNotExists([]byte("woojits")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) b.FillPercent = 0.9 for _, j := range rand.Perm(100) { index := (j * 10000) + i - if err := b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000"))) count++ } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } db.MustCheck() - if err := db.View(func(tx *bolt.Tx) error { + err := db.View(func(tx *bolt.Tx) error { stats := tx.Bucket([]byte("woojits")).Stats() - if stats.KeyN != 100000 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } + require.Equalf(t, 100000, stats.KeyN, "unexpected KeyN: %d", stats.KeyN) - if stats.BranchPageN != 98 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.BranchInuse != 130984 { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } else if stats.BranchAlloc != 401408 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } + require.Equalf(t, 98, stats.BranchPageN, "unexpected BranchPageN: %d", stats.BranchPageN) + require.Equalf(t, 0, stats.BranchOverflowN, "unexpected BranchOverflowN: %d", stats.BranchOverflowN) + require.Equalf(t, 130984, stats.BranchInuse, "unexpected BranchInuse: %d", stats.BranchInuse) + require.Equalf(t, 401408, stats.BranchAlloc, "unexpected BranchAlloc: %d", stats.BranchAlloc) - if stats.LeafPageN != 3412 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 0 { - 
t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.LeafInuse != 4742482 { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } else if stats.LeafAlloc != 13975552 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } + require.Equalf(t, 3412, stats.LeafPageN, "unexpected LeafPageN: %d", stats.LeafPageN) + require.Equalf(t, 0, stats.LeafOverflowN, "unexpected LeafOverflowN: %d", stats.LeafOverflowN) + require.Equalf(t, 4742482, stats.LeafInuse, "unexpected LeafInuse: %d", stats.LeafInuse) + require.Equalf(t, 13975552, stats.LeafAlloc, "unexpected LeafAlloc: %d", stats.LeafAlloc) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure a bucket can calculate stats. func TestBucket_Stats_Small(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { // Add a bucket that fits on a single root leaf. b, err := tx.CreateBucket([]byte("whozawhats")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) db.MustCheck() - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("whozawhats")) stats := b.Stats() - if stats.BranchPageN != 0 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.LeafPageN != 0 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 0 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.KeyN != 1 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } else if stats.Depth != 1 { - t.Fatalf("unexpected Depth: %d", 
stats.Depth) - } else if stats.BranchInuse != 0 { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } else if stats.LeafInuse != 0 { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } + require.Equalf(t, 0, stats.BranchPageN, "unexpected BranchPageN: %d", stats.BranchPageN) + require.Equalf(t, 0, stats.BranchOverflowN, "unexpected BranchOverflowN: %d", stats.BranchOverflowN) + require.Equalf(t, 0, stats.LeafPageN, "unexpected LeafPageN: %d", stats.LeafPageN) + require.Equalf(t, 0, stats.LeafOverflowN, "unexpected LeafOverflowN: %d", stats.LeafOverflowN) + require.Equalf(t, 1, stats.KeyN, "unexpected KeyN: %d", stats.KeyN) + require.Equalf(t, 1, stats.Depth, "unexpected Depth: %d", stats.Depth) + require.Equalf(t, 0, stats.BranchInuse, "unexpected BranchInuse: %d", stats.BranchInuse) + require.Equalf(t, 0, stats.LeafInuse, "unexpected LeafInuse: %d", stats.LeafInuse) if db.Info().PageSize == 4096 { - if stats.BranchAlloc != 0 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } else if stats.LeafAlloc != 0 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } + require.Equalf(t, 0, stats.BranchAlloc, "unexpected BranchAlloc: %d", stats.BranchAlloc) + require.Equalf(t, 0, stats.LeafAlloc, "unexpected LeafAlloc: %d", stats.LeafAlloc) } - if stats.BucketN != 1 { - t.Fatalf("unexpected BucketN: %d", stats.BucketN) - } else if stats.InlineBucketN != 1 { - t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) - } else if stats.InlineBucketInuse != 16+16+6 { - t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) - } + require.Equalf(t, 1, stats.BucketN, "unexpected BucketN: %d", stats.BucketN) + require.Equalf(t, 1, stats.InlineBucketN, "unexpected InlineBucketN: %d", stats.InlineBucketN) + require.Equalf(t, 16+16+6, stats.InlineBucketInuse, "unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } func 
TestBucket_Stats_EmptyBucket(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { // Add a bucket that fits on a single root leaf. - if _, err := tx.CreateBucket([]byte("whozawhats")); err != nil { - t.Fatal(err) - } + _, err := tx.CreateBucket([]byte("whozawhats")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) db.MustCheck() - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("whozawhats")) stats := b.Stats() - if stats.BranchPageN != 0 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.LeafPageN != 0 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 0 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.KeyN != 0 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } else if stats.Depth != 1 { - t.Fatalf("unexpected Depth: %d", stats.Depth) - } else if stats.BranchInuse != 0 { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } else if stats.LeafInuse != 0 { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } + require.Equalf(t, 0, stats.BranchPageN, "unexpected BranchPageN: %d", stats.BranchPageN) + require.Equalf(t, 0, stats.BranchOverflowN, "unexpected BranchOverflowN: %d", stats.BranchOverflowN) + require.Equalf(t, 0, stats.LeafPageN, "unexpected LeafPageN: %d", stats.LeafPageN) + require.Equalf(t, 0, stats.LeafOverflowN, "unexpected LeafOverflowN: %d", stats.LeafOverflowN) + require.Equalf(t, 0, stats.KeyN, "unexpected KeyN: %d", stats.KeyN) + require.Equalf(t, 1, stats.Depth, "unexpected Depth: %d", stats.Depth) + require.Equalf(t, 0, stats.BranchInuse, "unexpected BranchInuse: %d", stats.BranchInuse) + require.Equalf(t, 0, 
stats.LeafInuse, "unexpected LeafInuse: %d", stats.LeafInuse) if db.Info().PageSize == 4096 { - if stats.BranchAlloc != 0 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } else if stats.LeafAlloc != 0 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } + require.Equalf(t, 0, stats.BranchAlloc, "unexpected BranchAlloc: %d", stats.BranchAlloc) + require.Equalf(t, 0, stats.LeafAlloc, "unexpected LeafAlloc: %d", stats.LeafAlloc) } - if stats.BucketN != 1 { - t.Fatalf("unexpected BucketN: %d", stats.BucketN) - } else if stats.InlineBucketN != 1 { - t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) - } else if stats.InlineBucketInuse != 16 { - t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) - } + require.Equalf(t, 1, stats.BucketN, "unexpected BucketN: %d", stats.BucketN) + require.Equalf(t, 1, stats.InlineBucketN, "unexpected InlineBucketN: %d", stats.InlineBucketN) + require.Equalf(t, 16, stats.InlineBucketInuse, "unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure a bucket can calculate stats. 
func TestBucket_Stats_Nested(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 0; i < 100; i++ { - if err := b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i)))) } bar, err := b.CreateBucket([]byte("bar")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 0; i < 10; i++ { - if err := bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } + require.NoError(t, bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))) } baz, err := bar.CreateBucket([]byte("baz")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 0; i < 10; i++ { - if err := baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } + require.NoError(t, baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) db.MustCheck() - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("foo")) stats := b.Stats() - if stats.BranchPageN != 0 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.LeafPageN != 2 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 0 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.KeyN != 122 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } else if stats.Depth != 3 { - t.Fatalf("unexpected Depth: %d", stats.Depth) - } else if stats.BranchInuse != 0 { - t.Fatalf("unexpected BranchInuse: 
%d", stats.BranchInuse) - } + require.Equalf(t, 0, stats.BranchPageN, "unexpected BranchPageN: %d", stats.BranchPageN) + require.Equalf(t, 0, stats.BranchOverflowN, "unexpected BranchOverflowN: %d", stats.BranchOverflowN) + require.Equalf(t, 2, stats.LeafPageN, "unexpected LeafPageN: %d", stats.LeafPageN) + require.Equalf(t, 0, stats.LeafOverflowN, "unexpected LeafOverflowN: %d", stats.LeafOverflowN) + require.Equalf(t, 122, stats.KeyN, "unexpected KeyN: %d", stats.KeyN) + require.Equalf(t, 3, stats.Depth, "unexpected Depth: %d", stats.Depth) + require.Equalf(t, 0, stats.BranchInuse, "unexpected BranchInuse: %d", stats.BranchInuse) foo := 16 // foo (pghdr) foo += 101 * 16 // foo leaf elements @@ -1600,30 +1221,20 @@ func TestBucket_Stats_Nested(t *testing.T) { baz += 10 * 16 // baz leaf elements baz += 10 + 10 // baz leaf key/values - if stats.LeafInuse != foo+bar+baz { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } + require.Equalf(t, stats.LeafInuse, foo+bar+baz, "unexpected LeafInuse: %d", stats.LeafInuse) if db.Info().PageSize == 4096 { - if stats.BranchAlloc != 0 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } else if stats.LeafAlloc != 8192 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } + require.Equalf(t, 0, stats.BranchAlloc, "unexpected BranchAlloc: %d", stats.BranchAlloc) + require.Equalf(t, 8192, stats.LeafAlloc, "unexpected LeafAlloc: %d", stats.LeafAlloc) } - if stats.BucketN != 3 { - t.Fatalf("unexpected BucketN: %d", stats.BucketN) - } else if stats.InlineBucketN != 1 { - t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) - } else if stats.InlineBucketInuse != baz { - t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) - } + require.Equalf(t, 3, stats.BucketN, "unexpected BucketN: %d", stats.BucketN) + require.Equalf(t, 1, stats.InlineBucketN, "unexpected InlineBucketN: %d", stats.InlineBucketN) + require.Equalf(t, stats.InlineBucketInuse, baz, "unexpected InlineBucketInuse: 
%d", stats.InlineBucketInuse) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } func TestBucket_Inspect(t *testing.T) { @@ -1702,8 +1313,7 @@ func TestBucket_Inspect(t *testing.T) { if item.b != nil { for i := 0; i < item.bs.KeyN; i++ { - err := item.b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))) - require.NoError(t, err) + require.NoError(t, item.b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i)))) } for _, child := range item.bs.Children { @@ -1742,21 +1352,16 @@ func TestBucket_Stats_Large(t *testing.T) { var index int for i := 0; i < 100; i++ { // Add bucket with lots of keys. - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucketIfNotExists([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 0; i < 1000; i++ { - if err := b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index)))) index++ } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } db.MustCheck() @@ -1806,18 +1411,17 @@ func TestBucket_Stats_Large(t *testing.T) { InlineBucketInuse: 0}, } - if err := db.View(func(tx *bolt.Tx) error { + err := db.View(func(tx *bolt.Tx) error { stats := tx.Bucket([]byte("widgets")).Stats() t.Logf("Stats: %#v", stats) if expected, ok := pageSize2stats[db.Info().PageSize]; ok { - assert.EqualValues(t, expected, stats, "stats differs from expectations") + assert.Equalf(t, expected, stats, "stats differs from expectations") } else { t.Skipf("No expectations for page size: %d", db.Info().PageSize) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a bucket can write random keys and values across multiple transactions. 
@@ -1827,34 +1431,31 @@ func TestBucket_Put_Single(t *testing.T) { } index := 0 - if err := quick.Check(func(items testdata) bool { + err := quick.Check(func(items testdata) bool { db := btesting.MustCreateDB(t) defer db.MustClose() m := make(map[string][]byte) - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) for _, item := range items { - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil { panic("put error: " + err.Error()) } m[string(item.Key)] = item.Value return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Verify all key/values so far. - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { i := 0 for k, v := range m { value := tx.Bucket([]byte("widgets")).Get([]byte(k)) @@ -1866,16 +1467,14 @@ func TestBucket_Put_Single(t *testing.T) { i++ } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } index++ return true - }, qconfig()); err != nil { - t.Error(err) - } + }, qconfig()) + assert.NoError(t, err) } // Ensure that a transaction can insert multiple key/value pairs at once. @@ -1884,34 +1483,29 @@ func TestBucket_Put_Multiple(t *testing.T) { t.Skip("skipping test in short mode.") } - if err := quick.Check(func(items testdata) bool { + err := quick.Check(func(items testdata) bool { db := btesting.MustCreateDB(t) defer db.MustClose() // Bulk insert all values. 
- if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for _, item := range items { - if err := b.Put(item.Key, item.Value); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put(item.Key, item.Value)) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Verify all items exist. - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for _, item := range items { value := b.Get(item.Key) @@ -1921,14 +1515,12 @@ func TestBucket_Put_Multiple(t *testing.T) { } } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) return true - }, qconfig()); err != nil { - t.Error(err) - } + }, qconfig()) + assert.NoError(t, err) } // Ensure that a transaction can delete all key/value pairs and return to a single leaf page. @@ -1937,58 +1529,49 @@ func TestBucket_Delete_Quick(t *testing.T) { t.Skip("skipping test in short mode.") } - if err := quick.Check(func(items testdata) bool { + err := quick.Check(func(items testdata) bool { db := btesting.MustCreateDB(t) defer db.MustClose() // Bulk insert all values. 
- if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for _, item := range items { - if err := b.Put(item.Key, item.Value); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put(item.Key, item.Value)) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Remove items one at a time and check consistency. for _, item := range items { - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("widgets")).Delete(item.Key) - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Anything before our deletion index should be nil. - if err := db.View(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + err = db.View(func(tx *bolt.Tx) error { + err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3)) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) return true - }, qconfig()); err != nil { - t.Error(err) - } + }, qconfig()) + assert.NoError(t, err) } func BenchmarkBucket_CreateBucketIfNotExists(b *testing.B) { diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index 727b38f55..4af480fc5 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -33,9 +33,7 @@ func TestInfoCommand_Run(t *testing.T) { // Run the info command. 
m := NewMain() - if err := m.Run("info", db.Path()); err != nil { - t.Fatal(err) - } + require.NoError(t, m.Run("info", db.Path())) } // Ensure the "stats" command executes correctly with an empty database. @@ -72,11 +70,8 @@ func TestStatsCommand_Run_EmptyDatabase(t *testing.T) { // Run the command. m := NewMain() - if err := m.Run("stats", db.Path()); err != nil { - t.Fatal(err) - } else if m.Stdout.String() != exp { - t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) - } + require.NoError(t, m.Run("stats", db.Path())) + require.Equalf(t, exp, m.Stdout.String(), "unexpected stdout") } func TestDumpCommand_Run(t *testing.T) { @@ -88,11 +83,8 @@ func TestDumpCommand_Run(t *testing.T) { exp := `0000010 edda 0ced 0200 0000 0010 0000 0000 0000` m := NewMain() - err := m.Run("dump", db.Path(), "0") - require.NoError(t, err) - if !strings.Contains(m.Stdout.String(), exp) { - t.Fatalf("unexpected stdout:\n%s\n", m.Stdout.String()) - } + require.NoError(t, m.Run("dump", db.Path(), "0")) + require.Containsf(t, m.Stdout.String(), exp, "unexpected stdout") } func TestPageCommand_Run(t *testing.T) { @@ -115,11 +107,8 @@ func TestPageCommand_Run(t *testing.T) { "Checksum: 07516e114689fdee\n\n" m := NewMain() - err := m.Run("page", db.Path(), "0") - require.NoError(t, err) - if m.Stdout.String() != exp { - t.Fatalf("unexpected stdout:\n%s\n%s", m.Stdout.String(), exp) - } + require.NoError(t, m.Run("page", db.Path(), "0")) + require.Equalf(t, exp, m.Stdout.String(), "unexpected stdout") } func TestPageItemCommand_Run(t *testing.T) { @@ -205,7 +194,7 @@ func TestStatsCommand_Run(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { // Create "foo" bucket. 
b, err := tx.CreateBucket([]byte("foo")) if err != nil { @@ -238,9 +227,8 @@ func TestStatsCommand_Run(t *testing.T) { } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) db.Close() defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) @@ -267,18 +255,15 @@ func TestStatsCommand_Run(t *testing.T) { // Run the command. m := NewMain() - if err := m.Run("stats", db.Path()); err != nil { - t.Fatal(err) - } else if m.Stdout.String() != exp { - t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) - } + require.NoError(t, m.Run("stats", db.Path())) + require.Equalf(t, exp, m.Stdout.String(), "unexpected stdout") } // Ensure the "buckets" command can print a list of buckets. func TestBucketsCommand_Run(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { for _, name := range []string{"foo", "bar", "baz"} { _, err := tx.CreateBucket([]byte(name)) if err != nil { @@ -286,9 +271,8 @@ func TestBucketsCommand_Run(t *testing.T) { } } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) db.Close() defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) @@ -297,11 +281,8 @@ func TestBucketsCommand_Run(t *testing.T) { // Run the command. m := NewMain() - if err := m.Run("buckets", db.Path()); err != nil { - t.Fatal(err) - } else if actual := m.Stdout.String(); actual != expected { - t.Fatalf("unexpected stdout:\n\n%s", actual) - } + require.NoError(t, m.Run("buckets", db.Path())) + require.Equalf(t, expected, m.Stdout.String(), "unexpected stdout") } // Ensure the "keys" command can print a list of keys for a bucket. 
@@ -363,10 +344,8 @@ func TestKeysCommand_Run(t *testing.T) { t.Log("running Keys cmd") m := NewMain() - kErr := m.Run("keys", db.Path(), tc.testBucket) - require.NoError(t, kErr) - actual := m.Stdout.String() - assert.Equal(t, tc.expected, actual) + require.NoError(t, m.Run("keys", db.Path(), tc.testBucket)) + assert.Equal(t, tc.expected, m.Stdout.String()) }) } } @@ -400,7 +379,7 @@ func TestGetCommand_Run(t *testing.T) { t.Run(tc.name, func(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte(tc.testBucket)) if err != nil { return err @@ -416,20 +395,16 @@ func TestGetCommand_Run(t *testing.T) { } } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) db.Close() defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) // Run the command. m := NewMain() - if err := m.Run("get", db.Path(), tc.testBucket, tc.testKey); err != nil { - t.Fatal(err) - } - actual := m.Stdout.String() - assert.Equal(t, tc.expectedValue, actual) + require.NoError(t, m.Run("get", db.Path(), tc.testBucket, tc.testKey)) + assert.Equal(t, tc.expectedValue, m.Stdout.String()) }) } } @@ -479,9 +454,7 @@ func TestBenchCommand_Run(t *testing.T) { // Run the command. m := NewMain() args := append([]string{"bench"}, test.args...) 
- if err := m.Run(args...); err != nil { - t.Fatal(err) - } + require.NoError(t, m.Run(args...)) stderr := m.Stderr.String() stdout := m.Stdout.String() @@ -489,9 +462,7 @@ func TestBenchCommand_Run(t *testing.T) { t.Fatal(fmt.Errorf("benchmark result does not contain read/write start output:\n%s", stderr)) } - if strings.Contains(stderr, "iter mismatch") { - t.Fatal(fmt.Errorf("found iter mismatch in stdout:\n%s", stderr)) - } + require.NotContainsf(t, stderr, "iter mismatch", "found iter mismatch in stdout:\n%s", stderr) if !strings.Contains(stdout, "# Write") || !strings.Contains(stdout, "# Read") { t.Fatal(fmt.Errorf("benchmark result does not contain read/write output:\n%s", stdout)) @@ -549,7 +520,7 @@ func TestCompactCommand_Run(t *testing.T) { // fill the db db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { n := 2 + rand.Intn(5) for i := 0; i < n; i++ { k := []byte(fmt.Sprintf("b%d", i)) @@ -565,12 +536,11 @@ func TestCompactCommand_Run(t *testing.T) { } } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // make the db grow by adding large values, and delete them. 
- if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucketIfNotExists([]byte("large_vals")) if err != nil { return err @@ -587,10 +557,9 @@ func TestCompactCommand_Run(t *testing.T) { } } return nil - }); err != nil { - t.Fatal(err) - } - if err := db.Update(func(tx *bolt.Tx) error { + }) + require.NoError(t, err) + err = db.Update(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("large_vals")).Cursor() for k, _ := c.First(); k != nil; k, _ = c.Next() { if err := c.Delete(); err != nil { @@ -598,37 +567,24 @@ func TestCompactCommand_Run(t *testing.T) { } } return tx.DeleteBucket([]byte("large_vals")) - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) db.Close() dbChk, err := chkdb(db.Path()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) m := NewMain() - if err := m.Run("compact", "-o", dstdb.Path(), db.Path()); err != nil { - t.Fatal(err) - } + require.NoError(t, m.Run("compact", "-o", dstdb.Path(), db.Path())) dbChkAfterCompact, err := chkdb(db.Path()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) dstdbChk, err := chkdb(dstdb.Path()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if !bytes.Equal(dbChk, dbChkAfterCompact) { - t.Error("the original db has been touched") - } - if !bytes.Equal(dbChk, dstdbChk) { - t.Error("the compacted db data isn't the same than the original db") - } + assert.Truef(t, bytes.Equal(dbChk, dbChkAfterCompact), "the original db has been touched") + assert.Truef(t, bytes.Equal(dbChk, dstdbChk), "the compacted db data isn't the same than the original db") } func TestCommands_Run_NoArgs(t *testing.T) { diff --git a/concurrent_test.go b/concurrent_test.go index 07c0c1f77..1393f6162 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -17,6 +17,7 @@ import ( "time" "unicode/utf8" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" @@ -197,14 +198,10 @@ func 
concurrentReadAndWrite(t *testing.T, testDuration) t.Log("Analyzing the history records.") - if err := validateSequential(records); err != nil { - t.Errorf("The history records are not sequential:\n %v", err) - } + require.NoErrorf(t, validateSequential(records), "The history records are not sequential") t.Log("Checking database consistency.") - if err := checkConsistency(t, db); err != nil { - t.Errorf("The data isn't consistency: %v", err) - } + assert.NoErrorf(t, checkConsistency(t, db), "The data isn't consistency") panicked = false // TODO (ahrtr): @@ -225,8 +222,7 @@ func mustReOpenDB(t *testing.T, db *bolt.DB, o *bolt.Options) *bolt.DB { f := db.Path() t.Logf("Closing bbolt DB at: %s", f) - err := db.Close() - require.NoError(t, err) + require.NoError(t, db.Close()) return mustOpenDB(t, f, o) } @@ -311,9 +307,7 @@ func runWorkers(t *testing.T, close(stopCh) t.Log("Waiting for all workers to finish.") - if err := g.Wait(); err != nil { - t.Errorf("Received error: %v", err) - } + assert.NoErrorf(t, g.Wait(), "Received error") return rs } @@ -547,9 +541,7 @@ func saveDataIfFailed(t *testing.T, db *bolt.DB, rs historyRecords, force bool) if t.Failed() || force { t.Log("Saving data...") dbPath := db.Path() - if err := db.Close(); err != nil { - t.Errorf("Failed to close db: %v", err) - } + require.NoErrorf(t, db.Close(), "Failed to close db") backupPath := testResultsDirectory(t) backupDB(t, dbPath, backupPath) persistHistoryRecords(t, rs, backupPath) @@ -559,8 +551,7 @@ func saveDataIfFailed(t *testing.T, db *bolt.DB, rs historyRecords, force bool) func backupDB(t *testing.T, srcPath string, dstPath string) { targetFile := filepath.Join(dstPath, "db.bak") t.Logf("Saving the DB file to %s", targetFile) - err := copyFile(srcPath, targetFile) - require.NoError(t, err) + require.NoError(t, copyFile(srcPath, targetFile)) t.Logf("DB file saved to %s", targetFile) } diff --git a/cursor_test.go b/cursor_test.go index 73e8492f0..7e96a4bd2 100644 --- a/cursor_test.go +++ 
b/cursor_test.go @@ -11,6 +11,7 @@ import ( "testing" "testing/quick" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" @@ -152,88 +153,63 @@ func testRepeatCursorOperations_PrevNextPrev(t *testing.T, b *bolt.Bucket) { // Ensure that a cursor can return a reference to the bucket that created it. func TestCursor_Bucket(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if cb := b.Cursor().Bucket(); !reflect.DeepEqual(cb, b) { - t.Fatal("cursor bucket mismatch") - } + require.NoError(t, err) + cb := b.Cursor().Bucket() + require.Truef(t, reflect.DeepEqual(cb, b), "cursor bucket mismatch") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a Tx cursor can seek to the appropriate keys. func TestCursor_Seek(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("0001")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte("0002")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("0003")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("0001"))) + require.NoError(t, b.Put([]byte("bar"), []byte("0002"))) + require.NoError(t, b.Put([]byte("baz"), []byte("0003"))) - if _, err := b.CreateBucket([]byte("bkt")); err != nil { - t.Fatal(err) - } + _, err = b.CreateBucket([]byte("bkt")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { c := 
tx.Bucket([]byte("widgets")).Cursor() // Exact match should go to the key. - if k, v := c.Seek([]byte("bar")); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0002")) { - t.Fatalf("unexpected value: %v", v) - } + k, v := c.Seek([]byte("bar")) + require.Truef(t, bytes.Equal(k, []byte("bar")), "unexpected key: %v", k) + require.Truef(t, bytes.Equal(v, []byte("0002")), "unexpected value: %v", v) // Inexact match should go to the next key. - if k, v := c.Seek([]byte("bas")); !bytes.Equal(k, []byte("baz")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0003")) { - t.Fatalf("unexpected value: %v", v) - } + k, v = c.Seek([]byte("bas")) + require.Truef(t, bytes.Equal(k, []byte("baz")), "unexpected key: %v", k) + require.Truef(t, bytes.Equal(v, []byte("0003")), "unexpected value: %v", v) // Low key should go to the first key. - if k, v := c.Seek([]byte("")); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0002")) { - t.Fatalf("unexpected value: %v", v) - } + k, v = c.Seek([]byte("")) + require.Truef(t, bytes.Equal(k, []byte("bar")), "unexpected key: %v", k) + require.Truef(t, bytes.Equal(v, []byte("0002")), "unexpected value: %v", v) // High key should return no key. - if k, v := c.Seek([]byte("zzz")); k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } + k, v = c.Seek([]byte("zzz")) + require.Nilf(t, k, "expected nil key: %v", k) + require.Nilf(t, v, "expected nil value: %v", v) // Buckets should return their key but no value. 
- if k, v := c.Seek([]byte("bkt")); !bytes.Equal(k, []byte("bkt")) { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } + k, v = c.Seek([]byte("bkt")) + require.Truef(t, bytes.Equal(k, []byte("bkt")), "unexpected key: %v", k) + require.Nilf(t, v, "expected nil value: %v", v) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } func TestCursor_Delete(t *testing.T) { @@ -242,55 +218,41 @@ func TestCursor_Delete(t *testing.T) { const count = 1000 // Insert every other key between 0 and $count. - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 0; i < count; i += 1 { k := make([]byte, 8) binary.BigEndian.PutUint64(k, uint64(i)) - if err := b.Put(k, make([]byte, 100)); err != nil { - t.Fatal(err) - } - } - if _, err := b.CreateBucket([]byte("sub")); err != nil { - t.Fatal(err) + require.NoError(t, b.Put(k, make([]byte, 100))) } + _, err = b.CreateBucket([]byte("sub")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("widgets")).Cursor() bound := make([]byte, 8) binary.BigEndian.PutUint64(bound, uint64(count/2)) for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() { - if err := c.Delete(); err != nil { - t.Fatal(err) - } + require.NoError(t, c.Delete()) } c.Seek([]byte("sub")) - if err := c.Delete(); err != errors.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } + require.Equalf(t, c.Delete(), errors.ErrIncompatibleValue, "unexpected error") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { stats := 
tx.Bucket([]byte("widgets")).Stats() - if stats.KeyN != count/2+1 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } + require.Equalf(t, count/2+1, stats.KeyN, "unexpected KeyN: %d", stats.KeyN) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a Tx cursor can seek to the appropriate keys when there are a @@ -304,27 +266,22 @@ func TestCursor_Seek_Large(t *testing.T) { var count = 10000 // Insert every other key between 0 and $count. - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 0; i < count; i += 100 { for j := i; j < i+100; j += 2 { k := make([]byte, 8) binary.BigEndian.PutUint64(k, uint64(j)) - if err := b.Put(k, make([]byte, 100)); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put(k, make([]byte, 100))) } } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("widgets")).Cursor() for i := 0; i < count; i++ { seek := make([]byte, 8) @@ -335,255 +292,172 @@ func TestCursor_Seek_Large(t *testing.T) { // The last seek is beyond the end of the range so // it should return nil. if i == count-1 { - if k != nil { - t.Fatal("expected nil key") - } + require.Nilf(t, k, "expected nil key") continue } // Otherwise we should seek to the exact key or the next key. 
num := binary.BigEndian.Uint64(k) if i%2 == 0 { - if num != uint64(i) { - t.Fatalf("unexpected num: %d", num) - } + require.Equalf(t, num, uint64(i), "unexpected num: %d", num) } else { - if num != uint64(i+1) { - t.Fatalf("unexpected num: %d", num) - } + require.Equalf(t, num, uint64(i+1), "unexpected num: %d", num) } } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a cursor can iterate over an empty bucket without error. func TestCursor_EmptyBucket(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("widgets")).Cursor() k, v := c.First() - if k != nil { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", v) - } + require.Nilf(t, k, "unexpected key: %v", k) + require.Nilf(t, v, "unexpected value: %v", v) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a Tx cursor can reverse iterate over an empty bucket without error. 
func TestCursor_EmptyBucketReverse(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err - }); err != nil { - t.Fatal(err) - } - if err := db.View(func(tx *bolt.Tx) error { + }) + require.NoError(t, err) + err = db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("widgets")).Cursor() k, v := c.Last() - if k != nil { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", v) - } + require.Nilf(t, k, "unexpected key: %v", k) + require.Nilf(t, v, "unexpected value: %v", v) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a Tx cursor can iterate over a single root with a couple elements. func TestCursor_Iterate_Leaf(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte{}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte{0}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte{1}); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("baz"), []byte{})) + require.NoError(t, b.Put([]byte("foo"), []byte{0})) + require.NoError(t, b.Put([]byte("bar"), []byte{1})) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) tx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer func() { _ = tx.Rollback() }() c := tx.Bucket([]byte("widgets")).Cursor() k, v := c.First() - if !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{1}) { - t.Fatalf("unexpected value: %v", v) - } + require.Truef(t, bytes.Equal(k, []byte("bar")), "unexpected key: %v", k) + 
require.Truef(t, bytes.Equal(v, []byte{1}), "unexpected value: %v", v) k, v = c.Next() - if !bytes.Equal(k, []byte("baz")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{}) { - t.Fatalf("unexpected value: %v", v) - } + require.Truef(t, bytes.Equal(k, []byte("baz")), "unexpected key: %v", k) + require.Truef(t, bytes.Equal(v, []byte{}), "unexpected value: %v", v) k, v = c.Next() - if !bytes.Equal(k, []byte("foo")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{0}) { - t.Fatalf("unexpected value: %v", v) - } + require.Truef(t, bytes.Equal(k, []byte("foo")), "unexpected key: %v", k) + require.Truef(t, bytes.Equal(v, []byte{0}), "unexpected value: %v", v) k, v = c.Next() - if k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } + require.Nilf(t, k, "expected nil key: %v", k) + require.Nilf(t, v, "expected nil value: %v", v) k, v = c.Next() - if k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } + require.Nilf(t, k, "expected nil key: %v", k) + require.Nilf(t, v, "expected nil value: %v", v) - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Rollback()) } // Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements. 
func TestCursor_LeafRootReverse(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte{}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte{0}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte{1}); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("baz"), []byte{})) + require.NoError(t, b.Put([]byte("foo"), []byte{0})) + require.NoError(t, b.Put([]byte("bar"), []byte{1})) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) tx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) c := tx.Bucket([]byte("widgets")).Cursor() - if k, v := c.Last(); !bytes.Equal(k, []byte("foo")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{0}) { - t.Fatalf("unexpected value: %v", v) - } + k, v := c.Last() + require.Truef(t, bytes.Equal(k, []byte("foo")), "unexpected key: %v", k) + require.Truef(t, bytes.Equal(v, []byte{0}), "unexpected value: %v", v) - if k, v := c.Prev(); !bytes.Equal(k, []byte("baz")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{}) { - t.Fatalf("unexpected value: %v", v) - } + k, v = c.Prev() + require.Truef(t, bytes.Equal(k, []byte("baz")), "unexpected key: %v", k) + require.Truef(t, bytes.Equal(v, []byte{}), "unexpected value: %v", v) - if k, v := c.Prev(); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{1}) { - t.Fatalf("unexpected value: %v", v) - } + k, v = c.Prev() + require.Truef(t, bytes.Equal(k, []byte("bar")), "unexpected key: %v", k) + require.Truef(t, bytes.Equal(v, []byte{1}), "unexpected value: %v", v) - if k, v := c.Prev(); k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil 
{ - t.Fatalf("expected nil value: %v", v) - } + k, v = c.Prev() + require.Nilf(t, k, "expected nil key: %v", k) + require.Nilf(t, v, "expected nil value: %v", v) - if k, v := c.Prev(); k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } + k, v = c.Prev() + require.Nilf(t, k, "expected nil key: %v", k) + require.Nilf(t, v, "expected nil value: %v", v) - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Rollback()) } // Ensure that a Tx cursor can restart from the beginning. func TestCursor_Restart(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte{}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte{}); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("bar"), []byte{})) + require.NoError(t, b.Put([]byte("foo"), []byte{})) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) tx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) c := tx.Bucket([]byte("widgets")).Cursor() - if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } - if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) { - t.Fatalf("unexpected key: %v", k) - } + k, _ := c.First() + require.Truef(t, bytes.Equal(k, []byte("bar")), "unexpected key: %v", k) + k, _ = c.Next() + require.Truef(t, bytes.Equal(k, []byte("foo")), "unexpected key: %v", k) - if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } - if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) { - t.Fatalf("unexpected key: %v", k) - } + k, _ = c.First() + require.Truef(t, bytes.Equal(k, []byte("bar")), "unexpected key: %v", k) + k, _ = 
c.Next() + require.Truef(t, bytes.Equal(k, []byte("foo")), "unexpected key: %v", k) - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Rollback()) } // Ensure that a cursor can skip over empty pages that have been deleted. @@ -591,30 +465,23 @@ func TestCursor_First_EmptyPages(t *testing.T) { db := btesting.MustCreateDB(t) // Create 1000 keys in the "widgets" bucket. - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 0; i < 1000; i++ { - if err := b.Put(u64tob(uint64(i)), []byte{}); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put(u64tob(uint64(i)), []byte{})) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Delete half the keys and then try to iterate. - if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for i := 0; i < 600; i++ { - if err := b.Delete(u64tob(uint64(i))); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Delete(u64tob(uint64(i)))) } c := b.Cursor() @@ -622,14 +489,11 @@ func TestCursor_First_EmptyPages(t *testing.T) { for k, _ := c.First(); k != nil; k, _ = c.Next() { n++ } - if n != 400 { - t.Fatalf("unexpected key count: %d", n) - } + require.Equalf(t, 400, n, "unexpected key count: %d", n) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a cursor can skip over empty pages that have been deleted. @@ -637,30 +501,23 @@ func TestCursor_Last_EmptyPages(t *testing.T) { db := btesting.MustCreateDB(t) // Create 1000 keys in the "widgets" bucket. 
- if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 0; i < 1000; i++ { - if err := b.Put(u64tob(uint64(i)), []byte{}); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put(u64tob(uint64(i)), []byte{})) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Delete last 800 elements to ensure last page is empty - if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for i := 200; i < 1000; i++ { - if err := b.Delete(u64tob(uint64(i))); err != nil { - t.Fatal(err) - } + require.NoError(t, b.Delete(u64tob(uint64(i)))) } c := b.Cursor() @@ -668,14 +525,11 @@ func TestCursor_Last_EmptyPages(t *testing.T) { for k, _ := c.Last(); k != nil; k, _ = c.Prev() { n++ } - if n != 200 { - t.Fatalf("unexpected key count: %d", n) - } + require.Equalf(t, 200, n, "unexpected key count: %d", n) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a Tx can iterate over all elements in a bucket. @@ -686,21 +540,13 @@ func TestCursor_QuickCheck(t *testing.T) { // Bulk insert all values. tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for _, item := range items { - if err := b.Put(item.Key, item.Value); err != nil { - t.Fatal(err) - } - } - if err := tx.Commit(); err != nil { - t.Fatal(err) + require.NoError(t, b.Put(item.Key, item.Value)) } + require.NoError(t, tx.Commit()) // Sort test data. sort.Sort(items) @@ -708,32 +554,21 @@ func TestCursor_QuickCheck(t *testing.T) { // Iterate over all items and check consistency. 
var index = 0 tx, err = db.Begin(false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) c := tx.Bucket([]byte("widgets")).Cursor() for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() { - if !bytes.Equal(k, items[index].Key) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, items[index].Value) { - t.Fatalf("unexpected value: %v", v) - } + require.Truef(t, bytes.Equal(k, items[index].Key), "unexpected key: %v", k) + require.Truef(t, bytes.Equal(v, items[index].Value), "unexpected value: %v", v) index++ } - if len(items) != index { - t.Fatalf("unexpected item count: %v, expected %v", len(items), index) - } + require.Equalf(t, len(items), index, "unexpected item count: %v, expected %v", len(items), index) - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Rollback()) return true } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } + assert.NoError(t, quick.Check(f, qconfig())) } // Ensure that a transaction can iterate over all elements in a bucket in reverse. @@ -744,21 +579,13 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) { // Bulk insert all values. tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for _, item := range items { - if err := b.Put(item.Key, item.Value); err != nil { - t.Fatal(err) - } - } - if err := tx.Commit(); err != nil { - t.Fatal(err) + require.NoError(t, b.Put(item.Key, item.Value)) } + require.NoError(t, tx.Commit()) // Sort test data. sort.Sort(revtestdata(items)) @@ -766,113 +593,80 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) { // Iterate over all items and check consistency. 
var index = 0 tx, err = db.Begin(false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) c := tx.Bucket([]byte("widgets")).Cursor() for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() { - if !bytes.Equal(k, items[index].Key) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, items[index].Value) { - t.Fatalf("unexpected value: %v", v) - } + require.Truef(t, bytes.Equal(k, items[index].Key), "unexpected key: %v", k) + require.Truef(t, bytes.Equal(v, items[index].Value), "unexpected value: %v", v) index++ } - if len(items) != index { - t.Fatalf("unexpected item count: %v, expected %v", len(items), index) - } + require.Equalf(t, len(items), index, "unexpected item count: %v, expected %v", len(items), index) - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Rollback()) return true } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } + assert.NoError(t, quick.Check(f, qconfig())) } // Ensure that a Tx cursor can iterate over subbuckets. 
func TestCursor_QuickCheck_BucketsOnly(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("bar")); err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("baz")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + _, err = b.CreateBucket([]byte("foo")) + require.NoError(t, err) + _, err = b.CreateBucket([]byte("bar")) + require.NoError(t, err) + _, err = b.CreateBucket([]byte("baz")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { var names []string c := tx.Bucket([]byte("widgets")).Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { names = append(names, string(k)) - if v != nil { - t.Fatalf("unexpected value: %v", v) - } - } - if !reflect.DeepEqual(names, []string{"bar", "baz", "foo"}) { - t.Fatalf("unexpected names: %+v", names) + require.Nilf(t, v, "unexpected value: %v", v) } + require.Truef(t, reflect.DeepEqual(names, []string{"bar", "baz", "foo"}), "unexpected names: %+v", names) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a Tx cursor can reverse iterate over subbuckets. 
func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("bar")); err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("baz")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + _, err = b.CreateBucket([]byte("foo")) + require.NoError(t, err) + _, err = b.CreateBucket([]byte("bar")) + require.NoError(t, err) + _, err = b.CreateBucket([]byte("baz")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { var names []string c := tx.Bucket([]byte("widgets")).Cursor() for k, v := c.Last(); k != nil; k, v = c.Prev() { names = append(names, string(k)) - if v != nil { - t.Fatalf("unexpected value: %v", v) - } - } - if !reflect.DeepEqual(names, []string{"foo", "baz", "bar"}) { - t.Fatalf("unexpected names: %+v", names) + require.Nilf(t, v, "unexpected value: %v", v) } + require.Truef(t, reflect.DeepEqual(names, []string{"foo", "baz", "bar"}), "unexpected names: %+v", names) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } func ExampleCursor() { diff --git a/db_test.go b/db_test.go index 757b896e8..f3b433d64 100644 --- a/db_test.go +++ b/db_test.go @@ -11,7 +11,6 @@ import ( "os" "path/filepath" "reflect" - "strings" "sync" "testing" "time" @@ -50,19 +49,12 @@ func TestOpen(t *testing.T) { defer os.RemoveAll(path) db, err := bolt.Open(path, 0600, nil) - if err != nil { - t.Fatal(err) - } else if db == nil { - t.Fatal("expected db") - } + require.NoError(t, err) + require.NotNilf(t, db, "expected db") - if s := db.Path(); s != path { - t.Fatalf("unexpected 
path: %s", s)
-	}
+	require.Equalf(t, path, db.Path(), "unexpected path")
 
-	if err := db.Close(); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, db.Close())
 }
 
 // Regression validation for https://github.com/etcd-io/bbolt/pull/122.
@@ -100,26 +92,20 @@ func TestOpen_MultipleGoroutines(t *testing.T) {
 	}
 	close(errCh)
 	for err := range errCh {
-		if err != nil {
-			t.Fatalf("error from inside goroutine: %v", err)
-		}
+		require.NoErrorf(t, err, "error from inside goroutine: %v", err)
 	}
 }
 
 // Ensure that opening a database with a blank path returns an error.
 func TestOpen_ErrPathRequired(t *testing.T) {
 	_, err := bolt.Open("", 0600, nil)
-	if err == nil {
-		t.Fatalf("expected error")
-	}
+	require.Errorf(t, err, "expected error")
 }
 
 // Ensure that opening a database with a bad path returns an error.
 func TestOpen_ErrNotExists(t *testing.T) {
 	_, err := bolt.Open(filepath.Join(tempfile(), "bad-path"), 0600, nil)
-	if err == nil {
-		t.Fatal("expected error")
-	}
+	require.Errorf(t, err, "expected error")
 }
 
 // Ensure that opening a file that is not a Bolt database returns ErrInvalid.
@@ -128,19 +114,13 @@ func TestOpen_ErrInvalid(t *testing.T) {
 	defer os.RemoveAll(path)
 
 	f, err := os.Create(path)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if _, err := fmt.Fprintln(f, "this is not a bolt database"); err != nil {
-		t.Fatal(err)
-	}
-	if err := f.Close(); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
+	_, err = fmt.Fprintln(f, "this is not a bolt database")
+	require.NoError(t, err)
+	require.NoError(t, f.Close())
 
-	if _, err := bolt.Open(path, 0600, nil); err != berrors.ErrInvalid {
-		t.Fatalf("unexpected error: %s", err)
-	}
+	_, err = bolt.Open(path, 0600, nil)
+	require.Equalf(t, berrors.ErrInvalid, err, "unexpected error: %s", err)
 }
 
 // Ensure that opening a file with two invalid versions returns ErrVersionMismatch.
@@ -154,29 +134,22 @@ func TestOpen_ErrVersionMismatch(t *testing.T) {
 	path := db.Path()
 
 	// Close database.
-	if err := db.Close(); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, db.Close())
 
 	// Read data file.
 	buf, err := os.ReadFile(path)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Rewrite meta pages.
 	meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize]))
 	meta0.version++
 	meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize]))
 	meta1.version++
-	if err := os.WriteFile(path, buf, 0666); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, os.WriteFile(path, buf, 0666))
 
 	// Reopen data file.
-	if _, err := bolt.Open(path, 0600, nil); err != berrors.ErrVersionMismatch {
-		t.Fatalf("unexpected error: %s", err)
-	}
+	_, err = bolt.Open(path, 0600, nil)
+	require.Equalf(t, berrors.ErrVersionMismatch, err, "unexpected error: %s", err)
 }
 
 // Ensure that opening a file with two invalid checksums returns ErrChecksum.
@@ -190,29 +163,22 @@ func TestOpen_ErrChecksum(t *testing.T) {
 	path := db.Path()
 
 	// Close database.
-	if err := db.Close(); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, db.Close())
 
 	// Read data file.
 	buf, err := os.ReadFile(path)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Rewrite meta pages.
 	meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize]))
 	meta0.pgid++
 	meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize]))
 	meta1.pgid++
-	if err := os.WriteFile(path, buf, 0666); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, os.WriteFile(path, buf, 0666))
 
 	// Reopen data file.
-	if _, err := bolt.Open(path, 0600, nil); err != berrors.ErrChecksum {
-		t.Fatalf("unexpected error: %s", err)
-	}
+	_, err = bolt.Open(path, 0600, nil)
+	require.Equalf(t, berrors.ErrChecksum, err, "unexpected error: %s", err)
 }
 
 // Ensure that it can read the page size from the second meta page if the first one is invalid.
@@ -226,16 +192,12 @@ func TestOpen_ReadPageSize_FromMeta1_OS(t *testing.T) {
 	// Read data file.
 	buf, err := os.ReadFile(path)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Rewrite first meta page.
 	meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize]))
 	meta0.pgid++
-	if err := os.WriteFile(path, buf, 0666); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, os.WriteFile(path, buf, 0666))
 
 	// Reopen data file.
 	db = btesting.MustOpenDBWithOption(t, path, nil)
@@ -264,8 +226,7 @@ func TestOpen_ReadPageSize_FromMeta1_Given(t *testing.T) {
 		t.Logf("#%d: Intentionally corrupt the first meta page for pageSize %d", i, givenPageSize)
 		meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize]))
 		meta0.pgid++
-		err = os.WriteFile(path, buf, 0666)
-		require.NoError(t, err)
+		require.NoError(t, os.WriteFile(path, buf, 0666))
 	}
 
 	// Reopen data file.
@@ -288,40 +249,27 @@ func TestOpen_Size(t *testing.T) {
 		func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) },
 		func(tx int, k int) []byte { return make([]byte, 1000) },
 	)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	path := db.Path()
 	db.MustClose()
 
 	sz := fileSize(path)
-	if sz == 0 {
-		t.Fatalf("unexpected new file size: %d", sz)
-	}
+	require.NotEqualf(t, int64(0), sz, "unexpected new file size: %d", sz)
 
 	db.MustReopen()
-	if err := db.Update(func(tx *bolt.Tx) error {
-		if err := tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}); err != nil {
-			t.Fatal(err)
-		}
+	err = db.Update(func(tx *bolt.Tx) error {
+		require.NoError(t, tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}))
 		return nil
-	}); err != nil {
-		t.Fatal(err)
-	}
-	if err := db.Close(); err != nil {
-		t.Fatal(err)
-	}
+	})
+	require.NoError(t, err)
+	require.NoError(t, db.Close())
 	newSz := fileSize(path)
-	if newSz == 0 {
-		t.Fatalf("unexpected new file size: %d", newSz)
-	}
+	require.NotEqualf(t, int64(0), newSz, "unexpected new file size: %d", newSz)
 
 	// Compare the original size with the new size.
 	// db size might increase by a few page sizes due to the new small update.
-	if sz < newSz-5*int64(pagesize) {
-		t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
-	}
+	require.GreaterOrEqualf(t, sz, newSz-5*int64(pagesize), "unexpected file growth: %d => %d", sz, newSz)
 }
 
 // Ensure that opening a database beyond the max step size does not increase its size.
@@ -340,55 +288,38 @@ func TestOpen_Size_Large(t *testing.T) {
 	// Insert until we get above the minimum 4MB size.
 	var index uint64
 	for i := 0; i < 10000; i++ {
-		if err := db.Update(func(tx *bolt.Tx) error {
+		err := db.Update(func(tx *bolt.Tx) error {
 			b, _ := tx.CreateBucketIfNotExists([]byte("data"))
 			for j := 0; j < 1000; j++ {
-				if err := b.Put(u64tob(index), make([]byte, 50)); err != nil {
-					t.Fatal(err)
-				}
+				require.NoError(t, b.Put(u64tob(index), make([]byte, 50)))
 				index++
 			}
 			return nil
-		}); err != nil {
-			t.Fatal(err)
-		}
+		})
+		require.NoError(t, err)
 	}
 
 	// Close database and grab the size.
-	if err := db.Close(); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, db.Close())
 	sz := fileSize(path)
-	if sz == 0 {
-		t.Fatalf("unexpected new file size: %d", sz)
-	} else if sz < (1 << 30) {
-		t.Fatalf("expected larger initial size: %d", sz)
-	}
+	require.NotEqualf(t, int64(0), sz, "unexpected new file size: %d", sz)
+	require.GreaterOrEqualf(t, sz, int64(1<<30), "expected larger initial size: %d", sz)
 
 	// Reopen database, update, and check size again.
 	db0, err := bolt.Open(path, 0600, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := db0.Update(func(tx *bolt.Tx) error {
+	require.NoError(t, err)
+	err = db0.Update(func(tx *bolt.Tx) error {
 		return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0})
-	}); err != nil {
-		t.Fatal(err)
-	}
-	if err := db0.Close(); err != nil {
-		t.Fatal(err)
-	}
+	})
+	require.NoError(t, err)
+	require.NoError(t, db0.Close())
	newSz := fileSize(path)
-	if newSz == 0 {
-		t.Fatalf("unexpected new file size: %d", newSz)
-	}
+	require.NotEqualf(t, int64(0), newSz, "unexpected new file size: %d", newSz)
 
 	// Compare the original size with the new size.
// db size might increase by a few page sizes due to the new small update. - if sz < newSz-5*int64(pagesize) { - t.Fatalf("unexpected file growth: %d => %d", sz, newSz) - } + require.GreaterOrEqualf(t, sz, newSz-5*int64(pagesize), "unexpected file growth: %d => %d", sz, newSz) } // Ensure that a re-opened database is consistent. @@ -397,26 +328,14 @@ func TestOpen_Check(t *testing.T) { defer os.RemoveAll(path) db, err := bolt.Open(path, 0600, nil) - if err != nil { - t.Fatal(err) - } - if err = db.View(func(tx *bolt.Tx) error { return <-tx.Check() }); err != nil { - t.Fatal(err) - } - if err = db.Close(); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) + require.NoError(t, db.Close()) db, err = bolt.Open(path, 0600, nil) - if err != nil { - t.Fatal(err) - } - if err := db.View(func(tx *bolt.Tx) error { return <-tx.Check() }); err != nil { - t.Fatal(err) - } - if err := db.Close(); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) + require.NoError(t, db.Close()) } // Ensure that write errors to the meta file handler during initialization are returned. 
@@ -430,23 +349,15 @@ func TestOpen_FileTooSmall(t *testing.T) { defer os.RemoveAll(path) db, err := bolt.Open(path, 0600, nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) pageSize := int64(db.Info().PageSize) - if err = db.Close(); err != nil { - t.Fatal(err) - } + require.NoError(t, db.Close()) // corrupt the database - if err = os.Truncate(path, pageSize); err != nil { - t.Fatal(err) - } + require.NoError(t, os.Truncate(path, pageSize)) _, err = bolt.Open(path, 0600, nil) - if err == nil || !strings.Contains(err.Error(), "file size too small") { - t.Fatalf("unexpected error: %s", err) - } + require.ErrorContainsf(t, err, "file size too small", "unexpected error: %s", err) } // TestDB_Open_InitialMmapSize tests if having InitialMmapSize large enough @@ -461,33 +372,22 @@ func TestDB_Open_InitialMmapSize(t *testing.T) { testWriteSize := 1 << 27 // 134MB db, err := bolt.Open(path, 0600, &bolt.Options{InitialMmapSize: initMmapSize}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // create a long-running read transaction // that never gets closed while writing rtx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // create a write transaction wtx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) b, err := wtx.CreateBucket([]byte("test")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // and commit a large write - err = b.Put([]byte("foo"), make([]byte, testWriteSize)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, b.Put([]byte("foo"), make([]byte, testWriteSize))) done := make(chan error, 1) @@ -500,14 +400,10 @@ func TestDB_Open_InitialMmapSize(t *testing.T) { case <-time.After(5 * time.Second): t.Errorf("unexpected that the reader blocks writer") case err := <-done: - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } - if err := rtx.Rollback(); err != nil { - t.Fatal(err) - } + require.NoError(t, rtx.Rollback()) } // 
TestDB_Open_ReadOnly checks a database in read only mode can read but not write. @@ -515,21 +411,14 @@ func TestDB_Open_ReadOnly(t *testing.T) { // Create a writable db, write k-v and close it. db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) return nil - }); err != nil { - t.Fatal(err) - } - if err := db.Close(); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) + require.NoError(t, db.Close()) f := db.Path() o := &bolt.Options{ReadOnly: true} @@ -538,29 +427,21 @@ func TestDB_Open_ReadOnly(t *testing.T) { panic(err) } - if !readOnlyDB.IsReadOnly() { - t.Fatal("expect db in read only mode") - } + require.Truef(t, readOnlyDB.IsReadOnly(), "expect db in read only mode") // Read from a read-only transaction. - if err := readOnlyDB.View(func(tx *bolt.Tx) error { + err = readOnlyDB.View(func(tx *bolt.Tx) error { value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - if !bytes.Equal(value, []byte("bar")) { - t.Fatal("expect value 'bar', got", value) - } + require.Truef(t, bytes.Equal(value, []byte("bar")), "expect value 'bar', got %s", value) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Can't launch read-write transaction. 
- if _, err := readOnlyDB.Begin(true); err != berrors.ErrDatabaseReadOnly { - t.Fatalf("unexpected error: %s", err) - } + _, err = readOnlyDB.Begin(true) + require.Equalf(t, err, berrors.ErrDatabaseReadOnly, "unexpected error: %s", err) - if err := readOnlyDB.Close(); err != nil { - t.Fatal(err) - } + require.NoError(t, readOnlyDB.Close()) } func TestDB_Open_ReadOnly_NoCreate(t *testing.T) { @@ -578,9 +459,8 @@ func TestOpen_BigPage(t *testing.T) { db2 := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize * 4}) - if db1sz, db2sz := fileSize(db1.Path()), fileSize(db2.Path()); db1sz >= db2sz { - t.Errorf("expected %d < %d", db1sz, db2sz) - } + db1sz, db2sz := fileSize(db1.Path()), fileSize(db2.Path()) + assert.Lessf(t, db1sz, db2sz, "expected %d < %d", db1sz, db2sz) } // TestOpen_RecoverFreeList tests opening the DB with free-list @@ -591,49 +471,32 @@ func TestOpen_RecoverFreeList(t *testing.T) { // Write some pages. tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) wbuf := make([]byte, 8192) for i := 0; i < 100; i++ { s := fmt.Sprintf("%d", i) b, err := tx.CreateBucket([]byte(s)) - if err != nil { - t.Fatal(err) - } - if err = b.Put([]byte(s), wbuf); err != nil { - t.Fatal(err) - } - } - if err = tx.Commit(); err != nil { - t.Fatal(err) + require.NoError(t, err) + require.NoError(t, b.Put([]byte(s), wbuf)) } + require.NoError(t, tx.Commit()) // Generate free pages. - if tx, err = db.Begin(true); err != nil { - t.Fatal(err) - } + tx, err = db.Begin(true) + require.NoError(t, err) for i := 0; i < 50; i++ { s := fmt.Sprintf("%d", i) b := tx.Bucket([]byte(s)) - if b == nil { - t.Fatal(err) - } - if err := b.Delete([]byte(s)); err != nil { - t.Fatal(err) - } - } - if err := tx.Commit(); err != nil { - t.Fatal(err) + require.NotNil(t, b) + require.NoError(t, b.Delete([]byte(s))) } + require.NoError(t, tx.Commit()) db.MustClose() // Record freelist count from opening with NoFreelistSync. 
 	db.MustReopen()
 	freepages := db.Stats().FreePageN
-	if freepages == 0 {
-		t.Fatalf("no free pages on NoFreelistSync reopen")
-	}
+	require.NotEqualf(t, 0, freepages, "no free pages on NoFreelistSync reopen")
 	db.MustClose()
 
 	// Check free page count is reconstructed when opened with freelist sync.
@@ -641,17 +504,15 @@ func TestOpen_RecoverFreeList(t *testing.T) {
 	db.MustReopen()
 	// One less free page for syncing the free list on open.
 	freepages--
-	if fp := db.Stats().FreePageN; fp < freepages {
-		t.Fatalf("closed with %d free pages, opened with %d", freepages, fp)
-	}
+	fp := db.Stats().FreePageN
+	require.GreaterOrEqualf(t, fp, freepages, "closed with %d free pages, opened with %d", freepages, fp)
 }
 
 // Ensure that a database cannot open a transaction when it's not open.
 func TestDB_Begin_ErrDatabaseNotOpen(t *testing.T) {
 	var db bolt.DB
-	if _, err := db.Begin(false); err != berrors.ErrDatabaseNotOpen {
-		t.Fatalf("unexpected error: %s", err)
-	}
+	_, err := db.Begin(false)
+	require.Equalf(t, berrors.ErrDatabaseNotOpen, err, "unexpected error: %s", err)
 }
 
 // Ensure that a read-write transaction can be retrieved.
@@ -660,10 +521,10 @@ func TestDB_BeginRW(t *testing.T) { tx, err := db.Begin(true) require.NoError(t, err) - require.NotNil(t, tx, "expected tx") + require.NotNilf(t, tx, "expected tx") defer func() { require.NoError(t, tx.Commit()) }() - require.True(t, tx.Writable(), "expected writable tx") + require.Truef(t, tx.Writable(), "expected writable tx") require.Same(t, db.DB, tx.DB()) } @@ -709,8 +570,7 @@ func TestDB_Concurrent_WriteTo_and_ConsistentRead(t *testing.T) { dataCache[round] = dataSlice dataLock.Unlock() - err = tx.Rollback() - require.NoError(t, err) + require.NoError(t, tx.Rollback()) copyOpt := *o snap := btesting.MustOpenDBWithOption(t, f, ©Opt) @@ -741,8 +601,7 @@ func TestDB_Concurrent_WriteTo_and_ConsistentRead(t *testing.T) { require.NoError(t, perr) } } - err = tx.Commit() - require.NoError(t, err) + require.NoError(t, tx.Commit()) } wg.Wait() @@ -754,7 +613,7 @@ func TestDB_Concurrent_WriteTo_and_ConsistentRead(t *testing.T) { for i := 1; i < len(dataSlice); i++ { datai := dataSlice[i] same := reflect.DeepEqual(data0, datai) - require.True(t, same, fmt.Sprintf("found inconsistent data in round %d, data[0]: %v, data[%d] : %v", round, data0, i, datai)) + require.Truef(t, same, "found inconsistent data in round %d, data[0]: %v, data[%d] : %v", round, data0, i, datai) } } } @@ -762,9 +621,8 @@ func TestDB_Concurrent_WriteTo_and_ConsistentRead(t *testing.T) { // Ensure that opening a transaction while the DB is closed returns an error. func TestDB_BeginRW_Closed(t *testing.T) { var db bolt.DB - if _, err := db.Begin(true); err != berrors.ErrDatabaseNotOpen { - t.Fatalf("unexpected error: %s", err) - } + _, err := db.Begin(true) + require.Equalf(t, err, berrors.ErrDatabaseNotOpen, "unexpected error: %s", err) } func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) } @@ -776,9 +634,7 @@ func testDB_Close_PendingTx(t *testing.T, writable bool) { // Start transaction. 
tx, err := db.Begin(writable) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Open update in separate goroutine. startCh := make(chan struct{}, 1) @@ -795,29 +651,22 @@ func testDB_Close_PendingTx(t *testing.T, writable bool) { time.Sleep(100 * time.Millisecond) select { case err := <-done: - if err != nil { - t.Errorf("error from inside goroutine: %v", err) - } + require.NoErrorf(t, err, "error from inside goroutine: %v", err) t.Fatal("database closed too early") default: } // Commit/close transaction. if writable { - err = tx.Commit() + require.NoError(t, tx.Commit()) } else { - err = tx.Rollback() - } - if err != nil { - t.Fatal(err) + require.NoError(t, tx.Rollback()) } // Ensure database closed now. select { case err := <-done: - if err != nil { - t.Fatalf("error from inside goroutine: %v", err) - } + require.NoErrorf(t, err, "error from inside goroutine: %v", err) case <-time.After(5 * time.Second): t.Fatalf("database did not close") } @@ -826,49 +675,34 @@ func testDB_Close_PendingTx(t *testing.T, writable bool) { // Ensure a database can provide a transactional block. 
func TestDB_Update(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } - if err := b.Delete([]byte("foo")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) + require.NoError(t, b.Put([]byte("baz"), []byte("bat"))) + require.NoError(t, b.Delete([]byte("foo"))) return nil - }); err != nil { - t.Fatal(err) - } - if err := db.View(func(tx *bolt.Tx) error { + }) + require.NoError(t, err) + err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) - if v := b.Get([]byte("foo")); v != nil { - t.Fatalf("expected nil value, got: %v", v) - } - if v := b.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { - t.Fatalf("unexpected value: %v", v) - } + require.Nilf(t, b.Get([]byte("foo")), "expected nil value") + v := b.Get([]byte("baz")) + require.Truef(t, bytes.Equal(v, []byte("bat")), "unexpected value: %v", v) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure a closed database returns an error while running a transaction block func TestDB_Update_Closed(t *testing.T) { var db bolt.DB - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) return nil - }); err != berrors.ErrDatabaseNotOpen { - t.Fatalf("unexpected error: %s", err) - } + }) + require.Equalf(t, err, berrors.ErrDatabaseNotOpen, "unexpected error: %s", err) } // Ensure a panic occurs while trying to commit a managed transaction. 
@@ -876,7 +710,7 @@ func TestDB_Update_ManualCommit(t *testing.T) { db := btesting.MustCreateDB(t) var panicked bool - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { func() { defer func() { if r := recover(); r != nil { @@ -884,16 +718,12 @@ func TestDB_Update_ManualCommit(t *testing.T) { } }() - if err := tx.Commit(); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Commit()) }() return nil - }); err != nil { - t.Fatal(err) - } else if !panicked { - t.Fatal("expected panic") - } + }) + require.NoError(t, err) + require.Truef(t, panicked, "expected panic") } // Ensure a panic occurs while trying to rollback a managed transaction. @@ -901,7 +731,7 @@ func TestDB_Update_ManualRollback(t *testing.T) { db := btesting.MustCreateDB(t) var panicked bool - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { func() { defer func() { if r := recover(); r != nil { @@ -909,16 +739,12 @@ func TestDB_Update_ManualRollback(t *testing.T) { } }() - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Rollback()) }() return nil - }); err != nil { - t.Fatal(err) - } else if !panicked { - t.Fatal("expected panic") - } + }) + require.NoError(t, err) + require.Truef(t, panicked, "expected panic") } // Ensure a panic occurs while trying to commit a managed transaction. 
@@ -926,7 +752,7 @@ func TestDB_View_ManualCommit(t *testing.T) { db := btesting.MustCreateDB(t) var panicked bool - if err := db.View(func(tx *bolt.Tx) error { + err := db.View(func(tx *bolt.Tx) error { func() { defer func() { if r := recover(); r != nil { @@ -934,16 +760,12 @@ func TestDB_View_ManualCommit(t *testing.T) { } }() - if err := tx.Commit(); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Commit()) }() return nil - }); err != nil { - t.Fatal(err) - } else if !panicked { - t.Fatal("expected panic") - } + }) + require.NoError(t, err) + require.Truef(t, panicked, "expected panic") } // Ensure a panic occurs while trying to rollback a managed transaction. @@ -951,7 +773,7 @@ func TestDB_View_ManualRollback(t *testing.T) { db := btesting.MustCreateDB(t) var panicked bool - if err := db.View(func(tx *bolt.Tx) error { + err := db.View(func(tx *bolt.Tx) error { func() { defer func() { if r := recover(); r != nil { @@ -959,16 +781,12 @@ func TestDB_View_ManualRollback(t *testing.T) { } }() - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Rollback()) }() return nil - }); err != nil { - t.Fatal(err) - } else if !panicked { - t.Fatal("expected panic") - } + }) + require.NoError(t, err) + require.Truef(t, panicked, "expected panic") } // Ensure a write transaction that panics does not hold open locks. @@ -983,60 +801,50 @@ func TestDB_Update_Panic(t *testing.T) { } }() - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) panic("omg") - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) }() // Verify we can update again. 
- if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Verify that our change persisted. - if err := db.Update(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } + err = db.Update(func(tx *bolt.Tx) error { + require.NotNilf(t, tx.Bucket([]byte("widgets")), "expected bucket") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure a database can return an error through a read-only transactional block. func TestDB_View_Error(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.View(func(tx *bolt.Tx) error { + err := db.View(func(tx *bolt.Tx) error { return errors.New("xxx") - }); err == nil || err.Error() != "xxx" { - t.Fatalf("unexpected error: %s", err) - } + }) + require.EqualErrorf(t, err, "xxx", "unexpected error: %s", err) } // Ensure a read transaction that panics does not hold open locks. func TestDB_View_Panic(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Panic during view transaction but recover. 
func() { @@ -1046,112 +854,84 @@ func TestDB_View_Panic(t *testing.T) { } }() - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } + err := db.View(func(tx *bolt.Tx) error { + require.NotNilf(t, tx.Bucket([]byte("widgets")), "expected bucket") panic("omg") - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) }() // Verify that we can still use read transactions. - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } + err = db.View(func(tx *bolt.Tx) error { + require.NotNilf(t, tx.Bucket([]byte("widgets")), "expected bucket") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that DB stats can be returned. func TestDB_Stats(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) stats := db.Stats() - if stats.TxStats.GetPageCount() != 2 { - t.Fatalf("unexpected TxStats.PageCount: %d", stats.TxStats.GetPageCount()) - } else if stats.FreePageN != 0 { - t.Fatalf("unexpected FreePageN != 0: %d", stats.FreePageN) - } else if stats.PendingPageN != 2 { - t.Fatalf("unexpected PendingPageN != 2: %d", stats.PendingPageN) - } + require.Equalf(t, int64(2), stats.TxStats.GetPageCount(), "unexpected TxStats.PageCount: %d", stats.TxStats.GetPageCount()) + require.Equalf(t, 0, stats.FreePageN, "unexpected FreePageN != 0: %d", stats.FreePageN) + require.Equalf(t, 2, stats.PendingPageN, "unexpected PendingPageN != 2: %d", stats.PendingPageN) } // Ensure that database pages are in expected order and type. 
func TestDB_Consistency(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) for i := 0; i < 10; i++ { - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } - if err := db.Update(func(tx *bolt.Tx) error { - if p, _ := tx.Page(0); p == nil { - t.Fatal("expected page") - } else if p.Type != "meta" { - t.Fatalf("unexpected page type: %s", p.Type) - } + err = db.Update(func(tx *bolt.Tx) error { + p, _ := tx.Page(0) + require.NotNilf(t, p, "expected page") + require.Equalf(t, "meta", p.Type, "unexpected page type: %s", p.Type) - if p, _ := tx.Page(1); p == nil { - t.Fatal("expected page") - } else if p.Type != "meta" { - t.Fatalf("unexpected page type: %s", p.Type) - } + p, _ = tx.Page(1) + require.NotNilf(t, p, "expected page") + require.Equalf(t, "meta", p.Type, "unexpected page type: %s", p.Type) - if p, _ := tx.Page(2); p == nil { - t.Fatal("expected page") - } else if p.Type != "free" { - t.Fatalf("unexpected page type: %s", p.Type) - } + p, _ = tx.Page(2) + require.NotNilf(t, p, "expected page") + require.Equalf(t, "free", p.Type, "unexpected page type: %s", p.Type) - if p, _ := tx.Page(3); p == nil { - t.Fatal("expected page") - } else if p.Type != "free" { - t.Fatalf("unexpected page type: %s", p.Type) - } + p, _ = tx.Page(3) + require.NotNilf(t, p, "expected page") + require.Equalf(t, "free", p.Type, "unexpected page type: %s", p.Type) - if p, _ := tx.Page(4); p == nil { - t.Fatal("expected page") - } else if p.Type != "leaf" { - 
t.Fatalf("unexpected page type: %s", p.Type) - } + p, _ = tx.Page(4) + require.NotNilf(t, p, "expected page") + require.Equalf(t, "leaf", p.Type, "unexpected page type: %s", p.Type) - if p, _ := tx.Page(5); p == nil { - t.Fatal("expected page") - } else if p.Type != "freelist" { - t.Fatalf("unexpected page type: %s", p.Type) - } + p, _ = tx.Page(5) + require.NotNilf(t, p, "expected page") + require.Equalf(t, "freelist", p.Type, "unexpected page type: %s", p.Type) - if p, _ := tx.Page(6); p != nil { - t.Fatal("unexpected page") - } + p, _ = tx.Page(6) + require.Nilf(t, p, "unexpected page") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that DB stats can be subtracted from one another. @@ -1162,28 +942,22 @@ func TestDBStats_Sub(t *testing.T) { b.TxStats.PageCount = 10 b.FreePageN = 14 diff := b.Sub(&a) - if diff.TxStats.GetPageCount() != 7 { - t.Fatalf("unexpected TxStats.PageCount: %d", diff.TxStats.GetPageCount()) - } + require.Equalf(t, int64(7), diff.TxStats.GetPageCount(), "unexpected TxStats.PageCount: %d", diff.TxStats.GetPageCount()) // free page stats are copied from the receiver and not subtracted - if diff.FreePageN != 14 { - t.Fatalf("unexpected FreePageN: %d", diff.FreePageN) - } + require.Equalf(t, 14, diff.FreePageN, "unexpected FreePageN: %d", diff.FreePageN) } // Ensure two functions can perform updates in a single batch. func TestDB_Batch(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Iterate over multiple updates in separate goroutines. n := 2 @@ -1198,23 +972,19 @@ func TestDB_Batch(t *testing.T) { // Check all responses to make sure there's no error. 
for i := 0; i < n; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } + err := <-ch + require.NoError(t, err) } // Ensure data is correct. - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for i := 0; i < n; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } + assert.NotNilf(t, b.Get(u64tob(uint64(i))), "key not found: %d", i) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } func TestDB_Batch_Panic(t *testing.T) { @@ -1249,12 +1019,11 @@ func TestDB_Batch_Panic(t *testing.T) { func TestDB_BatchFull(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) const size = 3 // buffered so we never leak goroutines @@ -1286,33 +1055,28 @@ func TestDB_BatchFull(t *testing.T) { // Check all responses to make sure there's no error. for i := 0; i < size; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } + err := <-ch + require.NoError(t, err) } // Ensure data is correct. 
- if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for i := 1; i <= size; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } + assert.NotNilf(t, b.Get(u64tob(uint64(i))), "key not found: %d", i) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } func TestDB_BatchTime(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) const size = 1 // buffered so we never leak goroutines @@ -1332,23 +1096,19 @@ func TestDB_BatchTime(t *testing.T) { // Check all responses to make sure there's no error. for i := 0; i < size; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } + err := <-ch + require.NoError(t, err) } // Ensure data is correct. - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for i := 1; i <= size; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } + assert.NotNilf(t, b.Get(u64tob(uint64(i))), "key not found: %d", i) } return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // TestDBUnmap verifes that `dataref`, `data` and `datasz` must be reset @@ -1518,12 +1278,11 @@ func ExampleDB_Begin() { func BenchmarkDBBatchAutomatic(b *testing.B) { db := btesting.MustCreateDB(b) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("bench")) return err - }); err != nil { - b.Fatal(err) - } + }) + require.NoError(b, err) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1562,12 +1321,11 @@ func BenchmarkDBBatchAutomatic(b *testing.B) { func BenchmarkDBBatchSingle(b *testing.B) { db := btesting.MustCreateDB(b) - if err 
:= db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("bench")) return err - }); err != nil { - b.Fatal(err) - } + }) + require.NoError(b, err) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1605,12 +1363,11 @@ func BenchmarkDBBatchSingle(b *testing.B) { func BenchmarkDBBatchManual10x100(b *testing.B) { db := btesting.MustCreateDB(b) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("bench")) return err - }); err != nil { - b.Fatal(err) - } + }) + require.NoError(b, err) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1647,9 +1404,7 @@ func BenchmarkDBBatchManual10x100(b *testing.B) { wg.Wait() close(errCh) for err := range errCh { - if err != nil { - b.Fatal(err) - } + require.NoError(b, err) } } diff --git a/internal/btesting/btesting.go b/internal/btesting/btesting.go index c83369f09..1f4ccb3bf 100644 --- a/internal/btesting/btesting.go +++ b/internal/btesting/btesting.go @@ -96,8 +96,7 @@ func (db *DB) Close() error { // MustClose closes the database but does NOT delete the underlying file. func (db *DB) MustClose() { - err := db.Close() - require.NoError(db.t, err) + require.NoError(db.t, db.Close()) } func (db *DB) MustDeleteFile() { diff --git a/internal/common/page_test.go b/internal/common/page_test.go index 376ab6a6c..21bbd972c 100644 --- a/internal/common/page_test.go +++ b/internal/common/page_test.go @@ -5,25 +5,23 @@ import ( "sort" "testing" "testing/quick" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // Ensure that the page type can be returned in human readable format. 
func TestPage_typ(t *testing.T) { - if typ := (&Page{flags: BranchPageFlag}).Typ(); typ != "branch" { - t.Fatalf("exp=branch; got=%v", typ) - } - if typ := (&Page{flags: LeafPageFlag}).Typ(); typ != "leaf" { - t.Fatalf("exp=leaf; got=%v", typ) - } - if typ := (&Page{flags: MetaPageFlag}).Typ(); typ != "meta" { - t.Fatalf("exp=meta; got=%v", typ) - } - if typ := (&Page{flags: FreelistPageFlag}).Typ(); typ != "freelist" { - t.Fatalf("exp=freelist; got=%v", typ) - } - if typ := (&Page{flags: 20000}).Typ(); typ != "unknown<4e20>" { - t.Fatalf("exp=unknown<4e20>; got=%v", typ) - } + typ := (&Page{flags: BranchPageFlag}).Typ() + require.Equalf(t, "branch", typ, "exp=branch; got=%v", typ) + typ = (&Page{flags: LeafPageFlag}).Typ() + require.Equalf(t, "leaf", typ, "exp=leaf; got=%v", typ) + typ = (&Page{flags: MetaPageFlag}).Typ() + require.Equalf(t, "meta", typ, "exp=meta; got=%v", typ) + typ = (&Page{flags: FreelistPageFlag}).Typ() + require.Equalf(t, "freelist", typ, "exp=freelist; got=%v", typ) + typ = (&Page{flags: 20000}).Typ() + require.Equalf(t, "unknown<4e20>", typ, "exp=unknown<4e20>; got=%v", typ) } // Ensure that the hexdump debugging function doesn't blow up. 
@@ -35,20 +33,16 @@ func TestPgids_merge(t *testing.T) { a := Pgids{4, 5, 6, 10, 11, 12, 13, 27} b := Pgids{1, 3, 8, 9, 25, 30} c := a.Merge(b) - if !reflect.DeepEqual(c, Pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) { - t.Errorf("mismatch: %v", c) - } + assert.Truef(t, reflect.DeepEqual(c, Pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}), "mismatch: %v", c) a = Pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36} b = Pgids{8, 9, 25, 30} c = a.Merge(b) - if !reflect.DeepEqual(c, Pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) { - t.Errorf("mismatch: %v", c) - } + assert.Truef(t, reflect.DeepEqual(c, Pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}), "mismatch: %v", c) } func TestPgids_merge_quick(t *testing.T) { - if err := quick.Check(func(a, b Pgids) bool { + err := quick.Check(func(a, b Pgids) bool { // Sort incoming lists. sort.Sort(a) sort.Sort(b) @@ -66,7 +60,6 @@ func TestPgids_merge_quick(t *testing.T) { } return true - }, nil); err != nil { - t.Fatal(err) - } + }, nil) + require.NoError(t, err) } diff --git a/internal/freelist/array_test.go b/internal/freelist/array_test.go index 904123042..aa026479a 100644 --- a/internal/freelist/array_test.go +++ b/internal/freelist/array_test.go @@ -14,43 +14,31 @@ func TestFreelistArray_allocate(t *testing.T) { f := NewArrayFreelist() ids := []common.Pgid{3, 4, 5, 6, 7, 9, 12, 13, 18} f.Init(ids) - if id := int(f.Allocate(1, 3)); id != 3 { - t.Fatalf("exp=3; got=%v", id) - } - if id := int(f.Allocate(1, 1)); id != 6 { - t.Fatalf("exp=6; got=%v", id) - } - if id := int(f.Allocate(1, 3)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.Allocate(1, 2)); id != 12 { - t.Fatalf("exp=12; got=%v", id) - } - if id := int(f.Allocate(1, 1)); id != 7 { - t.Fatalf("exp=7; got=%v", id) - } - if id := int(f.Allocate(1, 0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.Allocate(1, 0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := common.Pgids([]common.Pgid{9, 18}); 
!reflect.DeepEqual(exp, f.freePageIds()) { - t.Fatalf("exp=%v; got=%v", exp, f.freePageIds()) - } + id := int(f.Allocate(1, 3)) + require.Equalf(t, 3, id, "exp=3; got=%v", id) + id = int(f.Allocate(1, 1)) + require.Equalf(t, 6, id, "exp=6; got=%v", id) + id = int(f.Allocate(1, 3)) + require.Equalf(t, 0, id, "exp=0; got=%v", id) + id = int(f.Allocate(1, 2)) + require.Equalf(t, 12, id, "exp=12; got=%v", id) + id = int(f.Allocate(1, 1)) + require.Equalf(t, 7, id, "exp=7; got=%v", id) + id = int(f.Allocate(1, 0)) + require.Equalf(t, 0, id, "exp=0; got=%v", id) + id = int(f.Allocate(1, 0)) + require.Equalf(t, 0, id, "exp=0; got=%v", id) + exp := common.Pgids([]common.Pgid{9, 18}) + require.Truef(t, reflect.DeepEqual(exp, f.freePageIds()), "exp=%v; got=%v", exp, f.freePageIds()) - if id := int(f.Allocate(1, 1)); id != 9 { - t.Fatalf("exp=9; got=%v", id) - } - if id := int(f.Allocate(1, 1)); id != 18 { - t.Fatalf("exp=18; got=%v", id) - } - if id := int(f.Allocate(1, 1)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := common.Pgids([]common.Pgid{}); !reflect.DeepEqual(exp, f.freePageIds()) { - t.Fatalf("exp=%v; got=%v", exp, f.freePageIds()) - } + id = int(f.Allocate(1, 1)) + require.Equalf(t, 9, id, "exp=9; got=%v", id) + id = int(f.Allocate(1, 1)) + require.Equalf(t, 18, id, "exp=18; got=%v", id) + id = int(f.Allocate(1, 1)) + require.Equalf(t, 0, id, "exp=0; got=%v", id) + exp = common.Pgids([]common.Pgid{}) + require.Truef(t, reflect.DeepEqual(exp, f.freePageIds()), "exp=%v; got=%v", exp, f.freePageIds()) } func TestInvalidArrayAllocation(t *testing.T) { diff --git a/internal/freelist/freelist_test.go b/internal/freelist/freelist_test.go index 12ac4b6b2..86d2fcb33 100644 --- a/internal/freelist/freelist_test.go +++ b/internal/freelist/freelist_test.go @@ -24,18 +24,15 @@ const TestFreelistType = "TEST_FREELIST_TYPE" func TestFreelist_free(t *testing.T) { f := newTestFreelist() f.Free(100, common.NewPage(12, 0, 0, 0)) - if 
!reflect.DeepEqual([]common.Pgid{12}, f.pendingPageIds()[100].ids) { - t.Fatalf("exp=%v; got=%v", []common.Pgid{12}, f.pendingPageIds()[100].ids) - } + require.Truef(t, reflect.DeepEqual([]common.Pgid{12}, f.pendingPageIds()[100].ids), "exp=%v; got=%v", []common.Pgid{12}, f.pendingPageIds()[100].ids) } // Ensure that a page and its overflow is added to a transaction's freelist. func TestFreelist_free_overflow(t *testing.T) { f := newTestFreelist() f.Free(100, common.NewPage(12, 0, 0, 3)) - if exp := []common.Pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pendingPageIds()[100].ids) { - t.Fatalf("exp=%v; got=%v", exp, f.pendingPageIds()[100].ids) - } + exp := []common.Pgid{12, 13, 14, 15} + require.Truef(t, reflect.DeepEqual(exp, f.pendingPageIds()[100].ids), "exp=%v; got=%v", exp, f.pendingPageIds()[100].ids) } // Ensure that double freeing a page is causing a panic @@ -76,15 +73,13 @@ func TestFreelist_free_freelist_alloctx(t *testing.T) { f.Free(101, common.NewPage(12, common.FreelistPageFlag, 0, 0)) require.True(t, f.Freed(12)) - if exp := []common.Pgid{12}; !reflect.DeepEqual(exp, f.pendingPageIds()[101].ids) { - t.Fatalf("exp=%v; got=%v", exp, f.pendingPageIds()[101].ids) - } + exp := []common.Pgid{12} + require.Truef(t, reflect.DeepEqual(exp, f.pendingPageIds()[101].ids), "exp=%v; got=%v", exp, f.pendingPageIds()[101].ids) f.ReleasePendingPages() require.True(t, f.Freed(12)) require.Empty(t, f.pendingPageIds()) - if exp := common.Pgids([]common.Pgid{12}); !reflect.DeepEqual(exp, f.freePageIds()) { - t.Fatalf("exp=%v; got=%v", exp, f.freePageIds()) - } + exp2 := common.Pgids([]common.Pgid{12}) + require.Truef(t, reflect.DeepEqual(exp2, f.freePageIds()), "exp=%v; got=%v", exp2, f.freePageIds()) } // Ensure that a transaction's free pages can be released. 
@@ -95,14 +90,12 @@ func TestFreelist_release(t *testing.T) { f.Free(102, common.NewPage(39, 0, 0, 0)) f.release(100) f.release(101) - if exp := common.Pgids([]common.Pgid{9, 12, 13}); !reflect.DeepEqual(exp, f.freePageIds()) { - t.Fatalf("exp=%v; got=%v", exp, f.freePageIds()) - } + exp := common.Pgids([]common.Pgid{9, 12, 13}) + require.Truef(t, reflect.DeepEqual(exp, f.freePageIds()), "exp=%v; got=%v", exp, f.freePageIds()) f.release(102) - if exp := common.Pgids([]common.Pgid{9, 12, 13, 39}); !reflect.DeepEqual(exp, f.freePageIds()) { - t.Fatalf("exp=%v; got=%v", exp, f.freePageIds()) - } + exp = common.Pgids([]common.Pgid{9, 12, 13, 39}) + require.Truef(t, reflect.DeepEqual(exp, f.freePageIds()), "exp=%v; got=%v", exp, f.freePageIds()) } // Ensure that releaseRange handles boundary conditions correctly @@ -315,9 +308,8 @@ func TestFreelist_read(t *testing.T) { f.Read(page) // Ensure that there are two page ids in the freelist. - if exp := common.Pgids([]common.Pgid{23, 50}); !reflect.DeepEqual(exp, f.freePageIds()) { - t.Fatalf("exp=%v; got=%v", exp, f.freePageIds()) - } + exp := common.Pgids([]common.Pgid{23, 50}) + require.Truef(t, reflect.DeepEqual(exp, f.freePageIds()), "exp=%v; got=%v", exp, f.freePageIds()) } // Ensure that we never read a non-freelist page @@ -350,9 +342,8 @@ func TestFreelist_write(t *testing.T) { // Ensure that the freelist is correct. // All pages should be present and in reverse order. 
- if exp := common.Pgids([]common.Pgid{3, 11, 12, 28, 39}); !reflect.DeepEqual(exp, f2.freePageIds()) { - t.Fatalf("exp=%v; got=%v", exp, f2.freePageIds()) - } + exp := common.Pgids([]common.Pgid{3, 11, 12, 28, 39}) + require.Truef(t, reflect.DeepEqual(exp, f2.freePageIds()), "exp=%v; got=%v", exp, f2.freePageIds()) } func TestFreelist_E2E_HappyPath(t *testing.T) { @@ -382,7 +373,7 @@ func TestFreelist_E2E_HappyPath(t *testing.T) { expectedPgids := map[common.Pgid]struct{}{3: {}, 5: {}, 8: {}} for i := 0; i < 3; i++ { allocated = f.Allocate(common.Txid(4), 1) - require.Contains(t, expectedPgids, allocated, "expected to find pgid %d", allocated) + require.Containsf(t, expectedPgids, allocated, "expected to find pgid %d", allocated) require.False(t, f.Freed(allocated)) delete(expectedPgids, allocated) } @@ -598,17 +589,15 @@ func Test_freelist_ReadIDs_and_getFreePageIDs(t *testing.T) { f.Init(exp) - if got := f.freePageIds(); !reflect.DeepEqual(exp, got) { - t.Fatalf("exp=%v; got=%v", exp, got) - } + got := f.freePageIds() + require.Truef(t, reflect.DeepEqual(exp, got), "exp=%v; got=%v", exp, got) f2 := newTestFreelist() exp2 := []common.Pgid{} f2.Init(exp2) - if got2 := f2.freePageIds(); !reflect.DeepEqual(got2, common.Pgids(exp2)) { - t.Fatalf("exp2=%#v; got2=%#v", exp2, got2) - } + got2 := f2.freePageIds() + require.Truef(t, reflect.DeepEqual(got2, common.Pgids(exp2)), "exp2=%#v; got2=%#v", exp2, got2) } diff --git a/internal/freelist/hashmap_test.go b/internal/freelist/hashmap_test.go index de1954abb..8243fe4c9 100644 --- a/internal/freelist/hashmap_test.go +++ b/internal/freelist/hashmap_test.go @@ -26,23 +26,19 @@ func TestFreelistHashmap_allocate(t *testing.T) { f.Init(ids) f.Allocate(1, 3) - if x := f.FreeCount(); x != 6 { - t.Fatalf("exp=6; got=%v", x) - } + x := f.FreeCount() + require.Equalf(t, 6, x, "exp=6; got=%v", x) f.Allocate(1, 2) - if x := f.FreeCount(); x != 4 { - t.Fatalf("exp=4; got=%v", x) - } + x = f.FreeCount() + require.Equalf(t, 4, x, 
"exp=4; got=%v", x) f.Allocate(1, 1) - if x := f.FreeCount(); x != 3 { - t.Fatalf("exp=3; got=%v", x) - } + x = f.FreeCount() + require.Equalf(t, 3, x, "exp=3; got=%v", x) f.Allocate(1, 0) - if x := f.FreeCount(); x != 3 { - t.Fatalf("exp=3; got=%v", x) - } + x = f.FreeCount() + require.Equalf(t, 3, x, "exp=3; got=%v", x) } func TestFreelistHashmap_mergeWithExist(t *testing.T) { @@ -101,18 +97,10 @@ func TestFreelistHashmap_mergeWithExist(t *testing.T) { f.mergeWithExistingSpan(tt.pgid) - if got := f.freePageIds(); !reflect.DeepEqual(tt.want, got) { - t.Fatalf("name %s; exp=%v; got=%v", tt.name, tt.want, got) - } - if got := f.forwardMap; !reflect.DeepEqual(tt.wantForwardmap, got) { - t.Fatalf("name %s; exp=%v; got=%v", tt.name, tt.wantForwardmap, got) - } - if got := f.backwardMap; !reflect.DeepEqual(tt.wantBackwardmap, got) { - t.Fatalf("name %s; exp=%v; got=%v", tt.name, tt.wantBackwardmap, got) - } - if got := f.freemaps; !reflect.DeepEqual(tt.wantfreemap, got) { - t.Fatalf("name %s; exp=%v; got=%v", tt.name, tt.wantfreemap, got) - } + require.Truef(t, reflect.DeepEqual(tt.want, f.freePageIds()), "name %s; exp=%v; got=%v", tt.name, tt.want, f.freePageIds()) + require.Truef(t, reflect.DeepEqual(tt.wantForwardmap, f.forwardMap), "name %s; exp=%v; got=%v", tt.name, tt.wantForwardmap, f.forwardMap) + require.Truef(t, reflect.DeepEqual(tt.wantBackwardmap, f.backwardMap), "name %s; exp=%v; got=%v", tt.name, tt.wantBackwardmap, f.backwardMap) + require.Truef(t, reflect.DeepEqual(tt.wantfreemap, f.freemaps), "name %s; exp=%v; got=%v", tt.name, tt.wantfreemap, f.freemaps) } } @@ -133,9 +121,7 @@ func TestFreelistHashmap_GetFreePageIDs(t *testing.T) { f.forwardMap = fm res := f.freePageIds() - if !sort.SliceIsSorted(res, func(i, j int) bool { return res[i] < res[j] }) { - t.Fatalf("pgids not sorted") - } + require.Truef(t, sort.SliceIsSorted(res, func(i, j int) bool { return res[i] < res[j] }), "pgids not sorted") } func Test_Freelist_Hashmap_Rollback(t *testing.T) { 
diff --git a/internal/surgeon/surgeon_test.go b/internal/surgeon/surgeon_test.go index 3d50988cd..8fd9553b9 100644 --- a/internal/surgeon/surgeon_test.go +++ b/internal/surgeon/surgeon_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/internal/btesting" @@ -41,7 +42,7 @@ func TestRevertMetaPage(t *testing.T) { db.Close() // This causes the whole tree to be linked to the previous state - assert.NoError(t, surgeon.RevertMetaPage(db.Path())) + require.NoError(t, surgeon.RevertMetaPage(db.Path())) db.MustReopen() db.MustCheck() diff --git a/internal/surgeon/xray_test.go b/internal/surgeon/xray_test.go index 5f38d96ee..617b47dbe 100644 --- a/internal/surgeon/xray_test.go +++ b/internal/surgeon/xray_test.go @@ -24,12 +24,12 @@ func TestFindPathsToKey(t *testing.T) { navigator := surgeon.NewXRay(db.Path()) path1, err := navigator.FindPathsToKey([]byte("0451")) - assert.NoError(t, err) + require.NoError(t, err) assert.NotEmpty(t, path1) page := path1[0][len(path1[0])-1] p, _, err := guts_cli.ReadPage(db.Path(), uint64(page)) - assert.NoError(t, err) + require.NoError(t, err) assert.GreaterOrEqual(t, []byte("0451"), p.LeafPageElement(0).Key()) assert.LessOrEqual(t, []byte("0451"), p.LeafPageElement(p.Count()-1).Key()) } @@ -55,12 +55,12 @@ func TestFindPathsToKey_Bucket(t *testing.T) { navigator := surgeon.NewXRay(db.Path()) path1, err := navigator.FindPathsToKey(subBucket) - assert.NoError(t, err) + require.NoError(t, err) assert.NotEmpty(t, path1) page := path1[0][len(path1[0])-1] p, _, err := guts_cli.ReadPage(db.Path(), uint64(page)) - assert.NoError(t, err) + require.NoError(t, err) assert.GreaterOrEqual(t, subBucket, p.LeafPageElement(0).Key()) assert.LessOrEqual(t, subBucket, p.LeafPageElement(p.Count()-1).Key()) } diff --git a/internal/tests/tx_check_test.go b/internal/tests/tx_check_test.go index 7a4ab866d..7325cfda3 100644 --- 
a/internal/tests/tx_check_test.go +++ b/internal/tests/tx_check_test.go @@ -25,12 +25,12 @@ func TestTx_RecursivelyCheckPages_MisplacedPage(t *testing.T) { xRay := surgeon.NewXRay(db.Path()) path1, err := xRay.FindPathsToKey([]byte("0451")) - require.NoError(t, err, "cannot find page that contains key:'0451'") - require.Len(t, path1, 1, "Expected only one page that contains key:'0451'") + require.NoErrorf(t, err, "cannot find page that contains key:'0451'") + require.Lenf(t, path1, 1, "Expected only one page that contains key:'0451'") path2, err := xRay.FindPathsToKey([]byte("7563")) - require.NoError(t, err, "cannot find page that contains key:'7563'") - require.Len(t, path2, 1, "Expected only one page that contains key:'7563'") + require.NoErrorf(t, err, "cannot find page that contains key:'7563'") + require.Lenf(t, path2, 1, "Expected only one page that contains key:'7563'") srcPage := path1[0][len(path1[0])-1] targetPage := path2[0][len(path2[0])-1] @@ -64,13 +64,13 @@ func TestTx_RecursivelyCheckPages_CorruptedLeaf(t *testing.T) { xray := surgeon.NewXRay(db.Path()) path1, err := xray.FindPathsToKey([]byte("0451")) - require.NoError(t, err, "cannot find page that contains key:'0451'") - require.Len(t, path1, 1, "Expected only one page that contains key:'0451'") + require.NoErrorf(t, err, "cannot find page that contains key:'0451'") + require.Lenf(t, path1, 1, "Expected only one page that contains key:'0451'") srcPage := path1[0][len(path1[0])-1] p, pbuf, err := guts_cli.ReadPage(db.Path(), uint64(srcPage)) require.NoError(t, err) - require.Positive(t, p.Count(), "page must be not empty") + require.Positivef(t, p.Count(), "page must be not empty") p.LeafPageElement(p.Count() / 2).Key()[0] = 'z' require.NoError(t, guts_cli.WritePage(db.Path(), pbuf)) diff --git a/manydbs_test.go b/manydbs_test.go index 595c81b28..0186f374e 100644 --- a/manydbs_test.go +++ b/manydbs_test.go @@ -6,21 +6,19 @@ import ( "os" "path/filepath" "testing" + + 
"github.com/stretchr/testify/require" ) func createDb(t *testing.T) (*DB, func()) { // First, create a temporary directory to be used for the duration of // this test. tempDirName, err := os.MkdirTemp("", "bboltmemtest") - if err != nil { - t.Fatalf("error creating temp dir: %v", err) - } + require.NoErrorf(t, err, "error creating temp dir: %v", err) path := filepath.Join(tempDirName, "testdb.db") bdb, err := Open(path, 0600, nil) - if err != nil { - t.Fatalf("error creating bbolt db: %v", err) - } + require.NoErrorf(t, err, "error creating bbolt db: %v", err) cleanup := func() { bdb.Close() @@ -56,9 +54,7 @@ func createAndPutKeys(t *testing.T) { return nil }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } } diff --git a/movebucket_test.go b/movebucket_test.go index a04e24c9c..0da82a89c 100644 --- a/movebucket_test.go +++ b/movebucket_test.go @@ -364,13 +364,13 @@ func openBucket(tx *bbolt.Tx, bk *bbolt.Bucket, bucketToOpen string) *bbolt.Buck func createBucketAndPopulateData(t testing.TB, tx *bbolt.Tx, bk *bbolt.Bucket, bucketName string) *bbolt.Bucket { if bk == nil { newBucket, err := tx.CreateBucket([]byte(bucketName)) - require.NoError(t, err, "failed to create bucket %s", bucketName) + require.NoErrorf(t, err, "failed to create bucket %s", bucketName) populateSampleDataInBucket(t, newBucket, rand.Intn(4096)) return newBucket } newBucket, err := bk.CreateBucket([]byte(bucketName)) - require.NoError(t, err, "failed to create bucket %s", bucketName) + require.NoErrorf(t, err, "failed to create bucket %s", bucketName) populateSampleDataInBucket(t, newBucket, rand.Intn(4096)) return newBucket } diff --git a/node_test.go b/node_test.go index ce36bf069..79e0e18b4 100644 --- a/node_test.go +++ b/node_test.go @@ -4,6 +4,8 @@ import ( "testing" "unsafe" + "github.com/stretchr/testify/require" + "go.etcd.io/bbolt/internal/common" ) @@ -17,9 +19,7 @@ func TestNode_put(t *testing.T) { n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0) 
n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, common.LeafPageFlag) - if len(n.inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(n.inodes)) - } + require.Lenf(t, n.inodes, 3, "exp=3; got=%d", len(n.inodes)) if k, v := n.inodes[0].Key(), n.inodes[0].Value(); string(k) != "bar" || string(v) != "1" { t.Fatalf("exp=<bar,1>; got=<%s,%s>", k, v) } @@ -29,9 +29,7 @@ func TestNode_put(t *testing.T) { if k, v := n.inodes[2].Key(), n.inodes[2].Value(); string(k) != "foo" || string(v) != "3" { t.Fatalf("exp=<foo,3>; got=<%s,%s>", k, v) } - if n.inodes[2].Flags() != uint32(common.LeafPageFlag) { - t.Fatalf("not a leaf: %d", n.inodes[2].Flags()) - } + require.Equalf(t, uint32(common.LeafPageFlag), n.inodes[2].Flags(), "not a leaf: %d", n.inodes[2].Flags()) } // Ensure that a node can deserialize from a leaf page. @@ -58,12 +56,8 @@ func TestNode_read_LeafPage(t *testing.T) { n.read(page) // Check that there are two inodes with correct data. - if !n.isLeaf { - t.Fatal("expected leaf") - } - if len(n.inodes) != 2 { - t.Fatalf("exp=2; got=%d", len(n.inodes)) - } + require.Truef(t, n.isLeaf, "expected leaf") + require.Lenf(t, n.inodes, 2, "exp=2; got=%d", len(n.inodes)) if k, v := n.inodes[0].Key(), n.inodes[0].Value(); string(k) != "bar" || string(v) != "fooz" { t.Fatalf("exp=<bar,fooz>; got=<%s,%s>", k, v) } @@ -92,9 +86,7 @@ func TestNode_write_LeafPage(t *testing.T) { n2.read(p) // Check that the two pages are the same.
- if len(n2.inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(n2.inodes)) - } + require.Lenf(t, n2.inodes, 3, "exp=3; got=%d", len(n2.inodes)) if k, v := n2.inodes[0].Key(), n2.inodes[0].Value(); string(k) != "john" || string(v) != "johnson" { t.Fatalf("exp=<john,johnson>; got=<%s,%s>", k, v) } @@ -122,15 +114,9 @@ func TestNode_split(t *testing.T) { n.split(100) var parent = n.parent - if len(parent.children) != 2 { - t.Fatalf("exp=2; got=%d", len(parent.children)) - } - if len(parent.children[0].inodes) != 2 { - t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes)) - } - if len(parent.children[1].inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes)) - } + require.Lenf(t, parent.children, 2, "exp=2; got=%d", len(parent.children)) + require.Lenf(t, parent.children[0].inodes, 2, "exp=2; got=%d", len(parent.children[0].inodes)) + require.Lenf(t, parent.children[1].inodes, 3, "exp=3; got=%d", len(parent.children[1].inodes)) } // Ensure that a page with the minimum number of inodes just returns a single node. @@ -144,9 +130,7 @@ func TestNode_split_MinKeys(t *testing.T) { // Split. n.split(20) - if n.parent != nil { - t.Fatalf("expected nil parent") - } + require.Nilf(t, n.parent, "expected nil parent") } // Ensure that a node that has keys that all fit on a page just returns one leaf. @@ -163,7 +147,5 @@ func TestNode_split_SinglePage(t *testing.T) { // Split.
n.split(4096) - if n.parent != nil { - t.Fatalf("expected nil parent") - } + require.Nilf(t, n.parent, "expected nil parent") } diff --git a/simulation_test.go b/simulation_test.go index 6f4d5b236..335114811 100644 --- a/simulation_test.go +++ b/simulation_test.go @@ -8,6 +8,8 @@ import ( "sync/atomic" "testing" + "github.com/stretchr/testify/require" + bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/internal/btesting" ) @@ -138,9 +140,7 @@ func testSimulate(t *testing.T, openOption *bolt.Options, round, threadCount, pa t.Logf("transactions:%d ignored:%d", opCount, igCount) close(errCh) for err := range errCh { - if err != nil { - t.Fatalf("error from inside goroutine: %v", err) - } + require.NoErrorf(t, err, "error from inside goroutine: %v", err) } db.MustClose() diff --git a/tests/dmflakey/dmflakey_test.go b/tests/dmflakey/dmflakey_test.go index 99e2de062..fc749599e 100644 --- a/tests/dmflakey/dmflakey_test.go +++ b/tests/dmflakey/dmflakey_test.go @@ -3,7 +3,6 @@ package dmflakey import ( - "errors" "flag" "fmt" "os" @@ -31,7 +30,7 @@ func TestBasic(t *testing.T) { tmpDir := t.TempDir() flakey, err := InitFlakey("go-dmflakey", tmpDir, fsType, "") - require.NoError(t, err, "init flakey") + require.NoErrorf(t, err, "init flakey") defer func() { assert.NoError(t, flakey.Teardown()) }() @@ -79,11 +78,11 @@ func TestDropWritesExt4(t *testing.T) { require.NoError(t, mount(root, flakey.DevicePath(), "")) data, err := os.ReadFile(f1) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "hello, world from f1", string(data)) _, err = os.ReadFile(f2) - assert.True(t, errors.Is(err, os.ErrNotExist)) + assert.ErrorIs(t, err, os.ErrNotExist) } func TestErrorWritesExt4(t *testing.T) { @@ -97,10 +96,10 @@ func TestErrorWritesExt4(t *testing.T) { f1 := filepath.Join(root, "f1") err := writeFile(f1, []byte("hello, world during failpoint"), 0600, true) - assert.ErrorContains(t, err, "input/output error") + require.ErrorContains(t, err, "input/output error") // resume - 
assert.NoError(t, flakey.AllowWrites()) + require.NoError(t, flakey.AllowWrites()) err = writeFile(f1, []byte("hello, world"), 0600, true) assert.NoError(t, err) @@ -108,7 +107,7 @@ func TestErrorWritesExt4(t *testing.T) { require.NoError(t, mount(root, flakey.DevicePath(), "")) data, err := os.ReadFile(f1) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "hello, world", string(data)) } @@ -119,7 +118,7 @@ func initFlakey(t *testing.T, fsType FSType) (_ Flakey, root string) { require.NoError(t, os.MkdirAll(target, 0600)) flakey, err := InitFlakey("go-dmflakey", tmpDir, fsType, "") - require.NoError(t, err, "init flakey") + require.NoErrorf(t, err, "init flakey") t.Cleanup(func() { assert.NoError(t, unmount(target)) diff --git a/tests/failpoint/db_failpoint_test.go b/tests/failpoint/db_failpoint_test.go index e12566d3e..0d1321fdb 100644 --- a/tests/failpoint/db_failpoint_test.go +++ b/tests/failpoint/db_failpoint_test.go @@ -49,8 +49,7 @@ func TestFailpoint_UnmapFail_DbClose(t *testing.T) { db, err := bolt.Open(f, 0600, &bolt.Options{Timeout: 30 * time.Second}) require.NoError(t, err) - err = db.Close() - require.NoError(t, err) + require.NoError(t, db.Close()) } func TestFailpoint_mLockFail(t *testing.T) { @@ -136,13 +135,9 @@ func TestFailpoint_LackOfDiskSpace(t *testing.T) { tx, err := db.Begin(true) require.NoError(t, err) - err = tx.Commit() - require.Error(t, err) - require.ErrorContains(t, err, "grow somehow failed") + require.ErrorContains(t, tx.Commit(), "grow somehow failed") - err = tx.Rollback() - require.Error(t, err) - require.ErrorIs(t, err, errors.ErrTxClosed) + require.ErrorIs(t, tx.Rollback(), errors.ErrTxClosed) // It should work after disabling the failpoint. 
err = gofail.Disable("lackOfDiskSpace") @@ -151,12 +146,9 @@ func TestFailpoint_LackOfDiskSpace(t *testing.T) { tx, err = db.Begin(true) require.NoError(t, err) - err = tx.Commit() - require.NoError(t, err) + require.NoError(t, tx.Commit()) - err = tx.Rollback() - require.Error(t, err) - require.ErrorIs(t, err, errors.ErrTxClosed) + require.ErrorIs(t, tx.Rollback(), errors.ErrTxClosed) } // TestIssue72 reproduces issue 72. @@ -324,7 +316,7 @@ func TestTx_Rollback_Freelist(t *testing.T) { beforeFreelistPgids, err := readFreelistPageIds(db.Path()) require.NoError(t, err) - require.Greater(t, len(beforeFreelistPgids), 0) + require.NotEmpty(t, beforeFreelistPgids) t.Log("Simulate TXN rollback") err = db.Update(func(tx *bolt.Tx) error { diff --git a/tests/robustness/powerfailure_test.go b/tests/robustness/powerfailure_test.go index d8c497e0a..4faabd9bb 100644 --- a/tests/robustness/powerfailure_test.go +++ b/tests/robustness/powerfailure_test.go @@ -167,7 +167,7 @@ func doPowerFailure(t *testing.T, du time.Duration, fsType dmflakey.FSType, mkfs cmd.Stderr = logFd cmd.Env = append(cmd.Env, "GOFAIL_HTTP="+fpURL) t.Logf("start %s", strings.Join(args, " ")) - require.NoError(t, cmd.Start(), "args: %v", args) + require.NoErrorf(t, cmd.Start(), "args: %v", args) errCh := make(chan error, 1) go func() { @@ -192,13 +192,13 @@ func doPowerFailure(t *testing.T, du time.Duration, fsType dmflakey.FSType, mkfs activeFailpoint(t, fpURL, targetFp, "panic") } else { t.Log("kill bbolt") - assert.NoError(t, cmd.Process.Kill()) + require.NoError(t, cmd.Process.Kill()) } select { case <-time.After(10 * time.Second): t.Log("bbolt is supposed to be already stopped, but actually not yet; forcibly kill it") - assert.NoError(t, cmd.Process.Kill()) + require.NoError(t, cmd.Process.Kill()) case err := <-errCh: require.Error(t, err) } @@ -210,13 +210,13 @@ func doPowerFailure(t *testing.T, du time.Duration, fsType dmflakey.FSType, mkfs t.Logf("verify data") output, err := exec.Command("bbolt", 
"check", dbPath).CombinedOutput() - require.NoError(t, err, "bbolt check output: %s", string(output)) + require.NoErrorf(t, err, "bbolt check output: %s", string(output)) } // activeFailpoint actives the failpoint by http. func activeFailpoint(t *testing.T, targetUrl string, fpName, fpVal string) { u, err := url.JoinPath(targetUrl, fpName) - require.NoError(t, err, "parse url %s", targetUrl) + require.NoErrorf(t, err, "parse url %s", targetUrl) req, err := http.NewRequest("PUT", u, bytes.NewBuffer([]byte(fpVal))) require.NoError(t, err) @@ -227,7 +227,7 @@ func activeFailpoint(t *testing.T, targetUrl string, fpName, fpVal string) { data, err := io.ReadAll(resp.Body) require.NoError(t, err) - require.Equal(t, 204, resp.StatusCode, "response body: %s", string(data)) + require.Equalf(t, 204, resp.StatusCode, "response body: %s", string(data)) } // FlakeyDevice extends dmflakey.Flakey interface. @@ -246,14 +246,13 @@ func initFlakeyDevice(t *testing.T, name string, fsType dmflakey.FSType, mkfsOpt imgDir := t.TempDir() flakey, err := dmflakey.InitFlakey(name, imgDir, fsType, mkfsOpt) - require.NoError(t, err, "init flakey %s", name) + require.NoErrorf(t, err, "init flakey %s", name) t.Cleanup(func() { assert.NoError(t, flakey.Teardown()) }) rootDir := t.TempDir() - err = unix.Mount(flakey.DevicePath(), rootDir, string(fsType), 0, mntOpt) - require.NoError(t, err, "init rootfs on %s", rootDir) + require.NoErrorf(t, unix.Mount(flakey.DevicePath(), rootDir, string(fsType), 0, mntOpt), "init rootfs on %s", rootDir) t.Cleanup(func() { assert.NoError(t, unmountAll(rootDir)) }) @@ -321,6 +320,6 @@ func unmountAll(target string) error { func randomInt(t *testing.T, max int) int { n, err := rand.Int(rand.Reader, big.NewInt(int64(max))) - assert.NoError(t, err) + require.NoError(t, err) return int(n.Int64()) } diff --git a/tx_check_test.go b/tx_check_test.go index a0ce69a29..13732ac51 100644 --- a/tx_check_test.go +++ b/tx_check_test.go @@ -39,7 +39,7 @@ func 
TestTx_Check_CorruptPage(t *testing.T) { for cErr := range errChan { cErrs = append(cErrs, cErr) } - require.Greater(t, len(cErrs), 0) + require.NotEmpty(t, cErrs) t.Log("Check valid pages.") cErrs = cErrs[:0] @@ -48,7 +48,7 @@ func TestTx_Check_CorruptPage(t *testing.T) { for cErr := range errChan { cErrs = append(cErrs, cErr) } - require.Equal(t, 0, len(cErrs)) + require.Empty(t, cErrs) } return nil }) @@ -108,7 +108,7 @@ func TestTx_Check_WithNestBucket(t *testing.T) { for cErr := range errChan { cErrs = append(cErrs, cErr) } - require.Equal(t, 0, len(cErrs)) + require.Empty(t, cErrs) return nil }) @@ -139,7 +139,7 @@ func corruptRandomLeafPageInBucket(t testing.TB, db *bbolt.DB, bucketName []byte victimPage, victimBuf, err := guts_cli.ReadPage(db.Path(), uint64(victimPageId)) require.NoError(t, err) require.True(t, victimPage.IsLeafPage()) - require.True(t, victimPage.Count() > 1) + require.Greater(t, victimPage.Count(), uint16(1)) // intentionally make the second key < the first key. element := victimPage.LeafPageElement(1) diff --git a/tx_test.go b/tx_test.go index cc59804b2..bd1c2b1ba 100644 --- a/tx_test.go +++ b/tx_test.go @@ -21,32 +21,21 @@ import ( // TestTx_Check_ReadOnly tests consistency checking on a ReadOnly database. 
func TestTx_Check_ReadOnly(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) return nil - }); err != nil { - t.Fatal(err) - } - if err := db.Close(); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) + require.NoError(t, db.Close()) readOnlyDB, err := bolt.Open(db.Path(), 0600, &bolt.Options{ReadOnly: true}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer readOnlyDB.Close() tx, err := readOnlyDB.Begin(false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // ReadOnly DB will load freelist on Check call. numChecks := 2 errc := make(chan error, numChecks) @@ -58,36 +47,25 @@ func TestTx_Check_ReadOnly(t *testing.T) { go check() } for i := 0; i < numChecks; i++ { - if err := <-errc; err != nil { - t.Fatal(err) - } + err := <-errc + require.NoError(t, err) } // Close the view transaction - err = tx.Rollback() - if err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Rollback()) } // Ensure that committing a closed transaction returns an error. func TestTx_Commit_ErrTxClosed(t *testing.T) { db := btesting.MustCreateDB(t) tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if _, err := tx.CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } + _, err = tx.CreateBucket([]byte("foo")) + require.NoError(t, err) - if err := tx.Commit(); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Commit()) - if err := tx.Commit(); err != berrors.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } + require.Equalf(t, tx.Commit(), berrors.ErrTxClosed, "unexpected error") } // Ensure that rolling back a closed transaction returns an error. 
@@ -95,137 +73,96 @@ func TestTx_Rollback_ErrTxClosed(t *testing.T) { db := btesting.MustCreateDB(t) tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - if err := tx.Rollback(); err != berrors.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } + require.NoError(t, tx.Rollback()) + require.Equalf(t, tx.Rollback(), berrors.ErrTxClosed, "unexpected error") } // Ensure that committing a read-only transaction returns an error. func TestTx_Commit_ErrTxNotWritable(t *testing.T) { db := btesting.MustCreateDB(t) tx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } - if err := tx.Commit(); err != berrors.ErrTxNotWritable { - t.Fatal(err) - } + require.NoError(t, err) + require.Equal(t, tx.Commit(), berrors.ErrTxNotWritable) // Close the view transaction - err = tx.Rollback() - if err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Rollback()) } // Ensure that a transaction can retrieve a cursor on the root bucket. 
func TestTx_Cursor(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) - if _, err := tx.CreateBucket([]byte("woojits")); err != nil { - t.Fatal(err) - } + _, err = tx.CreateBucket([]byte("woojits")) + require.NoError(t, err) c := tx.Cursor() - if k, v := c.First(); !bytes.Equal(k, []byte("widgets")) { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", v) - } + k, v := c.First() + require.Truef(t, bytes.Equal(k, []byte("widgets")), "unexpected key: %v", k) + require.Nilf(t, v, "unexpected value: %v", v) - if k, v := c.Next(); !bytes.Equal(k, []byte("woojits")) { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", v) - } + k, v = c.Next() + require.Truef(t, bytes.Equal(k, []byte("woojits")), "unexpected key: %v", k) + require.Nilf(t, v, "unexpected value: %v", v) - if k, v := c.Next(); k != nil { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", k) - } + k, v = c.Next() + require.Nilf(t, k, "unexpected key: %v", k) + require.Nilf(t, v, "unexpected value: %v", k) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that creating a bucket with a read-only transaction returns an error. 
func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.View(func(tx *bolt.Tx) error { + err := db.View(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("foo")) - if err != berrors.ErrTxNotWritable { - t.Fatalf("unexpected error: %s", err) - } + require.Equalf(t, err, berrors.ErrTxNotWritable, "unexpected error: %s", err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that creating a bucket on a closed transaction returns an error. func TestTx_CreateBucket_ErrTxClosed(t *testing.T) { db := btesting.MustCreateDB(t) tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - if err := tx.Commit(); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, tx.Commit()) - if _, err := tx.CreateBucket([]byte("foo")); err != berrors.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } + _, err = tx.CreateBucket([]byte("foo")) + require.Equalf(t, err, berrors.ErrTxClosed, "unexpected error: %s", err) } // Ensure that a Tx can retrieve a bucket. func TestTx_Bucket(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) + require.NotNilf(t, tx.Bucket([]byte("widgets")), "expected bucket") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a Tx retrieving a non-existent key returns nil. 
func TestTx_Get_NotFound(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if b.Get([]byte("no_such_key")) != nil { - t.Fatal("expected nil value") - } + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) + require.Nilf(t, b.Get([]byte("no_such_key")), "expected nil value") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a bucket can be created and retrieved. @@ -233,79 +170,61 @@ func TestTx_CreateBucket(t *testing.T) { db := btesting.MustCreateDB(t) // Create a bucket. - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } else if b == nil { - t.Fatal("expected bucket") - } + require.NoError(t, err) + require.NotNilf(t, b, "expected bucket") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Read the bucket through a separate transaction. - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } + err = db.View(func(tx *bolt.Tx) error { + require.NotNilf(t, tx.Bucket([]byte("widgets")), "expected bucket") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a bucket can be created if it doesn't already exist. func TestTx_CreateBucketIfNotExists(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { // Create bucket. 
- if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil { - t.Fatal(err) - } else if b == nil { - t.Fatal("expected bucket") - } + b, err := tx.CreateBucketIfNotExists([]byte("widgets")) + require.NoError(t, err) + require.NotNilf(t, b, "expected bucket") // Create bucket again. - if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil { - t.Fatal(err) - } else if b == nil { - t.Fatal("expected bucket") - } + b, err = tx.CreateBucketIfNotExists([]byte("widgets")) + require.NoError(t, err) + require.NotNilf(t, b, "expected bucket") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Read the bucket through a separate transaction. - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } + err = db.View(func(tx *bolt.Tx) error { + require.NotNilf(t, tx.Bucket([]byte("widgets")), "expected bucket") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure transaction returns an error if creating an unnamed bucket. func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucketIfNotExists([]byte{}); err != berrors.ErrBucketNameRequired { - t.Fatalf("unexpected error: %s", err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucketIfNotExists([]byte{}) + require.Equalf(t, err, berrors.ErrBucketNameRequired, "unexpected error: %s", err) - if _, err := tx.CreateBucketIfNotExists(nil); err != berrors.ErrBucketNameRequired { - t.Fatalf("unexpected error: %s", err) - } + _, err = tx.CreateBucketIfNotExists(nil) + require.Equalf(t, err, berrors.ErrBucketNameRequired, "unexpected error: %s", err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a bucket cannot be created twice. 
@@ -313,37 +232,31 @@ func TestTx_CreateBucket_ErrBucketExists(t *testing.T) { db := btesting.MustCreateDB(t) // Create a bucket. - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Create the same bucket again. - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != berrors.ErrBucketExists { - t.Fatalf("unexpected error: %s", err) - } + err = db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + require.Equalf(t, err, berrors.ErrBucketExists, "unexpected error: %s", err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a bucket is created with a non-blank name. func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket(nil); err != berrors.ErrBucketNameRequired { - t.Fatalf("unexpected error: %s", err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket(nil) + require.Equalf(t, err, berrors.ErrBucketNameRequired, "unexpected error: %s", err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that a bucket can be deleted. @@ -351,134 +264,95 @@ func TestTx_DeleteBucket(t *testing.T) { db := btesting.MustCreateDB(t) // Create a bucket and add a value. 
- if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) // Delete the bucket and make sure we can't get the value. - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - if tx.Bucket([]byte("widgets")) != nil { - t.Fatal("unexpected bucket") - } + err = db.Update(func(tx *bolt.Tx) error { + require.NoError(t, tx.DeleteBucket([]byte("widgets"))) + require.Nilf(t, tx.Bucket([]byte("widgets")), "unexpected bucket") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.Update(func(tx *bolt.Tx) error { + err = db.Update(func(tx *bolt.Tx) error { // Create the bucket again and make sure there's not a phantom value. b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if v := b.Get([]byte("foo")); v != nil { - t.Fatalf("unexpected phantom value: %v", v) - } + require.NoError(t, err) + require.Nilf(t, b.Get([]byte("foo")), "unexpected phantom value") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that deleting a bucket on a closed transaction returns an error. 
func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) { db := btesting.MustCreateDB(t) tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - if err := tx.DeleteBucket([]byte("foo")); err != berrors.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } + require.NoError(t, err) + require.NoError(t, tx.Commit()) + require.Equalf(t, tx.DeleteBucket([]byte("foo")), berrors.ErrTxClosed, "unexpected error") } // Ensure that deleting a bucket with a read-only transaction returns an error. func TestTx_DeleteBucket_ReadOnly(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.View(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("foo")); err != berrors.ErrTxNotWritable { - t.Fatalf("unexpected error: %s", err) - } + err := db.View(func(tx *bolt.Tx) error { + require.Equalf(t, tx.DeleteBucket([]byte("foo")), berrors.ErrTxNotWritable, "unexpected error") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that nothing happens when deleting a bucket that doesn't exist. func TestTx_DeleteBucket_NotFound(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("widgets")); err != berrors.ErrBucketNotFound { - t.Fatalf("unexpected error: %s", err) - } + err := db.Update(func(tx *bolt.Tx) error { + require.Equalf(t, tx.DeleteBucket([]byte("widgets")), berrors.ErrBucketNotFound, "unexpected error") return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that no error is returned when a tx.ForEach function does not return // an error. 
func TestTx_ForEach_NoError(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) - if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { + err = tx.ForEach(func(name []byte, b *bolt.Bucket) error { return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that an error is returned when a tx.ForEach function returns an error. func TestTx_ForEach_WithError(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) marker := errors.New("marker") - if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { + err = tx.ForEach(func(name []byte, b *bolt.Bucket) error { return marker - }); err != marker { - t.Fatalf("unexpected error: %s", err) - } + }) + require.Equalf(t, err, marker, "unexpected error: %s", err) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) } // Ensure that Tx commit handlers are called after a transaction successfully commits. 
@@ -486,18 +360,15 @@ func TestTx_OnCommit(t *testing.T) { db := btesting.MustCreateDB(t) var x int - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { tx.OnCommit(func() { x += 1 }) tx.OnCommit(func() { x += 2 }) - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) return nil - }); err != nil { - t.Fatal(err) - } else if x != 3 { - t.Fatalf("unexpected x: %d", x) - } + }) + require.NoError(t, err) + require.Equalf(t, 3, x, "unexpected x: %d", x) } // Ensure that Tx commit handlers are NOT called after a transaction rolls back. @@ -505,18 +376,15 @@ func TestTx_OnCommit_Rollback(t *testing.T) { db := btesting.MustCreateDB(t) var x int - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { tx.OnCommit(func() { x += 1 }) tx.OnCommit(func() { x += 2 }) - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } + _, err := tx.CreateBucket([]byte("widgets")) + require.NoError(t, err) return errors.New("rollback this commit") - }); err == nil || err.Error() != "rollback this commit" { - t.Fatalf("unexpected error: %s", err) - } else if x != 0 { - t.Fatalf("unexpected x: %d", x) - } + }) + require.EqualErrorf(t, err, "rollback this commit", "unexpected error: %s", err) + require.Equalf(t, 0, x, "unexpected x: %d", x) } // Ensure that the database can be copied to a file path. 
@@ -524,48 +392,32 @@ func TestTx_CopyFile(t *testing.T) { db := btesting.MustCreateDB(t) path := tempfile() - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) + require.NoError(t, b.Put([]byte("baz"), []byte("bat"))) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { return tx.CopyFile(path, 0600) - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) db2, err := bolt.Open(path, 0600, nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := db2.View(func(tx *bolt.Tx) error { - if v := tx.Bucket([]byte("widgets")).Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) { - t.Fatalf("unexpected value: %v", v) - } - if v := tx.Bucket([]byte("widgets")).Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { - t.Fatalf("unexpected value: %v", v) - } + err = db2.View(func(tx *bolt.Tx) error { + v := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + require.Truef(t, bytes.Equal(v, []byte("bar")), "unexpected value: %v", v) + v = tx.Bucket([]byte("widgets")).Get([]byte("baz")) + require.Truef(t, bytes.Equal(v, []byte("bat")), "unexpected value: %v", v) return nil - }); err != nil { - t.Fatal(err) - } - - if err := db2.Close(); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) + require.NoError(t, db2.Close()) } type failWriterError struct{} @@ -592,53 +444,37 @@ func (f *failWriter) Write(p []byte) (n int, err error) { // Ensure that Copy handles write errors right. 
func TestTx_CopyFile_Error_Meta(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) + require.NoError(t, b.Put([]byte("baz"), []byte("bat"))) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{}) - }); err == nil || err.Error() != "meta 0 copy: error injected for tests" { - t.Fatalf("unexpected error: %v", err) - } + }) + require.EqualErrorf(t, err, "meta 0 copy: error injected for tests", "unexpected error: %v", err) } // Ensure that Copy handles write errors right. 
func TestTx_CopyFile_Error_Normal(t *testing.T) { db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, b.Put([]byte("foo"), []byte("bar"))) + require.NoError(t, b.Put([]byte("baz"), []byte("bat"))) return nil - }); err != nil { - t.Fatal(err) - } + }) + require.NoError(t, err) - if err := db.View(func(tx *bolt.Tx) error { + err = db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{3 * db.Info().PageSize}) - }); err == nil || err.Error() != "error injected for tests" { - t.Fatalf("unexpected error: %v", err) - } + }) + require.EqualErrorf(t, err, "error injected for tests", "unexpected error: %v", err) } // TestTx_Rollback ensures there is no error when tx rollback whether we sync freelist or not. 
@@ -653,41 +489,24 @@ func TestTx_Rollback(t *testing.T) {
 		db.NoFreelistSync = isSyncFreelist
 
 		tx, err := db.Begin(true)
-		if err != nil {
-			t.Fatalf("Error starting tx: %v", err)
-		}
+		require.NoErrorf(t, err, "Error starting tx: %v", err)
 		bucket := []byte("mybucket")
-		if _, err := tx.CreateBucket(bucket); err != nil {
-			t.Fatalf("Error creating bucket: %v", err)
-		}
-		if err := tx.Commit(); err != nil {
-			t.Fatalf("Error on commit: %v", err)
-		}
+		_, err = tx.CreateBucket(bucket)
+		require.NoErrorf(t, err, "Error creating bucket: %v", err)
+		require.NoErrorf(t, tx.Commit(), "Error on commit")
 
 		tx, err = db.Begin(true)
-		if err != nil {
-			t.Fatalf("Error starting tx: %v", err)
-		}
+		require.NoErrorf(t, err, "Error starting tx: %v", err)
 		b := tx.Bucket(bucket)
-		if err := b.Put([]byte("k"), []byte("v")); err != nil {
-			t.Fatalf("Error on put: %v", err)
-		}
+		require.NoErrorf(t, b.Put([]byte("k"), []byte("v")), "Error on put")
 		// Imagine there is an error and tx needs to be rolled-back
-		if err := tx.Rollback(); err != nil {
-			t.Fatalf("Error on rollback: %v", err)
-		}
+		require.NoErrorf(t, tx.Rollback(), "Error on rollback")
 
 		tx, err = db.Begin(false)
-		if err != nil {
-			t.Fatalf("Error starting tx: %v", err)
-		}
+		require.NoErrorf(t, err, "Error starting tx: %v", err)
 		b = tx.Bucket(bucket)
-		if v := b.Get([]byte("k")); v != nil {
-			t.Fatalf("Value for k should not have been stored")
-		}
-		if err := tx.Rollback(); err != nil {
-			t.Fatalf("Error on rollback: %v", err)
-		}
+		require.Nilf(t, b.Get([]byte("k")), "Value for k should not have been stored")
+		require.NoErrorf(t, tx.Rollback(), "Error on rollback")
 	}
 }
 
@@ -706,27 +525,21 @@ func TestTx_releaseRange(t *testing.T) {
 	bucket := "bucket"
 	put := func(key, value string) {
-		if err := db.Update(func(tx *bolt.Tx) error {
+		err := db.Update(func(tx *bolt.Tx) error {
 			b, err := tx.CreateBucketIfNotExists([]byte(bucket))
-			if err != nil {
-				t.Fatal(err)
-			}
+			require.NoError(t, err)
 			return b.Put([]byte(key), []byte(value))
-		}); err != nil {
-			t.Fatal(err)
-		}
+		})
+		require.NoError(t, err)
 	}
 	del := func(key string) {
-		if err := db.Update(func(tx *bolt.Tx) error {
+		err := db.Update(func(tx *bolt.Tx) error {
 			b, err := tx.CreateBucketIfNotExists([]byte(bucket))
-			if err != nil {
-				t.Fatal(err)
-			}
+			require.NoError(t, err)
 			return b.Delete([]byte(key))
-		}); err != nil {
-			t.Fatal(err)
-		}
+		})
+		require.NoError(t, err)
 	}
 	getWithTxn := func(txn *bolt.Tx, key string) []byte {
@@ -735,23 +548,17 @@
 	openReadTxn := func() *bolt.Tx {
 		readTx, err := db.Begin(false)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		return readTx
 	}
 
 	checkWithReadTxn := func(txn *bolt.Tx, key string, wantValue []byte) {
 		value := getWithTxn(txn, key)
-		if !bytes.Equal(value, wantValue) {
-			t.Errorf("Wanted value to be %s for key %s, but got %s", wantValue, key, string(value))
-		}
+		assert.Truef(t, bytes.Equal(value, wantValue), "Wanted value to be %s for key %s, but got %s", wantValue, key, string(value))
 	}
 
 	rollback := func(txn *bolt.Tx) {
-		if err := txn.Rollback(); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, txn.Rollback())
 	}
 
 	put("k1", "v1")
@@ -1035,8 +842,7 @@ func TestTx_TruncateBeforeWrite(t *testing.T) {
 		require.NoError(t, err)
 		err = b.Put([]byte{byte(count)}, bigvalue)
 		require.NoError(t, err)
-		err = tx.Commit()
-		require.NoError(t, err)
+		require.NoError(t, tx.Commit())
 
 		size := fileSize(db.Path())
diff --git a/unix_test.go b/unix_test.go
index ac53ad559..8bf69a814 100644
--- a/unix_test.go
+++ b/unix_test.go
@@ -6,6 +6,8 @@ import (
 	"fmt"
 	"testing"
 
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"golang.org/x/sys/unix"
 
 	bolt "go.etcd.io/bbolt"
@@ -26,22 +28,17 @@
 	db := btesting.MustCreateDBWithOption(t, &bolt.Options{Mlock: true})
 
-	if err := db.Update(func(tx *bolt.Tx) error {
+	err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucketIfNotExists([]byte("bucket"))
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		key := []byte("key")
 		value := []byte("value")
-		if err := b.Put(key, value); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, b.Put(key, value))
 		return nil
-	}); err != nil {
-		t.Fatal(err)
-	}
+	})
+	require.NoError(t, err)
 }
@@ -69,41 +66,32 @@
 	}
 
 	newDbSize := fileSize(db.Path())
-	if newDbSize <= dbSize {
-		t.Errorf("db didn't grow: %v <= %v", newDbSize, dbSize)
-	}
+	assert.Greaterf(t, newDbSize, dbSize, "db didn't grow: %v <= %v", newDbSize, dbSize)
 }
 
 func insertChunk(t *testing.T, db *btesting.DB, chunkId int) {
 	chunkSize := 1024
-	if err := db.Update(func(tx *bolt.Tx) error {
+	err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucketIfNotExists([]byte("bucket"))
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		for i := 0; i < chunkSize; i++ {
 			key := []byte(fmt.Sprintf("key-%d-%d", chunkId, i))
 			value := []byte("value")
-			if err := b.Put(key, value); err != nil {
-				t.Fatal(err)
-			}
+			require.NoError(t, b.Put(key, value))
 		}
 		return nil
-	}); err != nil {
-		t.Fatal(err)
-	}
+	})
+	require.NoError(t, err)
 }
 
 // Main reason for this check is travis limiting mlockable memory to 64KB
 // https://github.com/travis-ci/travis-ci/issues/2462
 func skipOnMemlockLimitBelow(t *testing.T, memlockLimitRequest uint64) {
 	var info unix.Rlimit
-	if err := unix.Getrlimit(unix.RLIMIT_MEMLOCK, &info); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, unix.Getrlimit(unix.RLIMIT_MEMLOCK, &info))
 
 	if info.Cur < memlockLimitRequest {
 		t.Skipf(