This repository was archived by the owner on Feb 21, 2024. It is now read-only.

Commit 564967f

Merge pull request #1924 from seebs/golangci-lint
Golangci lint
2 parents: 099dc2d + 449c853


45 files changed: +818 −340 lines

.circleci/config.yml

+6 −5
@@ -24,17 +24,18 @@ jobs:
       - persist_to_workspace:
           root: .
           paths: "*"
-  linter:
+  check-license-headers:
     <<: *defaults
     steps:
       - *fast-checkout
-      - run: make install-gometalinter
-      - run: make gometalinter
-  check-license-headers:
+      - run: make check-license-headers
+  linter:
     <<: *defaults
     steps:
       - *fast-checkout
-      - run: make check-license-headers
+      - run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s v1.13.2
+      - run: sudo cp bin/golangci-lint /usr/local/bin/
+      - run: make golangci-lint
   test-build-arm:
     <<: *defaults
     steps:

Makefile

+8 −1
@@ -1,4 +1,4 @@
-.PHONY: build check-clean clean cover cover-viz default docker docker-build docker-test generate generate-protoc generate-pql gometalinter install install-build-deps install-gometalinter install-protoc install-protoc-gen-gofast install-peg prerelease prerelease-upload release release-build test
+.PHONY: build check-clean clean cover cover-viz default docker docker-build docker-test generate generate-protoc generate-pql gometalinter install install-build-deps install-golangci-lint install-gometalinter install-protoc install-protoc-gen-gofast install-peg prerelease prerelease-upload release release-build test
 
 CLONE_URL=github.com/pilosa/pilosa
 VERSION := $(shell git describe --tags 2> /dev/null || echo unknown)
@@ -131,6 +131,10 @@ docker-build:
 docker-test:
 	docker run --rm -v $(PWD):/go/src/$(CLONE_URL) -w /go/src/$(CLONE_URL) golang:$(GO_VERSION) go test -tags='$(BUILD_TAGS)' $(TESTFLAGS) ./...
 
+# Run golangci-lint
+golangci-lint: require-golangci-lint
+	golangci-lint run
+
 # Run gometalinter with custom flags
 gometalinter: require-gometalinter vendor
 	GO111MODULE=off gometalinter --vendor --disable-all \
@@ -185,6 +189,9 @@ install-protoc:
 install-peg:
 	GO111MODULE=off go get github.com/pointlander/peg
 
+install-golangci-lint:
+	GO111MODULE=off go get github.com/golangci/golangci-lint/cmd/golangci-lint
+
 install-gometalinter:
 	GO111MODULE=off go get -u github.com/alecthomas/gometalinter
 	GO111MODULE=off gometalinter --install

boltdb/attrstore.go

+46 −48
@@ -215,66 +215,64 @@ func (s *attrStore) SetBulkAttrs(m map[uint64]map[string]interface{}) error {
 }
 
 // Blocks returns a list of all blocks in the store.
-func (s *attrStore) Blocks() ([]pilosa.AttrBlock, error) {
-	tx, err := s.db.Begin(false)
-	if err != nil {
-		return nil, errors.Wrap(err, "starting transaction")
-	}
-	defer tx.Rollback()
-
-	// Wrap cursor to segment by block.
-	cur := newBlockCursor(tx.Bucket([]byte("attrs")).Cursor(), attrBlockSize)
-
-	// Iterate over each block.
-	var blocks []pilosa.AttrBlock
-	for cur.nextBlock() {
-		block := pilosa.AttrBlock{ID: cur.blockID()}
+func (s *attrStore) Blocks() (blocks []pilosa.AttrBlock, err error) {
+	err = s.db.View(func(tx *bolt.Tx) error {
+		// Wrap cursor to segment by block.
+		cur := newBlockCursor(tx.Bucket([]byte("attrs")).Cursor(), attrBlockSize)
+
+		// Iterate over each block.
+		for cur.nextBlock() {
+			block := pilosa.AttrBlock{ID: cur.blockID()}
+
+			// Compute checksum of every key/value in block.
+			h := xxhash.New()
+			for k, v := cur.next(); k != nil; k, v = cur.next() {
+				// hash function writes don't usually need to be checked
+				_, _ = h.Write(k)
+				_, _ = h.Write(v)
+			}
+			block.Checksum = h.Sum(nil)
 
-		// Compute checksum of every key/value in block.
-		h := xxhash.New()
-		for k, v := cur.next(); k != nil; k, v = cur.next() {
-			h.Write(k)
-			h.Write(v)
+			// Append block.
+			blocks = append(blocks, block)
 		}
-		block.Checksum = h.Sum(nil)
-
-		// Append block.
-		blocks = append(blocks, block)
+		return nil
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "getting blocks")
 	}
-
 	return blocks, nil
 }
 
 // BlockData returns all data for a single block.
-func (s *attrStore) BlockData(i uint64) (map[uint64]map[string]interface{}, error) {
-	m := make(map[uint64]map[string]interface{})
+func (s *attrStore) BlockData(i uint64) (m map[uint64]map[string]interface{}, err error) {
+	m = make(map[uint64]map[string]interface{})
 
 	// Start read-only transaction.
-	tx, err := s.db.Begin(false)
-	if err != nil {
-		return nil, errors.Wrap(err, "starting transaction")
-	}
-	defer tx.Rollback()
-
-	// Move to the start of the block.
-	min := u64tob(i * attrBlockSize)
-	max := u64tob((i + 1) * attrBlockSize)
-	cur := tx.Bucket([]byte("attrs")).Cursor()
-	for k, v := cur.Seek(min); k != nil; k, v = cur.Next() {
-		// Exit if we're past the end of the block.
-		if bytes.Compare(k, max) != -1 {
-			break
-		}
+	err = s.db.View(func(tx *bolt.Tx) error {
+		// Move to the start of the block.
+		min := u64tob(i * attrBlockSize)
+		max := u64tob((i + 1) * attrBlockSize)
+		cur := tx.Bucket([]byte("attrs")).Cursor()
+		for k, v := cur.Seek(min); k != nil; k, v = cur.Next() {
+			// Exit if we're past the end of the block.
+			if bytes.Compare(k, max) != -1 {
+				break
+			}
 
-		// Decode attribute map and associate with id.
-		attrs, err := pilosa.DecodeAttrs(v)
-		if err != nil {
-			return nil, errors.Wrap(err, "decoding attrs")
-		}
-		m[btou64(k)] = attrs
+			// Decode attribute map and associate with id.
+			attrs, err := pilosa.DecodeAttrs(v)
+			if err != nil {
+				return errors.Wrap(err, "decoding attrs")
+			}
+			m[btou64(k)] = attrs
 
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "getting block data")
 	}
-
 	return m, nil
 }
 
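
Note: the recurring pattern in this file is replacing a manually managed read-only transaction (Begin(false) plus a deferred tx.Rollback() whose return value the linter flags as unchecked) with bolt's View helper, which rolls back internally and returns the closure's error. Below is a minimal, self-contained sketch of that pattern; the bbolt import path and the "attrs" bucket are assumptions for illustration, not Pilosa's actual wiring.

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt" // assumed import path; Pilosa carried its own bolt dependency
)

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Seed a bucket so the read below has something to walk.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("attrs"))
		if err != nil {
			return err
		}
		return b.Put([]byte("k1"), []byte("v1"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// The refactored read path: View opens a read-only transaction, runs the
	// closure, and always rolls back, so there is no ignored tx.Rollback()
	// return value for errcheck to flag. The closure's error becomes View's
	// return value.
	var n int
	err = db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket([]byte("attrs")).Cursor()
		for k, _ := c.First(); k != nil; k, _ = c.Next() {
			n++
		}
		return nil
	})
	if err != nil {
		log.Fatalf("reading attrs: %v", err)
	}
	fmt.Println("keys:", n)
}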

cluster.go

+8 −3
@@ -844,8 +844,8 @@ func (c *cluster) partition(index string, shard uint64) int {
 
 	// Hash the bytes and mod by partition count.
 	h := fnv.New64a()
-	h.Write([]byte(index))
-	h.Write(buf[:])
+	_, _ = h.Write([]byte(index))
+	_, _ = h.Write(buf[:])
 	return int(h.Sum64() % uint64(c.partitionN))
 }
 
@@ -1892,7 +1892,12 @@ func (c *cluster) mergeClusterStatus(cs *ClusterStatus) error {
 	for _, node := range officialNodes {
 		if node.ID == c.Node.ID && node.State != c.Node.State {
 			c.logger.Printf("mismatched state in mergeClusterStatus got %v have %v", node.State, c.Node.State)
-			go c.setNodeState(c.Node.State)
+			go func(fromState, toState string) {
+				err := c.setNodeState(toState)
+				if err != nil {
+					c.logger.Printf("error setting node state from %v to %v: %v", fromState, toState, err)
+				}
+			}(node.State, c.Node.State)
 		}
 		if err := c.addNode(node); err != nil {
 			return errors.Wrap(err, "adding node")
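
Note: two lint-driven idioms appear here. The return values of hash.Hash.Write are explicitly discarded, since the standard library documents that hash writes never return an error, and the fire-and-forget setNodeState call is wrapped in a goroutine closure so its error is at least logged. A standalone sketch of both follows; partition and setState are illustrative stand-ins, not the real cluster methods, and the shard encoding here is assumed.

package main

import (
	"encoding/binary"
	"hash/fnv"
	"log"
)

// partition mirrors the shape of cluster.partition: hash the index name plus
// the shard bytes and mod by the partition count. hash.Hash.Write never
// returns an error, so the results are discarded explicitly to satisfy
// errcheck.
func partition(index string, shard, partitionN uint64) int {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], shard)

	h := fnv.New64a()
	_, _ = h.Write([]byte(index))
	_, _ = h.Write(buf[:])
	return int(h.Sum64() % partitionN)
}

// setState is a placeholder for a call like c.setNodeState whose error used
// to vanish inside a bare `go` statement.
func setState(state string) error { return nil }

func main() {
	log.Println("partition:", partition("i", 1, 256))

	// Wrapping the goroutine body in a closure lets the error be logged
	// instead of silently dropped.
	done := make(chan struct{})
	go func(toState string) {
		defer close(done)
		if err := setState(toState); err != nil {
			log.Printf("error setting node state to %v: %v", toState, err)
		}
	}("NORMAL")
	<-done
}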

cluster_internal_test.go

+44 −19
@@ -607,15 +607,19 @@ func TestCluster_ResizeStates(t *testing.T) {
 
 	t.Run("Single node, in topology", func(t *testing.T) {
 		tc := NewClusterCluster(0)
-		tc.addNode()
+		if err := tc.addNode(); err != nil {
+			t.Fatalf("adding node: %v", err)
+		}
 
 		node := tc.Clusters[0]
 
 		// write topology to data file
 		top := &Topology{
 			nodeIDs: []string{node.Node.ID},
 		}
-		tc.WriteTopology(node.Path, top)
+		if err := tc.WriteTopology(node.Path, top); err != nil {
+			t.Fatalf("writing topology: %v", err)
+		}
 
 		// Open TestCluster.
 		if err := tc.Open(); err != nil {
@@ -635,15 +639,19 @@ func TestCluster_ResizeStates(t *testing.T) {
 
 	t.Run("Single node, not in topology", func(t *testing.T) {
 		tc := NewClusterCluster(0)
-		tc.addNode()
+		if err := tc.addNode(); err != nil {
+			t.Fatalf("adding node: %v", err)
+		}
 
 		node := tc.Clusters[0]
 
 		// write topology to data file
 		top := &Topology{
 			nodeIDs: []string{"some-other-host"},
 		}
-		tc.WriteTopology(node.Path, top)
+		if err := tc.WriteTopology(node.Path, top); err != nil {
+			t.Fatalf("writing topology: %v", err)
+		}
 
 		// Open TestCluster.
 		expected := "coordinator node0 is not in topology: [some-other-host]"
@@ -660,14 +668,18 @@ func TestCluster_ResizeStates(t *testing.T) {
 
 	t.Run("Multiple nodes, no data", func(t *testing.T) {
 		tc := NewClusterCluster(0)
-		tc.addNode()
+		if err := tc.addNode(); err != nil {
+			t.Fatalf("adding node: %v", err)
+		}
 
 		// Open TestCluster.
 		if err := tc.Open(); err != nil {
-			t.Fatal(err)
+			t.Fatalf("opening cluster: %v", err)
 		}
 
-		tc.addNode()
+		if err := tc.addNode(); err != nil {
+			t.Fatalf("adding node: %v", err)
+		}
 
 		node0 := tc.Clusters[0]
 		node1 := tc.Clusters[1]
@@ -698,18 +710,22 @@ func TestCluster_ResizeStates(t *testing.T) {
 
 	t.Run("Multiple nodes, in/not in topology", func(t *testing.T) {
 		tc := NewClusterCluster(0)
-		tc.addNode()
+		if err := tc.addNode(); err != nil {
+			t.Fatalf("adding node: %v", err)
+		}
 		node0 := tc.Clusters[0]
 
 		// write topology to data file
 		top := &Topology{
 			nodeIDs: []string{"node0", "node2"},
 		}
-		tc.WriteTopology(node0.Path, top)
+		if err := tc.WriteTopology(node0.Path, top); err != nil {
+			t.Fatalf("writing topology: %v", err)
+		}
 
 		// Open TestCluster.
 		if err := tc.Open(); err != nil {
-			t.Fatal(err)
+			t.Fatalf("opening cluster: %v", err)
 		}
 
 		// Ensure that node is in state STARTING before the other node joins.
@@ -719,19 +735,20 @@ func TestCluster_ResizeStates(t *testing.T) {
 
 		// Expect an error by adding a node not in the topology.
 		expectedError := "host is not in topology: node1"
-		err := tc.addNode()
-		if err == nil || err.Error() != expectedError {
+		if err := tc.addNode(); err == nil || err.Error() != expectedError {
 			t.Errorf("did not receive expected error: %s", expectedError)
 		}
 
-		tc.addNode()
+		if err := tc.addNode(); err != nil {
+			t.Fatalf("adding node: %v", err)
+		}
 		node2 := tc.Clusters[2]
 
 		// Ensure that node comes up in state NORMAL.
 		if node0.State() != ClusterStateNormal {
 			t.Errorf("expected node0 state: %v, but got: %v", ClusterStateNormal, node0.State())
 		} else if node2.State() != ClusterStateNormal {
-			t.Errorf("expected node1 state: %v, but got: %v", ClusterStateNormal, node2.State())
+			t.Errorf("expected node2 state: %v, but got: %v", ClusterStateNormal, node2.State())
 		}
 
 		// Close TestCluster.
@@ -742,7 +759,9 @@ func TestCluster_ResizeStates(t *testing.T) {
 
 	t.Run("Multiple nodes, with data", func(t *testing.T) {
 		tc := NewClusterCluster(0)
-		tc.addNode()
+		if err := tc.addNode(); err != nil {
+			t.Fatalf("adding node: %v", err)
+		}
 		node0 := tc.Clusters[0]
 
 		// Open TestCluster.
@@ -752,10 +771,14 @@ func TestCluster_ResizeStates(t *testing.T) {
 
 		// Add Bit Data to node0.
 		if err := tc.CreateField("i", "f", OptFieldTypeDefault()); err != nil {
-			t.Fatal(err)
+			t.Fatalf("creating field: %v", err)
+		}
+		if err := tc.SetBit("i", "f", 1, 101, nil); err != nil {
+			t.Fatalf("setting bit: %v", err)
+		}
+		if err := tc.SetBit("i", "f", 1, ShardWidth+1, nil); err != nil {
+			t.Fatalf("setting bit: %v", err)
 		}
-		tc.SetBit("i", "f", 1, 101, nil)
-		tc.SetBit("i", "f", 1, ShardWidth+1, nil)
 
 		// Before starting the resize, get the CheckSum to use for
 		// comparison later.
@@ -765,7 +788,9 @@ func TestCluster_ResizeStates(t *testing.T) {
 		node0Checksum := node0Fragment.Checksum()
 
 		// addNode needs to block until the resize process has completed.
-		tc.addNode()
+		if err := tc.addNode(); err != nil {
+			t.Fatalf("adding node: %v", err)
+		}
 		node1 := tc.Clusters[1]
 
 		// Ensure that nodes come up in state NORMAL.
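
Note: every change in this test file is one of two idioms: wrap a previously ignored call in `if err := ...; err != nil { t.Fatalf(...) }`, and guard `err.Error()` comparisons with an `err == nil` check so a missing error fails the test instead of panicking. A toy test illustrating both follows; addNode and addNodeOutsideTopology are stand-in helpers, not the real TestCluster methods.

package example

import (
	"errors"
	"testing"
)

// addNode and addNodeOutsideTopology stand in for tc.addNode() in the real
// test; they exist only to demonstrate the error-checking idiom.
func addNode() error { return nil }

func addNodeOutsideTopology() error {
	return errors.New("host is not in topology: node1")
}

func TestErrorCheckingIdioms(t *testing.T) {
	// Previously ignored return values are now checked inline and reported
	// with context.
	if err := addNode(); err != nil {
		t.Fatalf("adding node: %v", err)
	}

	// When a specific error is expected, check err != nil before calling
	// err.Error(), so an unexpected nil error fails cleanly instead of
	// panicking the test.
	expectedError := "host is not in topology: node1"
	if err := addNodeOutsideTopology(); err == nil || err.Error() != expectedError {
		t.Errorf("did not receive expected error: %s", expectedError)
	}
}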

cmd/root.go

+5 −3
@@ -51,11 +51,13 @@ Build Time: ` + pilosa.BuildTime + "\n",
 			}
 
 			// return "dry run" error if "dry-run" flag is set
-			if ret, err := cmd.Flags().GetBool("dry-run"); ret && err == nil {
+			ret, err := cmd.Flags().GetBool("dry-run")
+			if err != nil {
+				return fmt.Errorf("problem getting dry-run flag: %v", err)
+			}
+			if ret {
 				if cmd.Parent() != nil {
 					return fmt.Errorf("dry run")
-				} else if err != nil {
-					return fmt.Errorf("problem getting dry-run flag: %v", err)
 				}
 			}
 
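
Note: the original condition `ret && err == nil` silently dropped any error from the flag lookup; the fix surfaces that error first and only then honors the flag value. A minimal cobra sketch of the resulting control flow follows; the command name and flag wiring are illustrative, not Pilosa's actual CLI setup.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "example",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Check the lookup error before acting on the flag value.
			dry, err := cmd.Flags().GetBool("dry-run")
			if err != nil {
				return fmt.Errorf("problem getting dry-run flag: %v", err)
			}
			if dry {
				return fmt.Errorf("dry run")
			}
			fmt.Println("running for real")
			return nil
		},
	}
	cmd.Flags().Bool("dry-run", false, "validate configuration without running")

	if err := cmd.Execute(); err != nil {
		fmt.Println("error:", err)
	}
}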

cmd/root_test.go

+4 −2
@@ -187,10 +187,12 @@ bind = "127.0.0.1:10101"
 	"127.0.0.1:10101",
 	"127.0.0.1:10111",
 ]`
-	file.Write([]byte(config))
+	if _, err := file.Write([]byte(config)); err != nil {
+		t.Fatalf("writing config file: %v", err)
+	}
 	file.Close()
 	_, err = ExecNewRootCommand(t, "server", "--config", file.Name())
-	if err.Error() != "invalid option in configuration file: cluster.partitions" {
+	if err == nil || err.Error() != "invalid option in configuration file: cluster.partitions" {
 		t.Fatalf("Expected invalid option in configuration file, but err: '%v'", err)
 	}
 }
