 package server_test
 
 import (
+	"bytes"
 	"context"
 	"encoding/json"
+	"flag"
 	"fmt"
 	"io/ioutil"
 	"math/rand"
@@ -28,13 +30,22 @@ import (
 	"testing/quick"
 	"time"
 
+	"golang.org/x/sync/errgroup"
+
 	"github.com/pelletier/go-toml"
 	"github.com/pilosa/pilosa"
 	"github.com/pilosa/pilosa/http"
+	"github.com/pilosa/pilosa/roaring"
 	"github.com/pilosa/pilosa/server"
 	"github.com/pilosa/pilosa/test"
 )
 
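+// runStress gates the long-running stress tests below; they run only
+// when the -stress flag is set.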
+var runStress bool
+
+func init() { // nolint: gochecknoinits
+	flag.BoolVar(&runStress, "stress", false, "Enable stress tests (time consuming)")
+}
+
 // Ensure program can process queries and maintain consistency.
 func TestMain_Set_Quick(t *testing.T) {
 	if testing.Short() {
@@ -754,3 +765,97 @@ func TestClusterQueriesAfterRestart(t *testing.T) {
 }
 
 // TODO: confirm that things keep working if a node is hard-closed (no nodeLeave event) and immediately restarted with a different address.
+
+func TestClusterExhaustingConnections(t *testing.T) {
+	if !runStress {
+		t.Skip("stress")
+	}
+	cluster := test.MustRunCluster(t, 5)
+	defer cluster.Close()
+	cmd1 := cluster[1]
+
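+	// Confirm every node reports the whole cluster as READY before
+	// generating any load.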
+ for _ , com := range cluster {
778
+ nodes := com .API .Hosts (context .Background ())
779
+ for _ , n := range nodes {
780
+ if n .State != "READY" {
781
+ t .Fatalf ("unexpected node state after upping cluster: %v" , nodes )
782
+ }
783
+ }
784
+ }
785
+
786
+ cmd1 .MustCreateIndex (t , "testidx" , pilosa.IndexOptions {})
787
+ cmd1 .MustCreateField (t , "testidx" , "testfield" , pilosa .OptFieldTypeSet (pilosa .CacheTypeRanked , 10 ))
788
+
789
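+	// Fan out 20 goroutines; each handles a disjoint residue class of j, so
+	// every Set lands in its own shard (bit j*ShardWidth), and the queries
+	// are spread round-robin across the 5 nodes.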
+	eg := errgroup.Group{}
+	for i := 0; i < 20; i++ {
+		i := i
+		eg.Go(func() error {
+			for j := i; j < 10000; j += 20 {
+				_, err := cluster[i%5].API.Query(context.Background(), &pilosa.QueryRequest{
+					Index: "testidx",
+					Query: fmt.Sprintf("Set(%d, testfield=0)", j*pilosa.ShardWidth),
+				})
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		})
+	}
+	err := eg.Wait()
+	if err != nil {
+		t.Fatalf("setting lots of shards: %v", err)
+	}
+}
+
+func TestClusterExhaustingConnectionsImport(t *testing.T) {
+	if !runStress {
+		t.Skip("stress")
+	}
+	cluster := test.MustRunCluster(t, 5)
+	defer cluster.Close()
+	cmd1 := cluster[1]
+
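+	// As above, wait until every node sees the cluster READY before importing.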
+ for _ , com := range cluster {
820
+ nodes := com .API .Hosts (context .Background ())
821
+ for _ , n := range nodes {
822
+ if n .State != "READY" {
823
+ t .Fatalf ("unexpected node state after upping cluster: %v" , nodes )
824
+ }
825
+ }
826
+ }
827
+
828
+ cmd1 .MustCreateIndex (t , "testidx" , pilosa.IndexOptions {})
829
+ cmd1 .MustCreateField (t , "testidx" , "testfield" , pilosa .OptFieldTypeSet (pilosa .CacheTypeRanked , 10 ))
830
+
831
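+	// Build a minimal one-bit roaring bitmap and serialize it once; every
+	// import request below reuses the same encoded payload.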
+	bm := roaring.NewBitmap()
+	bm.DirectAdd(0)
+	buf := &bytes.Buffer{}
+	if _, err := bm.WriteTo(buf); err != nil {
+		t.Fatalf("serializing bitmap: %v", err)
+	}
+	data := buf.Bytes()
+
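+	// 20 concurrent importers, each targeting its own set of shards (the
+	// shard argument to ImportRoaring is j), spread across the 5 nodes.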
+	eg := errgroup.Group{}
+	for i := uint64(0); i < 20; i++ {
+		i := i
+		eg.Go(func() error {
+			for j := i; j < 10000; j += 20 {
+				if (j-i)%1000 == 0 {
+					fmt.Printf("%d is %.2f%% done.\n", i, float64(j-i)*100/10000)
+				}
+				err := cluster[i%5].API.ImportRoaring(context.Background(), "testidx", "testfield", j, false, &pilosa.ImportRoaringRequest{
+					Views: map[string][]byte{
+						"": data,
+					},
+				})
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		})
+	}
+	err := eg.Wait()
+	if err != nil {
+		t.Fatalf("importing lots of shards: %v", err)
+	}
+}