@@ -18,6 +18,7 @@ import (
1818 "bytes"
1919 "context"
2020 "errors"
21+ "runtime"
2122 "strconv"
2223 "sync"
2324 "time"
@@ -57,6 +58,8 @@ type DMapPipeline struct {
 	result   map[uint64][]redis.Cmder
 	ctx      context.Context
 	cancel   context.CancelFunc
+
+	concurrency int // defaults to runtime.NumCPU()
 }
 
 func (dp *DMapPipeline) addCommand(key string, cmd redis.Cmder) (uint64, int) {
@@ -418,8 +421,7 @@ func (dp *DMapPipeline) Exec(ctx context.Context) error {
 	defer dp.cancel()
 
 	var errGr errgroup.Group
-	numCpu := 1
-	sem := semaphore.NewWeighted(int64(numCpu))
+	sem := semaphore.NewWeighted(int64(dp.concurrency))
 	for i := uint64(0); i < dp.dm.clusterClient.partitionCount; i++ {
 		err := sem.Acquire(ctx, 1)
 		if err != nil {
@@ -494,15 +496,23 @@ func (dp *DMapPipeline) Close() {
 // results in case of big pipelines and small read/write timeouts.
 // Redis client has retransmission logic in case of timeouts, pipeline
 // can be retransmitted and commands can be executed more than once.
-func (dm *ClusterDMap) Pipeline() (*DMapPipeline, error) {
+func (dm *ClusterDMap) Pipeline(opts ...PipelineOption) (*DMapPipeline, error) {
 	ctx, cancel := context.WithCancel(context.Background())
-	return &DMapPipeline{
+	dp := &DMapPipeline{
 		dm:       dm,
 		commands: make(map[uint64][]redis.Cmder),
 		result:   make(map[uint64][]redis.Cmder),
 		ctx:      ctx,
 		cancel:   cancel,
-	}, nil
+
+		concurrency: runtime.NumCPU(),
+	}
+
+	for _, opt := range opts {
+		opt(dp)
+	}
+
+	return dp, nil
 }
 
 // This stores a slice of commands for each partition. There is a possibility that a single
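The new Pipeline signature accepts variadic PipelineOption values, but the option type itself sits outside this hunk. A minimal sketch of how such an option could be defined and used, assuming the functional-options pattern and a hypothetical PipelineConcurrency helper:

// PipelineOption mutates a DMapPipeline before Pipeline returns it.
// Sketch only; the actual definition is not part of this diff.
type PipelineOption func(*DMapPipeline)

// PipelineConcurrency limits how many partitions Exec flushes in parallel,
// overriding the runtime.NumCPU() default. The name is an assumption.
func PipelineConcurrency(concurrency int) PipelineOption {
	return func(dp *DMapPipeline) {
		dp.concurrency = concurrency
	}
}

With an option like that, a caller could cap Exec at, say, four concurrent partition flushes:

pipe, err := dm.Pipeline(PipelineConcurrency(4))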