@@ -83,57 +83,71 @@ func TestMain(m *testing.M) {
 
 func TestUploads(t *testing.T) {
 	t.Run("tracks loading", func(t *testing.T) {
-		db, minioResource, whClient := setupServer(t, false, nil, nil)
-
-		var (
-			ctx    = context.Background()
-			events = 100
-			jobs   = 1
-		)
+		testCases := []struct {
+			batchStagingFiles bool
+		}{
+			{batchStagingFiles: false},
+			{batchStagingFiles: true},
+		}
+		for _, tc := range testCases {
+			if tc.batchStagingFiles {
+				t.Setenv(config.ConfigKeyToEnv(config.DefaultEnvPrefix, "Warehouse.enableV2NotifierJob"), "true")
+				t.Setenv(config.ConfigKeyToEnv(config.DefaultEnvPrefix, "Warehouse.loadFiles.queryWithUploadID.enable"), "true")
+			}
+			db, minioResource, whClient := setupServer(t, false, nil, nil)
 
-		eventsPayload := strings.Join(lo.RepeatBy(events, func(int) string {
-			return fmt.Sprintf(`{"data":{"id":%q,"user_id":%q,"received_at":"2023-05-12T04:36:50.199Z"},"metadata":{"columns":{"id":"string","user_id":"string","received_at":"datetime"}, "table": "tracks"}}`,
-				uuid.New().String(),
-				uuid.New().String(),
+			var (
+				ctx    = context.Background()
+				events = 100
+				jobs   = 1
 			)
-		}), "\n")
+			eventsPayload := strings.Join(lo.RepeatBy(events, func(int) string {
+				return fmt.Sprintf(`{"data":{"id":%q,"user_id":%q,"received_at":"2023-05-12T04:36:50.199Z"},"metadata":{"columns":{"id":"string","user_id":"string","received_at":"datetime"}, "table": "tracks"}}`,
+					uuid.New().String(),
+					uuid.New().String(),
+				)
+			}), "\n")
 
-		require.NoError(t, whClient.Process(ctx, whclient.StagingFile{
-			WorkspaceID:           workspaceID,
-			SourceID:              sourceID,
-			DestinationID:         destinationID,
-			Location:              prepareStagingFile(t, ctx, minioResource, eventsPayload).ObjectName,
-			TotalEvents:           events,
-			FirstEventAt:          time.Now().Format(misc.RFC3339Milli),
-			LastEventAt:           time.Now().Add(time.Minute * 30).Format(misc.RFC3339Milli),
-			UseRudderStorage:      false,
-			DestinationRevisionID: destinationID,
-			Schema: map[string]map[string]string{
-				"tracks": {
-					"id":          "string",
-					"user_id":     "string",
-					"received_at": "datetime",
+			require.NoError(t, whClient.Process(ctx, whclient.StagingFile{
+				WorkspaceID:           workspaceID,
+				SourceID:              sourceID,
+				DestinationID:         destinationID,
+				Location:              prepareStagingFile(t, ctx, minioResource, eventsPayload).ObjectName,
+				TotalEvents:           events,
+				FirstEventAt:          time.Now().Format(misc.RFC3339Milli),
+				LastEventAt:           time.Now().Add(time.Minute * 30).Format(misc.RFC3339Milli),
+				UseRudderStorage:      false,
+				BytesPerTable: map[string]int64{
+					"tracks": int64(len(eventsPayload)),
 				},
-			},
-		}))
-		requireStagingFileEventsCount(t, ctx, db, events, []lo.Tuple2[string, any]{
-			{A: "source_id", B: sourceID},
-			{A: "destination_id", B: destinationID},
-			{A: "status", B: succeeded},
-		}...)
-		requireTableUploadEventsCount(t, ctx, db, events, []lo.Tuple2[string, any]{
-			{A: "status", B: exportedData},
-			{A: "wh_uploads.source_id", B: sourceID},
-			{A: "wh_uploads.destination_id", B: destinationID},
-			{A: "wh_uploads.namespace", B: namespace},
-		}...)
-		requireUploadJobsCount(t, ctx, db, jobs, []lo.Tuple2[string, any]{
-			{A: "source_id", B: sourceID},
-			{A: "destination_id", B: destinationID},
-			{A: "namespace", B: namespace},
-			{A: "status", B: exportedData},
-		}...)
-		requireDownstreamEventsCount(t, ctx, db, fmt.Sprintf("%s.%s", namespace, "tracks"), events)
+				DestinationRevisionID: destinationID,
+				Schema: map[string]map[string]string{
+					"tracks": {
+						"id":          "string",
+						"user_id":     "string",
+						"received_at": "datetime",
+					},
+				},
+			}))
+			requireStagingFileEventsCount(t, ctx, db, events, []lo.Tuple2[string, any]{
+				{A: "source_id", B: sourceID},
+				{A: "destination_id", B: destinationID},
+				{A: "status", B: succeeded},
+			}...)
+			requireTableUploadEventsCount(t, ctx, db, events, []lo.Tuple2[string, any]{
+				{A: "status", B: exportedData},
+				{A: "wh_uploads.source_id", B: sourceID},
+				{A: "wh_uploads.destination_id", B: destinationID},
+				{A: "wh_uploads.namespace", B: namespace},
+			}...)
+			requireUploadJobsCount(t, ctx, db, jobs, []lo.Tuple2[string, any]{
+				{A: "source_id", B: sourceID},
+				{A: "destination_id", B: destinationID},
+				{A: "namespace", B: namespace},
+				{A: "status", B: exportedData},
+			}...)
+			requireDownstreamEventsCount(t, ctx, db, fmt.Sprintf("%s.%s", namespace, "tracks"), events)
+		}
 	})
 	t.Run("user and identifies loading", func(t *testing.T) {
 		db, minioResource, whClient := setupServer(t, false, nil, nil)
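
Not part of the diff, but for orientation: the change makes the "tracks loading" subtest table-driven so the same upload flow runs once with the existing staging-file handling and once with batched staging files, flipping the env-backed config keys through t.Setenv before the server is started, and it adds BytesPerTable to the staging-file payload. Below is a minimal, hypothetical sketch of that t.Setenv toggle pattern; the package name, test name, and the rudder-go-kit import path are assumptions, not code from this commit.

package integration_test

import (
	"testing"

	"github.com/rudderlabs/rudder-go-kit/config" // assumed import path for the config package used in the diff
)

// TestBatchStagingFilesToggle is a hypothetical, minimal illustration of the pattern
// in the diff: each case optionally flips env-backed config keys with t.Setenv, which
// scopes the change to the test and restores the environment when the test ends.
func TestBatchStagingFilesToggle(t *testing.T) {
	testCases := []struct {
		name              string
		batchStagingFiles bool
	}{
		{name: "unbatched staging files", batchStagingFiles: false},
		{name: "batched staging files", batchStagingFiles: true},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			if tc.batchStagingFiles {
				// ConfigKeyToEnv maps a dotted config key to the environment variable
				// name derived from config.DefaultEnvPrefix, so the setting is picked
				// up when the warehouse components read their configuration.
				t.Setenv(config.ConfigKeyToEnv(config.DefaultEnvPrefix, "Warehouse.enableV2NotifierJob"), "true")
				t.Setenv(config.ConfigKeyToEnv(config.DefaultEnvPrefix, "Warehouse.loadFiles.queryWithUploadID.enable"), "true")
			}
			// Set up the server and run the upload assertions here, as in the diff above.
		})
	}
}

One caveat with this pattern: t.Setenv panics in tests that also call t.Parallel, so the cases have to run sequentially, which matches the plain loop used in the commit.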