@@ -247,9 +247,15 @@ def backfill(self, dest, room_id, limit, extremities=[]):
             if set(e_id for e_id, _ in ev.prev_events) - event_ids
         ]
 
+        logger.info(
+            "backfill: Got %d events with %d edges",
+            len(events), len(edges),
+        )
+
         # For each edge get the current state.
 
         auth_events = {}
+        state_events = {}
         events_to_state = {}
         for e_id in edges:
             state, auth = yield self.replication_layer.get_state_for_room(
@@ -258,12 +264,46 @@ def backfill(self, dest, room_id, limit, extremities=[]):
                 event_id=e_id
             )
             auth_events.update({a.event_id: a for a in auth})
+            auth_events.update({s.event_id: s for s in state})
+            state_events.update({s.event_id: s for s in state})
             events_to_state[e_id] = state
 
+        seen_events = yield self.store.have_events(
+            set(auth_events.keys()) | set(state_events.keys())
+        )
+
+        all_events = events + state_events.values() + auth_events.values()
+        required_auth = set(
+            a_id for event in all_events for a_id, _ in event.auth_events
+        )
+
+        missing_auth = required_auth - set(auth_events)
+        results = yield defer.gatherResults(
+            [
+                self.replication_layer.get_pdu(
+                    [dest],
+                    event_id,
+                    outlier=True,
+                    timeout=10000,
+                )
+                for event_id in missing_auth
+            ],
+            consumeErrors=True
+        ).addErrback(unwrapFirstError)
+        auth_events.update({a.event_id: a for a in results})
+
         yield defer.gatherResults(
             [
-                self._handle_new_event(dest, a)
+                self._handle_new_event(
+                    dest, a,
+                    auth_events={
+                        (auth_events[a_id].type, auth_events[a_id].state_key):
+                        auth_events[a_id]
+                        for a_id, _ in a.auth_events
+                    },
+                )
                 for a in auth_events.values()
+                if a.event_id not in seen_events
             ],
             consumeErrors=True,
         ).addErrback(unwrapFirstError)
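
The missing-auth fetch above fans out one get_pdu call per missing event and collects the results with Twisted's defer.gatherResults(..., consumeErrors=True), adding unwrapFirstError as the errback so the first underlying failure is surfaced rather than a wrapped FirstError. Below is a minimal sketch of that fan-out pattern only, not part of the patch; fetch_pdu is a hypothetical stand-in for self.replication_layer.get_pdu.

# Sketch of the concurrent-fetch pattern used above (assumes Twisted).
from twisted.internet import defer

@defer.inlineCallbacks
def fetch_missing_auth(missing_auth, fetch_pdu):
    # Fire all fetches concurrently; consumeErrors=True stops individual
    # failures from being reported as unhandled Deferred errors.
    results = yield defer.gatherResults(
        [fetch_pdu(event_id) for event_id in missing_auth],
        consumeErrors=True,
    )
    # Index the fetched events by event_id, as the patch does for auth_events.
    defer.returnValue({r.event_id: r for r in results})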
@@ -274,6 +314,11 @@ def backfill(self, dest, room_id, limit, extremities=[]):
                     dest, event_map[e_id],
                     state=events_to_state[e_id],
                     backfilled=True,
+                    auth_events={
+                        (auth_events[a_id].type, auth_events[a_id].state_key):
+                        auth_events[a_id]
+                        for a_id, _ in event_map[e_id].auth_events
+                    },
                 )
                 for e_id in events_to_state
             ],
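
Both gatherResults blocks above re-key the collected auth events from event_id to (type, state_key) pairs before passing them as the auth_events argument to _handle_new_event. A standalone sketch of that reshaping, using a hypothetical namedtuple in place of real Synapse event objects:

# Illustrative only; Event is a stand-in for Synapse's event objects.
from collections import namedtuple

Event = namedtuple("Event", ["event_id", "type", "state_key", "auth_events"])

# Auth events already fetched, keyed by event_id.
auth_events = {
    "$create": Event("$create", "m.room.create", "", ()),
    "$join": Event("$join", "m.room.member", "@alice:example.org", (("$create", {}),)),
}

# An event references its auth events as (event_id, hashes) pairs.
event = Event("$msg", "m.room.message", None, (("$create", {}), ("$join", {})))

# Re-key by (type, state_key), the shape passed to _handle_new_event above.
keyed = {
    (auth_events[a_id].type, auth_events[a_id].state_key): auth_events[a_id]
    for a_id, _ in event.auth_events
}
print(sorted(keyed))  # [('m.room.create', ''), ('m.room.member', '@alice:example.org')]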
@@ -900,8 +945,10 @@ def _handle_new_event(self, origin, event, state=None, backfilled=False,
             event.event_id, event.signatures,
         )
 
+        outlier = event.internal_metadata.is_outlier()
+
         context = yield self.state_handler.compute_event_context(
-            event, old_state=state
+            event, old_state=state, outlier=outlier,
         )
 
         if not auth_events:
@@ -912,7 +959,7 @@ def _handle_new_event(self, origin, event, state=None, backfilled=False,
             event.event_id, auth_events,
         )
 
-        is_new_state = not event.internal_metadata.is_outlier()
+        is_new_state = not outlier
 
         # This is a hack to fix some old rooms where the initial join event
         # didn't reference the create event in its auth events.