-
Notifications
You must be signed in to change notification settings - Fork 328
Expand file tree
/
Copy pathplugin.rs
More file actions
4128 lines (3795 loc) · 150 KB
/
plugin.rs
File metadata and controls
4128 lines (3795 loc) · 150 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
use std::cell::RefCell;
use std::collections::HashMap;
use std::collections::HashSet;
use std::num::NonZeroUsize;
use std::ops::ControlFlow;
use std::sync::Arc;
use std::sync::OnceLock;
use std::time::Duration;
use apollo_compiler::Schema;
use apollo_compiler::ast::NamedType;
use apollo_compiler::collections::IndexMap;
use apollo_compiler::parser::Parser;
use apollo_compiler::resolvers;
use apollo_compiler::schema::ObjectType;
use apollo_compiler::validation::Valid;
use apollo_federation::connectors::StringTemplate;
use http::HeaderValue;
use http::header::CACHE_CONTROL;
use itertools::Itertools;
use lru::LruCache;
use multimap::MultiMap;
use opentelemetry::Array;
use opentelemetry::Key;
use opentelemetry::StringValue;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use serde_json_bytes::ByteString;
use serde_json_bytes::Value;
use tokio::sync::RwLock;
use tokio::sync::broadcast;
use tokio_stream::StreamExt;
use tokio_stream::wrappers::IntervalStream;
use tower::BoxError;
use tower::ServiceBuilder;
use tower::ServiceExt;
use tower_service::Service;
use tracing::Instrument;
use tracing::Level;
use tracing::Span;
use super::cache_control::CacheControl;
use super::invalidation::Invalidation;
use super::invalidation_endpoint::InvalidationEndpointConfig;
use super::invalidation_endpoint::InvalidationService;
use super::invalidation_endpoint::SubgraphInvalidationConfig;
use super::metrics::CacheMetricContextKey;
use super::metrics::record_fetch_error;
use crate::Context;
use crate::Endpoint;
use crate::ListenAddr;
use crate::configuration::subgraph::SubgraphConfiguration;
use crate::context::CONTAINS_GRAPHQL_ERROR;
use crate::error::FetchError;
use crate::graphql;
use crate::graphql::Error;
use crate::json_ext::Object;
use crate::json_ext::Path;
use crate::json_ext::PathElement;
use crate::plugin::PluginInit;
use crate::plugin::PluginPrivate;
use crate::plugins::authorization::CacheKeyMetadata;
use crate::plugins::response_cache::cache_key::PrimaryCacheKeyEntity;
use crate::plugins::response_cache::cache_key::PrimaryCacheKeyRoot;
use crate::plugins::response_cache::cache_key::hash_additional_data;
use crate::plugins::response_cache::cache_key::hash_query;
use crate::plugins::response_cache::debugger::CacheEntryKind;
use crate::plugins::response_cache::debugger::CacheKeyContext;
use crate::plugins::response_cache::debugger::CacheKeySource;
use crate::plugins::response_cache::debugger::add_cache_key_to_context;
use crate::plugins::response_cache::debugger::add_cache_keys_to_context;
use crate::plugins::response_cache::storage;
use crate::plugins::response_cache::storage::CacheEntry;
use crate::plugins::response_cache::storage::CacheStorage;
use crate::plugins::response_cache::storage::Document;
use crate::plugins::response_cache::storage::redis::Storage;
use crate::plugins::telemetry::LruSizeInstrument;
use crate::plugins::telemetry::dynamic_attribute::SpanDynAttribute;
use crate::plugins::telemetry::span_ext::SpanMarkError;
use crate::query_planner::OperationKind;
use crate::services::subgraph;
use crate::services::subgraph::SubgraphRequestId;
use crate::services::supergraph;
use crate::spec::QueryHash;
use crate::spec::TYPENAME;
/// Change this key if you introduce a breaking change in response caching algorithm to make sure it won't take the previous entries
pub(crate) const RESPONSE_CACHE_VERSION: &str = "1.2";
/// Name of the federation directive declaring cache tags in the supergraph schema.
pub(crate) const CACHE_TAG_DIRECTIVE_NAME: &str = "federation__cacheTag";
/// Root field name of federation entity fetches.
pub(crate) const ENTITIES: &str = "_entities";
/// Variable name carrying entity representations in `_entities` queries.
pub(crate) const REPRESENTATIONS: &str = "representations";
/// Context key under which the response cache key is stored.
pub(crate) const CONTEXT_CACHE_KEY: &str = "apollo::response_cache::key";
/// Context key to enable support of debugger
pub(crate) const CONTEXT_DEBUG_CACHE_KEYS: &str = "apollo::response_cache::debug_cached_keys";
/// HTTP request header that opts a single request in to cache debugging
/// (must be set to "true"; see `CacheService::call_inner`).
pub(crate) const CACHE_DEBUG_HEADER_NAME: &str = "apollo-cache-debugging";
/// GraphQL response `extensions` key carrying the debugger payload.
pub(crate) const CACHE_DEBUG_EXTENSIONS_KEY: &str = "apolloCacheDebugging";
/// Version string embedded in the debugger extension payload.
pub(crate) const CACHE_DEBUGGER_VERSION: &str = "1.0";
/// Subgraph response `extensions` key for cache tags attached to root fields.
pub(crate) const GRAPHQL_RESPONSE_EXTENSION_ROOT_FIELDS_CACHE_TAGS: &str = "apolloCacheTags";
/// Subgraph response `extensions` key for cache tags attached to entities.
pub(crate) const GRAPHQL_RESPONSE_EXTENSION_ENTITY_CACHE_TAGS: &str = "apolloEntityCacheTags";
/// Used to mark cache tags as internal and should not be exported or displayed to our users
pub(crate) const INTERNAL_CACHE_TAG_PREFIX: &str = "__apollo_internal::";
/// Default capacity of the LRU tracking queries known to be private.
const DEFAULT_LRU_PRIVATE_QUERIES_SIZE: NonZeroUsize = NonZeroUsize::new(2048).unwrap();
/// Instrument name for the gauge tracking the private-queries LRU size.
const LRU_PRIVATE_QUERIES_INSTRUMENT_NAME: &str =
    "apollo.router.response_cache.private_queries.lru.size";
// Register this plugin under the private `apollo` namespace.
register_private_plugin!("apollo", "response_cache", ResponseCache);
/// Response caching plugin: caches subgraph responses and exposes an
/// invalidation endpoint and a cache debugger.
#[derive(Clone)]
pub(crate) struct ResponseCache {
    /// Storage backends: a shared `all` layer plus per-subgraph overrides.
    pub(super) storage: Arc<StorageInterface>,
    /// Configuration of the invalidation HTTP endpoint, when configured.
    endpoint_config: Option<Arc<InvalidationEndpointConfig>>,
    /// Per-subgraph caching configuration (with an `all` fallback).
    subgraphs: Arc<SubgraphConfiguration<Subgraph>>,
    /// Name of the schema's root query type, if the schema declares one.
    entity_type: Option<String>,
    /// Global on/off switch for the plugin.
    enabled: bool,
    /// Whether the cache debugger is enabled in configuration.
    debug: bool,
    /// LRU of queries known to produce `private`-scoped responses.
    private_queries: Arc<RwLock<LruCache<PrivateQueryKey, ()>>>,
    pub(crate) invalidation: Invalidation,
    supergraph_schema: Arc<Valid<Schema>>,
    /// map containing the enum GRAPH
    subgraph_enums: Arc<HashMap<String, String>>,
    /// Instrument reporting the size of `private_queries`.
    lru_size_instrument: LruSizeInstrument,
    /// Sender to tell spawned tasks to abort when this struct is dropped
    drop_tx: broadcast::Sender<()>,
}
/// Key identifying a query in the `private_queries` LRU.
///
/// The presence of a private id is part of the key because the same query can
/// be public or private depending on whether a private id is available.
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
struct PrivateQueryKey {
    // Hash of the query (see `hash_query`).
    query_hash: String,
    // Whether a private id was resolvable from the request context.
    has_private_id: bool,
}
/// Lazily-connected storage backends: an optional shared layer for all
/// subgraphs plus per-subgraph overrides. Each `OnceLock` stays empty until
/// the (possibly delayed) connection succeeds.
#[derive(Clone, Default)]
pub(crate) struct StorageInterface {
    // Shared storage used by every subgraph without a dedicated entry below.
    all: Option<Arc<OnceLock<Storage>>>,
    // Storage overrides keyed by subgraph name.
    subgraphs: HashMap<String, Arc<OnceLock<Storage>>>,
}
impl StorageInterface {
pub(crate) fn get(&self, subgraph: &str) -> Option<&Storage> {
let storage = self.subgraphs.get(subgraph).or(self.all.as_ref())?;
storage.get()
}
/// Activate all storages so they can start emitting metrics.
pub(crate) fn activate(&self) {
if let Some(all) = &self.all
&& let Some(storage) = all.get()
{
storage.activate();
}
for storage in self.subgraphs.values() {
if let Some(storage) = storage.get() {
storage.activate();
}
}
}
}
#[cfg(all(
    test,
    any(not(feature = "ci"), all(target_arch = "x86_64", target_os = "linux"))
))]
impl StorageInterface {
    /// Replace the `all` storage layer in this struct.
    ///
    /// This supports tests which initialize the `StorageInterface` without a backing database
    /// and then add one later, simulating a delayed storage connection.
    ///
    /// Returns `None` when no `all` slot exists or when it was already set
    /// (`OnceLock::set` fails on a filled cell).
    pub(crate) fn replace_storage(&self, storage: Storage) -> Option<()> {
        self.all.as_ref()?.set(storage).ok()
    }
}
#[cfg(all(
    test,
    any(not(feature = "ci"), all(target_arch = "x86_64", target_os = "linux"))
))]
impl From<Storage> for StorageInterface {
    /// Test helper: wrap a connected storage as the shared `all` layer with no
    /// per-subgraph overrides.
    fn from(storage: Storage) -> Self {
        let all = Arc::new(OnceLock::from(storage));
        StorageInterface {
            all: Some(all),
            subgraphs: HashMap::default(),
        }
    }
}
/// Configuration for response caching
#[derive(Clone, Debug, JsonSchema, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub(crate) struct Config {
    /// Enable or disable the response caching feature
    #[serde(default)]
    pub(crate) enabled: bool,
    #[serde(default)]
    /// Enable debug mode for the debugger
    debug: bool,
    /// Configure invalidation per subgraph
    pub(crate) subgraph: SubgraphConfiguration<Subgraph>,
    /// Global invalidation configuration (listen address and path of the
    /// invalidation endpoint)
    invalidation: Option<InvalidationEndpointConfig>,
    /// Buffer size for known private queries (default: 2048)
    #[serde(default = "default_lru_private_queries_size")]
    private_queries_buffer_size: NonZeroUsize,
}
/// serde default for `Config::private_queries_buffer_size`.
const fn default_lru_private_queries_size() -> NonZeroUsize {
    DEFAULT_LRU_PRIVATE_QUERIES_SIZE
}
/// Per subgraph configuration for response caching
#[derive(Clone, Debug, JsonSchema, Deserialize, Serialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields, default)]
pub(crate) struct Subgraph {
    /// Redis configuration
    pub(crate) redis: Option<storage::redis::Config>,
    /// expiration for all keys for this subgraph, unless overridden by the `Cache-Control` header in subgraph responses
    pub(crate) ttl: Option<Ttl>,
    /// activates caching for this subgraph, overrides the global configuration
    /// (unset counts as enabled; see `ResponseCache::static_subgraph_enabled`)
    pub(crate) enabled: Option<bool>,
    /// Context key used to separate cache sections per user
    pub(crate) private_id: Option<String>,
    /// Invalidation configuration
    pub(crate) invalidation: Option<SubgraphInvalidationConfig>,
}
impl Default for Subgraph {
fn default() -> Self {
Self {
redis: None,
enabled: Some(true),
ttl: Default::default(),
private_id: Default::default(),
invalidation: Default::default(),
}
}
}
/// TTL configuration value: a `Duration` deserialized from a human-readable
/// string via `humantime_serde` (exposed to the JSON schema as a string).
#[derive(Clone, Debug, JsonSchema, Deserialize, Serialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub(crate) struct Ttl(
    #[serde(deserialize_with = "humantime_serde::deserialize")]
    #[schemars(with = "String")]
    pub(crate) Duration,
);
/// Cache hit/miss counters for one subgraph, keyed by GraphQL type name.
#[derive(Default, Serialize, Deserialize, Debug)]
#[serde(default)]
pub(crate) struct CacheSubgraph(pub(crate) HashMap<String, CacheHitMiss>);
/// Counters for cache hits and misses.
#[derive(Default, Serialize, Deserialize, Debug)]
#[serde(default)]
pub(crate) struct CacheHitMiss {
    // Number of cache hits.
    pub(crate) hit: usize,
    // Number of cache misses.
    pub(crate) miss: usize,
}
#[async_trait::async_trait]
impl PluginPrivate for ResponseCache {
    const HIDDEN_FROM_CONFIG_JSON_SCHEMA: bool = true;
    type Config = Config;

    /// Build the plugin: validate configuration, connect (or schedule a
    /// reconnection task) for the shared Redis layer and for each per-subgraph
    /// override, then set up invalidation.
    async fn new(init: PluginInit<Self::Config>) -> Result<Self, BoxError>
    where
        Self: Sized,
    {
        // Name of the schema's root query type (used for root-field caching).
        let entity_type = init
            .supergraph_schema
            .schema_definition
            .query
            .as_ref()
            .map(|q| q.name.to_string());
        // A TTL must come from somewhere: either the `all` fallback or every
        // per-subgraph override.
        // NOTE(review): when `subgraphs` is empty, `any` is false and a missing
        // global TTL is not rejected here — presumably the `all` config is
        // propagated into `subgraphs` during deserialization; confirm.
        if init.config.subgraph.all.ttl.is_none()
            && init
                .config
                .subgraph
                .subgraphs
                .values()
                .any(|s| s.ttl.is_none())
        {
            return Err("a TTL must be configured for all subgraphs or globally"
                .to_string()
                .into());
        }
        // An empty shared key on the global invalidation config is a misconfiguration.
        if init
            .config
            .subgraph
            .all
            .invalidation
            .as_ref()
            .map(|i| i.shared_key.is_empty())
            .unwrap_or_default()
        {
            return Err(
                "you must set a default shared_key invalidation for all subgraphs"
                    .to_string()
                    .into(),
            );
        }
        let mut storage_interface = StorageInterface::default();
        // Broadcast channel used to tell background reconnection tasks to stop on drop.
        let (drop_tx, drop_rx) = tokio::sync::broadcast::channel(2);
        if init.config.enabled
            && init.config.subgraph.all.enabled.unwrap_or_default()
            && let Some(config) = init.config.subgraph.all.redis.clone()
        {
            let storage = Arc::new(OnceLock::new());
            storage_interface.all = Some(storage.clone());
            connect_or_spawn_reconnection_task(config, storage, drop_rx).await?;
        }
        for (subgraph, subgraph_config) in &init.config.subgraph.subgraphs {
            if Self::static_subgraph_enabled(init.config.enabled, &init.config.subgraph, subgraph) {
                match subgraph_config.redis.clone() {
                    Some(config) => {
                        // We need to do this because the subgraph config automatically clones from the `all` config during deserialization.
                        // We don't want to create a new connection pool if the subgraph just inherits from the `all` config (only if all is enabled).
                        if Some(&config) != init.config.subgraph.all.redis.as_ref()
                            || storage_interface.all.is_none()
                        {
                            let storage = Arc::new(OnceLock::new());
                            storage_interface
                                .subgraphs
                                .insert(subgraph.clone(), storage.clone());
                            connect_or_spawn_reconnection_task(
                                config,
                                storage,
                                drop_tx.subscribe(),
                            )
                            .await?;
                        }
                    }
                    None => {
                        // Without its own Redis config the subgraph must be able
                        // to fall back to the shared layer.
                        if storage_interface.all.is_none() {
                            return Err(
                                format!("you must have a redis configured either for all subgraphs or for subgraph {subgraph:?}")
                                .into(),
                            );
                        }
                    }
                }
            }
        }
        let storage_interface = Arc::new(storage_interface);
        let invalidation = Invalidation::new(storage_interface.clone()).await?;
        Ok(Self {
            storage: storage_interface,
            entity_type,
            enabled: init.config.enabled,
            debug: init.config.debug,
            endpoint_config: init.config.invalidation.clone().map(Arc::new),
            subgraphs: Arc::new(init.config.subgraph),
            private_queries: Arc::new(RwLock::new(LruCache::new(
                init.config.private_queries_buffer_size,
            ))),
            invalidation,
            subgraph_enums: Arc::new(get_subgraph_enums(&init.supergraph_schema)),
            supergraph_schema: init.supergraph_schema,
            lru_size_instrument: LruSizeInstrument::new(LRU_PRIVATE_QUERIES_INSTRUMENT_NAME),
            drop_tx,
        })
    }

    /// Activate every storage layer so they can start emitting metrics.
    fn activate(&self) {
        self.storage.activate();
    }

    /// Wrap the supergraph service: propagate the computed `Cache-Control`
    /// header (forcing `no-store` when the response contains GraphQL errors)
    /// and, in debug mode, attach collected cache keys to response extensions.
    fn supergraph_service(
        &self,
        service: supergraph::BoxCloneService,
    ) -> supergraph::BoxCloneService {
        let debug = self.debug;
        ServiceBuilder::new()
            .map_response(move |mut response: supergraph::Response| {
                if let Some(mut cache_control) = response
                    .context
                    .extensions()
                    .with_lock(|lock| lock.get::<CacheControl>().cloned())
                {
                    // If the response contains GraphQL errors, force Cache-Control: no-store to prevent
                    // intermediate caches (CDNs, reverse proxies) from caching partial or error responses.
                    let has_errors = response
                        .context
                        .get_json_value(CONTAINS_GRAPHQL_ERROR)
                        .and_then(|v| v.as_bool())
                        .unwrap_or(false);
                    if has_errors {
                        cache_control = CacheControl::no_store();
                    }
                    let _ = cache_control.to_headers(response.response.headers_mut());
                }
                if debug
                    && let Some(debug_data) =
                        response.context.get_json_value(CONTEXT_DEBUG_CACHE_KEYS)
                {
                    // Surface the debugger payload on every chunk of the stream.
                    return response.map_stream(move |mut body| {
                        body.extensions.insert(
                            CACHE_DEBUG_EXTENSIONS_KEY,
                            serde_json_bytes::json!({
                                "version": CACHE_DEBUGGER_VERSION,
                                "data": debug_data.clone()
                            }),
                        );
                        body
                    });
                }
                response
            })
            .service(service)
            .boxed_clone()
    }

    /// Wrap a subgraph service with `CacheService` when caching is enabled for
    /// this subgraph; otherwise only keep the cache-control bookkeeping so the
    /// supergraph response header stays correct.
    fn subgraph_service(
        &self,
        name: &str,
        service: subgraph::BoxCloneService,
    ) -> subgraph::BoxCloneService {
        let subgraph_ttl = self
            .subgraph_ttl(name)
            .unwrap_or_else(|| Duration::from_secs(60 * 60 * 24)); // The unwrap should not happen because it's checked when creating the plugin (except for tests)
        let subgraph_enabled = self.subgraph_enabled(name);
        let private_id = self.subgraphs.get(name).private_id.clone();
        let name = name.to_string();
        if subgraph_enabled {
            let private_queries = self.private_queries.clone();
            let inner = ServiceBuilder::new()
                .map_response(move |response: subgraph::Response| {
                    // Merge this response's cache-control into the request context.
                    update_cache_control(
                        &response.context,
                        &CacheControl::new(response.response.headers(), subgraph_ttl.into())
                            .ok()
                            .unwrap_or_else(CacheControl::no_store),
                    );
                    response
                })
                .service(CacheService {
                    service,
                    entity_type: self.entity_type.clone(),
                    name: name.to_string(),
                    storage: self.storage.clone(),
                    subgraph_ttl,
                    private_queries,
                    private_id_key_name: private_id,
                    debug: self.debug,
                    supergraph_schema: self.supergraph_schema.clone(),
                    subgraph_enums: self.subgraph_enums.clone(),
                    lru_size_instrument: self.lru_size_instrument.clone(),
                });
            tower::util::BoxCloneService::new(inner)
        } else {
            // Caching disabled for this subgraph: pass through, but still
            // record cache-control so header propagation works.
            ServiceBuilder::new()
                .map_response(move |response: subgraph::Response| {
                    update_cache_control(
                        &response.context,
                        &CacheControl::new(response.response.headers(), subgraph_ttl.into())
                            .ok()
                            .unwrap_or_else(CacheControl::no_store),
                    );
                    response
                })
                .service(service)
                .boxed_clone()
        }
    }

    /// Expose the invalidation HTTP endpoint when the plugin is enabled, at
    /// least one subgraph caches, and invalidation is enabled globally or for
    /// at least one caching subgraph.
    fn web_endpoints(&self) -> MultiMap<ListenAddr, Endpoint> {
        let mut map = MultiMap::new();
        // At least 1 subgraph enabled caching
        let any_caching_enabled = self
            .subgraphs
            .subgraphs
            .iter()
            .any(|(subgraph_name, _cfg)| self.subgraph_enabled(subgraph_name))
            || self.subgraphs.all.enabled.unwrap_or_default();
        let global_invalidation_enabled = self
            .subgraphs
            .all
            .invalidation
            .as_ref()
            .map(|i| i.enabled)
            .unwrap_or_default();
        // If at least one subgraph is enabled and has invalidation enabled
        let any_subgraph_invalidation_enabled =
            self.subgraphs.subgraphs.iter().any(|(subgraph_name, cfg)| {
                self.subgraph_enabled(subgraph_name)
                    && cfg
                        .invalidation
                        .as_ref()
                        .map(|i| i.enabled)
                        .unwrap_or_default()
            });
        if self.enabled
            && any_caching_enabled
            && (global_invalidation_enabled || any_subgraph_invalidation_enabled)
        {
            match &self.endpoint_config {
                Some(endpoint_config) => {
                    let endpoint = Endpoint::from_router_service(
                        endpoint_config.path.clone(),
                        InvalidationService::new(self.subgraphs.clone(), self.invalidation.clone())
                            .boxed_clone(),
                    );
                    tracing::info!(
                        "Response cache invalidation endpoint listening on: {}{}",
                        endpoint_config.listen,
                        endpoint_config.path
                    );
                    map.insert(endpoint_config.listen.clone(), endpoint);
                }
                None => {
                    tracing::warn!(
                        "Cannot start response cache invalidation endpoint because the listen address and endpoint is not configured"
                    );
                }
            }
        }
        map
    }
}
#[cfg(all(
    test,
    any(not(feature = "ci"), all(target_arch = "x86_64", target_os = "linux"))
))]
/// Shared key used by the invalidation endpoint in tests.
pub(super) const INVALIDATION_SHARED_KEY: &str = "supersecret";
impl ResponseCache {
    #[cfg(all(
        test,
        any(not(feature = "ci"), all(target_arch = "x86_64", target_os = "linux"))
    ))]
    /// Test constructor backed by a real storage, optionally truncating its
    /// namespace first so each test starts from a clean slate.
    pub(crate) async fn for_test(
        storage: Storage,
        subgraphs: SubgraphConfiguration<Subgraph>,
        supergraph_schema: Arc<Valid<Schema>>,
        truncate_namespace: bool,
        drop_tx: broadcast::Sender<()>,
    ) -> Result<Self, BoxError>
    where
        Self: Sized,
    {
        use std::net::IpAddr;
        use std::net::Ipv4Addr;
        use std::net::SocketAddr;
        if truncate_namespace {
            storage.truncate_namespace().await?;
        }
        let storage = Arc::new(StorageInterface {
            all: Some(Arc::new(storage.into())),
            subgraphs: HashMap::new(),
        });
        let invalidation = Invalidation::new(storage.clone()).await?;
        Ok(Self {
            storage,
            entity_type: None,
            enabled: true,
            debug: true,
            subgraphs: Arc::new(subgraphs),
            private_queries: Arc::new(RwLock::new(LruCache::new(DEFAULT_LRU_PRIVATE_QUERIES_SIZE))),
            // Fixed local endpoint for the test invalidation service.
            endpoint_config: Some(Arc::new(InvalidationEndpointConfig {
                path: String::from("/invalidation"),
                listen: ListenAddr::SocketAddr(SocketAddr::new(
                    IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
                    4000,
                )),
            })),
            invalidation,
            subgraph_enums: Arc::new(get_subgraph_enums(&supergraph_schema)),
            supergraph_schema,
            lru_size_instrument: LruSizeInstrument::new(LRU_PRIVATE_QUERIES_INSTRUMENT_NAME),
            drop_tx,
        })
    }

    #[cfg(all(
        test,
        any(not(feature = "ci"), all(target_arch = "x86_64", target_os = "linux"))
    ))]
    /// Use this method when you want to test ResponseCache without database available
    pub(crate) async fn without_storage_for_failure_mode(
        subgraphs: HashMap<String, Subgraph>,
        supergraph_schema: Arc<Valid<Schema>>,
    ) -> Result<Self, BoxError>
    where
        Self: Sized,
    {
        use std::net::IpAddr;
        use std::net::Ipv4Addr;
        use std::net::SocketAddr;
        // The `all` slot holds an empty `OnceLock`, simulating a connection
        // that never comes up.
        let storage = Arc::new(StorageInterface {
            all: Some(Default::default()),
            subgraphs: HashMap::new(),
        });
        let invalidation = Invalidation::new(storage.clone()).await?;
        let (drop_tx, _drop_rx) = broadcast::channel(2);
        Ok(Self {
            storage,
            entity_type: None,
            enabled: true,
            debug: true,
            subgraphs: Arc::new(SubgraphConfiguration {
                all: Subgraph {
                    invalidation: Some(SubgraphInvalidationConfig {
                        enabled: true,
                        shared_key: INVALIDATION_SHARED_KEY.to_string(),
                    }),
                    ..Default::default()
                },
                subgraphs,
            }),
            private_queries: Arc::new(RwLock::new(LruCache::new(DEFAULT_LRU_PRIVATE_QUERIES_SIZE))),
            endpoint_config: Some(Arc::new(InvalidationEndpointConfig {
                path: String::from("/invalidation"),
                listen: ListenAddr::SocketAddr(SocketAddr::new(
                    IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
                    4000,
                )),
            })),
            invalidation,
            subgraph_enums: Arc::new(get_subgraph_enums(&supergraph_schema)),
            supergraph_schema,
            lru_size_instrument: LruSizeInstrument::new(LRU_PRIVATE_QUERIES_INSTRUMENT_NAME),
            drop_tx,
        })
    }

    /// Returns boolean to know if cache is enabled for this subgraph
    fn subgraph_enabled(&self, subgraph_name: &str) -> bool {
        Self::static_subgraph_enabled(self.enabled, &self.subgraphs, subgraph_name)
    }

    /// Static method which returns boolean to know if cache is enabled for this subgraph
    /// (usable before `Self` is constructed, e.g. in `new`).
    fn static_subgraph_enabled(
        plugin_enabled: bool,
        subgraph_config: &SubgraphConfiguration<Subgraph>,
        subgraph_name: &str,
    ) -> bool {
        if !plugin_enabled {
            return false;
        }
        match (
            subgraph_config.all.enabled,
            subgraph_config.get(subgraph_name).enabled,
        ) {
            (_, Some(x)) => x, // explicit per-subgraph setting overrides the `all` default
            (Some(true) | None, None) => true, // unset defaults to true
            (Some(false), None) => false,
        }
    }

    // Returns the configured ttl for this subgraph:
    // per-subgraph override first, then the `all` fallback.
    fn subgraph_ttl(&self, subgraph_name: &str) -> Option<Duration> {
        self.subgraphs
            .get(subgraph_name)
            .ttl
            .clone()
            .map(|t| t.0)
            .or_else(|| self.subgraphs.all.ttl.clone().map(|ttl| ttl.0))
    }
}
impl Drop for ResponseCache {
    fn drop(&mut self) {
        // Best-effort broadcast telling background tasks to abort; a send
        // error only means no task is listening any more.
        self.drop_tx.send(()).ok();
    }
}
/// Get the map of subgraph enum variant mapped with subgraph name.
///
/// Reads the `join__Graph` enum from the supergraph schema and, for each
/// variant carrying a `join__graph(name: ...)` directive, records
/// `variant name -> subgraph name`. Variants without the directive (or with a
/// non-string `name` argument) are skipped.
fn get_subgraph_enums(supergraph_schema: &Valid<Schema>) -> HashMap<String, String> {
    let mut subgraph_enums = HashMap::new();
    if let Some(graph_enum) = supergraph_schema.get_enum("join__Graph") {
        for (enum_name, enum_value_def) in graph_enum.values.iter() {
            let Some(subgraph_name) = enum_value_def
                .directives
                .get("join__graph")
                .and_then(|directive| directive.specified_argument_by_name("name"))
                .and_then(|value| value.as_str())
            else {
                continue;
            };
            subgraph_enums.insert(enum_name.to_string(), subgraph_name.to_string());
        }
    }
    subgraph_enums
}
/// Tower service wrapping a single subgraph service with cache lookup/store
/// logic. Built once per cache-enabled subgraph in
/// `ResponseCache::subgraph_service`.
#[derive(Clone)]
struct CacheService {
    // Downstream subgraph service.
    service: subgraph::BoxCloneService,
    // Subgraph name, used for storage lookup and metrics.
    name: String,
    // Name of the schema's root query type, if any.
    entity_type: Option<String>,
    // Storage backends shared with the plugin.
    storage: Arc<StorageInterface>,
    // Default TTL applied when the subgraph response does not dictate one.
    subgraph_ttl: Duration,
    // Shared LRU of queries known to be private.
    private_queries: Arc<RwLock<LruCache<PrivateQueryKey, ()>>>,
    // Context key holding the per-user private id, if configured.
    private_id_key_name: Option<String>,
    // Whether the cache debugger is enabled for this service.
    debug: bool,
    supergraph_schema: Arc<Valid<Schema>>,
    // Map of `join__Graph` enum variant -> subgraph name.
    subgraph_enums: Arc<HashMap<String, String>>,
    // Instrument reporting the size of `private_queries`.
    lru_size_instrument: LruSizeInstrument,
}
impl Service<subgraph::Request> for CacheService {
    type Response = subgraph::Response;
    type Error = BoxError;
    type Future = <subgraph::BoxCloneService as Service<subgraph::Request>>::Future;

    fn poll_ready(
        &mut self,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        // Readiness is delegated entirely to the wrapped subgraph service.
        self.service.poll_ready(cx)
    }

    fn call(&mut self, request: subgraph::Request) -> Self::Future {
        // Standard tower pattern for cloneable services: move the instance
        // that was polled ready into the future, and leave a fresh clone in
        // its place for subsequent calls.
        let clone = self.clone();
        let inner = std::mem::replace(self, clone);
        Box::pin(inner.call_inner(request))
    }
}
impl CacheService {
/// Core cache flow for one subgraph request: resolve storage, honor request
/// `Cache-Control`, detect private queries, then dispatch to the entity path
/// or the root-fields path.
async fn call_inner(
    mut self,
    request: subgraph::Request,
) -> Result<subgraph::Response, BoxError> {
    // If no storage is available (e.g. the connection has not been
    // established yet), record the error and pass the request through while
    // still tracking cache-control for header propagation.
    let storage = match self
        .storage
        .get(&self.name)
        .ok_or(storage::Error::NoStorage)
    {
        Ok(storage) => storage.clone(),
        Err(err) => {
            record_fetch_error(&err, &self.name);
            return self
                .service
                .map_response(move |response: subgraph::Response| {
                    update_cache_control(
                        &response.context,
                        &CacheControl::new(response.response.headers(), None)
                            .ok()
                            .unwrap_or_else(CacheControl::no_store),
                    );
                    response
                })
                .call(request)
                .await;
        }
    };
    // Debugging is active only when configured AND explicitly requested via
    // the debug header on the client request.
    self.debug = self.debug
        && (request
            .supergraph_request
            .headers()
            .get(CACHE_DEBUG_HEADER_NAME)
            == Some(&HeaderValue::from_static("true")));
    // Check if the request is part of a batch. If it is, completely bypass response caching since it
    // will break any request batches which this request is part of.
    // This check is what enables Batching and response caching to work together, so be very careful
    // before making any changes to it.
    if request.is_part_of_batch() {
        return self.service.call(request).await;
    }
    // [RFC 9111](https://datatracker.ietf.org/doc/html/rfc9111):
    // * no-store: allows serving response from cache, but prohibits storing response in cache
    // * no-cache: prohibits serving response from cache, but allows storing response in cache
    //
    // NB: no-cache actually prohibits serving response from cache _without revalidation_, but
    // in the router this is the same thing
    let cache_control = if request
        .subgraph_request
        .headers()
        .contains_key(&CACHE_CONTROL)
    {
        let cache_control = match CacheControl::new(request.subgraph_request.headers(), None) {
            Ok(cache_control) => cache_control,
            Err(err) => {
                // Malformed cache-control header: answer with a GraphQL error
                // instead of calling the subgraph.
                return Ok(subgraph::Response::builder()
                    .subgraph_name(request.subgraph_name)
                    .id(request.id)
                    .context(request.context)
                    .error(
                        graphql::Error::builder()
                            .message(format!("cannot get cache-control header: {err}"))
                            .extension_code("INVALID_CACHE_CONTROL_HEADER")
                            .build(),
                    )
                    .extensions(Object::default())
                    .build());
            }
        };
        // Don't use cache at all if both no-store and no-cache are set in cache-control header
        if cache_control.is_no_cache() && cache_control.is_no_store() {
            let mut resp = self.service.call(request).await?;
            cache_control.to_headers(resp.response.headers_mut())?;
            return Ok(resp);
        }
        Some(cache_control)
    } else {
        None
    };
    let private_id = self.get_private_id(&request.context);
    // Knowing if there's a private_id or not will differentiate the hash because for a same query it can be both public and private depending if we have private_id set or not
    let private_query_key = PrivateQueryKey {
        query_hash: hash_query(&request.query_hash),
        has_private_id: private_id.is_some(),
    };
    // Scoped so the read lock is released right after the membership check.
    let is_known_private = {
        self.private_queries
            .read()
            .await
            .contains(&private_query_key)
    };
    // Entity fetches carry a `representations` variable.
    let is_entity = request
        .subgraph_request
        .body()
        .variables
        .contains_key(REPRESENTATIONS);
    // the response will have a private scope but we don't have a way to differentiate users, so
    // we know we will not get or store anything in the cache
    if is_known_private && private_id.is_none() {
        self.call_service_for_private_query_without_id(request, is_entity)
            .await
    } else if is_entity {
        self.call_service_for_entities_query(
            request,
            storage,
            is_known_private,
            private_id,
            private_query_key,
            cache_control,
        )
        .await
    } else {
        self.call_service_for_root_fields_operation(
            request,
            storage,
            is_known_private,
            private_id,
            private_query_key,
            cache_control,
        )
        .await
    }
}
/// Pass-through path for a query known to be private when no private id is
/// available: the cache is neither read nor written. In debug mode a
/// non-stored entry (`should_store: false`) is still recorded for the
/// debugger.
async fn call_service_for_private_query_without_id(
    mut self,
    request: subgraph::Request,
    is_entity: bool,
) -> Result<subgraph::Response, BoxError> {
    let mut debug_subgraph_request = None;
    let mut root_operation_fields = Vec::new();
    if self.debug {
        // Capture request details before the request is consumed by the call.
        root_operation_fields = request.root_operation_fields();
        debug_subgraph_request = Some(request.subgraph_request.body().clone());
    }
    let resp = self.service.call(request).await?;
    if self.debug {
        let cache_control =
            CacheControl::new(resp.response.headers(), self.subgraph_ttl.into())?;
        // Entity fetches get a placeholder entity entry; root-field
        // operations record the captured root fields.
        let kind = if is_entity {
            CacheEntryKind::Entity {
                typename: "".to_string(),
                entity_key: Default::default(),
            }
        } else {
            CacheEntryKind::RootFields {
                root_fields: root_operation_fields,
            }
        };
        let cache_key_context = CacheKeyContext {
            key: "-".to_string(),
            invalidation_keys: vec![],
            kind,
            hashed_private_id: None,
            subgraph_name: self.name.clone(),
            subgraph_request: debug_subgraph_request.unwrap_or_default(),
            source: CacheKeySource::Subgraph,
            cache_control,
            data: serde_json_bytes::to_value(resp.response.body().clone()).unwrap_or_default(),
            warnings: Vec::new(),
            // Private response without a private id: never stored.
            should_store: false,
        }
        .update_metadata();
        add_cache_key_to_context(&resp.context, cache_key_context)?;
    }
    Ok(resp)
}
async fn call_service_for_root_fields_operation(
mut self,
request: subgraph::Request,
storage: Storage,
is_known_private: bool,
private_id: Option<String>,
private_query_key: PrivateQueryKey,
request_cache_control: Option<CacheControl>,
) -> Result<subgraph::Response, BoxError> {
// Skip cache entirely if this is a root fields operation that isn't a query
if request.operation_kind != OperationKind::Query {
return self.service.call(request).await;
}
let mut cache_hit: HashMap<String, CacheHitMiss> = HashMap::new();
match cache_lookup_root(
self.name.clone(),
self.entity_type.as_deref(),
storage.clone(),
is_known_private,
private_id.as_deref(),
self.debug,
request,
self.supergraph_schema.clone(),
&self.subgraph_enums,
request_cache_control.as_ref(),
)
.instrument(tracing::info_span!(
"response_cache.lookup",
kind = "root",
subgraph.name = self.name.clone(),
"graphql.type" = self.entity_type.as_deref().unwrap_or_default(),
debug = self.debug,
private = is_known_private,
contains_private_id = private_id.is_some(),
"cache.key" = ::tracing::field::Empty,
))
.await?
{
ControlFlow::Break(response) => {
cache_hit.insert("Query".to_string(), CacheHitMiss { hit: 1, miss: 0 });
let _ = response.context.insert(
CacheMetricContextKey::new(response.subgraph_name.clone()),
CacheSubgraph(cache_hit),
);
Ok(response)
}
ControlFlow::Continue((request, mut root_cache_key, mut invalidation_keys)) => {
cache_hit.insert("Query".to_string(), CacheHitMiss { hit: 0, miss: 1 });
let _ = request.context.insert(
CacheMetricContextKey::new(request.subgraph_name.clone()),
CacheSubgraph(cache_hit),
);
// stash a few pieces of the request to use for debugging later
let mut root_operation_fields: Vec<String> = Vec::new();
let mut debug_subgraph_request = None;
if self.debug {
root_operation_fields = request.root_operation_fields();
debug_subgraph_request = Some(request.subgraph_request.body().clone());
}
let response = self.service.call(request).await?;
let mut cache_control =
response.subgraph_cache_control(self.subgraph_ttl.into())?;
// Support cache tags coming from subgraph response extensions
if let Some(Value::Array(cache_tags)) =
response.get_from_extensions(GRAPHQL_RESPONSE_EXTENSION_ROOT_FIELDS_CACHE_TAGS)
{