|
1 | 1 | //! Entrypoint for InfluxDB 3 Core Server
|
2 | 2 |
|
3 | 3 | use anyhow::{Context, bail};
|
4 |
| -use datafusion_util::config::register_iox_object_store; |
5 | 4 | use futures::{FutureExt, future::FusedFuture, pin_mut};
|
6 | 5 | use influxdb3_authz::TokenAuthenticator;
|
7 | 6 | use influxdb3_cache::{
|
@@ -546,6 +545,20 @@ pub async fn command(config: Config) -> Result<()> {
|
546 | 545 | let f = SendPanicsToTracing::new_with_metrics(&metrics);
|
547 | 546 | std::mem::forget(f);
|
548 | 547 |
|
| 548 | + // When you have an extra executor, you need a separate metrics registry! It is not clear what |
| 549 | + // the impact would be. |
| 550 | + // TODO: confirm this is not going to mess up downstream metrics consumers |
| 551 | + let write_path_metrics = setup_metric_registry(); |
| 552 | + |
| 553 | + // Install custom panic handler and forget about it. |
| 554 | + // |
| 555 | + // This leaks the handler and prevents it from ever being dropped during the |
| 556 | + // lifetime of the program - this is actually a good thing, as it prevents |
| 557 | + // the panic handler from being removed while unwinding a panic (which in |
| 558 | + // turn, causes a panic - see #548) |
| 559 | + let write_path_panic_handler_fn = SendPanicsToTracing::new_with_metrics(&write_path_metrics); |
| 560 | + std::mem::forget(write_path_panic_handler_fn); |
| 561 | + |
549 | 562 | // Construct a token to trigger clean shutdown
|
550 | 563 | let frontend_shutdown = CancellationToken::new();
|
551 | 564 | let shutdown_manager = ShutdownManager::new(frontend_shutdown.clone());
|
@@ -619,8 +632,36 @@ pub async fn command(config: Config) -> Result<()> {
|
619 | 632 | Arc::clone(&metrics),
|
620 | 633 | ),
|
621 | 634 | ));
|
622 |
| - let runtime_env = exec.new_context().inner().runtime_env(); |
623 |
| - register_iox_object_store(runtime_env, parquet_store.id(), Arc::clone(&object_store)); |
| 635 | + |
| 636 | + // Note: using the same metrics registry causes a runtime panic. |
| 637 | + let write_path_executor = Arc::new(Executor::new_with_config_and_executor( |
| 638 | + ExecutorConfig { |
| 639 | + // Should this be divided? Or should it contend for threads with the executor that's |
| 640 | + // set up for querying only? |
| 641 | + target_query_partitions: tokio_datafusion_config.num_threads.unwrap(), |
| 642 | + object_stores: [&parquet_store] |
| 643 | + .into_iter() |
| 644 | + .map(|store| (store.id(), Arc::clone(store.object_store()))) |
| 645 | + .collect(), |
| 646 | + metric_registry: Arc::clone(&write_path_metrics), |
| 647 | + // Use as much memory as possible for persistence; could this be an UnboundedMemoryPool? |
| 648 | + mem_pool_size: usize::MAX, |
| 649 | + // These are new additions; just skimming through the code, it does not look like we can |
| 650 | + // achieve the same effect as having a separate executor. It looks like they apply to "all" |
| 651 | + // queries; it'd be nice to have a filter that says: when a query matches this pattern, |
| 652 | + // apply these limits. If that were possible, maybe we could avoid creating a separate |
| 653 | + // executor. |
| 654 | + per_query_mem_pool_config: PerQueryMemoryPoolConfig::Disabled, |
| 655 | + heap_memory_limit: None, |
| 656 | + }, |
| 657 | + DedicatedExecutor::new( |
| 658 | + "datafusion_write_path", |
| 659 | + tokio_datafusion_config |
| 660 | + .builder() |
| 661 | + .map_err(Error::TokioRuntime)?, |
| 662 | + Arc::clone(&write_path_metrics), |
| 663 | + ), |
| 664 | + )); |
624 | 665 |
|
625 | 666 | let trace_header_parser = TraceHeaderParser::new()
|
626 | 667 | .with_jaeger_trace_context_header_name(
|
@@ -685,7 +726,7 @@ pub async fn command(config: Config) -> Result<()> {
|
685 | 726 | last_cache,
|
686 | 727 | distinct_cache,
|
687 | 728 | time_provider: Arc::<SystemProvider>::clone(&time_provider),
|
688 |
| - executor: Arc::clone(&exec), |
| 729 | + executor: Arc::clone(&write_path_executor), |
689 | 730 | wal_config,
|
690 | 731 | parquet_cache,
|
691 | 732 | metric_registry: Arc::clone(&metrics),
|
|
0 commit comments