12 | 12 | #include "NullPool.h" |
13 | 13 | #include "SystemThreadPool.h" |
14 | 14 | #include "thread_specific_ptr.h" |
15 | | -#include "ThreadPool.h" |
16 | 15 | #include <sstream> |
17 | 16 | #include <stdexcept> |
18 | 17 |
@@ -58,8 +57,7 @@ CoreContext::CoreContext(const std::shared_ptr<CoreContext>& pParent, t_childLis |
58 | 57 | m_backReference(backReference), |
59 | 58 | m_sigilType(sigilType), |
60 | 59 | m_stateBlock(std::make_shared<CoreContextStateBlock>(pParent ? pParent->m_stateBlock : nullptr)), |
61 | | - m_junctionBoxManager(new JunctionBoxManager), |
62 | | - m_threadPool(std::make_shared<NullPool>()) |
| 60 | + m_junctionBoxManager(new JunctionBoxManager) |
63 | 61 | {} |
64 | 62 |
65 | 63 | CoreContext::~CoreContext(void) { |
@@ -448,48 +446,10 @@ void CoreContext::Initiate(void) { |
448 | 446 |
449 | 447 | // Now we can recover the first thread that will need to be started |
450 | 448 | auto beginning = m_threads.begin(); |
451 | | - |
452 | | - // Start our threads before starting any child contexts: |
453 | | - std::shared_ptr<ThreadPool> threadPool; |
454 | | - auto nullPool = std::dynamic_pointer_cast<NullPool>(m_threadPool); |
455 | | - if (nullPool) { |
456 | | - // Decide which pool will become our current thread pool. Global context is the final case, |
457 | | - // which defaults to the system thread pool |
458 | | - if (!nullPool->GetSuccessor()) |
459 | | - nullPool->SetSuccessor(m_pParent ? m_pParent->GetThreadPool() : SystemThreadPool::New()); |
460 | | - |
461 | | - // Trigger null pool destruction at this point: |
462 | | - m_threadPool = nullPool->MoveDispatchersToSuccessor(); |
463 | | - } |
464 | | - |
465 | | - // The default case should not generally occur, but if it were the case that the null pool were |
466 | | - // updated before the context was initiated, then we would have no work to do as no successors |
467 | | - // exist to be moved. In that case, simply take a record of the current thread pool for the |
468 | | - // call to Start that follows the unlock. |
469 | | - threadPool = m_threadPool; |
470 | 449 | lk.unlock(); |
471 | 450 | onInitiated(); |
472 | 451 | m_stateBlock->m_stateChanged.notify_all(); |
473 | 452 |
474 | | - // Start the thread pool out of the lock, and then update our start token if our thread pool |
475 | | - // reference has not changed. The next pool could potentially be nullptr if the parent is going |
476 | | - // down while we are going up. |
477 | | - if (threadPool) { |
478 | | - // Initiate |
479 | | - auto startToken = threadPool->Start(); |
480 | | - |
481 | | - // Transfer all dispatchers from the null pool to the new thread pool: |
482 | | - std::lock_guard<std::mutex> lk(m_stateBlock->m_lock); |
483 | | - |
484 | | - // If the thread pool was updated while we were trying to start the pool we observed earlier, |
485 | | - // then allow our token to expire and do not do any other work. Whomever caused the thread |
486 | | - // pool pointer to be updated would also have seen that the context is currently started, |
487 | | - // and would have updated both the thread pool pointer and the start token at the same time. |
488 | | - if (m_threadPool == threadPool) |
489 | | - // Swap, not assign; we don't want teardown to happen while synchronized |
490 | | - std::swap(m_startToken, startToken); |
491 | | - } |
492 | | - |
493 | 453 | if (beginning != m_threads.end()) { |
494 | 454 | auto outstanding = m_stateBlock->IncrementOutstandingThreadCount(shared_from_this()); |
495 | 455 | for (auto q = beginning; q != m_threads.end(); ++q) |
@@ -552,16 +512,13 @@ void CoreContext::SignalShutdown(bool wait, ShutdownMode shutdownMode) { |
552 | 512 |
553 | 513 | // Thread pool token and pool pointer |
554 | 514 | std::shared_ptr<void> startToken; |
555 | | - std::shared_ptr<ThreadPool> threadPool; |
556 | 515 |
557 | 516 | // Tear down all the children, evict thread pool: |
558 | 517 | { |
559 | 518 | std::lock_guard<std::mutex> lk(m_stateBlock->m_lock); |
560 | 519 |
561 | 520 | startToken = std::move(m_startToken); |
562 | 521 | m_startToken.reset(); |
563 | | - threadPool = std::move(m_threadPool); |
564 | | - m_threadPool.reset(); |
565 | 522 |
566 | 523 | // Fill strong lock series in order to ensure proper teardown interleave: |
567 | 524 | childrenInterleave.reserve(m_children.size()); |
@@ -717,48 +674,6 @@ void CoreContext::BuildCurrentState(void) { |
717 | 674 | } |
718 | 675 | } |
719 | 676 |
720 | | -void CoreContext::SetThreadPool(const std::shared_ptr<ThreadPool>& threadPool) { |
721 | | - if (!threadPool) |
722 | | - throw std::invalid_argument("A context cannot be given a null thread pool"); |
723 | | - |
724 | | - std::shared_ptr<ThreadPool> priorThreadPool; |
725 | | - { |
726 | | - std::lock_guard<std::mutex> lk(m_stateBlock->m_lock); |
727 | | - if (IsShutdown()) |
728 | | - // Nothing to do, context already down |
729 | | - return; |
730 | | - |
731 | | - if (!IsRunning()) { |
732 | | - // Just set up the forwarding thread pool |
733 | | - auto nullPool = std::dynamic_pointer_cast<NullPool>(m_threadPool); |
734 | | - if (!nullPool) |
735 | | - throw autowiring_error("Internal error, null pool was deassigned even though the context has not been started"); |
736 | | - priorThreadPool = nullPool->GetSuccessor(); |
737 | | - nullPool->SetSuccessor(threadPool); |
738 | | - return; |
739 | | - } |
740 | | - |
741 | | - priorThreadPool = m_threadPool; |
742 | | - m_threadPool = threadPool; |
743 | | - } |
744 | | - |
745 | | - // We are presently running. We need to start the pool, and then attempt to |
746 | | - // update our token |
747 | | - auto startToken = threadPool->Start(); |
748 | | - std::lock_guard<std::mutex> lk(m_stateBlock->m_lock); |
749 | | - if (m_threadPool != threadPool) |
750 | | - // Thread pool was updated by someone else, let them complete their operation |
751 | | - return; |
752 | | - |
753 | | - // Update our start token and return. Swap, not move; we don't want to risk |
754 | | - // calling destructors while synchronized. |
755 | | - std::swap(m_startToken, startToken); |
756 | | -} |
757 | | - |
758 | | -std::shared_ptr<ThreadPool> CoreContext::GetThreadPool(void) const { |
759 | | - return (std::lock_guard<std::mutex>)m_stateBlock->m_lock, m_threadPool; |
760 | | -} |
761 | | - |
762 | 677 | void CoreContext::Dump(std::ostream& os) const { |
763 | 678 | std::lock_guard<std::mutex> lk(m_stateBlock->m_lock); |
764 | 679 |
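
For reference, a minimal sketch (not part of the commit) of how the thread-pool API removed above was driven, using only calls visible in this diff: SetThreadPool, GetThreadPool, Initiate, and SystemThreadPool::New. The helper name UseRemovedPoolApi and its wiring are illustrative assumptions, not Autowiring code.

    #include <memory>
    // CoreContext, ThreadPool, and SystemThreadPool come from the Autowiring
    // headers included at the top of this file.

    void UseRemovedPoolApi(const std::shared_ptr<CoreContext>& ctxt) {
      // Passing a null pool threw std::invalid_argument; before Initiate(),
      // this call only recorded the pool as the NullPool's successor.
      ctxt->SetThreadPool(SystemThreadPool::New());

      // Initiate() promoted the successor to the active pool, started it, and
      // moved any pended dispatchers off the NullPool.
      ctxt->Initiate();

      // The active pool could be read back under the state-block lock.
      std::shared_ptr<ThreadPool> pool = ctxt->GetThreadPool();
    }

After this commit, CoreContext no longer owns a thread pool: the m_threadPool member, the start-token handoff in Initiate() and SignalShutdown(), and the SetThreadPool/GetThreadPool accessors are all removed.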