-
Notifications
You must be signed in to change notification settings - Fork 129
Integration test for CachingSession
#1237
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Open
Bouncheck
wants to merge
2
commits into
scylladb:branch-hackathon
Choose a base branch
from
Bouncheck:caching-session-test
base: branch-hackathon
Could not load branches
Branch not found: {{ refName }}
Loading
Could not load tags
Nothing to show
Loading
Are you sure you want to change the base?
Some commits from the old base branch may be removed from the timeline,
and old review comments may become outdated.
+223
−31
Open
Changes from all commits
Commits
Show all changes
2 commits
Select commit
Hold shift + click to select a range
File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,193 @@ | ||
use std::sync::Arc; | ||
|
||
use crate::utils::test_with_3_node_cluster; | ||
use crate::utils::{setup_tracing, unique_keyspace_name, PerformDDL}; | ||
use scylla::batch::Batch; | ||
use scylla::batch::BatchType; | ||
use scylla::client::caching_session::CachingSession; | ||
use scylla_proxy::RequestOpcode; | ||
use scylla_proxy::RequestReaction; | ||
use scylla_proxy::RequestRule; | ||
use scylla_proxy::ShardAwareness; | ||
use scylla_proxy::{Condition, ProxyError, Reaction, RequestFrame, TargetShard, WorkerError}; | ||
use tokio::sync::mpsc; | ||
|
||
fn consume_current_feedbacks( | ||
rx: &mut mpsc::UnboundedReceiver<(RequestFrame, Option<TargetShard>)>, | ||
) -> usize { | ||
std::iter::from_fn(|| rx.try_recv().ok()).count() | ||
} | ||
|
||
/// Integration test verifying that `CachingSession` really caches prepared
/// statements: after the first execution of a batch, repeated executions must
/// not trigger any further PREPARE requests, and after a server-side schema
/// change only the first contacted shard should re-prepare.
///
/// Runs against a 3-node cluster behind `scylla_proxy`, which is configured to
/// emit a feedback frame for every PREPARE request it forwards; counting those
/// feedbacks tells us exactly how many prepares the driver sent.
#[tokio::test]
#[cfg(not(scylla_cloud_tests))]
async fn ensure_cache_is_used() {
    use scylla::client::execution_profile::ExecutionProfile;

    use crate::utils::SingleTargetLBP;

    setup_tracing();
    let res = test_with_3_node_cluster(
        ShardAwareness::QueryNode,
        |proxy_uris, translation_map, mut running_proxy| async move {
            // Plain (non-caching) session used for schema setup; it is later
            // wrapped into a CachingSession.
            let session = scylla::client::session_builder::SessionBuilder::new()
                .known_node(proxy_uris[0].as_str())
                .address_translator(Arc::new(translation_map))
                .build()
                .await
                .unwrap();

            // One feedback channel per proxied node. Each node's proxy is told
            // to pass PREPARE requests through unchanged (`noop`) but report
            // each one on its channel, so PREPAREs can be counted per node.
            let cluster_size: usize = 3;
            let (feedback_txs, mut feedback_rxs): (Vec<_>, Vec<_>) = (0..cluster_size)
                .map(|_| mpsc::unbounded_channel::<(RequestFrame, Option<TargetShard>)>())
                .unzip();
            for (i, tx) in feedback_txs.iter().cloned().enumerate() {
                running_proxy.running_nodes[i].change_request_rules(Some(vec![RequestRule(
                    Condition::and(
                        Condition::RequestOpcode(RequestOpcode::Prepare),
                        // Exclude connections used for event registration, so
                        // only data-path PREPAREs are counted.
                        Condition::not(Condition::ConnectionRegisteredAnyEvent),
                    ),
                    RequestReaction::noop().with_feedback_when_performed(tx),
                )]));
            }

            let ks = unique_keyspace_name();
            let rs = "{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}";
            session
                .ddl(format!(
                    "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {}",
                    ks, rs
                ))
                .await
                .unwrap();
            session.use_keyspace(ks, false).await.unwrap();
            session
                .ddl("CREATE TABLE IF NOT EXISTS tab (a int, b int, c int, primary key (a, b, c))")
                .await
                .unwrap();
            // Assumption: all nodes have the same number of shards
            let nr_shards = session
                .get_cluster_state()
                .get_nodes_info()
                .first()
                .expect("No nodes information available")
                .sharder()
                .map(|sharder| sharder.nr_shards.get() as usize)
                .unwrap_or_else(|| 1); // If there is no sharder, assume 1 shard.

            // Consume all feedbacks so far to ensure we will not count something unrelated.
            let _feedbacks = feedback_rxs
                .iter_mut()
                .map(consume_current_feedbacks)
                .sum::<usize>();

            let caching_session: CachingSession = CachingSession::from(session, 100);

            // Build a logged batch of `batch_size` distinct INSERT statements
            // (distinct texts, so each one needs its own prepare).
            let batch_size: usize = 4;
            let mut batch = Batch::new(BatchType::Logged);
            for i in 1..=batch_size {
                let insert_b_c = format!("INSERT INTO tab (a, b, c) VALUES ({}, ?, ?)", i);
                batch.append_statement(insert_b_c.as_str());
            }
            let batch_values: Vec<(i32, i32)> = (1..=batch_size as i32).map(|i| (i, i)).collect();

            // First batch that should generate prepares for each shard.
            caching_session
                .batch(&batch, batch_values.clone())
                .await
                .unwrap();
            let feedbacks: usize = feedback_rxs.iter_mut().map(consume_current_feedbacks).sum();
            // Expectation: each of the batch's statements is prepared once on
            // every shard of every node.
            assert_eq!(feedbacks, batch_size * nr_shards * cluster_size);

            // Few extra runs. Those batches should not result in any prepares being sent.
            for _ in 0..4 {
                caching_session
                    .batch(&batch, batch_values.clone())
                    .await
                    .unwrap();
                let feedbacks: usize = feedback_rxs.iter_mut().map(consume_current_feedbacks).sum();
                assert_eq!(feedbacks, 0);
            }

            // The SELECT goes through the caching session too, so it is
            // prepared on first use (counted below) and its rows are used to
            // verify the batch's inserts.
            let prepared_batch_res_rows: Vec<(i32, i32, i32)> = caching_session
                .execute_unpaged("SELECT * FROM tab", &[])
                .await
                .unwrap()
                .into_rows_result()
                .unwrap()
                .rows()
                .unwrap()
                .collect::<Result<_, _>>()
                .unwrap();

            // Select should have been prepared on all shards
            let feedbacks: usize = feedback_rxs.iter_mut().map(consume_current_feedbacks).sum();
            assert_eq!(feedbacks, nr_shards * cluster_size);

            // Verify the data from inserts
            let mut prepared_batch_res_rows = prepared_batch_res_rows;
            prepared_batch_res_rows.sort();
            let expected_rows: Vec<(i32, i32, i32)> =
                (1..=batch_size as i32).map(|i| (i, i, i)).collect();
            assert_eq!(prepared_batch_res_rows, expected_rows);

            // Run some alters to invalidate the server side cache, similarly to scylla/src/session_test.rs
            // Net effect on the schema is a swap of columns b and c, via a
            // temporary name `tmp`.
            caching_session
                .ddl("ALTER TABLE tab RENAME c to tmp")
                .await
                .unwrap();
            caching_session
                .ddl("ALTER TABLE tab RENAME b to c")
                .await
                .unwrap();
            caching_session
                .ddl("ALTER TABLE tab RENAME tmp to b")
                .await
                .unwrap();

            // execute_unpageds caused by alters likely resulted in some prepares being sent.
            // Consume those frames.
            feedback_rxs
                .iter_mut()
                .map(consume_current_feedbacks)
                .sum::<usize>();

            // Run batch for each shard. The server cache should be updated on the first mismatch,
            // therefore only first contacted shard will request reprepare due to mismatch.
            for node_info in caching_session
                .get_session()
                .get_cluster_state()
                .get_nodes_info()
                .iter()
            {
                for shard_id in 0..nr_shards {
                    // Pin the batch to exactly one (node, shard) target so the
                    // per-shard reprepare behaviour can be observed in isolation.
                    let policy = SingleTargetLBP {
                        target: (node_info.clone(), Some(shard_id as u32)),
                    };
                    let execution_profile = ExecutionProfile::builder()
                        .load_balancing_policy(Arc::new(policy))
                        .build();
                    batch.set_execution_profile_handle(Some(execution_profile.into_handle()));
                    caching_session
                        .batch(&batch, batch_values.clone())
                        .await
                        .unwrap();
                    let feedbacks: usize =
                        feedback_rxs.iter_mut().map(consume_current_feedbacks).sum();
                    // Only shard 0 (the first shard contacted on each node)
                    // should trigger reprepares — one per batch statement.
                    let expected_feedbacks = if shard_id == 0 { batch_size } else { 0 };
                    assert_eq!(
                        feedbacks, expected_feedbacks,
                        "Mismatch in feedbacks on execution for node: {:?}, shard: {}",
                        node_info, shard_id
                    );
                }
            }
            running_proxy
        },
    )
    .await;
    // A driver disconnect while the proxy shuts down is expected noise, not a
    // test failure.
    match res {
        Ok(()) => (),
        Err(ProxyError::Worker(WorkerError::DriverDisconnected(_))) => (),
        Err(err) => panic!("{}", err),
    }
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,5 +1,6 @@ | ||
mod authenticate;
mod batch;
// Integration test for prepared-statement caching in CachingSession.
mod caching_session;
mod consistency;
mod cql_collections;
mod cql_types;
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I don't understand why I had to add this annotation here even though I use this struct in my test. Shouldn't the struct be exempt from the dead_code lint, given that the test actually uses it?