diff --git a/lib/galaxy/managers/jobs.py b/lib/galaxy/managers/jobs.py
index d274b4643e7c..61e723554411 100644
--- a/lib/galaxy/managers/jobs.py
+++ b/lib/galaxy/managers/jobs.py
@@ -12,6 +12,7 @@
     cast,
     Optional,
     TYPE_CHECKING,
+    TypeVar,
     Union,
 )
@@ -118,7 +119,11 @@
 from galaxy.work.context import WorkRequestContext
 
 if TYPE_CHECKING:
-    from sqlalchemy.sql.expression import Select
+    from sqlalchemy.sql.expression import (
+        ColumnElement,
+        Label,
+        Select,
+    )
 
 log = logging.getLogger(__name__)
@@ -134,7 +139,7 @@ class JobLock(BaseModel):
     active: bool = Field(title="Job lock status", description="If active, jobs will not dispatch")
 
 
-def get_path_key(path_tuple):
+def get_path_key(path_tuple: tuple):
     path_key = ""
     tuple_elements = len(path_tuple)
     for i, p in enumerate(path_tuple):
@@ -154,12 +159,15 @@ def get_path_key(path_tuple):
 
 
 def safe_label_or_none(label: str) -> Optional[str]:
-    if label and len(label) > 63:
+    if len(label) > 63:
         return None
     return label
 
 
-def safe_aliased(model_class, name=None):
+T = TypeVar("T")
+
+
+def safe_aliased(model_class: type[T], name: str) -> type[T]:
     """Create an aliased model class with a unique name."""
     return aliased(model_class, name=safe_label_or_none(name))
@@ -447,11 +455,11 @@ def by_tool_input(
         job_state: Optional[JobStatesT] = (Job.states.OK,),
         history_id: Union[int, None] = None,
         require_name_match: bool = True,
-    ):
+    ) -> Union[Job, None]:
         """Search for jobs producing same results using the 'inputs' part of a tool POST."""
-        input_data = defaultdict(list)
+        input_data: dict[Any, list[dict[str, Any]]] = defaultdict(list)
 
-        def populate_input_data_input_id(path, key, value):
+        def populate_input_data_input_id(path: tuple, key, value) -> tuple[Any, Any]:
             """Traverses expanded incoming using remap and collects input_ids and input_data."""
             if key == "id":
                 path_key = get_path_key(path[:-2])
@@ -499,13 +507,13 @@ def __search(
         tool_id: str,
         tool_version: Optional[str],
         user: model.User,
-        input_data,
+        input_data: dict[Any, list[dict[str, Any]]],
         job_state: Optional[JobStatesT],
         param_dump: ToolStateDumpedToJsonInternalT,
         wildcard_param_dump=None,
         history_id: Union[int, None] = None,
         require_name_match: bool = True,
-    ):
+    ) -> Union[Job, None]:
         search_timer = ExecutionTimer()
 
         def replace_dataset_ids(path, key, value):
@@ -525,7 +533,7 @@ def replace_dataset_ids(path, key, value):
 
         stmt = select(model.Job.id.label("job_id"))
 
-        data_conditions: list = []
+        data_conditions: list[ColumnElement[bool]] = []
 
         # We now build the stmt filters that relate to the input datasets
         # that this job uses. We keep track of the requested dataset id in `requested_ids`,
@@ -533,7 +541,7 @@ def replace_dataset_ids(path, key, value):
         # and the ids that have been used in the job that has already been run in `used_ids`.
         requested_ids = []
         data_types = []
-        used_ids: list = []
+        used_ids: list[Label[int]] = []
         for k, input_list in input_data.items():
             # k will be matched against the JobParameter.name column. This can be prefixed depending on whether
             # the input is in a repeat, or not (section and conditional)
@@ -722,7 +730,7 @@ def _filter_jobs(
 
         return stmt
 
-    def _exclude_jobs_with_deleted_outputs(self, stmt):
+    def _exclude_jobs_with_deleted_outputs(self, stmt: "Select[tuple[int]]") -> "Select":
         subquery_alias = stmt.subquery("filtered_jobs_subquery")
         outer_select_columns = [subquery_alias.c[col.name] for col in stmt.selected_columns]
         outer_stmt = select(*outer_select_columns).select_from(subquery_alias)
@@ -767,14 +775,14 @@ def _exclude_jobs_with_deleted_outputs(self, stmt):
     def _build_stmt_for_hda(
         self,
         stmt: "Select[tuple[int]]",
-        data_conditions: list,
-        used_ids: list,
+        data_conditions: list["ColumnElement[bool]"],
+        used_ids: list["Label[int]"],
         k,
         v,
         identifier,
         value_index: int,
         require_name_match: bool = True,
-    ):
+    ) -> "Select[tuple[int]]":
         a = aliased(model.JobToInputDatasetAssociation)
         b = aliased(model.HistoryDatasetAssociation)
         c = aliased(model.HistoryDatasetAssociation)
@@ -831,7 +839,15 @@ def _build_stmt_for_hda(
         )
         return stmt
 
-    def _build_stmt_for_ldda(self, stmt, data_conditions, used_ids, k, v, value_index):
+    def _build_stmt_for_ldda(
+        self,
+        stmt: "Select[tuple[int]]",
+        data_conditions: list["ColumnElement[bool]"],
+        used_ids: list["Label[int]"],
+        k,
+        v,
+        value_index: int,
+    ) -> "Select[tuple[int]]":
         a = aliased(model.JobToInputLibraryDatasetAssociation)
         label = safe_label_or_none(f"{k}_{value_index}")
         labeled_col = a.ldda_id.label(label)
@@ -848,8 +864,15 @@ def agg_expression(self, column):
         return func.array_agg(column, order_by=column)
 
     def _build_stmt_for_hdca(
-        self, stmt, data_conditions, used_ids, k, v, user_id, value_index, require_name_match=True
-    ):
+        self,
+        stmt: "Select[tuple[int]]",
+        data_conditions: list["ColumnElement[bool]"],
+        used_ids: list["Label[int]"],
+        k,
+        v,
+        user_id: int,
+        value_index: int,
+    ) -> "Select[tuple[int]]":
         # Strategy for efficiently finding equivalent HDCAs:
         # 1. Determine the structural depth of the target HDCA by its collection_type.
         # 2. For the target HDCA (identified by 'v'):
@@ -1088,7 +1111,16 @@ def _build_stmt_for_hdca(
         data_conditions.append(a.name == k)
         return stmt
 
-    def _build_stmt_for_dce(self, stmt, data_conditions, used_ids, k, v, user_id, value_index):
+    def _build_stmt_for_dce(
+        self,
+        stmt: "Select[tuple[int]]",
+        data_conditions: list["ColumnElement[bool]"],
+        used_ids: list["Label[int]"],
+        k,
+        v,
+        user_id: int,
+        value_index: int,
+    ) -> "Select[tuple[int]]":
         dce_root_target = self.sa_session.get_one(model.DatasetCollectionElement, v)
 
         # Determine if the target DCE points to an HDA or a child collection
diff --git a/lib/galaxy_test/api/test_tool_execute.py b/lib/galaxy_test/api/test_tool_execute.py
index de8ac3bcca61..59018a376680 100644
--- a/lib/galaxy_test/api/test_tool_execute.py
+++ b/lib/galaxy_test/api/test_tool_execute.py
@@ -47,38 +47,38 @@ def test_multidata_param(
             }
         )
     )
-    execution = required_tool.execute.with_inputs(inputs)
+    execution = required_tool.execute().with_inputs(inputs)
     execution.assert_has_job(0).with_output("out1").with_contents("1\t2\t3\n4\t5\t6\n")
     execution.assert_has_job(0).with_output("out2").with_contents("4\t5\t6\n1\t2\t3\n")
 
 
 @requires_tool_id("expression_forty_two")
 def test_galaxy_expression_tool_simplest(required_tool: RequiredTool):
-    required_tool.execute.assert_has_single_job.with_single_output.with_contents("42")
+    required_tool.execute().assert_has_single_job.with_single_output.with_contents("42")
 
 
 @requires_tool_id("expression_parse_int")
 def test_galaxy_expression_tool_simple(required_tool: RequiredTool):
-    execution = required_tool.execute.with_inputs({"input1": "7"})
+    execution = required_tool.execute().with_inputs({"input1": "7"})
     execution.assert_has_single_job.with_single_output.with_contents("7")
 
 
 @requires_tool_id("expression_log_line_count")
 def test_galaxy_expression_metadata(target_history: TargetHistory, required_tool: RequiredTool):
     hda1 = target_history.with_dataset("1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14").src_dict
-    execution = required_tool.execute.with_inputs({"input1": hda1})
+    execution = required_tool.execute().with_inputs({"input1": hda1})
     execution.assert_has_single_job.with_single_output.with_contents("3")
 
 
 @requires_tool_id("multi_select")
 def test_multi_select_as_list(required_tool: RequiredTool):
-    execution = required_tool.execute.with_inputs({"select_ex": ["--ex1", "ex2"]})
+    execution = required_tool.execute().with_inputs({"select_ex": ["--ex1", "ex2"]})
     execution.assert_has_single_job.with_output("output").with_contents("--ex1,ex2")
 
 
 @requires_tool_id("multi_select")
 def test_multi_select_optional(required_tool: RequiredTool):
-    execution = required_tool.execute.with_inputs(
+    execution = required_tool.execute().with_inputs(
         {
             "select_ex": ["--ex1"],
             "select_optional": None,
@@ -92,14 +92,14 @@
 def test_identifier_outside_map(target_history: TargetHistory, required_tool: RequiredTool):
     hda = target_history.with_dataset("123", named="Plain HDA")
-    execute = required_tool.execute.with_inputs({"input1": hda.src_dict})
+    execute = required_tool.execute().with_inputs({"input1": hda.src_dict})
     execute.assert_has_single_job.assert_has_single_output.with_contents_stripped("Plain HDA")
 
 
 @requires_tool_id("identifier_multiple")
 def test_identifier_in_multiple_reduce(target_history: TargetHistory, required_tool: RequiredTool):
     hdca = target_history.with_pair()
-    execute = required_tool.execute.with_inputs({"input1": hdca.src_dict})
+    execute = required_tool.execute().with_inputs({"input1": hdca.src_dict})
     execute.assert_has_single_job.assert_has_single_output.with_contents_stripped("forward\nreverse")
@@ -120,7 +120,7 @@ def test_identifier_map_over_multiple_input_in_conditional(
             },
         }
     )
-    execute = required_tool.execute.with_inputs(inputs)
+    execute = required_tool.execute().with_inputs(inputs)
     execute.assert_has_single_job.assert_has_single_output.with_contents_stripped("forward\nreverse")
@@ -138,7 +138,7 @@ def test_identifier_multiple_reduce_in_repeat(
             "the_repeat_0|the_data|input1": hdca.src_dict,
         }
     )
-    execute = required_tool.execute.with_inputs(inputs)
+    execute = required_tool.execute().with_inputs(inputs)
     execute.assert_has_single_job.assert_has_single_output.with_contents_stripped("forward\nreverse")
@@ -172,7 +172,7 @@ def test_map_over_with_output_format_actions(
             }
         )
    )
-    execute = required_tool.execute.with_inputs(inputs)
+    execute = required_tool.execute().with_inputs(inputs)
     execute.assert_has_n_jobs(2).assert_creates_n_implicit_collections(1)
     expected_extension = "txt" if (use_action == "do") else "data"
     execute.assert_has_job(0).with_single_output.with_file_ext(expected_extension)
@@ -182,7 +182,7 @@
 @requires_tool_id("output_action_change_format_paired")
 def test_map_over_with_nested_paired_output_format_actions(target_history: TargetHistory, required_tool: RequiredTool):
     hdca = target_history.with_example_list_of_pairs()
-    execute = required_tool.execute.with_inputs(
+    execute = required_tool.execute().with_inputs(
         {"input": {"batch": True, "values": [dict(map_over_type="paired", **hdca.src_dict)]}}
     )
     execute.assert_has_n_jobs(2).assert_creates_n_implicit_collections(1)
@@ -194,7 +194,7 @@ def test_map_over_with_nested_paired_output_format_actions(target_history: Targe
 def test_identifier_with_data_collection(target_history: TargetHistory, required_tool: RequiredTool):
     contents = [("foo", "text for foo element"), ("bar", "more text for bar element")]
     hdca = target_history.with_list(contents)
-    execute = required_tool.execute.with_inputs({"input1": hdca.src_dict})
+    execute = required_tool.execute().with_inputs({"input1": hdca.src_dict})
     execute.assert_has_single_job.assert_has_single_output.with_contents_stripped("foo\nbar")
@@ -203,7 +203,7 @@ def test_identifier_in_actions(target_history: TargetHistory, required_tool: Req
     contents = [("foo", "text for foo element"), ("bar", "more text for bar element")]
     hdca = target_history.with_list(contents)
 
-    execute = required_tool.execute.with_inputs({"input": {"batch": True, "values": [hdca.src_dict]}})
+    execute = required_tool.execute().with_inputs({"input": {"batch": True, "values": [hdca.src_dict]}})
 
     output = execute.assert_has_job(0).assert_has_single_output
     assert output.details["metadata_column_names"][1] == "foo", output.details
@@ -215,7 +215,7 @@ def test_identifier_in_actions(target_history: TargetHistory, required_tool: Req
 @requires_tool_id("identifier_single_in_repeat")
 def test_identifier_single_in_repeat(target_history: TargetHistory, required_tool: RequiredTool):
     hdca = target_history.with_pair()
-    execute = required_tool.execute.with_inputs(
+    execute = required_tool.execute().with_inputs(
         {"the_repeat_0|the_data|input1": {"batch": True, "values": [hdca.src_dict]}}
     )
     execute.assert_has_n_jobs(2).assert_creates_n_implicit_collections(1)
@@ -227,7 +227,7 @@ def test_identifier_single_in_repeat(target_history: TargetHistory, required_too
 @requires_tool_id("identifier_multiple_in_conditional")
 def test_identifier_multiple_in_conditional(target_history: TargetHistory, required_tool: RequiredTool):
     hda = target_history.with_dataset("123", named="Normal HDA1")
-    execute = required_tool.execute.with_inputs(
+    execute = required_tool.execute().with_inputs(
         {
             "outer_cond|inner_cond|input1": hda.src_dict,
         }
@@ -239,7 +239,7 @@ def test_identifier_multiple_in_conditional(target_history: TargetHistory, requi
 def test_identifier_with_multiple_normal_datasets(target_history: TargetHistory, required_tool: RequiredTool):
     hda1 = target_history.with_dataset("123", named="Normal HDA1")
     hda2 = target_history.with_dataset("456", named="Normal HDA2")
-    execute = required_tool.execute.with_inputs({"input1": [hda1.src_dict, hda2.src_dict]})
+    execute = required_tool.execute().with_inputs({"input1": [hda1.src_dict, hda2.src_dict]})
     execute.assert_has_single_job.assert_has_single_output.with_contents_stripped("Normal HDA1\nNormal HDA2")
@@ -249,7 +249,7 @@ def test_map_over_empty_collection(target_history: TargetHistory, required_tool:
     inputs = {
         "input1": {"batch": True, "values": [hdca.src_dict]},
     }
-    execute = required_tool.execute.with_inputs(inputs)
+    execute = required_tool.execute().with_inputs(inputs)
     execute.assert_has_n_jobs(0)
     name = execute.assert_creates_implicit_collection(0).details["name"]
     assert "Concatenate datasets" in name
@@ -304,7 +304,7 @@ def test_multi_run_in_repeat(
             }
         )
     )
-    execute = required_tool.execute.with_inputs(inputs)
+    execute = required_tool.execute().with_inputs(inputs)
     _check_multi_run_in_repeat(execute)
@@ -343,7 +343,7 @@ def test_multi_run_in_repeat_mismatch(
             }
        )
     )
-    execute = required_tool.execute.with_inputs(inputs)
+    execute = required_tool.execute().with_inputs(inputs)
     _check_multi_run_in_repeat(execute)
@@ -400,7 +400,7 @@ def test_multirun_on_multiple_inputs(
             }
         )
     )
-    execute = required_tool.execute.with_inputs(inputs)
+    execute = required_tool.execute().with_inputs(inputs)
     execute.assert_has_n_jobs(2)
     execute.assert_has_job(0).with_single_output.with_contents_stripped("123\n789")
     execute.assert_has_job(1).with_single_output.with_contents_stripped("456\n0ab")
@@ -446,7 +446,7 @@ def test_multirun_on_multiple_inputs_unlinked(
             }
         )
     )
-    execute = required_tool.execute.with_inputs(inputs)
+    execute = required_tool.execute().with_inputs(inputs)
     execute.assert_has_n_jobs(4)
     execute.assert_has_job(0).with_single_output.with_contents_stripped("123\n789")
     execute.assert_has_job(1).with_single_output.with_contents_stripped("123\n0ab")
@@ -462,7 +462,7 @@ def test_map_over_collection(
     legacy = {"input1": {"batch": True, "values": [hdca.src_dict]}}
     request = {"input1": {"__class__": "Batch", "values": [hdca.src_dict]}}
     inputs = tool_input_format.when.flat(legacy).when.nested(legacy).when.request(request)
-    execute = required_tool.execute.with_inputs(inputs)
+    execute = required_tool.execute().with_inputs(inputs)
     execute.assert_has_n_jobs(2).assert_creates_n_implicit_collections(1)
     output_collection = execute.assert_creates_implicit_collection(0)
     output_collection.assert_has_dataset_element("forward").with_contents_stripped("123")
@@ -472,7 +472,7 @@
 @requires_tool_id("cat|cat1")
 def test_map_over_data_with_paired_or_unpaired_unpaired(target_history: TargetHistory, required_tool: RequiredTool):
     hdca = target_history.with_unpaired()
-    execute = required_tool.execute.with_inputs({"input1": {"batch": True, "values": [hdca.src_dict]}})
+    execute = required_tool.execute().with_inputs({"input1": {"batch": True, "values": [hdca.src_dict]}})
     execute.assert_has_n_jobs(1).assert_creates_n_implicit_collections(1)
     output_collection = execute.assert_creates_implicit_collection(0)
     output_collection.assert_collection_type_is("paired_or_unpaired")
@@ -482,7 +482,7 @@ def test_map_over_data_with_paired_or_unpaired_unpaired(target_history: TargetHi
 @requires_tool_id("cat|cat1")
 def test_map_over_data_with_paired_or_unpaired_paired(target_history: TargetHistory, required_tool: RequiredTool):
     hdca = target_history.with_paired_or_unpaired_pair()
-    execute = required_tool.execute.with_inputs({"input1": {"batch": True, "values": [hdca.src_dict]}})
+    execute = required_tool.execute().with_inputs({"input1": {"batch": True, "values": [hdca.src_dict]}})
     execute.assert_creates_n_implicit_collections(1)
     output_collection = execute.assert_creates_implicit_collection(0)
     output_collection.assert_collection_type_is("paired_or_unpaired")
@@ -493,7 +493,7 @@ def test_map_over_data_with_paired_or_unpaired_paired(target_history: TargetHist
 @requires_tool_id("cat|cat1")
 def test_map_over_data_with_list_paired_or_unpaired(target_history: TargetHistory, required_tool: RequiredTool):
     hdca = target_history.with_list_of_paired_and_unpaired()
-    execute = required_tool.execute.with_inputs({"input1": {"batch": True, "values": [hdca.src_dict]}})
+    execute = required_tool.execute().with_inputs({"input1": {"batch": True, "values": [hdca.src_dict]}})
     execute.assert_creates_n_implicit_collections(1)
     output_collection = execute.assert_creates_implicit_collection(0)
     output_collection.assert_collection_type_is("list:paired_or_unpaired")
@@ -504,7 +504,7 @@ def test_map_over_data_with_list_paired_or_unpaired(target_history: TargetHistor
 @requires_tool_id("collection_paired_or_unpaired")
 def test_map_over_paired_or_unpaired_with_list_paired(target_history: TargetHistory, required_tool: RequiredTool):
     hdca = target_history.with_example_list_of_pairs()
-    execute = required_tool.execute.with_inputs(
+    execute = required_tool.execute().with_inputs(
         {"f1": {"batch": True, "values": [{"map_over_type": "paired", **hdca.src_dict}]}}
     )
     execute.assert_has_n_jobs(2).assert_creates_n_implicit_collections(1)
@@ -517,7 +517,7 @@ def test_map_over_paired_or_unpaired_with_list_paired(target_history: TargetHist
 def test_map_over_paired_or_unpaired_with_list(target_history: TargetHistory, required_tool: RequiredTool):
     contents = [("foo", "text for foo element")]
     hdca = target_history.with_list(contents)
-    execute = required_tool.execute.with_inputs(
+    execute = required_tool.execute().with_inputs(
         {"f1": {"batch": True, "values": [{"map_over_type": "single_datasets", **hdca.src_dict}]}}
     )
     execute.assert_has_n_jobs(1).assert_creates_n_implicit_collections(1)
@@ -528,7 +528,7 @@ def test_map_over_paired_or_unpaired_with_list(target_history: TargetHistory, re
 @requires_tool_id("collection_paired_or_unpaired")
 def test_map_over_paired_or_unpaired_with_list_of_lists(target_history: TargetHistory, required_tool: RequiredTool):
     hdca = target_history.with_example_list_of_lists()
-    execute = required_tool.execute.with_inputs(
+    execute = required_tool.execute().with_inputs(
         {"f1": {"batch": True, "values": [{"map_over_type": "single_datasets", **hdca.src_dict}]}}
     )
     execute.assert_has_n_jobs(3).assert_creates_n_implicit_collections(1)
@@ -542,7 +542,7 @@ def test_map_over_paired_or_unpaired_with_list_of_lists(target_history: TargetHi
 @requires_tool_id("collection_paired_or_unpaired")
 def test_adapting_dataset_to_paired_or_unpaired(target_history: TargetHistory, required_tool: RequiredTool):
     hda1 = target_history.with_dataset("1\t2\t3").src_dict
-    execution = required_tool.execute.with_inputs(
+    execution = required_tool.execute().with_inputs(
         {
             "f1": {
                 "src": "CollectionAdapter",
@@ -558,7 +558,7 @@ def test_adapting_dataset_to_paired_or_unpaired(target_history: TargetHistory, r
 @requires_tool_id("cat_collection")
 def test_adapting_dataset_to_list(target_history: TargetHistory, required_tool: RequiredTool):
     hda1 = target_history.with_dataset("1\t2\t3").src_dict
-    execution = required_tool.execute.with_inputs(
+    execution = required_tool.execute().with_inputs(
         {
             "input1": {
                 "src": "CollectionAdapter",
@@ -575,7 +575,7 @@ def test_adapting_dataset_to_list(target_history: TargetHistory, required_tool:
 def test_adapting_two_datasets_to_paired_collection(target_history: TargetHistory, required_tool: RequiredTool):
     hda1 = target_history.with_dataset("1\t2\t3").src_dict
     hda2 = target_history.with_dataset("4\t5\t6").src_dict
-    execution = required_tool.execute.with_inputs(
+    execution = required_tool.execute().with_inputs(
         {
             "f1": {
                 "src": "CollectionAdapter",
@@ -594,7 +594,7 @@ def test_adapting_two_datasets_to_paired_collection(target_history: TargetHistor
 @requires_tool_id("gx_data")
 def test_map_over_data_param_with_list_of_lists(target_history: TargetHistory, required_tool: RequiredTool):
     hdca = target_history.with_example_list_of_lists()
-    execute = required_tool.execute.with_inputs({"parameter": {"batch": True, "values": [hdca.src_dict]}})
+    execute = required_tool.execute().with_inputs({"parameter": {"batch": True, "values": [hdca.src_dict]}})
     execute.assert_has_n_jobs(3).assert_creates_n_implicit_collections(1)
     execute.assert_creates_implicit_collection(0)
@@ -604,7 +604,7 @@ def test_optional_repeats_with_mins_filled_id(target_history: TargetHistory, req
     # we have a tool test for this but I wanted to verify it wasn't just the
     # tool test framework filling in a default. Creating a raw request here
     # verifies that currently select parameters don't require a selection.
-    required_tool.execute.assert_has_single_job.with_single_output.containing("false").containing("length: 2")
+    required_tool.execute().assert_has_single_job.with_single_output.containing("false").containing("length: 2")
 
 
 @requires_tool_id("gx_select")
@@ -612,7 +612,7 @@ def test_optional_repeats_with_mins_filled_id(target_history: TargetHistory, req
 def test_select_first_by_default(required_tools: list[RequiredTool], tool_input_format: DescribeToolInputs):
     empty = tool_input_format.when.any({})
     for required_tool in required_tools:
-        required_tool.execute.with_inputs(empty).assert_has_single_job.with_output("output").with_contents_stripped(
+        required_tool.execute().with_inputs(empty).assert_has_single_job.with_output("output").with_contents_stripped(
             "--ex1"
         )
@@ -626,7 +626,7 @@ def test_select_on_null_errors(required_tools: list[RequiredTool], tool_input_fo
     # is passed, an error (rightfully) occurs. This test verifies that.
     null_parameter = tool_input_format.when.any({"parameter": None})
     for required_tool in required_tools:
-        fails = required_tool.execute.with_inputs(null_parameter).assert_fails
+        fails = required_tool.execute().with_inputs(null_parameter).assert_fails
         if tool_input_format.is_request:
             fails.with_error_containing("Input should be")
         else:
@@ -641,7 +641,7 @@ def test_select_empty_causes_error_regardless(
     # despite selects otherwise selecting defaults - nothing can be done if the select option list is empty
     empty = tool_input_format.when.any({})
     for required_tool in required_tools:
-        failure = required_tool.execute.with_inputs(empty).assert_fails
+        failure = required_tool.execute().with_inputs(empty).assert_fails
         if tool_input_format.is_request:
             failure.with_error_containing("validation error")
         else:
@@ -656,10 +656,10 @@ def test_select_optional_null_by_default(required_tools: list[RequiredTool], too
     empty = tool_input_format.when.any({})
     null_parameter = tool_input_format.when.any({"parameter": None})
     for required_tool in required_tools:
-        required_tool.execute.with_inputs(empty).assert_has_single_job.with_output("output").with_contents_stripped(
+        required_tool.execute().with_inputs(empty).assert_has_single_job.with_output("output").with_contents_stripped(
             "None"
         )
-        required_tool.execute.with_inputs(null_parameter).assert_has_single_job.with_output(
+        required_tool.execute().with_inputs(null_parameter).assert_has_single_job.with_output(
             "output"
         ).with_contents_stripped("None")
@@ -673,10 +673,10 @@ def test_select_multiple_does_not_select_first_by_default(
     empty = tool_input_format.when.any({})
     null_parameter = tool_input_format.when.any({"parameter": None})
     for required_tool in required_tools:
-        required_tool.execute.with_inputs(empty).assert_has_single_job.with_output("output").with_contents_stripped(
+        required_tool.execute().with_inputs(empty).assert_has_single_job.with_output("output").with_contents_stripped(
             "None"
         )
-        required_tool.execute.with_inputs(null_parameter).assert_has_single_job.with_output(
+        required_tool.execute().with_inputs(null_parameter).assert_has_single_job.with_output(
             "output"
         ).with_contents_stripped("None")
@@ -687,7 +687,7 @@ def test_select_multiple_does_default_to_select_values_marked_as_selected(
 ):
     empty = tool_input_format.when.any({})
     for required_tool in required_tools:
-        required_tool.execute.with_inputs(empty).assert_has_single_job.with_output("output").with_contents_stripped(
+        required_tool.execute().with_inputs(empty).assert_has_single_job.with_output("output").with_contents_stripped(
             "--ex3"
         )
@@ -696,31 +696,31 @@
 @requires_tool_id("gx_text_optional_false")
 def test_null_to_text_tools(required_tools: list[RequiredTool], tool_input_format: DescribeToolInputs):
     for required_tool in required_tools:
-        execute = required_tool.execute.with_inputs(tool_input_format.when.any({}))
+        execute = required_tool.execute().with_inputs(tool_input_format.when.any({}))
         execute.assert_has_single_job.with_output("output").with_contents_stripped("")
         execute.assert_has_single_job.with_output("inputs_json").with_json({"parameter": ""})
 
-        execute = required_tool.execute.with_inputs(tool_input_format.when.any({"parameter": None}))
+        execute = required_tool.execute().with_inputs(tool_input_format.when.any({"parameter": None}))
         execute.assert_has_single_job.with_output("output").with_contents_stripped("")
         execute.assert_has_single_job.with_output("inputs_json").with_json({"parameter": ""})
@requires_tool_id("gx_text_optional") def test_null_to_optional_text_tool(required_tool: RequiredTool, tool_input_format: DescribeToolInputs): - execute = required_tool.execute.with_inputs(tool_input_format.when.any({})) + execute = required_tool.execute().with_inputs(tool_input_format.when.any({})) execute.assert_has_single_job.with_output("output").with_contents_stripped("") execute.assert_has_single_job.with_output("inputs_json").with_json({"parameter": None}) - execute = required_tool.execute.with_inputs(tool_input_format.when.any({"parameter": None})) + execute = required_tool.execute().with_inputs(tool_input_format.when.any({"parameter": None})) execute.assert_has_single_job.with_output("output").with_contents_stripped("") execute.assert_has_single_job.with_output("inputs_json").with_json({"parameter": None}) @requires_tool_id("gx_text_empty_validation") def test_null_to_text_tool_with_validation(required_tool: RequiredTool, tool_input_format: DescribeToolInputs): - required_tool.execute.with_inputs(tool_input_format.when.any({})).assert_fails() - required_tool.execute.with_inputs(tool_input_format.when.any({"parameter": None})).assert_fails() - required_tool.execute.with_inputs(tool_input_format.when.any({"parameter": ""})).assert_fails() + required_tool.execute().with_inputs(tool_input_format.when.any({})).assert_fails() + required_tool.execute().with_inputs(tool_input_format.when.any({"parameter": None})).assert_fails() + required_tool.execute().with_inputs(tool_input_format.when.any({"parameter": ""})).assert_fails() @requires_tool_id("cat|cat1") @@ -729,7 +729,7 @@ def test_deferred_basic(required_tool: RequiredTool, target_history: TargetHisto inputs = { "input1": has_src_dict.src_dict, } - output = required_tool.execute.with_inputs(inputs).assert_has_single_job.with_single_output + output = required_tool.execute().with_inputs(inputs).assert_has_single_job.with_single_output output.assert_contains("chr1 147962192 147962580 CCDS989.1_cds_0_0_chr1_147962193_r 0 -") @@ -740,7 +740,7 @@ def test_deferred_with_metadata_options_filter(required_tool: RequiredTool, targ "input_bam": has_src_dict.src_dict, "ref_names": "chrM", } - required_tool.execute.with_inputs(inputs).assert_has_single_job.with_single_output.with_contents_stripped("chrM") + required_tool.execute().with_inputs(inputs).assert_has_single_job.with_single_output.with_contents_stripped("chrM") @requires_tool_id("cat_list") @@ -750,6 +750,6 @@ def test_deferred_multi_input(required_tool: RequiredTool, target_history: Targe inputs = { "input1": [has_src_dict_bed.src_dict, has_src_dict_txt.src_dict], } - output = required_tool.execute.with_inputs(inputs).assert_has_single_job.with_single_output + output = required_tool.execute().with_inputs(inputs).assert_has_single_job.with_single_output output.assert_contains("chr1 147962192 147962580 CCDS989.1_cds_0_0_chr1_147962193_r 0 -") output.assert_contains("chr1 4225 19670") diff --git a/lib/galaxy_test/api/test_tools.py b/lib/galaxy_test/api/test_tools.py index c06ff6a127aa..60c5fdc905ac 100644 --- a/lib/galaxy_test/api/test_tools.py +++ b/lib/galaxy_test/api/test_tools.py @@ -3112,23 +3112,6 @@ def test_group_tag_selection_multiple(self, history_id): output_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output) assert output_content.strip() == "123\n456\n456\n0ab" - @skip_without_tool("cat1") - def test_run_deferred_dataset(self, history_id): - details = self.dataset_populator.create_deferred_hda( - history_id, 
"https://raw.githubusercontent.com/galaxyproject/galaxy/dev/test-data/1.bed", ext="bed" - ) - inputs = { - "input1": dataset_to_param(details), - } - outputs = self._cat1_outputs(history_id, inputs=inputs) - output = outputs[0] - details = self.dataset_populator.get_history_dataset_details( - history_id, dataset=output, wait=True, assert_ok=True - ) - assert details["state"] == "ok" - output_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output) - assert output_content.startswith("chr1 147962192 147962580 CCDS989.1_cds_0_0_chr1_147962193_r 0 -") - @skip_without_tool("cat1") def test_run_deferred_dataset_cached(self, history_id): content = uuid4().hex @@ -3162,20 +3145,6 @@ def test_run_deferred_dataset_cached(self, history_id): assert new_job_details["state"] == "ok" assert new_job_details["copied_from_job_id"] == job_details["id"] - @skip_without_tool("metadata_bam") - def test_run_deferred_dataset_with_metadata_options_filter(self, history_id): - url_1 = self.dataset_populator.base64_url_for_test_file("1.bam") - details = self.dataset_populator.create_deferred_hda(history_id, url_1, ext="bam") - inputs = {"input_bam": dataset_to_param(details), "ref_names": "chrM"} - run_response = self.dataset_populator.run_tool(tool_id="metadata_bam", inputs=inputs, history_id=history_id) - output = run_response["outputs"][0] - output_details = self.dataset_populator.get_history_dataset_details( - history_id, dataset=output, wait=True, assert_ok=True - ) - assert output_details["state"] == "ok" - output_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output) - assert output_content.startswith("chrM") - @skip_without_tool("pileup") def test_metadata_validator_on_deferred_input(self, history_id): url_1 = self.dataset_populator.base64_url_for_test_file("1.bam") diff --git a/lib/galaxy_test/base/api.py b/lib/galaxy_test/base/api.py index 0a6cb67b82d9..8efad9d6c88a 100644 --- a/lib/galaxy_test/base/api.py +++ b/lib/galaxy_test/base/api.py @@ -3,6 +3,7 @@ from typing import ( Any, Optional, + TYPE_CHECKING, ) from urllib.parse import ( urlencode, @@ -30,6 +31,9 @@ ) from .interactor import TestCaseGalaxyInteractor as BaseInteractor +if TYPE_CHECKING: + from requests import Response + CONFIG_PREFIXES = ["GALAXY_TEST_CONFIG_", "GALAXY_CONFIG_OVERRIDE_", "GALAXY_CONFIG_"] CELERY_BROKER = get_from_env("CELERY_BROKER", CONFIG_PREFIXES, "memory://") CELERY_BACKEND = get_from_env("CELERY_BACKEND", CONFIG_PREFIXES, "rpc://localhost") @@ -198,7 +202,7 @@ def _patch(self, *args, **kwds): def _assert_status_code_is_ok(self, response): assert_status_code_is_ok(response) - def _assert_status_code_is(self, response, expected_status_code): + def _assert_status_code_is(self, response: "Response", expected_status_code: int) -> None: assert_status_code_is(response, expected_status_code) def _assert_has_keys(self, response, *keys): diff --git a/lib/galaxy_test/base/populators.py b/lib/galaxy_test/base/populators.py index f14b46d7a74d..fe315426c2fa 100644 --- a/lib/galaxy_test/base/populators.py +++ b/lib/galaxy_test/base/populators.py @@ -605,7 +605,7 @@ def fetch_hda(self, history_id: str, item: dict[str, Any], wait: bool = True) -> assert len(hdas) == 1 return hdas[0] - def create_deferred_hda(self, history_id, uri: str, ext: Optional[str] = None) -> dict[str, Any]: + def create_deferred_hda(self, history_id: str, uri: str, ext: Optional[str] = None) -> dict[str, Any]: item = { "src": "url", "url": uri, @@ -4161,9 +4161,8 @@ def __init__(self, 
dataset_populator: BaseDatasetPopulator, tool_id: str, defaul self._tool_id = tool_id self._default_history_id = default_history_id - @property - def execute(self) -> "DescribeToolExecution": - execution = DescribeToolExecution(self._dataset_populator, self._tool_id) + def execute(self, use_cached_job: bool = False) -> "DescribeToolExecution": + execution = DescribeToolExecution(self._dataset_populator, self._tool_id, use_cached_job=use_cached_job) if self._default_history_id: execution.in_history(self._default_history_id) return execution @@ -4213,9 +4212,10 @@ class DescribeToolExecution: _inputs: dict[str, Any] _tool_request_id: Optional[str] = None # if input_format == "request" request ID - def __init__(self, dataset_populator: BaseDatasetPopulator, tool_id: str): + def __init__(self, dataset_populator: BaseDatasetPopulator, tool_id: str, use_cached_job: bool = False) -> None: self._dataset_populator = dataset_populator self._tool_id = tool_id + self.use_cached_job = use_cached_job self._inputs = {} def in_history(self, has_history_id: Union[str, "TargetHistory"]) -> Self: @@ -4245,7 +4245,9 @@ def with_request(self, inputs: dict[str, Any]) -> Self: return self def _execute(self): - kwds = {} + kwds: dict[str, Any] = { + "use_cached_job": self.use_cached_job, + } if self._input_format is not None: kwds["input_format"] = self._input_format history_id = self._ensure_history_id