Skip to content

Commit f3b8831

Browse files
feat(executors): ensure that queries are found relative to the workflow file
1 parent dfb36f0 commit f3b8831

File tree

6 files changed

+53
-27
lines changed

6 files changed

+53
-27
lines changed

libs/core/garf/core/fetchers/fake.py

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -43,19 +43,21 @@ def __init__(
4343
query_specification_builder: query_editor.QuerySpecification = (
4444
query_editor.QuerySpecification
4545
),
46+
data: Sequence[dict[str, Any]] | None = None,
4647
data_location: str | os.PathLike[str] | None = None,
4748
csv_location: str | os.PathLike[str] | None = None,
4849
json_location: str | os.PathLike[str] | None = None,
4950
**kwargs: str,
5051
) -> None:
51-
if not api_client and not (
52-
data_location := json_location or csv_location or data_location
53-
):
54-
raise report_fetcher.ApiReportFetcherError(
55-
'Missing fake data for the fetcher.'
56-
)
5752
if not api_client:
58-
api_client = api_clients.FakeApiClient.from_file(data_location)
53+
if data:
54+
api_client = api_clients.FakeApiClient(results=list(data))
55+
elif data_location := json_location or csv_location or data_location:
56+
api_client = api_clients.FakeApiClient.from_file(data_location)
57+
else:
58+
raise report_fetcher.ApiReportFetcherError(
59+
'Missing fake data for the fetcher.'
60+
)
5961
super().__init__(api_client, parser, query_specification_builder, **kwargs)
6062

6163
@classmethod

libs/executors/garf/executors/entrypoints/cli.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121

2222
import argparse
2323
import logging
24+
import pathlib
2425
import sys
2526

2627
import garf.executors
@@ -82,6 +83,7 @@ def main():
8283
)
8384
reader_client = reader.create_reader(args.input)
8485
if workflow_file := args.workflow:
86+
wf_parent = pathlib.Path.cwd() / pathlib.Path(workflow_file).parent
8587
execution_workflow = workflow.Workflow.from_file(workflow_file)
8688
for i, step in enumerate(execution_workflow.steps, 1):
8789
with tracer.start_as_current_span(f'{i}-{step.fetcher}'):
@@ -99,7 +101,8 @@ def main():
99101
)
100102
for query in queries:
101103
if isinstance(query, garf.executors.workflow.QueryPath):
102-
batch[query.path] = reader_client.read(query.path)
104+
query_path = wf_parent / pathlib.Path(query.path)
105+
batch[query.path] = reader_client.read(query_path)
103106
else:
104107
batch[query.query.title] = query.query.text
105108
query_executor.execute_batch(

libs/executors/garf/executors/execution_context.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020

2121
import os
2222
import pathlib
23+
from typing import Any
2324

2425
import pydantic
2526
import smart_open
@@ -42,8 +43,8 @@ class ExecutionContext(pydantic.BaseModel):
4243
query_parameters: query_editor.GarfQueryParameters | None = pydantic.Field(
4344
default_factory=dict
4445
)
45-
fetcher_parameters: dict[str, str | bool | int | list[str | int]] | None = (
46-
pydantic.Field(default_factory=dict)
46+
fetcher_parameters: dict[str, Any] | None = pydantic.Field(
47+
default_factory=dict
4748
)
4849
writer: str | list[str] | None = None
4950
writer_parameters: dict[str, str] | None = pydantic.Field(
Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
SELECT
22
resource,
3-
dimension.name AS name,
4-
metric.clicks AS clics
3+
dimensions.name AS name,
4+
metrics.clicks AS clicks
55
FROM resource

libs/executors/tests/end-to-end/test_cli.py

Lines changed: 5 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -124,20 +124,9 @@ def test_fake_source_from_config(self, tmp_path):
124124
assert result.returncode == 0
125125
assert json.loads(result.stdout) == self.expected_output
126126

127-
def test_fake_source_from_workflow(self, tmp_path):
127+
def test_fake_source_from_workflow(self):
128128
workflow_path = _SCRIPT_PATH / 'test_workflow.yaml'
129-
with open(workflow_path, 'r', encoding='utf-8') as f:
130-
workflow_data = yaml.safe_load(f)
131-
original_data_location = workflow_data['steps'][0]['fetcher_parameters'][
132-
'data_location'
133-
]
134-
workflow_data['steps'][0]['fetcher_parameters']['data_location'] = str(
135-
_SCRIPT_PATH / original_data_location
136-
)
137-
tmp_workflow = tmp_path / 'workflow.yaml'
138-
with open(tmp_workflow, 'w', encoding='utf-8') as f:
139-
yaml.dump(workflow_data, f, encoding='utf-8')
140-
command = f'garf -w {str(tmp_workflow)} --loglevel ERROR'
129+
command = f'garf -w {str(workflow_path)} --loglevel ERROR'
141130
result = subprocess.run(
142131
command,
143132
shell=True,
@@ -147,4 +136,6 @@ def test_fake_source_from_workflow(self, tmp_path):
147136
)
148137

149138
assert result.returncode == 0
150-
assert json.loads(result.stdout) == self.expected_output
139+
for output in result.stdout.split('\n'):
140+
if output:
141+
assert json.loads(output) == self.expected_output

libs/executors/tests/end-to-end/test_workflow.yaml

Lines changed: 30 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ steps:
22
- alias: test
33
fetcher: fake
44
queries:
5+
- path: query.sql
56
- query:
67
title: test_query
78
text: |
@@ -14,4 +15,32 @@ steps:
1415
writer_parameters:
1516
format: json
1617
fetcher_parameters:
17-
data_location: test.json
18+
data:
19+
- resource: Campaign A
20+
dimensions:
21+
name: Ad Group 1
22+
id: 101
23+
metrics:
24+
clicks: 1500
25+
cost: 250.75
26+
- resource: Campaign B
27+
dimensions:
28+
name: Ad Group 2
29+
id: 102
30+
metrics:
31+
clicks: 2300
32+
cost: 410.20
33+
- resource: Campaign C
34+
dimensions:
35+
name: Ad Group 3
36+
id: 103
37+
metrics:
38+
clicks: 800
39+
cost: 120.50
40+
- resource: Campaign A
41+
dimensions:
42+
name: Ad Group 4
43+
id: 104
44+
metrics:
45+
clicks: 3200
46+
cost: 600.00

0 commit comments

Comments (0)