Skip to content

Commit 9a33bb3

Browse files
committed
add --without-html option to skip HTML output
1 parent 1e57601 commit 9a33bb3

File tree

2 files changed

+28
-10
lines changed

2 files changed

+28
-10
lines changed

scatter/pipeline/hierarchical_main.py

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,9 @@
11
import argparse
22
import sys
33

4-
from hierarchical_utils import initialization, run_step, termination
4+
from hierarchical_utils import initialization
5+
from hierarchical_utils import run_step
6+
from hierarchical_utils import termination
57
from steps.embedding import embedding
68
from steps.extraction import extraction
79
from steps.hierarchical_aggregation import hierarchical_aggregation
@@ -11,6 +13,7 @@
1113
from steps.hierarchical_overview import hierarchical_overview
1214
from steps.hierarchical_visualization import hierarchical_visualization
1315

16+
1417
def parse_arguments():
1518
parser = argparse.ArgumentParser(
1619
description="Run the annotation pipeline with optional flags."
@@ -35,6 +38,12 @@ def parse_arguments():
3538
action="store_true",
3639
help="Skip the interactive confirmation prompt and run pipeline immediately.",
3740
)
41+
42+
parser.add_argument(
43+
"--without-html",
44+
action="store_true",
45+
help="Skip the html output.",
46+
)
3847
return parser.parse_args()
3948

4049

@@ -49,6 +58,8 @@ def main():
4958
new_argv.extend(["-o", args.only])
5059
if args.skip_interaction:
5160
new_argv.append("-skip-interaction")
61+
if args.without_html:
62+
new_argv.append("--without-html")
5263

5364
config = initialization(new_argv)
5465

scatter/pipeline/hierarchical_utils.py

Lines changed: 16 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,17 @@
11
import json
22
import os
33
import traceback
4-
from datetime import datetime, timedelta
4+
from datetime import datetime
5+
from datetime import timedelta
56

67
with open("./hierarchical_specs.json") as f:
78
specs = json.load(f)
89

910

1011
def validate_config(config):
11-
if not "input" in config:
12+
if "input" not in config:
1213
raise Exception("Missing required field 'input' in config")
13-
if not "question" in config:
14+
if "question" not in config:
1415
raise Exception("Missing required field 'question' in config")
1516
valid_fields = ["input", "question", "model", "name", "intro"]
1617
step_names = [x["step"] for x in specs]
@@ -63,7 +64,11 @@ def different_params(step):
6364
run = True
6465
reason = None
6566
found_prev = len([x for x in previous_jobs if x["step"] == step["step"]]) > 0
66-
if config.get("force", False):
67+
68+
if stepname == "hierarchical_visualization" and config.get("without-html", False):
69+
reason = "skipping html output"
70+
run = False
71+
elif config.get("force", False):
6772
reason = "forced with -f"
6873
elif config.get("only", None) != None and config["only"] != stepname:
6974
run = False
@@ -112,6 +117,8 @@ def initialization(sysargv):
112117
config["only"] = sysargv[i + 1]
113118
if option == "-skip-interaction":
114119
config["skip-interaction"] = True
120+
if option == "--without-html":
121+
config["without-html"] = True
115122

116123
output_dir = config["output_dir"]
117124

@@ -131,18 +138,18 @@ def initialization(sysargv):
131138
print("Hum, the last Job crashed a while ago...Proceeding!")
132139

133140
# set default LLM model
134-
if not "model" in config:
141+
if "model" not in config:
135142
config["model"] = "gpt-4o-mini"
136143

137144
# prepare configs for each jobs
138145
for step_spec in specs:
139146
step = step_spec["step"]
140-
if not step in config:
147+
if step not in config:
141148
config[step] = {}
142149
# set default option values
143150
if "options" in step_spec:
144151
for key, value in step_spec["options"].items():
145-
if not key in config[step]:
152+
if key not in config[step]:
146153
config[step][key] = value
147154
# try and include source code
148155
try:
@@ -153,12 +160,12 @@ def initialization(sysargv):
153160
# resolve common options for llm-based jobs
154161
if step_spec.get("use_llm", False):
155162
# resolve prompt
156-
if not "prompt" in config.get(step):
163+
if "prompt" not in config.get(step):
157164
file = config.get(step).get("prompt_file", "default")
158165
with open(f"prompts/{step}/{file}.txt") as f:
159166
config[step]["prompt"] = f.read()
160167
# resolve model
161-
if not "model" in config.get(step):
168+
if "model" not in config.get(step):
162169
if "model" in config:
163170
config[step]["model"] = config["model"]
164171

0 commit comments

Comments
 (0)