-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathgenerate_2k.py
More file actions
107 lines (88 loc) · 3.67 KB
/
generate_2k.py
File metadata and controls
107 lines (88 loc) · 3.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
"""
Generate 2000+ samples in a separate dataset file
Usage: python generate_2k.py [--quality-check]
"""
import os
# Disable proxy before importing any modules
os.environ.pop('HTTP_PROXY', None)
os.environ.pop('HTTPS_PROXY', None)
os.environ.pop('http_proxy', None)
os.environ.pop('https_proxy', None)
os.environ['NO_PROXY'] = '*'
import argparse
from pathlib import Path
from pipeline.runner import PipelineRunner
from pipeline.persistence import PersistenceManager
from config.settings import settings
import json
# Create a separate dataset for 2k generation
DATASET_2K = Path("data/dataset_2k.jsonl")
CURSOR_2K = Path("data/cursor_2k.json")
def setup_2k_dataset():
    """Redirect the pipeline's dataset/cursor paths to the 2k files.

    Captures the paths currently configured on ``settings`` and returns
    them as a ``(dataset_path, cursor_path)`` pair so the caller can put
    them back afterwards. Also makes sure the output directory exists.
    """
    # Remember where settings pointed before we touch anything.
    previous = (settings.dataset_path, settings.cursor_path)

    # Redirect every pipeline component that reads settings to the 2k files.
    settings.dataset_path = DATASET_2K
    settings.cursor_path = CURSOR_2K

    # The output directory must exist before the first write.
    DATASET_2K.parent.mkdir(parents=True, exist_ok=True)

    return previous
def restore_original_dataset(original_dataset, original_cursor):
    """Point ``settings`` back at the paths captured by setup_2k_dataset.

    Counterpart of :func:`setup_2k_dataset`; call with the pair that
    function returned.
    """
    settings.cursor_path = original_cursor
    settings.dataset_path = original_dataset
def main():
    """Entry point: generate samples into the separate 2k dataset file.

    Parses CLI flags, temporarily redirects the pipeline's dataset/cursor
    paths to the 2k files, runs the pipeline until the target count is
    reached, prints a summary, and always restores the original paths —
    even if generation fails.
    """
    parser = argparse.ArgumentParser(description="Generate 2000+ samples in a separate dataset file")
    parser.add_argument("--quality-check", action="store_true",
                        help="Enable LLM-based content quality validation (cheaper than critique, ~$2-5 for 2000 samples)")
    # Generalized: the sample target used to be hard-coded to 2000 in three
    # places; it is now a flag with the same default, so existing invocations
    # behave identically.
    parser.add_argument("--target", type=int, default=2000,
                        help="Number of samples to generate (default: 2000)")
    args = parser.parse_args()

    # CLI flag takes precedence; otherwise fall back to the configured setting.
    enable_quality_check = args.quality_check or settings.enable_quality_check

    print("=" * 70)
    print("Generating 2000+ samples in separate dataset file")
    print("=" * 70)
    print(f"Output file: {DATASET_2K}")
    print(f"Cursor file: {CURSOR_2K}")
    print()

    # Show critique configuration
    critique_status = "ENABLED" if settings.enable_critique else "DISABLED (saves ~98% cost)"
    quality_check_status = "ENABLED" if enable_quality_check else "DISABLED"
    print(f"Configuration: Critique={critique_status}, Quality Check={quality_check_status}")
    if not settings.enable_critique:
        print("  ⚠️ Critique disabled by default - saves ~$870 for 2000 samples")
        print("  Set ENABLE_CRITIQUE=true in .env to enable")
    if enable_quality_check:
        print("  ✓ Quality check enabled - validates content quality (~$2-5 for 2000 samples)")
    print()

    # Redirect the pipeline at the 2k files; keep the originals for restore.
    original_dataset, original_cursor = setup_2k_dataset()
    try:
        # Check current progress (cursor counts samples already written).
        persistence = PersistenceManager()
        current = persistence.load_cursor()
        if current > 0:
            print(f"Resuming from {current} samples...")
        else:
            print("Starting fresh 2k dataset generation...")

        # Generate up to the requested number of samples.
        print(f"Target: {args.target} samples")
        print("=" * 70)
        print()
        runner = PipelineRunner(enable_quality_check=enable_quality_check)
        runner.run(target_count=args.target, show_progress=True)

        # Final status
        final_count = persistence.load_cursor()
        print()
        print("=" * 70)
        print("[OK] Generation complete!")
        print(f"  Total samples: {final_count}")
        print(f"  Dataset file: {DATASET_2K}")
        # Guard the stat(): the file may not exist if nothing was written.
        if DATASET_2K.exists():
            print(f"  File size: {DATASET_2K.stat().st_size / 1024 / 1024:.2f} MB")
        print("=" * 70)
    finally:
        # Always restore original paths
        restore_original_dataset(original_dataset, original_cursor)
if __name__ == "__main__":
main()