#!/usr/bin/env python

# How to run:
# python helper_scripts/generate_api_functional_gherkin_test.py tests/test_features/ApiTesting/api_spec.yml --output=helper_scripts/output --model=gpt-4o --number_of_testcase=50
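# Requires the OPENAI_API_KEY environment variable to be set.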

import argparse
import os
import sys
from typing import List

from openai import OpenAI


def read_openapi_spec(file_path: str) -> str:
    """
    Reads the OpenAPI specification from a file.

    Args:
        file_path (str): The path to the OpenAPI spec file.

    Returns:
        str: The content of the OpenAPI spec file.
    """
    with open(file_path, "r", encoding="utf-8") as f:
        content = f.read()
    return content


def prepare_prompt(p2: str, openapi_spec: str) -> str:
    """
    Prepares the prompt for the OpenAI API.

    Args:
        p2 (str): The initial part of the prompt.
        openapi_spec (str): The OpenAPI specification.

    Returns:
        str: The complete prompt.
    """
    prompt = f"{p2}\n\nOpenAPI Specification:\n{openapi_spec}"
    return prompt

def generate_test_cases(prompt: str, model: str) -> str:
    """
    Generates test cases using the OpenAI API.

    Args:
        prompt (str): The prompt to send to the OpenAI API.
        model (str): The model to use for the OpenAI API.

    Returns:
        str: The generated test cases.
    """
    client = OpenAI()
    if "o1" in model:
        # o1-family models only support the default temperature, so it is omitted.
        completion = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
        )
    else:
        completion = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,
        )

    response = completion.choices[0].message.content
    print(f"Response from OpenAI API: {response}")
    return response


def ensure_output_folder(output_folder: str) -> None:
    """
    Ensures that the output folder exists.

    Args:
        output_folder (str): The path to the output folder.
    """
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)


def split_features(response_text: str) -> List[str]:
    """
    Splits the response text into individual features.

    Args:
        response_text (str): The response text containing multiple features.

    Returns:
        List[str]: A list of individual features.
    """
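    # For example, a response containing two blocks that each start with
    # "Feature:" is split into two strings, one per feature.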
    features = []
    current_feature = ""
    lines = response_text.split("\n")
    for line in lines:
        if line.strip().startswith("Feature:"):
            if current_feature:
                features.append(current_feature.strip())
                current_feature = ""
        current_feature += line + "\n"
    if current_feature.strip():
        features.append(current_feature.strip())
    return features


def get_base_name(file_path: str) -> str:
    """
    Gets the base name of a file without the extension.

    Args:
        file_path (str): The path to the file.

    Returns:
        str: The base name of the file.
    """
    base_name = os.path.splitext(os.path.basename(file_path))[0]
    return base_name


def create_output_subfolder(output_folder: str, base_name: str) -> str:
    """
    Creates a subfolder in the output folder.

    Args:
        output_folder (str): The path to the output folder.
        base_name (str): The base name for the subfolder.

    Returns:
        str: The path to the created subfolder.
    """
    subfolder_path = os.path.join(output_folder, base_name)
    if not os.path.exists(subfolder_path):
        os.makedirs(subfolder_path)
    return subfolder_path


def write_feature_files(features: List[str], subfolder_path: str) -> None:
    """
    Writes the feature files to the subfolder.

    Args:
        features (List[str]): A list of features to write.
        subfolder_path (str): The path to the subfolder.
    """
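    # Illustrative naming: a block starting with "Feature: User Login" is written
    # to "User_Login.feature"; otherwise "feature_<n>.feature" is used.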
    for idx, feature in enumerate(features):
        # Extract the feature name if possible
        first_line = feature.split("\n")[0]
        if first_line.startswith("Feature:"):
            feature_name = first_line[len("Feature:") :].strip().replace(" ", "_")
            file_name = f"{feature_name}.feature"
        else:
            file_name = f"feature_{idx + 1}.feature"
        file_path = os.path.join(subfolder_path, file_name)
        with open(file_path, "w", encoding="utf-8") as f:
            # Strip any markdown code-fence markers the model may have added.
            feature = feature.replace("```", "").replace("gherkin", "")
            f.write(feature)


def main() -> None:
    """
    The main function to generate Gherkin test cases from OpenAPI spec files.
    """
    p2 = """Analyse (thoroughly examine and break down) the given API specification to produce detailed Gherkin test cases following the provided testing plan. Focus on creating positive, negative, and business-as-usual scenarios. Ensure your test cases validate data correctness, data types, null value handling, and adherence to the API specification. Cover all testing areas: functional, positive, negative, error handling, and integration. Be extremely direct, clear, DETAILED and corrective in your approach. Your final output should be the strict, detailed Gherkin test cases, ready for execution. Generate a broad spread of test cases, at least {number_of_testcase}. For each combination of data types, fields, and null values, generate test cases.
    NEVER PUT ### in the test cases. Always write the test cases in Gherkin format. Follow the example to generate the output.
    ONLY RETURN THE GHERKIN FILES, NO HEADING OR EXPLANATION NEEDED.
    Example output:

    Feature: feature details 1
      Scenario: Scenario_details
        Given ...
        When ...
        Then ...
      Scenario: Scenario_details
        Given ...
        When ...
        Then ...
    Feature: feature details 2
      Scenario: Scenario_details
        Given ...
        When ...
        Then ...
    Feature: feature details 3
      Scenario: Scenario_details
        Given ...
        When ...
        And ...
        Then ...
        And ...
    """

    parser = argparse.ArgumentParser(
        description="Generate Gherkin test cases from OpenAPI spec files."
    )
    parser.add_argument(
        "input_files",
        metavar="input_files",
        type=str,
        nargs="+",
        help="One or more OpenAPI spec files (YAML or JSON).",
    )
    parser.add_argument(
        "--output",
        metavar="output",
        type=str,
        required=True,
        help="Output folder path where feature files will be generated.",
    )
    parser.add_argument(
        "--model",
        metavar="model",
        type=str,
        default="o1-preview",
        help="The model to use for the OpenAI API (default: o1-preview).",
    )
    parser.add_argument(
        "--number_of_testcase",
        metavar="number_of_testcase",
        type=int,
        default=100,
        help="The number of test cases to generate (default: 100).",
    )
    args = parser.parse_args()

    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        print("Error: OPENAI_API_KEY environment variable not set.")
        sys.exit(1)

    # Fill the requested number of test cases into the prompt template once,
    # before processing any files.
    p2 = p2.replace("{number_of_testcase}", str(args.number_of_testcase))

    ensure_output_folder(args.output)

    for file_path in args.input_files:
        print(f"Processing file: {file_path}")
        openapi_spec = read_openapi_spec(file_path)
        prompt = prepare_prompt(p2, openapi_spec)
        try:
            test_cases = generate_test_cases(prompt, args.model)
        except Exception as e:
            print(f"Error generating test cases for {file_path}: {e}")
            continue
        features = split_features(test_cases)
        base_name = get_base_name(file_path)
        subfolder_path = create_output_subfolder(args.output, base_name)
        write_feature_files(features, subfolder_path)
        print(f"Generated {len(features)} feature files in {subfolder_path}")


if __name__ == "__main__":
    main()