|
1 | 1 | """ |
2 | | -Types |
| 2 | +Utility Classes for README generation |
3 | 3 | """ |
4 | 4 |
|
5 | 5 | from enum import Enum |
|
9 | 9 |
|
10 | 10 |
|
11 | 11 | class LLMModels(str, Enum): |
12 | | - """LLM Models""" |
| 12 | + """ |
| 13 | + Supported Large Language Models (LLMs) for README generation task. |
| 14 | +
|
| 15 | + Members: |
| 16 | + - GPT3 (str): OpenAI GPT-3.5-turbo model. |
| 17 | + - GPT4 (str): OpenAI GPT-4 model. |
| 18 | + - GPT432k (str): OpenAI GPT-4-32k model with extended context window. |
| 19 | + - TINYLLAMA_1p1B_CHAT_GGUF (str): TinyLlama 1.1B Chat model from |
| 20 | + TheBloke with GGUF format. |
| 21 | + - GOOGLE_GEMMA_2B_INSTRUCT_GGUF (str): Gemma 2B Instruction model |
| 22 | + in GGUF format by bartowski. |
| 23 | + - LLAMA2_7B_CHAT_GPTQ (str): LLaMA 2 7B Chat model using GPTQ |
| 24 | + from TheBloke. |
| 25 | + - LLAMA2_13B_CHAT_GPTQ (str): LLaMA 2 13B Chat model using GPTQ |
| 26 | + from TheBloke. |
| 27 | + - CODELLAMA_7B_INSTRUCT_GPTQ (str): CodeLlama 7B Instruction model |
| 28 | + using GPTQ from TheBloke. |
| 29 | + - CODELLAMA_13B_INSTRUCT_GPTQ (str): CodeLlama 13B Instruction model |
| 30 | + using GPTQ from TheBloke. |
| 31 | + - LLAMA2_7B_CHAT_HF (str): LLaMA 2 7B Chat model hosted on |
| 32 | + Hugging Face. |
| 33 | + - LLAMA2_13B_CHAT_HF (str): LLaMA 2 13B Chat model hosted on |
| 34 | + Hugging Face. |
| 35 | + - CODELLAMA_7B_INSTRUCT_HF (str): CodeLlama 7B Instruction model |
| 36 | + hosted on Hugging Face. |
| 37 | + - CODELLAMA_13B_INSTRUCT_HF (str): CodeLlama 13B Instruction model |
| 38 | + hosted on Hugging Face. |
| 39 | + - GOOGLE_GEMMA_2B_INSTRUCT (str): Gemma 2B Instruction model by Google. |
| 40 | + - GOOGLE_GEMMA_7B_INSTRUCT (str): Gemma 7B Instruction model by Google. |
| 41 | + - GOOGLE_CODEGEMMA_2B (str): CodeGemma 2B model by Google for |
| 42 | + code-related tasks. |
| 43 | + - GOOGLE_CODEGEMMA_7B_INSTRUCT (str): CodeGemma 7B Instruction |
| 44 | + model by Google. |
| 45 | +
|
| 46 | + Typical usage example: |
| 47 | +
|
| 48 | + model = LLMModels.LLAMA2_7B_CHAT_GPTQ |
| 49 | + """ |
13 | 50 |
|
14 | 51 | GPT3 = "gpt-3.5-turbo" |
15 | 52 | GPT4 = "gpt-4" |
@@ -38,22 +75,116 @@ class Priority(str, Enum): |
38 | 75 |
|
39 | 76 |
|
40 | 77 | class AutodocReadmeConfig: |
41 | | - """AutodocReadmeConfig""" |
| 78 | + """ |
| 79 | + Configuration class for managing README-specific settings in |
| 80 | + the README generation process. |
| 81 | +
|
| 82 | + Attributes: |
| 83 | + headings (str): A comma-separated list of headings to |
| 84 | + include in the README. The input string is split by commas |
| 85 | + and stripped of extra whitespace. |
| 86 | +
|
| 87 | + Typical usage example: |
| 88 | +
|
| 89 | + readme_config = AutodocReadmeConfig( |
| 90 | + headings = "Description,Requirements" |
| 91 | + ) |
| 92 | + """ |
42 | 93 |
|
43 | 94 | def __init__(self, headings: str): |
44 | 95 | self.headings = [heading.strip() for heading in headings.split(",")] |
45 | 96 |
|
46 | 97 |
|
47 | 98 | class AutodocUserConfig: |
48 | | - """AutodocUserConfig""" |
| 99 | + """ |
| 100 | + Configuration class for managing user-specific settings in the |
| 101 | + README generation process. |
| 102 | +
|
| 103 | + Attributes: |
| 104 | + llms (List[LLMModels]): A list of language models available for |
| 105 | + the user to utilize. |
| 106 | + streaming (bool): Whether to enable streaming during the |
| 107 | + documentation process. Defaults to False. |
| 108 | +
|
| 109 | + Typical usage example: |
| 110 | +
|
| 111 | + model = LLMModels.LLAMA2_7B_CHAT_GPTQ |
| 112 | + user_config = AutodocUserConfig( |
| 113 | + llms = [model] |
| 114 | + ) |
| 115 | + """ |
49 | 116 |
|
50 | 117 | def __init__(self, llms: List[LLMModels], streaming: bool = False): |
51 | 118 | self.llms = llms |
52 | 119 | self.streaming = streaming |
53 | 120 |
|
54 | 121 |
|
55 | 122 | class AutodocRepoConfig: |
56 | | - """AutodocRepoConfig""" |
| 123 | + """ |
| 124 | + Configuration class for managing the README generation process of |
| 125 | + a repository. |
| 126 | +
|
| 127 | + Attributes: |
| 128 | + name (str): The name of the repository. |
| 129 | + repository_url (str): The URL of the repository to be documented. |
| 130 | + root (str): The root directory of the repository. |
| 131 | + output (str): The directory where the generated README will be stored. |
| 132 | + llms (List[LLMModels]): A list of language models to be used |
| 133 | + in the documentation process. |
| 134 | + priority (Priority): The priority level for processing tasks. |
| 135 | + max_concurrent_calls (int): The maximum number of concurrent calls |
| 136 | + allowed during processing. |
| 137 | + add_questions (bool): Whether to include generated questions in the |
| 138 | + documentation. |
| 139 | + ignore (List[str]): A list of files or directories patterns to be |
| 140 | + excluded from documentation. |
| 141 | + file_prompt (str): The template or prompt to process individual files. |
| 142 | + folder_prompt (str): The template or prompt to process folders. |
| 143 | + chat_prompt (str): The template or prompt for chatbot interactions. |
| 144 | + content_type (str): The type of content being documented |
| 145 | + (e.g., code, docs). |
| 146 | + target_audience (str): The intended audience for the documentation. |
| 147 | + link_hosted (bool): Whether to generate hosted links in the |
| 148 | + documentation. |
| 149 | + peft_model_path (str | None): Path to a PEFT |
| 150 | + (Parameter-Efficient Fine-Tuning) model, if applicable. |
| 151 | + device (str | None): The device to be used for processing |
| 152 | + (e.g., "cpu", "auto"). |
| 153 | +
|
| 154 | + Typical usage example: |
| 155 | +
|
| 156 | + repo_config = AutodocRepoConfig( |
| 157 | + name = "<REPOSITORY_NAME>", |
| 158 | + root = "<REPOSITORY_ROOT_DIR_PATH>", |
| 159 | + repository_url = "<REPOSITORY_URL>", |
| 160 | + output = "<OUTPUT_DIR_PATH>", |
| 161 | + llms = [model], |
| 162 | + peft_model_path = "<PEFT_MODEL_NAME_OR_PATH>", |
| 163 | + ignore = [ |
| 164 | + ".*", |
| 165 | + "*package-lock.json", |
| 166 | + "*package.json", |
| 167 | + "node_modules", |
| 168 | + "*dist*", |
| 169 | + "*build*", |
| 170 | + "*test*", |
| 171 | + "*.svg", |
| 172 | + "*.md", |
| 173 | + "*.mdx", |
| 174 | + "*.toml" |
| 175 | + ], |
| 176 | + file_prompt = "", |
| 177 | + folder_prompt = "", |
| 178 | + chat_prompt = "", |
| 179 | + content_type = "docs", |
| 180 | + target_audience = "smart developer", |
| 181 | + link_hosted = True, |
| 182 | + priority = None, |
| 183 | + max_concurrent_calls = 50, |
| 184 | + add_questions = False, |
| 185 | + device = "auto", |
| 186 | + ) |
| 187 | + """ |
57 | 188 |
|
58 | 189 | def __init__( |
59 | 190 | self, |
|
0 commit comments