Skip to content

Commit 93cb3ac

Browse files
committed
Updated API documentation.
1 parent d02eab1 commit 93cb3ac

File tree

2 files changed

+148
-5
lines changed

2 files changed

+148
-5
lines changed

docs/reference.md

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,4 +48,16 @@
4848
- make_readme_chain
4949
show_root_heading: true
5050
show_source: false
51+
separate_signature: true
52+
53+
- ::: readme_ready.types
54+
handler: python
55+
options:
56+
members:
57+
- AutodocReadmeConfig
58+
- AutodocRepoConfig
59+
- AutodocUserConfig
60+
- LLMModels
61+
show_root_heading: true
62+
show_source: false
5163
separate_signature: true

readme_ready/types.py

Lines changed: 136 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
"""
2-
Types
2+
Utility Classes for README generation
33
"""
44

55
from enum import Enum
@@ -9,7 +9,44 @@
99

1010

1111
class LLMModels(str, Enum):
12-
"""LLM Models"""
12+
"""
13+
Supported Large Language Models (LLMs) for README generation task.
14+
15+
Members:
16+
- GPT3 (str): OpenAI GPT-3.5-turbo model.
17+
- GPT4 (str): OpenAI GPT-4 model.
18+
- GPT432k (str): OpenAI GPT-4-32k model with extended context window.
19+
- TINYLLAMA_1p1B_CHAT_GGUF (str): TinyLlama 1.1B Chat model from
20+
TheBloke with GGUF format.
21+
- GOOGLE_GEMMA_2B_INSTRUCT_GGUF (str): Gemma 2B Instruction model
22+
in GGUF format by bartowski.
23+
- LLAMA2_7B_CHAT_GPTQ (str): LLaMA 2 7B Chat model using GPTQ
24+
from TheBloke.
25+
- LLAMA2_13B_CHAT_GPTQ (str): LLaMA 2 13B Chat model using GPTQ
26+
from TheBloke.
27+
- CODELLAMA_7B_INSTRUCT_GPTQ (str): CodeLlama 7B Instruction model
28+
using GPTQ from TheBloke.
29+
- CODELLAMA_13B_INSTRUCT_GPTQ (str): CodeLlama 13B Instruction model
30+
using GPTQ from TheBloke.
31+
- LLAMA2_7B_CHAT_HF (str): LLaMA 2 7B Chat model hosted on
32+
Hugging Face.
33+
- LLAMA2_13B_CHAT_HF (str): LLaMA 2 13B Chat model hosted on
34+
Hugging Face.
35+
- CODELLAMA_7B_INSTRUCT_HF (str): CodeLlama 7B Instruction model
36+
hosted on Hugging Face.
37+
- CODELLAMA_13B_INSTRUCT_HF (str): CodeLlama 13B Instruction model
38+
hosted on Hugging Face.
39+
- GOOGLE_GEMMA_2B_INSTRUCT (str): Gemma 2B Instruction model by Google.
40+
- GOOGLE_GEMMA_7B_INSTRUCT (str): Gemma 7B Instruction model by Google.
41+
- GOOGLE_CODEGEMMA_2B (str): CodeGemma 2B model by Google for
42+
code-related tasks.
43+
- GOOGLE_CODEGEMMA_7B_INSTRUCT (str): CodeGemma 7B Instruction
44+
model by Google.
45+
46+
Typical usage example:
47+
48+
model = LLMModels.LLAMA2_7B_CHAT_GPTQ
49+
"""
1350

1451
GPT3 = "gpt-3.5-turbo"
1552
GPT4 = "gpt-4"
@@ -38,22 +75,116 @@ class Priority(str, Enum):
3875

3976

4077
class AutodocReadmeConfig:
41-
"""AutodocReadmeConfig"""
78+
"""
79+
Configuration class for managing README-specific settings in
80+
the README generation process.
81+
82+
Attributes:
83+
headings (str): A comma-separated list of headings to
84+
include in the README. The input string is split by commas
85+
and stripped of extra whitespace.
86+
87+
Typical usage example:
88+
89+
readme_config = AutodocReadmeConfig(
90+
headings = "Description,Requirements"
91+
)
92+
"""
4293

4394
def __init__(self, headings: str):
4495
self.headings = [heading.strip() for heading in headings.split(",")]
4596

4697

4798
class AutodocUserConfig:
48-
"""AutodocUserConfig"""
99+
"""
100+
Configuration class for managing user-specific settings in the
101+
README generation process.
102+
103+
Attributes:
104+
llms (List[LLMModels]): A list of language models available for
105+
the user to utilize.
106+
streaming (bool): Whether to enable streaming during the
107+
documentation process. Defaults to False.
108+
109+
Typical usage example:
110+
111+
model = LLMModels.LLAMA2_7B_CHAT_GPTQ
112+
user_config = AutodocUserConfig(
113+
llms = [model]
114+
)
115+
"""
49116

50117
def __init__(self, llms: List[LLMModels], streaming: bool = False):
51118
self.llms = llms
52119
self.streaming = streaming
53120

54121

55122
class AutodocRepoConfig:
56-
"""AutodocRepoConfig"""
123+
"""
124+
Configuration class for managing the README generation process of
125+
a repository.
126+
127+
Attributes:
128+
name (str): The name of the repository.
129+
repository_url (str): The URL of the repository to be documented.
130+
root (str): The root directory of the repository.
131+
output (str): The directory where the generated README will be stored.
132+
llms (List[LLMModels]): A list of language models to be used
133+
in the documentation process.
134+
priority (Priority): The priority level for processing tasks.
135+
max_concurrent_calls (int): The maximum number of concurrent calls
136+
allowed during processing.
137+
add_questions (bool): Whether to include generated questions in the
138+
documentation.
139+
ignore (List[str]): A list of files or directories patterns to be
140+
excluded from documentation.
141+
file_prompt (str): The template or prompt to process individual files.
142+
folder_prompt (str): The template or prompt to process folders.
143+
chat_prompt (str): The template or prompt for chatbot interactions.
144+
content_type (str): The type of content being documented
145+
(e.g., code, docs).
146+
target_audience (str): The intended audience for the documentation.
147+
link_hosted (bool): Whether to generate hosted links in the
148+
documentation.
149+
peft_model_path (str | None): Path to a PEFT
150+
(Parameter-Efficient Fine-Tuning) model, if applicable.
151+
device (str | None): The device to be used for processing
152+
(e.g., "cpu", "auto").
153+
154+
Typical usage example:
155+
156+
repo_config = AutodocRepoConfig(
157+
name = "<REPOSITORY_NAME>",
158+
root = "<REPOSITORY_ROOT_DIR_PATH>",
159+
repository_url = "<REPOSITORY_URL>",
160+
output = "<OUTPUT_DIR_PATH>",
161+
llms = [model],
162+
peft_model_path = "<PEFT_MODEL_NAME_OR_PATH>",
163+
ignore = [
164+
".*",
165+
"*package-lock.json",
166+
"*package.json",
167+
"node_modules",
168+
"*dist*",
169+
"*build*",
170+
"*test*",
171+
"*.svg",
172+
"*.md",
173+
"*.mdx",
174+
"*.toml"
175+
],
176+
file_prompt = "",
177+
folder_prompt = "",
178+
chat_prompt = "",
179+
content_type = "docs",
180+
target_audience = "smart developer",
181+
link_hosted = True,
182+
priority = None,
183+
max_concurrent_calls = 50,
184+
add_questions = False,
185+
device = "auto",
186+
)
187+
"""
57188

58189
def __init__(
59190
self,

0 commit comments

Comments
 (0)