import os
import shutil

import git
import streamlit as st
from streamlit_ace import st_ace
from pymarkdown.api import PyMarkdownApi

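# Third-party deps (assumptions noted): `git` is GitPython, presumably used to
# clone the target repository; `streamlit_ace` embeds the Ace code editor as a
# Streamlit component; `pymarkdown` is the pymarkdownlnt Markdown linter.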
# App title
st.set_page_config(page_title="Readme Generator", layout="wide", page_icon=":material/graphic_eq:")

# Credentials and Configuration
with st.sidebar:
    st.title(':rainbow[Readme Generator]')
    st.write('This is a README generator app that uses open-source large language models.')

    openai_api_key = "dummy"
    # openai_api_key = st.text_input('Enter OpenAI API Key:', type='password')

    # ... (lines not shown) ...
    from doc_generator.types import AutodocRepoConfig, AutodocUserConfig, LLMModels

    with st.form("my_form"):
        st.subheader('Model')
        options = [
            LLMModels.TINYLLAMA_1p1B_CHAT_GGUF.value,
            LLMModels.LLAMA2_7B_CHAT_GPTQ.value,
            # ... (more model options not shown) ...
            LLMModels.CODELLAMA_7B_INSTRUCT_HF.value,
            LLMModels.CODELLAMA_13B_INSTRUCT_HF.value,
            LLMModels.GOOGLE_GEMMA_2B_INSTRUCT.value,
            LLMModels.GOOGLE_GEMMA_7B_INSTRUCT.value,
            LLMModels.GOOGLE_GEMMA_2B_INSTRUCT_GGUF.value
        ]
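        # (Assumption: the *_GGUF entries are quantized llama.cpp-format builds,
        # typically the sensible choice when the device below is "cpu".)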
        llm = st.selectbox('Choose a model', options, key='llm')
        device = st.selectbox('Choose a device', ["cpu", "gpu"], key='device')
        st.subheader('Parameters')
        temperature = st.slider('temperature', min_value=0.01, max_value=1.0, value=0.1, step=0.01)
        top_p = st.slider('top_p', min_value=0.01, max_value=1.0, value=0.9, step=0.01)
        max_length = st.slider('max_length', min_value=512, max_value=4096, value=2048, step=512)
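        # temperature and top_p control sampling randomness; max_length caps the
        # generated sequence (assumed to be forwarded to the model's generation
        # settings inside doc_generator).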
5749
        st.subheader('Repository Configuration')
        name = st.text_input(label='Name', placeholder="example_repo")
        project_url = st.text_input(label='GitHub Link', placeholder="https://github.com/username/example_repo")
        project_root = os.path.join(".", name)
        output_dir = os.path.join("output", name)
        # is_peft = st.checkbox(label="Is finetuned?")
        # peft_model_path = st.text_input(label='Finetuned Model Path', placeholder="./output/model/")
        submitted = st.form_submit_button("Setup")
        if submitted:
            st.toast('Indexing repository...')
            try:
                # ... (repository cloning and config setup not shown) ...
                match llm:
                    # ... (cases for the other models not shown) ...
                    case LLMModels.GOOGLE_GEMMA_2B_INSTRUCT.value:
                        model = LLMModels.GOOGLE_GEMMA_2B_INSTRUCT
                    case LLMModels.GOOGLE_GEMMA_7B_INSTRUCT.value:
                        model = LLMModels.GOOGLE_GEMMA_7B_INSTRUCT
                    case LLMModels.GOOGLE_GEMMA_2B_INSTRUCT_GGUF.value:
                        model = LLMModels.GOOGLE_GEMMA_2B_INSTRUCT_GGUF
                    case _:
                        model = LLMModels.LLAMA2_7B_CHAT_HF
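                # The selectbox yields the enum's string value, so it is mapped
                # back to an LLMModels member above; unrecognized values fall
                # back to LLAMA2_7B_CHAT_HF.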
                # ... (index generation, chain setup, and error handling not shown) ...
            st.toast('Repository indexing done.')

st.markdown('📖 Learn more about this app [here](https://github.com/souradipp76/ReadMeReady)!')

left, right = st.columns(2, vertical_alignment="top")

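# Two-pane layout: the chat assistant on the left, the README editor on the right.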
with left:
    st.title("Chat")
    history = st.container(height=1000)
    # Store LLM-generated responses
    if "messages" not in st.session_state.keys():
        st.session_state.messages = [{"role": "assistant", "content": "Provide a heading starting with ## to generate a README section."}]
182178
    # Display the chat history
    for message in st.session_state.messages:
        history.chat_message(message["role"]).write(message["content"])

    def clear_chat_history():
        st.session_state.messages = [{"role": "assistant", "content": "Provide a heading starting with ## to generate a README section."}]

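    # on_click callbacks run at the start of the next rerun, before the history
    # container is redrawn, so the reset greeting shows up immediately.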
    st.sidebar.button('Clear Chat History', on_click=clear_chat_history)

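    # st.chat_input returns None until the user submits, so the walrus
    # assignment below both captures the prompt and gates on it.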
    # User-provided prompt
    if prompt := st.chat_input(disabled=not openai_api_key):
        st.session_state.messages.append({"role": "user", "content": prompt})
        history.chat_message("user").write(prompt)

    # Generate a new response if the last message is not from the assistant
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.spinner("Thinking..."):
            if "chain" not in st.session_state.keys():
                full_response = 'Please set up the model and repository first!'
            else:
                chain = st.session_state.chain
                full_response = ''
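                # Stream the answer incrementally; the retrieval chain (built at
                # setup time) is assumed to emit dict chunks with the generated
                # text under the "answer" key, LangChain-style.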
                for chunk in chain.stream({'input': prompt}):
                    print(chunk)
                    if answer_chunk := chunk.get("answer"):
                        full_response += answer_chunk
            history.chat_message("assistant").markdown(full_response)

            message = {"role": "assistant", "content": full_response}
            st.session_state.messages.append(message)

with right:
    # Markdown editor
    st.title("Readme Editor")
    default_readme_content = "# " + name
    if "readme_content" not in st.session_state.keys():
        st.session_state.readme_content = default_readme_content

    st.session_state.readme_content = st_ace(
        value=st.session_state.readme_content,
        height=850,
        language="markdown",
        theme="solarized_dark",
        keybinding="vscode",
        font_size=14,
        tab_size=4,
        show_gutter=True,
        show_print_margin=False,
        wrap=True,
        auto_update=True,
        readonly=False,
        min_lines=45,
        key="ace",
    )

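    # auto_update=True pushes the buffer back to Streamlit on every edit; with
    # the component default (False) it only syncs when changes are applied.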
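    # Lint the current buffer with pymarkdown; returns "" when clean, otherwise
    # one "Line: Col: rule" message per scan failure.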
    def validate_markdown():
        error_str = ""
        errors = PyMarkdownApi().scan_string(st.session_state.readme_content)
        if len(errors.scan_failures) > 0:
            print(errors.scan_failures)
            error_str = "\n".join([
                f'Line {failure.line_number}: Col {failure.column_number}: '
                f'{failure.rule_id}: {failure.rule_description} '
                f'{failure.extra_error_information} ({failure.rule_name})'
                for failure in errors.scan_failures
            ])
        return error_str
241241
    col1, col2, col3, col4 = st.columns(4, vertical_alignment="center")
245243
    with col1:
        if st.button("Validate", use_container_width=True):
            st.session_state.error_str = validate_markdown()

    with col2:
        if st.download_button(
            label="Download",
            data=st.session_state.readme_content,
            file_name="README.md",
            mime="text/markdown",
            use_container_width=True,
        ):
            col3.success("Downloaded")

    with st.expander("Validation", expanded=False):
        validate_container = st.empty()
        if "error_str" in st.session_state.keys():
            validate_container.text_area(
                "Results",
                value=st.session_state.error_str,
                height=150,
            )

    with st.expander("Preview", expanded=False):
        st.markdown(st.session_state.readme_content, unsafe_allow_html=True)