Commit 19b19c3

removed select state

1 parent 35f3a5a

File tree

1 file changed: 4 additions, 6 deletions


script.py

Lines changed: 4 additions & 6 deletions
@@ -26,7 +26,7 @@
     "usePR": False,
     "pUSER": 'USER:',
     "pBOT": 'ASSISTANT:',
-    "selectA": [0,0]
+    # "selectA": [0,0]
 }

 input_elements = ['max_new_tokens', 'seed', 'temperature', 'top_p', 'top_k', 'typical_p', 'epsilon_cutoff', 'eta_cutoff', 'repetition_penalty', 'encoder_repetition_penalty', 'no_repeat_ngram_size', 'min_length', 'do_sample', 'penalty_alpha', 'num_beams', 'length_penalty', 'early_stopping', 'mirostat_mode', 'mirostat_tau', 'mirostat_eta', 'add_bos_token', 'ban_eos_token', 'truncation_length', 'custom_stopping_strings', 'skip_special_tokens', 'preset_menu', 'stream', 'tfs', 'top_a']
@@ -220,7 +220,7 @@ def truncate_prompt(question, prompt):
     print(f"prompt:{prompt}")
     return prompt, truncated_question

-def generate_reply_wrapper_enriched(question, state, selectState, summary, generation_template, eos_token=None, stopping_strings=None):
+def generate_reply_wrapper_enriched(question, state, summary, generation_template, eos_token=None, stopping_strings=None):
     prompt = generate_prompt(question, summary, generation_template)

     prompt, truncated_question = truncate_prompt(question, prompt)
@@ -360,7 +360,6 @@ def load_session(file):


 def ui():
-    params['selectA'] = [0,0]
     with gr.Row():
         with gr.Column():
             with gr.Row():
@@ -469,7 +468,6 @@ def ui():
     summarisation_parameters['truncation_length'] = gr.Slider(value=default_req_params['truncation_length'], minimum=shared.settings['truncation_length_min'], maximum=shared.settings['truncation_length_max'], step=1, label='Truncate the prompt up to this length', info='The leftmost tokens are removed if the prompt exceeds this length. Most models require this to be at most 2048.')


-    selectStateA = gr.State('selectA')
     last_input = gr.State('last_input')
     summarisation_parameters['interface_state'] = shared.gradio['interface_state']

@@ -482,8 +480,8 @@
     processing_chapter_str = gr.State('ℹ Processing Chapter')
     chapter_processed_successfully_str = gr.State('✔ Chapter Processed Successfully')

-    input_paramsA = [text_boxA,shared.gradio['interface_state'],selectStateA, text_box_StorySummary, generation_template_dropdown]
-    last_input_params = [last_input,shared.gradio['interface_state'],selectStateA, text_box_StorySummary, generation_template_dropdown]
+    input_paramsA = [text_boxA,shared.gradio['interface_state'], text_box_StorySummary, generation_template_dropdown]
+    last_input_params = [last_input,shared.gradio['interface_state'], text_box_StorySummary, generation_template_dropdown]
     output_paramsA =[text_boxA, text_box_LatestContext, token_summary_label1]

     generate_btn.click(copy_string, generating_text_str, token_summary_label1).then(fn = modules_ui.gather_interface_values, inputs= [shared.gradio[k] for k in shared.input_elements], outputs = shared.gradio['interface_state']).then(
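
For context, the pattern this commit removes is a Gradio gr.State component placed in an event's inputs list so the click callback receives extra per-session data alongside the widget values. Below is a minimal standalone sketch of that pattern, with hypothetical names (echo, demo, select_state) that are not from this repository:

import gradio as gr

def echo(text, select_range):
    # select_range arrives as the current value of the gr.State input,
    # analogous to how selectStateA fed generate_reply_wrapper_enriched
    return f"{text} (selection: {select_range})"

with gr.Blocks() as demo:
    text_box = gr.Textbox(label="Input")
    out_box = gr.Textbox(label="Output")
    select_state = gr.State([0, 0])  # per-session state, like the removed 'selectA'
    btn = gr.Button("Generate")
    # The state sits in the inputs list exactly where selectStateA did;
    # this commit deletes that slot and the matching callback parameter.
    btn.click(echo, inputs=[text_box, select_state], outputs=out_box)

demo.launch()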

0 commit comments
