22 <description >Call any LLM</description >
33 <macros >
44 <import >macros.xml</import >
5- <token name =" @VERSION_SUFFIX@" >0 </token >
5+ <token name =" @VERSION_SUFFIX@" >1 </token >
66 <token name =" @PROFILE@" >24.0</token >
77 </macros >
88 <requirements >
1414#import json
1515#import os
1616#import re
17+
1718#set LINK_LIST = []
18- #for $input in $context
19- #set file_name = os.path.splitext($input.element_identifier)[0]
20- #set ext = $input.ext if $input.ext in ['html', 'json', 'txt', 'jpg', 'jpeg', 'png', 'gif'] else 'txt'
21- #set LINK = re.sub('[^\w\-]', '_', $file_name)+'.'+$ext
22- ln -s '$input' '$LINK' &&
23- #set type = 'image' if $input.ext in ['jpg', 'jpeg', 'png', 'gif'] else 'text'
24- $LINK_LIST.append([$LINK, $type])
19+ #for $input_type, $param in [('text', 'text_context'), ('image', 'image_context')]
20+ #set context = $getVar($param, None)
21+ #if $model_type in [$input_type, 'multimodal'] and $context
22+ #for $input in ($context if isinstance($context, list) else [$context])
23+ #set file_name = re.sub('[^\w\-]', '_', os.path.splitext($input.element_identifier)[0])
24+ #set link_name = '%s_%s.%s' % ($input.hid, $file_name, $input.ext)
25+ ln -s '$input' '$link_name' &&
26+ $LINK_LIST.append(($link_name, $input_type))
27+ #end for
28+ #end if
2529#end for
2630#set context_files = json.dumps($LINK_LIST)
2731
32+ #if $model_type == 'image'
33+ #set prompt = ''
34+ #end if
35+
2836python '$__tool_directory__/llm_hub.py'
2937'$context_files'
3038'$prompt'
3139'$model.fields.value'
32- '$input_type_selector '
40+ '$model_type'
3341 ]]> </command >
3442 <inputs >
35- <conditional name =" input_type " >
36- <param name =" input_type_selector " type =" select" label =" Choose the model" help =" Multimodal models are capable to have image and text as input." >
43+ <conditional name =" model_condition " >
44+ <param name =" model_type " type =" select" label =" Choose the model" help =" Multimodal models are capable of having both image and text as input." >
3745 <option value =" multimodal" selected =" true" >Multimodal models</option >
3846 <option value =" text" >Text models</option >
47+ <option value =" image" >Image models</option >
3948 </param >
4049 <when value =" multimodal" >
4150 <param name =" model" type =" select" optional =" false" label =" Model" help =" Select the model you want to use." >
@@ -44,7 +53,11 @@ python '$__tool_directory__/llm_hub.py'
4453 </options >
4554 <validator message =" No model annotation is available for LLM Hub" type =" no_options" />
4655 </param >
47- <param name =" context" type =" data" multiple =" true" optional =" true" format =" html,json,txt,jpg,png,gif" label =" Context" max =" 500" />
56+ <param name =" text_context" type =" data" multiple =" true" optional =" true" format =" html,json,txt" label =" Text Context" />
57+ <param name =" image_context" type =" data" optional =" true" format =" jpg,png,gif,tiff,bmp" label =" Image Context" />
58+ <param name =" prompt" type =" text" optional =" false" label =" Prompt" help =" Prompts or tasks you want the LLM to perform." area =" true" >
59+ <validator type =" empty_field" />
60+ </param >
4861 </when >
4962 <when value =" text" >
5063 <param name =" model" type =" select" optional =" false" label =" Model" help =" Select the model you want to use." >
@@ -53,24 +66,33 @@ python '$__tool_directory__/llm_hub.py'
5366 </options >
5467 <validator message =" No model annotation is available for LLM Hub" type =" no_options" />
5568 </param >
56- <param name =" context" type =" data" multiple =" true" optional =" true" format =" html,json,txt" label =" Context" max =" 500" />
69+ <param name =" text_context" type =" data" multiple =" true" optional =" true" format =" html,json,txt" label =" Text Context" />
70+ <param name =" prompt" type =" text" optional =" false" label =" Prompt" help =" Prompts or tasks you want the LLM to perform." area =" true" >
71+ <validator type =" empty_field" />
72+ </param >
73+ </when >
74+ <when value =" image" >
75+ <param name =" model" type =" select" optional =" false" label =" Model" help =" Select the model you want to use." >
76+ <options from_data_table =" llm_models" >
77+ <filter type =" static_value" column =" 2" value =" image" />
78+ </options >
79+ <validator message =" No model annotation is available for LLM Hub" type =" no_options" />
80+ </param >
81+ <param name =" image_context" type =" data" optional =" false" format =" jpg,png,gif,tiff,bmp" label =" Image Context" />
5782 </when >
5883 </conditional >
59- <param name =" prompt" type =" text" optional =" false" label =" Prompt" help =" Prompts or tasks you want the LLM to perform." area =" true" >
60- <validator type =" empty_field" />
61- </param >
6284 </inputs >
6385 <outputs >
64- <data name =" output" format =" markdown" label =" ${tool.name} on ${ on_string} " from_work_dir =" ./output.md" />
86+ <data name =" output" format =" markdown" label =" ${tool.name}(${model}) #if $on_string then ' on ' + $on_string else ''# " from_work_dir =" ./output.md" />
6587 </outputs >
6688 <tests >
6789 <test expect_failure =" true" expect_exit_code =" 1" >
68- <conditional name =" input_type " >
69- <param name =" input_type_selector " value =" text" />
90+ <conditional name =" model_condition " >
91+ <param name =" model_type " value =" text" />
7092 <param name =" model" value =" unknown" />
71- <param name =" context" value =" test.txt" ftype =" txt" />
93+ <param name =" text_context" value =" test.txt" ftype =" txt" />
94+ <param name =" prompt" value =" What is this?" />
7295 </conditional >
73- <param name =" prompt" value =" What is this?" />
7496 <assert_stdout >
7597 <has_text text =" LiteLLM API key is not configured!" />
7698 </assert_stdout >
@@ -98,9 +120,13 @@ Usage
981201. **Select a Model**: Choose the LLM model that best fits your needs.
99121Available models depend on what's configured in the LiteLLM proxy by your Galaxy administrators.
100122
101- 2. **Upload Context Data**: You can upload files in formats such as TXT, HTML, JSON, JPG, PNG, or GIF.
102- This context data serves as additional input for the prompt you wish to execute.
103- Vision-capable models can process image files.
123+ 2. **Upload Context Data**: You can upload context data in different ways depending on the model type:
124+
125+ - **Text models**: Upload multiple text files (TXT, HTML, JSON) as context
126+ - **Image models**: Upload a single image file (JPG, PNG, GIF, TIFF, BMP) as context
127+ - **Multimodal models**: Upload multiple text files and/or a single image file as context
128+
129+ Vision-capable (multimodal and image) models can process image files, but only one image file is supported per request.
104130
1051313. **Provide a Prompt**: Specify the task or question you want the LLM to address.
106132The more specific the prompt, the more tailored the response will be.
0 commit comments