1 parent 026c855 commit 91431f5
chat/requirements.txt
@@ -1 +1 @@
-llama-cpp-python[server]
+llama-cpp-python[server]==0.2.57
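
The requirements change pins the server dependency to a specific release instead of floating on the latest version. A minimal sketch of installing the same pinned package outside the image, assuming a plain pip/venv setup (the environment path is illustrative, not part of this commit):

# Install the pinned llama-cpp-python server extra into an isolated environment
python -m venv .venv && . .venv/bin/activate
pip install "llama-cpp-python[server]==0.2.57"
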
chat/run.sh
@@ -15,4 +15,10 @@
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
-python -m llama_cpp.server --model ${MODEL_PATH} --host ${HOST:=0.0.0.0} --port ${PORT:=8000} --n_gpu_layers 0
+if [ ${MODEL_PATH} ]; then
+    python -m llama_cpp.server --model ${MODEL_PATH} --host ${HOST:=0.0.0.0} --port ${PORT:=8001} --n_gpu_layers ${GPU_LAYERS:=0} --clip_model_path ${CLIP_MODEL_PATH:=None} --chat_format ${MODEL_CHAT_FORMAT:="llama-2"}
+    exit 0
+fi
+
+echo "This image should not be used outside of Podman Desktop AI Lab extension. Missing required MODEL_PATH environment variable."
+exit 1
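
With this change run.sh only starts the server when MODEL_PATH is set and exits with an error otherwise, so the model path (and optionally the new GPU_LAYERS, CLIP_MODEL_PATH, and MODEL_CHAT_FORMAT variables) must be passed into the container at run time. A minimal launch sketch, assuming Podman and using placeholder image and model names that are not part of this commit:

# Run the chat image with the required MODEL_PATH and the optional overrides
# added in this commit; the image name and model file below are illustrative only.
podman run --rm -p 8001:8001 \
  -v ./models:/models \
  -e MODEL_PATH=/models/llama-2-7b-chat.Q4_K_M.gguf \
  -e PORT=8001 \
  -e GPU_LAYERS=0 \
  -e MODEL_CHAT_FORMAT=llama-2 \
  quay.io/example/chat:latest
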