 # Bootstrap a local Ollama server with the granite3.3:2b model so that
 # `make run_cli` exposes a pre-configured Ollama provider in Langflow.
 #
-# Behavior:
-#   - If something is already responding on $OLLAMA_PORT, do nothing.
-#   - Else if `docker` is available, start (or restart) a container named
-#     `langflow-ollama` and pull granite3.3:2b on it.
-#   - Else if the `ollama` CLI is installed locally, start `ollama serve`
-#     in the background and pull the model.
-#   - Else print a warning and exit 0 (Langflow still starts; Ollama provider
-#     just won't be reachable).
+# Resolution order:
+#   1. If something already responds on $OLLAMA_PORT, use it.
+#   2. Else if a container engine (docker or podman) is available with a
+#      running daemon, start (or restart) a container named `langflow-ollama`
+#      and pull the model.
+#   3. Else if the `ollama` CLI is installed, start `ollama serve` in the
+#      background and pull the model.
+#   4. Else, on macOS with Homebrew, install Ollama via `brew install ollama`,
+#      then go to step 3. Skip this with LANGFLOW_OLLAMA_NO_INSTALL=1.
+#   5. Else, fail with explicit installation instructions.
 #
-# Exits non-zero only when an explicit step fails (docker run/exec, ollama pull).
+# Environment variables:
+#   OLLAMA_PORT                 default 11434
+#   OLLAMA_HOST                 default 127.0.0.1
+#   LANGFLOW_OLLAMA_MODEL       default granite3.3:2b
+#   LANGFLOW_OLLAMA_ENGINE      override container engine: docker | podman
+#                               (auto-detected when unset)
+#   LANGFLOW_OLLAMA_NO_INSTALL  set to 1 to disable the brew auto-install
+#   LANGFLOW_OLLAMA_OPTIONAL    set to 1 to make this script never exit
+#                               non-zero (useful for CI / contributors who
+#                               do not need the bundled provider)
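+#
+# Example invocations (using only the variables documented above):
+#   make run_cli                                      # auto-detect engine or local CLI
+#   OLLAMA_PORT=11500 LANGFLOW_OLLAMA_ENGINE=podman make run_cli
+#   LANGFLOW_OLLAMA_OPTIONAL=1 make run_cli           # never fail when Ollama is absent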
 
 set -euo pipefail
 
@@ -22,10 +33,22 @@ CONTAINER_NAME="langflow-ollama"
 VOLUME_NAME="langflow-ollama-data"
 HEALTH_TIMEOUT_S=60
 
+# Resolved container engine (docker | podman). Empty means "no engine usable".
+ENGINE=""
+
 log()  { printf '\033[1;36m[ollama-bootstrap]\033[0m %s\n' "$*"; }
 warn() { printf '\033[1;33m[ollama-bootstrap]\033[0m %s\n' "$*" >&2; }
 err()  { printf '\033[1;31m[ollama-bootstrap]\033[0m %s\n' "$*" >&2; }
 
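+# Report a fatal error and exit 1 — unless LANGFLOW_OLLAMA_OPTIONAL=1, in which
+# case degrade to a warning and exit 0 so Langflow can still start.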
+abort() {
+  err "$*"
+  if [ "${LANGFLOW_OLLAMA_OPTIONAL:-0}" = "1" ]; then
+    warn "LANGFLOW_OLLAMA_OPTIONAL=1 — continuing without Ollama (provider will be unreachable)."
+    exit 0
+  fi
+  exit 1
+}
+
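+# True when something already answers Ollama's /api/tags endpoint on the
+# configured host and port.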
 is_server_up() {
   curl -fsS --max-time 2 "http://${OLLAMA_HOST}:${OLLAMA_PORT}/api/tags" >/dev/null 2>&1
 }
@@ -34,21 +57,43 @@ wait_for_server() {
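+  # Poll every 2s until the server responds, giving up after HEALTH_TIMEOUT_S.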
   local elapsed=0
   while ! is_server_up; do
     if [ "$elapsed" -ge "$HEALTH_TIMEOUT_S" ]; then
-      err "Ollama did not become healthy within ${HEALTH_TIMEOUT_S}s"
       return 1
     fi
     sleep 2
     elapsed=$((elapsed + 2))
   done
 }
 
-ensure_model_docker() {
-  log "Ensuring model '${OLLAMA_MODEL}' is pulled (docker exec)..."
-  if docker exec "$CONTAINER_NAME" ollama list 2>/dev/null | awk 'NR>1 {print $1}' | grep -Fxq "$OLLAMA_MODEL"; then
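+# True when the given command exists and its daemon answers an 'info' call
+# (both docker and podman support 'info', which is what makes them
+# interchangeable here).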
+engine_is_usable() {
+  local cmd="$1"
+  command -v "$cmd" >/dev/null 2>&1 && "$cmd" info >/dev/null 2>&1
+}
+
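+# Set ENGINE to the first usable engine: an explicit LANGFLOW_OLLAMA_ENGINE
+# override wins, then docker, then podman. Returns non-zero if none is usable.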
+resolve_engine() {
+  if [ -n "${LANGFLOW_OLLAMA_ENGINE:-}" ]; then
+    if engine_is_usable "$LANGFLOW_OLLAMA_ENGINE"; then
+      ENGINE="$LANGFLOW_OLLAMA_ENGINE"
+      return 0
+    fi
+    warn "LANGFLOW_OLLAMA_ENGINE='${LANGFLOW_OLLAMA_ENGINE}' is not usable — falling back to auto-detect."
+  fi
+  for candidate in docker podman; do
+    if engine_is_usable "$candidate"; then
+      ENGINE="$candidate"
+      return 0
+    fi
+  done
+  return 1
+}
+
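+# Pull the model inside the container, skipping the pull when 'ollama list'
+# already shows an exact match for the tag.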
+ensure_model_engine() {
+  log "Ensuring model '${OLLAMA_MODEL}' is pulled (${ENGINE} exec)..."
+  if "$ENGINE" exec "$CONTAINER_NAME" ollama list 2>/dev/null \
+      | awk 'NR>1 {print $1}' | grep -Fxq "$OLLAMA_MODEL"; then
     log "Model '${OLLAMA_MODEL}' already present."
     return 0
   fi
-  docker exec "$CONTAINER_NAME" ollama pull "$OLLAMA_MODEL"
+  "$ENGINE" exec "$CONTAINER_NAME" ollama pull "$OLLAMA_MODEL"
 }
 
 ensure_model_local() {
@@ -60,49 +105,74 @@ ensure_model_local() {
   ollama pull "$OLLAMA_MODEL"
 }
 
-start_with_docker() {
-  log "Starting Ollama container '${CONTAINER_NAME}' on port ${OLLAMA_PORT}..."
-  if docker ps --format '{{.Names}}' | grep -Fxq "$CONTAINER_NAME"; then
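+# Run Ollama in a container named $CONTAINER_NAME, publishing $OLLAMA_PORT and
+# persisting pulled models in the $VOLUME_NAME volume (mounted at
+# /root/.ollama, where Ollama keeps its model store) across restarts.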
+start_with_engine() {
+  log "Starting Ollama container '${CONTAINER_NAME}' on port ${OLLAMA_PORT} (engine: ${ENGINE})..."
+  if "$ENGINE" ps --format '{{.Names}}' | grep -Fxq "$CONTAINER_NAME"; then
     log "Container already running."
-  elif docker ps -a --format '{{.Names}}' | grep -Fxq "$CONTAINER_NAME"; then
+  elif "$ENGINE" ps -a --format '{{.Names}}' | grep -Fxq "$CONTAINER_NAME"; then
     log "Container exists but is stopped — restarting."
-    docker start "$CONTAINER_NAME" >/dev/null
+    "$ENGINE" start "$CONTAINER_NAME" >/dev/null
   else
-    docker volume create "$VOLUME_NAME" >/dev/null
-    docker run -d \
+    "$ENGINE" volume create "$VOLUME_NAME" >/dev/null 2>&1 || true
+    "$ENGINE" run -d \
       --name "$CONTAINER_NAME" \
       --restart unless-stopped \
      -p "${OLLAMA_PORT}:11434" \
       -v "${VOLUME_NAME}:/root/.ollama" \
-      ollama/ollama >/dev/null
+      docker.io/ollama/ollama:latest >/dev/null
+  fi
+  if ! wait_for_server; then
+    abort "Ollama container did not become healthy within ${HEALTH_TIMEOUT_S}s. Inspect with: ${ENGINE} logs ${CONTAINER_NAME}"
+  fi
-  wait_for_server
-  ensure_model_docker
+  ensure_model_engine
 }
 
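+# Fallback path: run 'ollama serve' directly, detached with nohup; stdout and
+# stderr land in /tmp/langflow-ollama.log.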
 start_with_local_cli() {
-  log "Starting local 'ollama serve' on port ${OLLAMA_PORT}..."
+  log "Starting local 'ollama serve' on port ${OLLAMA_PORT} (logs: /tmp/langflow-ollama.log)..."
   OLLAMA_HOST="${OLLAMA_HOST}:${OLLAMA_PORT}" nohup ollama serve >/tmp/langflow-ollama.log 2>&1 &
-  wait_for_server
+  if ! wait_for_server; then
+    abort "Local 'ollama serve' did not become healthy within ${HEALTH_TIMEOUT_S}s. Tail /tmp/langflow-ollama.log for details."
+  fi
   ensure_model_local
 }
 
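+# Last resort: auto-install the Ollama CLI. Only implemented for macOS with
+# Homebrew; set LANGFLOW_OLLAMA_NO_INSTALL=1 to opt out. Returns non-zero when
+# installation is disabled or not possible on this platform.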
+try_install_ollama() {
+  if [ "${LANGFLOW_OLLAMA_NO_INSTALL:-0}" = "1" ]; then
+    return 1
+  fi
+  case "$(uname -s)" in
+    Darwin)
+      if command -v brew >/dev/null 2>&1; then
+        log "Ollama not found — installing via Homebrew (brew install ollama)..."
+        brew install ollama
+        return 0
+      fi
+      ;;
+  esac
+  return 1
+}
+
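+# Walk the resolution order documented at the top of this file.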
 main() {
   if is_server_up; then
     log "Ollama already responding at http://${OLLAMA_HOST}:${OLLAMA_PORT} — skipping bootstrap."
-    log "(If granite3.3:2b is not installed, run: ollama pull ${OLLAMA_MODEL})"
+    log "(If '${OLLAMA_MODEL}' is not installed, run: ollama pull ${OLLAMA_MODEL})"
     return 0
   fi
 
-  if command -v docker >/dev/null 2>&1 && docker info >/dev/null 2>&1; then
-    start_with_docker
+  if resolve_engine; then
+    start_with_engine
   elif command -v ollama >/dev/null 2>&1; then
     start_with_local_cli
+  elif try_install_ollama && command -v ollama >/dev/null 2>&1; then
+    start_with_local_cli
   else
-    warn "Neither Docker nor the 'ollama' CLI is available."
-    warn "Langflow will start, but the Ollama provider won't be reachable."
-    warn "Install Docker (https://docs.docker.com/get-docker/) or Ollama (https://ollama.com/download) and re-run."
-    return 0
+    abort "Cannot start Ollama: no usable container engine (docker / podman) and no 'ollama' CLI.
+Install one of:
+  - Podman: https://podman.io/docs/installation (then 'podman machine start')
+  - Docker Desktop: https://docs.docker.com/get-docker/
+  - Ollama: https://ollama.com/download (or 'brew install ollama')
+Then re-run 'make run_cli'.
+To skip this bootstrap on this machine, run: LANGFLOW_OLLAMA_OPTIONAL=1 make run_cli"
   fi
 
   log "Ollama is up at http://${OLLAMA_HOST}:${OLLAMA_PORT} with model '${OLLAMA_MODEL}'."