@@ -1,68 +1,75 @@
+kind: Deployment
 apiVersion: apps/v1
-kind: Deployment
 metadata:
   name: llamastack-deployment
 spec:
-  replicas: 1
   selector:
     matchLabels:
       app: llamastack
   template:
     metadata:
+      #annotations:
+      #  sidecar.opentelemetry.io/inject: otelsidecar
       labels:
         app: llamastack
     spec:
+      volumes:
+        - name: run-config-volume
+          configMap:
+            name: run-config
+            defaultMode: 420
+        - name: llama-persist
+          persistentVolumeClaim:
+            claimName: llama-persist
+        - name: cache
+          emptyDir: {}
+        - name: pythain
+          emptyDir: {}
       containers:
-      - args:
-        - --yaml-config
-        - /app-config/config.yaml
-        env:
-        - name: VLLM_MAX_TOKENS
-          value: "128000"
-        - name: INFERENCE_MODEL
-          value: meta-llama/Llama-3.2-3B-Instruct
-        - name: VLLM_URL
-          value: http://vllm:8000/v1
-        - name: VLLM_API_TOKEN
-          value: fake
-        - name: SAFETY_MODEL
-          value: meta-llama/Llama-Guard-3-8B
-        - name: SAFETY_VLLM_URL
-          value: http://safety.llama-serve.svc.cluster.local:8000/v1
-        - name: OTEL_TRACE_ENDPOINT
-          value: http://otel-collector-collector.observability-hub.svc.cluster.local:4318/v1/traces
-        - name: OTEL_METRIC_ENDPOINT
-          value: http://otel-collector-collector.observability-hub.svc.cluster.local:4318/v1/metrics
-        - name: MILVUS_DB_PATH
-          value: 'milvus.db'
-        image: quay.io/redhat-et/llama:vllm-0.1.9
-        imagePullPolicy: Always
-        name: llamastack
-        ports:
-        - containerPort: 8321
-          protocol: TCP
-        resources: {}
-        terminationMessagePath: /dev/termination-log
-        terminationMessagePolicy: File
-        volumeMounts:
-        - mountPath: /app-config
-          name: run-config-volume
-        - mountPath: /.llama
-          name: llama-persist
-        - mountPath: /.cache
-          name: cache
-      dnsPolicy: ClusterFirst
-      restartPolicy: Always
-      schedulerName: default-scheduler
+        - resources: {}
+          terminationMessagePath: /dev/termination-log
+          name: llamastack
+          env:
+            - name: MAX_TOKENS
+              value: '128000'
+            - name: VLLM_MAX_TOKENS
+              value: '128000'
+            - name: LLAMA3B_MODEL
+              value: meta-llama/Llama-3.2-3B-Instruct
+            - name: GRANITE_URL
+              value: 'https://granite-8b-llama-serve.apps.ocp-beta-test.nerc.mghpcc.org/v1'
+            - name: GRANITE_MODEL
+              value: ibm-granite/granite-3.2-8b-instruct
+            - name: LLAMA3B_URL
+              value: 'https://llama32-3b-llama-serve.apps.ocp-beta-test.nerc.mghpcc.org/v1'
+            - name: VLLM_API_TOKEN
+              value: fake
+            - name: OTEL_SERVICE_NAME
+              value: om-llamastack
+            - name: OTEL_TRACE_ENDPOINT
+              value: 'http://otel-collector-collector.observability-hub.svc.cluster.local:4318/v1/traces'
+            - name: SAFETY_MODEL
+              value: meta-llama/Llama-Guard-3-8B
+            - name: SAFETY_VLLM_URL
+              value: 'http://safety.llama-serve.svc.cluster.local:8000/v1'
+            - name: MILVUS_DB_PATH
+              value: milvus.db
+          ports:
+            - containerPort: 8321
+              protocol: TCP
+          imagePullPolicy: Always
+          volumeMounts:
+            - name: pythain
+              mountPath: /pythainlp-data
+            - name: run-config-volume
+              mountPath: /app-config
+            - name: llama-persist
+              mountPath: /.llama
+            - name: cache
+              mountPath: /.cache
+          terminationMessagePolicy: File
+          image: 'quay.io/redhat-et/llama:vllm-0.1.9'
+          args:
+            - '--config'
+            - /app-config/config.yaml
       securityContext: {}
-      terminationGracePeriodSeconds: 30
-      volumes:
-      - configMap:
-          defaultMode: 420
-          name: run-config
-        name: run-config-volume
-      - persistentVolumeClaim:
-          claimName: llama-persist
-        name: llama-persist
-      - emptyDir: {}
-        name: cache
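
For the new --config /app-config/config.yaml argument to resolve, the run-config ConfigMap mounted at /app-config must carry a config.yaml key, and the environment variables above only take effect if the run config references them. A minimal sketch of such a ConfigMap, assuming Llama Stack's ${env.*} substitution and a remote::vllm inference provider; the provider layout and key name here are illustrative, not taken from this commit:

# Hypothetical run-config ConfigMap; the data key must match the file name
# the container is told to load from the /app-config mount.
kind: ConfigMap
apiVersion: v1
metadata:
  name: run-config
data:
  config.yaml: |
    version: '2'
    apis:
      - inference
      - safety
    providers:
      inference:
        - provider_id: vllm
          provider_type: remote::vllm
          config:
            url: ${env.LLAMA3B_URL}            # resolved from the container env
            api_token: ${env.VLLM_API_TOKEN}
            max_tokens: ${env.VLLM_MAX_TOKENS}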
|
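The commented-out sidecar.opentelemetry.io/inject annotation is the OpenTelemetry Operator's sidecar-injection hook: once uncommented, the Operator injects a collector sidecar only if a sidecar-mode OpenTelemetryCollector with that name exists in the namespace. A minimal sketch of such a resource, forwarding to the same collector endpoint the OTEL_TRACE_ENDPOINT variable already points at; the receiver/exporter wiring is an assumption, not part of this commit:

# Hypothetical sidecar collector matching the annotation value "otelsidecar".
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
  name: otelsidecar
spec:
  mode: sidecar
  config:
    receivers:
      otlp:
        protocols:
          http: {}
    exporters:
      otlphttp:
        endpoint: http://otel-collector-collector.observability-hub.svc.cluster.local:4318
    service:
      pipelines:
        traces:
          receivers: [otlp]
          exporters: [otlphttp]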