forked from opendatahub-io/odh-dashboard
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.go
More file actions
298 lines (266 loc) · 13 KB
/
app.go
File metadata and controls
298 lines (266 loc) · 13 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
package api
import (
"context"
"crypto/x509"
"fmt"
"log/slog"
"net/http"
"os"
"path"
"strings"
k8s "github.com/opendatahub-io/automl-library/bff/internal/integrations/kubernetes"
k8mocks "github.com/opendatahub-io/automl-library/bff/internal/integrations/kubernetes/k8mocks"
"github.com/opendatahub-io/automl-library/bff/internal/integrations/modelregistry"
ps "github.com/opendatahub-io/automl-library/bff/internal/integrations/pipelineserver"
psmocks "github.com/opendatahub-io/automl-library/bff/internal/integrations/pipelineserver/psmocks"
s3int "github.com/opendatahub-io/automl-library/bff/internal/integrations/s3"
s3mocks "github.com/opendatahub-io/automl-library/bff/internal/integrations/s3/s3mocks"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/envtest"
helper "github.com/opendatahub-io/automl-library/bff/internal/helpers"
"github.com/opendatahub-io/automl-library/bff/internal/config"
"github.com/opendatahub-io/automl-library/bff/internal/repositories"
"github.com/julienschmidt/httprouter"
)
const (
	// Version is the BFF's reported application version.
	Version = "1.0.0"
	// PathPrefix is the optional URL prefix under which the whole app may be mounted.
	PathPrefix = "/automl"
	// ApiPathPrefix is the root of all REST API routes.
	ApiPathPrefix = "/api/v1"
	// HealthCheckPath is served outside the API prefix and outside CORS/identity middleware.
	HealthCheckPath = "/healthcheck"

	UserPath      = ApiPathPrefix + "/user"
	NamespacePath = ApiPathPrefix + "/namespaces"
	SecretsPath   = ApiPathPrefix + "/secrets"

	// S3 object routes: single-file download, schema inspection, and listing.
	S3FilePath       = ApiPathPrefix + "/s3/file"
	S3FileSchemaPath = ApiPathPrefix + "/s3/file/schema"
	S3FilesPath      = ApiPathPrefix + "/s3/files"

	PipelineRunsPath = ApiPathPrefix + "/pipeline-runs"

	// Model Registry routes; models are nested under a specific registry ID.
	ModelRegistriesPath     = ApiPathPrefix + "/model-registries"
	ModelRegistryModelsPath = ModelRegistriesPath + "/:registryId/models"
)
// modelRegistryHTTPClientFactory builds a client for Model Registry register calls.
// If nil, modelregistry.NewHTTPClient is used. Set by tests only to inject mocks.
// Arguments mirror modelregistry.NewHTTPClient: logger, base URL, forwarded
// headers, TLS-verification toggle, and the root CA pool.
type modelRegistryHTTPClientFactory func(*slog.Logger, string, http.Header, bool, *x509.CertPool) (modelregistry.HTTPClientInterface, error)
// App aggregates the BFF's configuration, logger, client factories, and
// repositories. It also carries test-only tuning knobs for the S3 upload
// path and the envtest handle used when Kubernetes is mocked.
type App struct {
	config                      config.EnvConfig
	logger                      *slog.Logger
	kubernetesClientFactory     k8s.KubernetesClientFactory
	pipelineServerClientFactory ps.PipelineServerClientFactory
	s3ClientFactory             s3int.S3ClientFactory
	repositories                *repositories.Repositories
	// s3PostMaxFilePartBytes is for package api tests only (see PostS3FileHandler).
	s3PostMaxFilePartBytes int64
	// s3PostMaxRequestBodyBytes caps total POST body in tests (0 = file max + multipart envelope).
	s3PostMaxRequestBodyBytes int64
	// s3PostMaxCollisionAttempts limits HeadObject-based key suffix attempts in tests (0 = default cap).
	s3PostMaxCollisionAttempts int
	// testEnv is non-nil only when the mocked k8s client is in use; stopped in Shutdown.
	testEnv *envtest.Environment
	// rootCAs used for outbound TLS connections to Client Service; nil means system defaults.
	rootCAs *x509.CertPool
	// modelRegistryHTTPClientFactory is nil in production; tests may set it to inject mock clients.
	modelRegistryHTTPClientFactory modelRegistryHTTPClientFactory
}
// loadRootCAs builds a certificate pool from the given PEM bundle paths,
// starting from the system pool when available. Unreadable or cert-free
// bundles are skipped with a debug log. Returns nil when no certificates
// were loaded at all, so callers fall back to default transport behavior.
func loadRootCAs(bundlePaths []string, logger *slog.Logger) *x509.CertPool {
	// Start with system certs if available
	pool, err := x509.SystemCertPool()
	if err != nil {
		pool = x509.NewCertPool()
	}
	var loadedAny bool
	for _, p := range bundlePaths {
		p = strings.TrimSpace(p)
		if p == "" {
			continue
		}
		// Read and append each PEM bundle; ignore errors per file, log at debug
		pemBytes, readErr := os.ReadFile(p)
		if readErr != nil {
			logger.Debug("CA bundle not readable, skipping", slog.String("path", p), slog.Any("error", readErr))
			continue
		}
		if ok := pool.AppendCertsFromPEM(pemBytes); !ok {
			logger.Debug("No certs appended from PEM bundle", slog.String("path", p))
			continue
		}
		loadedAny = true
		logger.Info("Added CA bundle", slog.String("path", p))
	}
	if !loadedAny {
		// If none were loaded successfully, return nil to fall back to default transport behavior
		logger.Warn("No CA certificates loaded from bundle-paths; falling back to system defaults")
		return nil
	}
	return pool
}

// newKubernetesClientFactory returns the real Kubernetes client factory, or —
// when cfg.MockK8Client is set — a mocked factory backed by an envtest control
// plane. The returned *envtest.Environment is non-nil only in mock mode and
// must eventually be stopped (see App.Shutdown).
func newKubernetesClientFactory(cfg config.EnvConfig, logger *slog.Logger) (k8s.KubernetesClientFactory, *envtest.Environment, error) {
	if !cfg.MockK8Client {
		// create kubernetes client factory
		factory, err := k8s.NewKubernetesClientFactory(cfg, logger)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to create Kubernetes client: %w", err)
		}
		return factory, nil, nil
	}
	// mock all k8s calls with 'env test'
	var clientset kubernetes.Interface
	ctx, cancel := context.WithCancel(context.Background())
	testEnv, clientset, err := k8mocks.SetupEnvTest(k8mocks.TestEnvInput{
		Logger: logger,
		Ctx:    ctx,
		Cancel: cancel,
	})
	if err != nil {
		return nil, nil, fmt.Errorf("failed to setup envtest: %w", err)
	}
	// create mocked kubernetes client factory
	factory, err := k8mocks.NewMockedKubernetesClientFactory(clientset, testEnv, cfg, logger)
	if err != nil {
		// NOTE(review): testEnv is left running on this path, matching the
		// original behavior — callers discard it on error. Consider stopping it.
		return nil, testEnv, fmt.Errorf("failed to create Kubernetes client: %w", err)
	}
	return factory, testEnv, nil
}

// newPipelineServerClientFactory selects the mock or real Pipeline Server
// client factory based on cfg.MockPipelineServerClient.
func newPipelineServerClientFactory(cfg config.EnvConfig, logger *slog.Logger) ps.PipelineServerClientFactory {
	if cfg.MockPipelineServerClient {
		logger.Info("Using mock Pipeline Server client factory")
		return psmocks.NewMockClientFactory()
	}
	logger.Info("Using real Pipeline Server client factory")
	return ps.NewRealClientFactory()
}

// newS3ClientFactory selects the mock or real S3 client factory. The real
// factory's TLS/network defaults are deliberately permissive; see the inline
// security rationale.
func newS3ClientFactory(cfg config.EnvConfig, logger *slog.Logger) s3int.S3ClientFactory {
	if cfg.MockS3Client {
		logger.Info("Using mock S3 client factory")
		return s3mocks.NewMockClientFactory()
	}
	logger.Info("Using real S3 client factory")
	// INTENTIONAL SECURITY DECISION: S3 TLS and network defaults are permissive.
	//
	// InsecureSkipVerify defaults to TRUE because RHOAI's managed MinIO and many
	// customer S3-compatible stores use self-signed certificates. Requiring valid
	// certs by default would break the most common deployment scenario. The BFF
	// runs inside the cluster and communicates over the internal network, so the
	// risk of MITM is low. Customers who use a CA-signed S3 endpoint can set
	// S3_INSECURE_SKIP_VERIFY=false to enforce certificate verification.
	//
	// AllowInternalIPs defaults to TRUE because MinIO typically runs on the same
	// cluster as the BFF, using RFC-1918 private IPs (e.g. 10.x service IPs).
	// Blocking private IPs by default would prevent the most common S3 configuration.
	// Loopback, link-local, and reserved ranges remain always blocked regardless
	// of this setting. Customers can set S3_ALLOW_INTERNAL_IPS=false to also block
	// private ranges if their S3 store is external.
	//
	// AllowHTTP defaults to TRUE because some in-cluster MinIO deployments expose
	// HTTP-only endpoints. Traffic stays within the cluster network, so the risk
	// of interception is low. Customers can set S3_ALLOW_HTTP=false to require HTTPS.
	return s3int.NewRealClientFactory(s3int.S3ClientOptions{
		DevMode:            cfg.DevMode,
		InsecureSkipVerify: os.Getenv("S3_INSECURE_SKIP_VERIFY") != "false",
		AllowInternalIPs:   os.Getenv("S3_ALLOW_INTERNAL_IPS") != "false",
		AllowHTTP:          os.Getenv("S3_ALLOW_HTTP") != "false",
	})
}

// NewApp wires together the BFF's client factories, repositories, and root CA
// pool according to cfg. In mock-Kubernetes mode the returned App owns an
// envtest control plane that must be released via Shutdown.
func NewApp(cfg config.EnvConfig, logger *slog.Logger) (*App, error) {
	logger.Debug("Initializing app with config", slog.Any("config", cfg))

	// Initialize CA pool if bundle paths are provided; nil keeps default TLS behavior.
	var rootCAs *x509.CertPool
	if len(cfg.BundlePaths) > 0 {
		rootCAs = loadRootCAs(cfg.BundlePaths, logger)
	}

	k8sFactory, testEnv, err := newKubernetesClientFactory(cfg, logger)
	if err != nil {
		return nil, err
	}

	app := &App{
		config:                      cfg,
		logger:                      logger,
		kubernetesClientFactory:     k8sFactory,
		pipelineServerClientFactory: newPipelineServerClientFactory(cfg, logger),
		s3ClientFactory:             newS3ClientFactory(cfg, logger),
		repositories:                repositories.NewRepositories(logger),
		testEnv:                     testEnv,
		rootCAs:                     rootCAs,
	}
	return app, nil
}
// Shutdown releases resources held by the App. In mock-Kubernetes mode this
// stops the envtest control plane; otherwise it is a no-op and returns nil.
func (app *App) Shutdown() error {
	app.logger.Info("shutting down app...")
	if app.testEnv != nil {
		// Mock mode only: tear down the envtest control plane.
		app.logger.Info("shutting env test...")
		return app.testEnv.Stop()
	}
	return nil
}
// attachPipelineClientIfNeeded is a best-effort shim for the S3 file routes.
// When the caller supplies an explicit secretName query parameter the handler
// can resolve S3 credentials directly, so DSPA discovery is skipped and next
// is called immediately. Otherwise the full AttachPipelineServerClient
// middleware runs as normal.
func (app *App) attachPipelineClientIfNeeded(next func(http.ResponseWriter, *http.Request, httprouter.Params)) httprouter.Handle {
	return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
		secretName := strings.TrimSpace(r.URL.Query().Get("secretName"))
		if secretName == "" {
			// No explicit secret: run the full DSPA-discovery middleware chain.
			app.AttachPipelineServerClient(next)(w, r, params)
			return
		}
		// Explicit secretName provided: the handler resolves credentials itself.
		next(w, r, params)
	}
}
// Routes assembles the complete HTTP handler tree: the /api/v1 REST endpoints
// (also reachable under the /automl path prefix), the SPA static file server
// with index.html fallback, and the healthcheck endpoint, which bypasses
// CORS and identity middleware.
func (app *App) Routes() http.Handler {
	// Router for /api/v1/*
	apiRouter := httprouter.New()
	apiRouter.NotFound = http.HandlerFunc(app.notFoundResponse)
	apiRouter.MethodNotAllowed = http.HandlerFunc(app.methodNotAllowedResponse)

	// Minimal Kubernetes-backed starter endpoints
	apiRouter.GET(UserPath, app.UserHandler)
	apiRouter.GET(NamespacePath, app.GetNamespacesHandler)
	apiRouter.GET(SecretsPath, app.AttachNamespace(app.GetSecretsHandler))

	// Model Registry discovery — CRs are namespace-scoped within rhoai-model-registries
	// but presented as global in the RHOAI UX; no user-supplied namespace parameter needed.
	apiRouter.GET(ModelRegistriesPath, app.GetModelRegistriesHandler)

	// Pipeline Runs API endpoints (pipeline server and pipeline are auto-discovered)
	apiRouter.GET(PipelineRunsPath+"/:runId", app.AttachNamespace(app.RequireAccessToPipelineServers(app.AttachPipelineServerClient(app.AttachDiscoveredPipeline(app.PipelineRunHandler)))))
	apiRouter.GET(PipelineRunsPath, app.AttachNamespace(app.RequireAccessToPipelineServers(app.AttachPipelineServerClient(app.AttachDiscoveredPipeline(app.PipelineRunsHandler)))))
	apiRouter.POST(PipelineRunsPath, app.AttachNamespace(app.RequireAccessToPipelineServers(app.AttachPipelineServerClient(app.AttachDiscoveredPipeline(app.CreatePipelineRunHandler)))))

	// S3 operations — DSPA discovery is skipped when the caller supplies an explicit
	// secretName (the handler resolves credentials directly in that case).
	apiRouter.GET(S3FileSchemaPath, app.AttachNamespace(app.RequireAccessToPipelineServers(app.attachPipelineClientIfNeeded(app.GetS3FileSchemaHandler))))
	apiRouter.GET(S3FilePath, app.AttachNamespace(app.RequireAccessToPipelineServers(app.attachPipelineClientIfNeeded(app.GetS3FileHandler))))
	apiRouter.GET(S3FilesPath, app.AttachNamespace(app.RequireAccessToPipelineServers(app.attachPipelineClientIfNeeded(app.GetS3FilesHandler))))

	// POST /s3/file deliberately omits attachPipelineClientIfNeeded: secretName is required; there is
	// no DSPA fallback (creation flow uses an explicitly chosen input/target data secret).
	apiRouter.POST(S3FilePath, app.AttachNamespace(app.rejectDeclaredOversizedS3Post(app.RequireAccessToPipelineServers(app.PostS3FileHandler))))

	// Model Registry - register model binary (target registry via path param + discovered ServerURL)
	// Does NOT use AttachPipelineServerClient (which gates on a ready pipeline server and can
	// 404/503). The handler performs best-effort DSPA discovery itself via
	// injectDSPAObjectStorageIfAvailable — this only needs the DSPA spec (present regardless
	// of pipeline server readiness) to resolve bucket, endpoint, and region for the artifact URI.
	apiRouter.POST(ModelRegistryModelsPath, app.AttachNamespace(app.RequireAccessToPipelineServers(app.RegisterModelHandler)))

	// App Router
	appMux := http.NewServeMux()
	// handler for api calls — mounted both bare and under the /automl path prefix
	appMux.Handle(ApiPathPrefix+"/", apiRouter)
	appMux.Handle(PathPrefix+ApiPathPrefix+"/", http.StripPrefix(PathPrefix, apiRouter))

	// file server for the frontend file and SPA routes
	staticDir := http.Dir(app.config.StaticAssetsDir)
	fileServer := http.FileServer(staticDir)
	appMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		ctxLogger := helper.GetContextLoggerFromReq(r)
		// Check if the requested file exists. http.Dir.Open returns an
		// http.File that must be closed — not closing it leaked one file
		// descriptor per static-asset request. We only needed the existence
		// check, so close it immediately and let fileServer reopen it.
		if f, err := staticDir.Open(r.URL.Path); err == nil {
			f.Close()
			ctxLogger.Debug("Serving static file", slog.String("path", r.URL.Path))
			// Serve the file if it exists
			fileServer.ServeHTTP(w, r)
			return
		}
		// Fallback to index.html for SPA routes
		ctxLogger.Debug("Static asset not found, serving index.html", slog.String("path", r.URL.Path))
		http.ServeFile(w, r, path.Join(app.config.StaticAssetsDir, "index.html"))
	})

	// Create a mux for the healthcheck endpoint
	healthcheckMux := http.NewServeMux()
	healthcheckRouter := httprouter.New()
	healthcheckRouter.GET(HealthCheckPath, app.HealthcheckHandler)
	healthcheckMux.Handle(HealthCheckPath, app.RecoverPanic(app.EnableTelemetry(healthcheckRouter)))

	// Combines the healthcheck endpoint with the rest of the routes
	// Apply middleware to appMux which contains the API routes
	combinedMux := http.NewServeMux()
	combinedMux.Handle(HealthCheckPath, healthcheckMux)
	combinedMux.Handle("/", app.RecoverPanic(app.EnableTelemetry(app.EnableCORS(app.InjectRequestIdentity(appMux)))))
	return combinedMux
}