diff --git a/apps/jan-api-gateway/application/app/infrastructure/database/database.go b/apps/jan-api-gateway/application/app/infrastructure/database/database.go
index 001c6b4a..aba55521 100644
--- a/apps/jan-api-gateway/application/app/infrastructure/database/database.go
+++ b/apps/jan-api-gateway/application/app/infrastructure/database/database.go
@@ -41,32 +41,11 @@ func NewDB() (*gorm.DB, error) {
 			Fatalf("unable to connect to setup replica: %v", err)
 		return nil, err
 	}
-
-	if !db.Migrator().HasTable("database_migration") {
-		err = db.Exec("DROP SCHEMA IF EXISTS public CASCADE;").Error
-		if err != nil {
-			logger.GetLogger().
-				WithField("error_code", "5644d271-be84-4a92-a5af-489d87324758").
-				Fatalf("unable to connect to setup replica: %v", err)
-			return nil, err
-		}
-		err = db.Exec("CREATE SCHEMA public;").Error
-		if err != nil {
-			logger.GetLogger().
-				WithField("error_code", "07217a04-80f1-466f-8d2c-cdd162dd9ccb").
-				Fatalf("unable to connect to setup replica: %v", err)
-			return nil, err
-		}
-		for _, model := range SchemaRegistry {
-			err = db.AutoMigrate(model)
-			if err != nil {
-				logger.GetLogger().
-					WithField("error_code", "75333e43-8157-4f0a-8e34-aa34e6e7c285").
-					Fatalf("failed to auto migrate schema: %T, error: %v", model, err)
-				return nil, err
-			}
-		}
-	}
 	DB = db
 	return DB, nil
 }
+
+func Migration() error {
+	migrator := NewDBMigrator(DB)
+	return migrator.Migrate()
+}
diff --git a/apps/jan-api-gateway/application/app/infrastructure/database/dbschema/dbmigration.go b/apps/jan-api-gateway/application/app/infrastructure/database/dbschema/dbmigration.go
deleted file mode 100644
index a8b4584a..00000000
--- a/apps/jan-api-gateway/application/app/infrastructure/database/dbschema/dbmigration.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package dbschema
-
-import (
-	"menlo.ai/jan-api-gateway/app/infrastructure/database"
-)
-
-func init() {
-	database.RegisterSchemaForAutoMigrate(DatabaseMigration{})
-}
-
-type DatabaseMigration struct {
-	BaseModel
-	Version string `gorm:"size:64;not null;uniqueIndex"`
-}
diff --git a/apps/jan-api-gateway/application/app/infrastructure/database/gormgen/database_migrations.gen.go b/apps/jan-api-gateway/application/app/infrastructure/database/gormgen/database_migrations.gen.go
deleted file mode 100644
index fe4e0b62..00000000
--- a/apps/jan-api-gateway/application/app/infrastructure/database/gormgen/database_migrations.gen.go
+++ /dev/null
@@ -1,399 +0,0 @@
-// Code generated by gorm.io/gen. DO NOT EDIT.
-// Code generated by gorm.io/gen. DO NOT EDIT.
-// Code generated by gorm.io/gen. DO NOT EDIT.
-
-package gormgen
-
-import (
-	"context"
-	"database/sql"
-
-	"gorm.io/gorm"
-	"gorm.io/gorm/clause"
-	"gorm.io/gorm/schema"
-
-	"gorm.io/gen"
-	"gorm.io/gen/field"
-
-	"gorm.io/plugin/dbresolver"
-
-	"menlo.ai/jan-api-gateway/app/infrastructure/database/dbschema"
-)
-
-func newDatabaseMigration(db *gorm.DB, opts ...gen.DOOption) databaseMigration {
-	_databaseMigration := databaseMigration{}
-
-	_databaseMigration.databaseMigrationDo.UseDB(db, opts...)
-	_databaseMigration.databaseMigrationDo.UseModel(&dbschema.DatabaseMigration{})
-
-	tableName := _databaseMigration.databaseMigrationDo.TableName()
-	_databaseMigration.ALL = field.NewAsterisk(tableName)
-	_databaseMigration.ID = field.NewUint(tableName, "id")
-	_databaseMigration.CreatedAt = field.NewTime(tableName, "created_at")
-	_databaseMigration.UpdatedAt = field.NewTime(tableName, "updated_at")
-	_databaseMigration.DeletedAt = field.NewField(tableName, "deleted_at")
-	_databaseMigration.Version = field.NewString(tableName, "version")
-
-	_databaseMigration.fillFieldMap()
-
-	return _databaseMigration
-}
-
-type databaseMigration struct {
-	databaseMigrationDo
-
-	ALL       field.Asterisk
-	ID        field.Uint
-	CreatedAt field.Time
-	UpdatedAt field.Time
-	DeletedAt field.Field
-	Version   field.String
-
-	fieldMap map[string]field.Expr
-}
-
-func (d databaseMigration) Table(newTableName string) *databaseMigration {
-	d.databaseMigrationDo.UseTable(newTableName)
-	return d.updateTableName(newTableName)
-}
-
-func (d databaseMigration) As(alias string) *databaseMigration {
-	d.databaseMigrationDo.DO = *(d.databaseMigrationDo.As(alias).(*gen.DO))
-	return d.updateTableName(alias)
-}
-
-func (d *databaseMigration) updateTableName(table string) *databaseMigration {
-	d.ALL = field.NewAsterisk(table)
-	d.ID = field.NewUint(table, "id")
-	d.CreatedAt = field.NewTime(table, "created_at")
-	d.UpdatedAt = field.NewTime(table, "updated_at")
-	d.DeletedAt = field.NewField(table, "deleted_at")
-	d.Version = field.NewString(table, "version")
-
-	d.fillFieldMap()
-
-	return d
-}
-
-func (d *databaseMigration) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
-	_f, ok := d.fieldMap[fieldName]
-	if !ok || _f == nil {
-		return nil, false
-	}
-	_oe, ok := _f.(field.OrderExpr)
-	return _oe, ok
-}
-
-func (d *databaseMigration) fillFieldMap() {
-	d.fieldMap = make(map[string]field.Expr, 5)
-	d.fieldMap["id"] = d.ID
-	d.fieldMap["created_at"] = d.CreatedAt
-	d.fieldMap["updated_at"] = d.UpdatedAt
-	d.fieldMap["deleted_at"] = d.DeletedAt
-	d.fieldMap["version"] = d.Version
-}
-
-func (d databaseMigration) clone(db *gorm.DB) databaseMigration {
-	d.databaseMigrationDo.ReplaceConnPool(db.Statement.ConnPool)
-	return d
-}
-
-func (d databaseMigration) replaceDB(db *gorm.DB) databaseMigration {
-	d.databaseMigrationDo.ReplaceDB(db)
-	return d
-}
-
-type databaseMigrationDo struct{ gen.DO }
-
-type IDatabaseMigrationDo interface {
-	gen.SubQuery
-	Debug() IDatabaseMigrationDo
-	WithContext(ctx context.Context) IDatabaseMigrationDo
-	WithResult(fc func(tx gen.Dao)) gen.ResultInfo
-	ReplaceDB(db *gorm.DB)
-	ReadDB() IDatabaseMigrationDo
-	WriteDB() IDatabaseMigrationDo
-	As(alias string) gen.Dao
-	Session(config *gorm.Session) IDatabaseMigrationDo
-	Columns(cols ...field.Expr) gen.Columns
-	Clauses(conds ...clause.Expression) IDatabaseMigrationDo
-	Not(conds ...gen.Condition) IDatabaseMigrationDo
-	Or(conds ...gen.Condition) IDatabaseMigrationDo
-	Select(conds ...field.Expr) IDatabaseMigrationDo
-	Where(conds ...gen.Condition) IDatabaseMigrationDo
-	Order(conds ...field.Expr) IDatabaseMigrationDo
-	Distinct(cols ...field.Expr) IDatabaseMigrationDo
-	Omit(cols ...field.Expr) IDatabaseMigrationDo
-	Join(table schema.Tabler, on ...field.Expr) IDatabaseMigrationDo
-	LeftJoin(table schema.Tabler, on ...field.Expr) IDatabaseMigrationDo
-	RightJoin(table schema.Tabler, on ...field.Expr) IDatabaseMigrationDo
-	Group(cols ...field.Expr) IDatabaseMigrationDo
-	Having(conds ...gen.Condition) IDatabaseMigrationDo
-	Limit(limit int) IDatabaseMigrationDo
-	Offset(offset int) IDatabaseMigrationDo
-	Count() (count int64, err error)
-	Scopes(funcs ...func(gen.Dao) gen.Dao) IDatabaseMigrationDo
-	Unscoped() IDatabaseMigrationDo
-	Create(values ...*dbschema.DatabaseMigration) error
-	CreateInBatches(values []*dbschema.DatabaseMigration, batchSize int) error
-	Save(values ...*dbschema.DatabaseMigration) error
-	First() (*dbschema.DatabaseMigration, error)
-	Take() (*dbschema.DatabaseMigration, error)
-	Last() (*dbschema.DatabaseMigration, error)
-	Find() ([]*dbschema.DatabaseMigration, error)
-	FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*dbschema.DatabaseMigration, err error)
-	FindInBatches(result *[]*dbschema.DatabaseMigration, batchSize int, fc func(tx gen.Dao, batch int) error) error
-	Pluck(column field.Expr, dest interface{}) error
-	Delete(...*dbschema.DatabaseMigration) (info gen.ResultInfo, err error)
-	Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
-	UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
-	Updates(value interface{}) (info gen.ResultInfo, err error)
-	UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
-	UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
-	UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
-	UpdateFrom(q gen.SubQuery) gen.Dao
-	Attrs(attrs ...field.AssignExpr) IDatabaseMigrationDo
-	Assign(attrs ...field.AssignExpr) IDatabaseMigrationDo
-	Joins(fields ...field.RelationField) IDatabaseMigrationDo
-	Preload(fields ...field.RelationField) IDatabaseMigrationDo
-	FirstOrInit() (*dbschema.DatabaseMigration, error)
-	FirstOrCreate() (*dbschema.DatabaseMigration, error)
-	FindByPage(offset int, limit int) (result []*dbschema.DatabaseMigration, count int64, err error)
-	ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
-	Rows() (*sql.Rows, error)
-	Row() *sql.Row
-	Scan(result interface{}) (err error)
-	Returning(value interface{}, columns ...string) IDatabaseMigrationDo
-	UnderlyingDB() *gorm.DB
-	schema.Tabler
-}
-
-func (d databaseMigrationDo) Debug() IDatabaseMigrationDo {
-	return d.withDO(d.DO.Debug())
-}
-
-func (d databaseMigrationDo) WithContext(ctx context.Context) IDatabaseMigrationDo {
-	return d.withDO(d.DO.WithContext(ctx))
-}
-
-func (d databaseMigrationDo) ReadDB() IDatabaseMigrationDo {
-	return d.Clauses(dbresolver.Read)
-}
-
-func (d databaseMigrationDo) WriteDB() IDatabaseMigrationDo {
-	return d.Clauses(dbresolver.Write)
-}
-
-func (d databaseMigrationDo) Session(config *gorm.Session) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Session(config))
-}
-
-func (d databaseMigrationDo) Clauses(conds ...clause.Expression) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Clauses(conds...))
-}
-
-func (d databaseMigrationDo) Returning(value interface{}, columns ...string) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Returning(value, columns...))
-}
-
-func (d databaseMigrationDo) Not(conds ...gen.Condition) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Not(conds...))
-}
-
-func (d databaseMigrationDo) Or(conds ...gen.Condition) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Or(conds...))
-}
-
-func (d databaseMigrationDo) Select(conds ...field.Expr) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Select(conds...))
-}
-
-func (d databaseMigrationDo) Where(conds ...gen.Condition) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Where(conds...))
-}
-
-func (d databaseMigrationDo) Order(conds ...field.Expr) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Order(conds...))
-}
-
-func (d databaseMigrationDo) Distinct(cols ...field.Expr) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Distinct(cols...))
-}
-
-func (d databaseMigrationDo) Omit(cols ...field.Expr) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Omit(cols...))
-}
-
-func (d databaseMigrationDo) Join(table schema.Tabler, on ...field.Expr) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Join(table, on...))
-}
-
-func (d databaseMigrationDo) LeftJoin(table schema.Tabler, on ...field.Expr) IDatabaseMigrationDo {
-	return d.withDO(d.DO.LeftJoin(table, on...))
-}
-
-func (d databaseMigrationDo) RightJoin(table schema.Tabler, on ...field.Expr) IDatabaseMigrationDo {
-	return d.withDO(d.DO.RightJoin(table, on...))
-}
-
-func (d databaseMigrationDo) Group(cols ...field.Expr) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Group(cols...))
-}
-
-func (d databaseMigrationDo) Having(conds ...gen.Condition) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Having(conds...))
-}
-
-func (d databaseMigrationDo) Limit(limit int) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Limit(limit))
-}
-
-func (d databaseMigrationDo) Offset(offset int) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Offset(offset))
-}
-
-func (d databaseMigrationDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Scopes(funcs...))
-}
-
-func (d databaseMigrationDo) Unscoped() IDatabaseMigrationDo {
-	return d.withDO(d.DO.Unscoped())
-}
-
-func (d databaseMigrationDo) Create(values ...*dbschema.DatabaseMigration) error {
-	if len(values) == 0 {
-		return nil
-	}
-	return d.DO.Create(values)
-}
-
-func (d databaseMigrationDo) CreateInBatches(values []*dbschema.DatabaseMigration, batchSize int) error {
-	return d.DO.CreateInBatches(values, batchSize)
-}
-
-// Save : !!! underlying implementation is different with GORM
-// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
-func (d databaseMigrationDo) Save(values ...*dbschema.DatabaseMigration) error {
-	if len(values) == 0 {
-		return nil
-	}
-	return d.DO.Save(values)
-}
-
-func (d databaseMigrationDo) First() (*dbschema.DatabaseMigration, error) {
-	if result, err := d.DO.First(); err != nil {
-		return nil, err
-	} else {
-		return result.(*dbschema.DatabaseMigration), nil
-	}
-}
-
-func (d databaseMigrationDo) Take() (*dbschema.DatabaseMigration, error) {
-	if result, err := d.DO.Take(); err != nil {
-		return nil, err
-	} else {
-		return result.(*dbschema.DatabaseMigration), nil
-	}
-}
-
-func (d databaseMigrationDo) Last() (*dbschema.DatabaseMigration, error) {
-	if result, err := d.DO.Last(); err != nil {
-		return nil, err
-	} else {
-		return result.(*dbschema.DatabaseMigration), nil
-	}
-}
-
-func (d databaseMigrationDo) Find() ([]*dbschema.DatabaseMigration, error) {
-	result, err := d.DO.Find()
-	return result.([]*dbschema.DatabaseMigration), err
-}
-
-func (d databaseMigrationDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*dbschema.DatabaseMigration, err error) {
-	buf := make([]*dbschema.DatabaseMigration, 0, batchSize)
-	err = d.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
-		defer func() { results = append(results, buf...) }()
-		return fc(tx, batch)
-	})
-	return results, err
-}
-
-func (d databaseMigrationDo) FindInBatches(result *[]*dbschema.DatabaseMigration, batchSize int, fc func(tx gen.Dao, batch int) error) error {
-	return d.DO.FindInBatches(result, batchSize, fc)
-}
-
-func (d databaseMigrationDo) Attrs(attrs ...field.AssignExpr) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Attrs(attrs...))
-}
-
-func (d databaseMigrationDo) Assign(attrs ...field.AssignExpr) IDatabaseMigrationDo {
-	return d.withDO(d.DO.Assign(attrs...))
-}
-
-func (d databaseMigrationDo) Joins(fields ...field.RelationField) IDatabaseMigrationDo {
-	for _, _f := range fields {
-		d = *d.withDO(d.DO.Joins(_f))
-	}
-	return &d
-}
-
-func (d databaseMigrationDo) Preload(fields ...field.RelationField) IDatabaseMigrationDo {
-	for _, _f := range fields {
-		d = *d.withDO(d.DO.Preload(_f))
-	}
-	return &d
-}
-
-func (d databaseMigrationDo) FirstOrInit() (*dbschema.DatabaseMigration, error) {
-	if result, err := d.DO.FirstOrInit(); err != nil {
-		return nil, err
-	} else {
-		return result.(*dbschema.DatabaseMigration), nil
-	}
-}
-
-func (d databaseMigrationDo) FirstOrCreate() (*dbschema.DatabaseMigration, error) {
-	if result, err := d.DO.FirstOrCreate(); err != nil {
-		return nil, err
-	} else {
-		return result.(*dbschema.DatabaseMigration), nil
-	}
-}
-
-func (d databaseMigrationDo) FindByPage(offset int, limit int) (result []*dbschema.DatabaseMigration, count int64, err error) {
-	result, err = d.Offset(offset).Limit(limit).Find()
-	if err != nil {
-		return
-	}
-
-	if size := len(result); 0 < limit && 0 < size && size < limit {
-		count = int64(size + offset)
-		return
-	}
-
-	count, err = d.Offset(-1).Limit(-1).Count()
-	return
-}
-
-func (d databaseMigrationDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
-	count, err = d.Count()
-	if err != nil {
-		return
-	}
-
-	err = d.Offset(offset).Limit(limit).Scan(result)
-	return
-}
-
-func (d databaseMigrationDo) Scan(result interface{}) (err error) {
-	return d.DO.Scan(result)
-}
-
-func (d databaseMigrationDo) Delete(models ...*dbschema.DatabaseMigration) (result gen.ResultInfo, err error) {
-	return d.DO.Delete(models)
-}
-
-func (d *databaseMigrationDo) withDO(do gen.Dao) *databaseMigrationDo {
-	d.DO = *do.(*gen.DO)
-	return d
-}
diff --git a/apps/jan-api-gateway/application/app/infrastructure/database/gormgen/gen.go b/apps/jan-api-gateway/application/app/infrastructure/database/gormgen/gen.go
index 2c59402b..7924d13c 100644
--- a/apps/jan-api-gateway/application/app/infrastructure/database/gormgen/gen.go
+++ b/apps/jan-api-gateway/application/app/infrastructure/database/gormgen/gen.go
@@ -19,7 +19,6 @@ var (
 	Q                 = new(Query)
 	ApiKey            *apiKey
 	Conversation      *conversation
-	DatabaseMigration *databaseMigration
 	Invite            *invite
 	Item              *item
 	Organization      *organization
@@ -34,7 +33,6 @@ func SetDefault(db *gorm.DB, opts ...gen.DOOption) {
 	*Q = *Use(db, opts...)
 	ApiKey = &Q.ApiKey
 	Conversation = &Q.Conversation
-	DatabaseMigration = &Q.DatabaseMigration
 	Invite = &Q.Invite
 	Item = &Q.Item
 	Organization = &Q.Organization
@@ -50,7 +48,6 @@ func Use(db *gorm.DB, opts ...gen.DOOption) *Query {
 		db:                db,
 		ApiKey:            newApiKey(db, opts...),
 		Conversation:      newConversation(db, opts...),
-		DatabaseMigration: newDatabaseMigration(db, opts...),
 		Invite:            newInvite(db, opts...),
 		Item:              newItem(db, opts...),
 		Organization:      newOrganization(db, opts...),
@@ -67,7 +64,6 @@ type Query struct {
 	ApiKey            apiKey
 	Conversation      conversation
-	DatabaseMigration databaseMigration
 	Invite            invite
 	Item              item
 	Organization      organization
@@ -85,7 +81,6 @@ func (q *Query) clone(db *gorm.DB) *Query {
 		db:                db,
 		ApiKey:            q.ApiKey.clone(db),
 		Conversation:      q.Conversation.clone(db),
-		DatabaseMigration: q.DatabaseMigration.clone(db),
 		Invite:            q.Invite.clone(db),
 		Item:              q.Item.clone(db),
 		Organization:      q.Organization.clone(db),
@@ -110,7 +105,6 @@ func (q *Query) ReplaceDB(db *gorm.DB) *Query {
 		db:                db,
 		ApiKey:            q.ApiKey.replaceDB(db),
 		Conversation:      q.Conversation.replaceDB(db),
-		DatabaseMigration: q.DatabaseMigration.replaceDB(db),
 		Invite:            q.Invite.replaceDB(db),
 		Item:              q.Item.replaceDB(db),
 		Organization:      q.Organization.replaceDB(db),
@@ -125,7 +119,6 @@ type queryCtx struct {
 	ApiKey            IApiKeyDo
 	Conversation      IConversationDo
-	DatabaseMigration IDatabaseMigrationDo
 	Invite            IInviteDo
 	Item              IItemDo
 	Organization      IOrganizationDo
@@ -140,7 +133,6 @@ func (q *Query) WithContext(ctx context.Context) *queryCtx {
 	return &queryCtx{
 		ApiKey:            q.ApiKey.WithContext(ctx),
 		Conversation:      q.Conversation.WithContext(ctx),
-		DatabaseMigration: q.DatabaseMigration.WithContext(ctx),
 		Invite:            q.Invite.WithContext(ctx),
 		Item:              q.Item.WithContext(ctx),
 		Organization:      q.Organization.WithContext(ctx),
diff --git a/apps/jan-api-gateway/application/app/infrastructure/database/gormgen/users.gen.go b/apps/jan-api-gateway/application/app/infrastructure/database/gormgen/users.gen.go
index 9fc01edf..254be21f 100644
--- a/apps/jan-api-gateway/application/app/infrastructure/database/gormgen/users.gen.go
+++ b/apps/jan-api-gateway/application/app/infrastructure/database/gormgen/users.gen.go
@@ -36,6 +36,7 @@ func newUser(db *gorm.DB, opts ...gen.DOOption) user {
 	_user.Email = field.NewString(tableName, "email")
 	_user.PublicID = field.NewString(tableName, "public_id")
 	_user.Enabled = field.NewBool(tableName, "enabled")
+	_user.IsGuest = field.NewBool(tableName, "is_guest")
 
 	_user.Organizations = userHasManyOrganizations{
 		db: db.Session(&gorm.Session{}),
@@ -65,6 +66,7 @@ type user struct {
 	Email         field.String
 	PublicID      field.String
 	Enabled       field.Bool
+	IsGuest       field.Bool
 
 	Organizations userHasManyOrganizations
 	Projects      userHasManyProjects
@@ -92,6 +94,7 @@ func (u *user) updateTableName(table string) *user {
 	u.Email = field.NewString(table, "email")
 	u.PublicID = field.NewString(table, "public_id")
 	u.Enabled = field.NewBool(table, "enabled")
+	u.IsGuest = field.NewBool(table, "is_guest")
 
 	u.fillFieldMap()
 
@@ -108,7 +111,7 @@ func (u *user) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
 }
 
 func (u *user) fillFieldMap() {
-	u.fieldMap = make(map[string]field.Expr, 10)
+	u.fieldMap = make(map[string]field.Expr, 11)
 	u.fieldMap["id"] = u.ID
 	u.fieldMap["created_at"] = u.CreatedAt
 	u.fieldMap["updated_at"] = u.UpdatedAt
@@ -117,6 +120,7 @@ func (u *user) fillFieldMap() {
 	u.fieldMap["email"] = u.Email
 	u.fieldMap["public_id"] = u.PublicID
 	u.fieldMap["enabled"] = u.Enabled
+	u.fieldMap["is_guest"] = u.IsGuest
 }
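Editor's note: the regenerated `users.gen.go` above exposes the new `is_guest` column as a typed `field.Bool` on the user query object. A minimal usage sketch, assuming `gormgen.SetDefault(db)` has run at startup and using only the generated `WithContext`/`Where`/`Find` API shown in this diff (`listGuestUsers` is a hypothetical helper, not part of the PR):

```go
package example

import (
	"context"

	"menlo.ai/jan-api-gateway/app/infrastructure/database/gormgen"
)

// listGuestUsers filters users on the newly generated IsGuest field.
func listGuestUsers(ctx context.Context) error {
	u := gormgen.Q.User
	guests, err := u.WithContext(ctx).Where(u.IsGuest.Is(true)).Find()
	if err != nil {
		return err
	}
	_ = guests // hand off to cleanup, reporting, etc.
	return nil
}
```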
diff --git a/apps/jan-api-gateway/application/app/infrastructure/database/migration.go b/apps/jan-api-gateway/application/app/infrastructure/database/migration.go
new file mode 100644
index 00000000..fdfa138c
--- /dev/null
+++ b/apps/jan-api-gateway/application/app/infrastructure/database/migration.go
@@ -0,0 +1,167 @@
+package database
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"gorm.io/gorm"
+)
+
+type DatabaseMigration struct {
+	gorm.Model
+	Version string `gorm:"not null;uniqueIndex"`
+}
+
+type SchemaVersion struct {
+	Migrations []string `json:"migrations"`
+}
+
+func NewSchemaVersion() SchemaVersion {
+	sv := SchemaVersion{
+		// Consider supporting semantic versioning, such as:
+		// ```
+		// Version {
+		//     ReleaseVersion: "v0.0.3",
+		//     DbVersion: 2
+		// }
+		// ```
+		Migrations: []string{
+			"000001",
+			"000002",
+		},
+	}
+	return sv
+}
+
+type DBMigrator struct {
+	db *gorm.DB
+}
+
+func NewDBMigrator(db *gorm.DB) *DBMigrator {
+	return &DBMigrator{
+		db: db,
+	}
+}
+
+func (d *DBMigrator) initialize() error {
+	db := d.db
+	var reset bool
+	var record DatabaseMigration
+
+	hasTable := db.Migrator().HasTable("database_migration")
+	if hasTable {
+		result := db.Limit(1).Find(&record)
+		if result.Error != nil && result.Error != gorm.ErrRecordNotFound {
+			return fmt.Errorf("failed to query migration records: %w", result.Error)
+		}
+		if result.RowsAffected == 0 {
+			reset = true
+		}
+	} else {
+		reset = true
+	}
+
+	if reset {
+		// Still experiencing a race condition here; need to consult with DevOps regarding deployment strategy.
+		if err := db.Exec("DROP SCHEMA IF EXISTS public CASCADE;").Error; err != nil {
+			return fmt.Errorf("failed to drop public schema: %w", err)
+		}
+		if err := db.Exec("CREATE SCHEMA public;").Error; err != nil {
+			return fmt.Errorf("failed to create public schema: %w", err)
+		}
+		if err := db.AutoMigrate(&DatabaseMigration{}); err != nil {
+			return fmt.Errorf("failed to create 'database_migration' table: %w", err)
+		}
+
+		initialRecord := DatabaseMigration{Version: "000000"}
+		if err := db.Create(&initialRecord).Error; err != nil {
+			return fmt.Errorf("failed to insert initial migration record: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (d *DBMigrator) lockVersion(ctx context.Context, tx *gorm.DB) (DatabaseMigration, error) {
+	var m DatabaseMigration
+
+	if err := tx.WithContext(ctx).
+		Raw("SELECT id, version FROM database_migration ORDER BY id LIMIT 1").
+		Scan(&m).Error; err != nil {
+		return m, err
+	}
+
+	if m.ID == 0 {
+		return m, fmt.Errorf("no row found in database_migration")
+	}
+
+	if err := tx.WithContext(ctx).
+		Raw("SELECT id, version FROM database_migration WHERE id = ? FOR UPDATE", m.ID).
+		Scan(&m).Error; err != nil {
+		return m, err
+	}
+
+	return m, nil
+}
+
+func (d *DBMigrator) Migrate() error {
+	if err := d.initialize(); err != nil {
+		return err
+	}
+	migrations := NewSchemaVersion().Migrations
+	ctx := context.Background()
+	tx := d.db.WithContext(ctx).Begin()
+	if tx.Error != nil {
+		return tx.Error
+	}
+	// Lock the version row (SELECT ... FOR UPDATE) so concurrent instances serialize here.
+	currentVersion, err := d.lockVersion(ctx, tx)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+	_, filename, _, ok := runtime.Caller(0)
+	if !ok {
+		tx.Rollback()
+		return fmt.Errorf("unable to locate migration sources: da75e6a4-af0e-46a0-8cf8-569263651443")
+	}
+	migrationSqlFolder := filepath.Join(filepath.Dir(filename), "migrationsqls")
+
+	updated := false
+	for _, migrationVersion := range migrations {
+		if currentVersion.Version >= migrationVersion {
+			continue
+		}
+		// Apply this version's SQL file inside the transaction.
+		sqlFile := filepath.Join(migrationSqlFolder, fmt.Sprintf("%s.sql", migrationVersion))
+		content, err := os.ReadFile(sqlFile)
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+
+		sqlCommands := strings.Split(string(content), ";")
+		for _, command := range sqlCommands {
+			if strings.TrimSpace(command) == "" {
+				continue
+			}
+			if err := tx.Exec(command).Error; err != nil {
+				tx.Rollback()
+				return fmt.Errorf("failed to apply migration %s: %w", migrationVersion, err)
+			}
+		}
+		updated = true
+	}
+	if updated {
+		currentVersion.Version = migrations[len(migrations)-1]
+		if err := tx.Save(&currentVersion).Error; err != nil {
+			tx.Rollback()
+			return err
+		}
+	}
+	return tx.Commit().Error
+}
diff --git a/apps/jan-api-gateway/application/app/infrastructure/database/migrationsqls/000001.sql b/apps/jan-api-gateway/application/app/infrastructure/database/migrationsqls/000001.sql
new file mode 100644
index 00000000..85a77f81
--- /dev/null
+++ b/apps/jan-api-gateway/application/app/infrastructure/database/migrationsqls/000001.sql
@@ -0,0 +1,177 @@
+-- Create "api_key" table
+CREATE TABLE "public"."api_key" (
"id" bigserial NOT NULL, + "created_at" timestamptz NULL, + "updated_at" timestamptz NULL, + "deleted_at" timestamptz NULL, + "name" character varying(100) NOT NULL, + "email" character varying(255) NOT NULL, + "public_id" character varying(50) NOT NULL, + "enabled" boolean NULL, + PRIMARY KEY ("id") +); +-- Create index "idx_user_deleted_at" to table: "user" +CREATE INDEX "idx_user_deleted_at" ON "public"."user" ("deleted_at"); +-- Create index "idx_user_email" to table: "user" +CREATE UNIQUE INDEX "idx_user_email" ON "public"."user" ("email"); +-- Create index "idx_user_public_id" to table: "user" +CREATE UNIQUE INDEX "idx_user_public_id" ON "public"."user" ("public_id"); +-- Create "conversation" table +CREATE TABLE "public"."conversation" ( + "id" bigserial NOT NULL, + "created_at" timestamptz NULL, + "updated_at" timestamptz NULL, + "deleted_at" timestamptz NULL, + "public_id" character varying(50) NOT NULL, + "title" character varying(255) NULL, + "user_id" bigint NOT NULL, + "status" character varying(20) NOT NULL DEFAULT 'active', + "metadata" text NULL, + "is_private" boolean NOT NULL DEFAULT true, + PRIMARY KEY ("id"), + CONSTRAINT "fk_conversation_user" FOREIGN KEY ("user_id") REFERENCES "public"."user" ("id") ON UPDATE NO ACTION ON DELETE NO ACTION +); +-- Create index "idx_conversation_deleted_at" to table: "conversation" +CREATE INDEX "idx_conversation_deleted_at" ON "public"."conversation" ("deleted_at"); +-- Create index "idx_conversation_public_id" to table: "conversation" +CREATE UNIQUE INDEX "idx_conversation_public_id" ON "public"."conversation" ("public_id"); +-- Create index "idx_conversation_user_id" to table: "conversation" +CREATE INDEX "idx_conversation_user_id" ON "public"."conversation" ("user_id"); +-- Create "item" table +CREATE TABLE "public"."item" ( + "id" bigserial NOT NULL, + "created_at" timestamptz NULL, + "updated_at" timestamptz NULL, + "deleted_at" timestamptz NULL, + "public_id" character varying(50) NOT NULL, + "conversation_id" bigint NOT NULL, + "type" character varying(50) NOT NULL, + "role" character varying(20) NULL, + "content" text NULL, + "status" character varying(50) NULL, + "incomplete_at" bigint NULL, + "incomplete_details" text NULL, + "completed_at" bigint NULL, + PRIMARY KEY ("id"), + CONSTRAINT "fk_conversation_items" FOREIGN KEY ("conversation_id") REFERENCES "public"."conversation" ("id") ON UPDATE NO ACTION ON DELETE NO ACTION +); +-- Create index "idx_item_conversation_id" to table: "item" +CREATE INDEX "idx_item_conversation_id" ON "public"."item" ("conversation_id"); +-- Create index "idx_item_deleted_at" to table: "item" +CREATE INDEX "idx_item_deleted_at" ON "public"."item" ("deleted_at"); +-- Create index "idx_item_public_id" to table: "item" +CREATE UNIQUE INDEX "idx_item_public_id" ON "public"."item" ("public_id"); +-- Create "organization" table +CREATE TABLE "public"."organization" ( + "id" bigserial NOT NULL, + "created_at" timestamptz NULL, + "updated_at" timestamptz NULL, + "deleted_at" timestamptz NULL, + "name" character varying(128) NOT NULL, + "public_id" character varying(64) NOT NULL, + "enabled" boolean NULL DEFAULT true, + "owner_id" bigint NOT NULL, + PRIMARY KEY ("id"), + CONSTRAINT "fk_organization_owner" FOREIGN KEY ("owner_id") REFERENCES "public"."user" ("id") ON UPDATE CASCADE ON DELETE SET NULL +); +-- Create index "idx_organization_deleted_at" to table: "organization" +CREATE INDEX "idx_organization_deleted_at" ON "public"."organization" ("deleted_at"); +-- Create index "idx_organization_enabled" to 
table: "organization" +CREATE INDEX "idx_organization_enabled" ON "public"."organization" ("enabled"); +-- Create index "idx_organization_name" to table: "organization" +CREATE UNIQUE INDEX "idx_organization_name" ON "public"."organization" ("name"); +-- Create index "idx_organization_owner_id" to table: "organization" +CREATE INDEX "idx_organization_owner_id" ON "public"."organization" ("owner_id"); +-- Create index "idx_organization_public_id" to table: "organization" +CREATE UNIQUE INDEX "idx_organization_public_id" ON "public"."organization" ("public_id"); +-- Create "organization_member" table +CREATE TABLE "public"."organization_member" ( + "id" bigserial NOT NULL, + "created_at" timestamptz NULL, + "updated_at" timestamptz NULL, + "deleted_at" timestamptz NULL, + "user_id" bigint NOT NULL, + "organization_id" bigint NOT NULL, + "role" character varying(20) NOT NULL, + PRIMARY KEY ("id", "user_id", "organization_id"), + CONSTRAINT "fk_organization_members" FOREIGN KEY ("organization_id") REFERENCES "public"."organization" ("id") ON UPDATE NO ACTION ON DELETE NO ACTION, + CONSTRAINT "fk_user_organizations" FOREIGN KEY ("user_id") REFERENCES "public"."user" ("id") ON UPDATE NO ACTION ON DELETE NO ACTION +); +-- Create index "idx_organization_member_deleted_at" to table: "organization_member" +CREATE INDEX "idx_organization_member_deleted_at" ON "public"."organization_member" ("deleted_at"); +-- Create "project" table +CREATE TABLE "public"."project" ( + "id" bigserial NOT NULL, + "created_at" timestamptz NULL, + "updated_at" timestamptz NULL, + "deleted_at" timestamptz NULL, + "name" character varying(128) NOT NULL, + "public_id" character varying(50) NOT NULL, + "status" character varying(20) NOT NULL DEFAULT 'active', + "organization_id" bigint NOT NULL, + "archived_at" timestamptz NULL, + PRIMARY KEY ("id") +); +-- Create index "idx_project_archived_at" to table: "project" +CREATE INDEX "idx_project_archived_at" ON "public"."project" ("archived_at"); +-- Create index "idx_project_deleted_at" to table: "project" +CREATE INDEX "idx_project_deleted_at" ON "public"."project" ("deleted_at"); +-- Create index "idx_project_name" to table: "project" +CREATE UNIQUE INDEX "idx_project_name" ON "public"."project" ("name"); +-- Create index "idx_project_organization_id" to table: "project" +CREATE INDEX "idx_project_organization_id" ON "public"."project" ("organization_id"); +-- Create index "idx_project_public_id" to table: "project" +CREATE UNIQUE INDEX "idx_project_public_id" ON "public"."project" ("public_id"); +-- Create index "idx_project_status" to table: "project" +CREATE INDEX "idx_project_status" ON "public"."project" ("status"); +-- Create "project_member" table +CREATE TABLE "public"."project_member" ( + "id" bigserial NOT NULL, + "created_at" timestamptz NULL, + "updated_at" timestamptz NULL, + "deleted_at" timestamptz NULL, + "user_id" bigint NOT NULL, + "project_id" bigint NOT NULL, + "role" character varying(20) NOT NULL, + PRIMARY KEY ("id", "user_id", "project_id"), + CONSTRAINT "fk_project_members" FOREIGN KEY ("project_id") REFERENCES "public"."project" ("id") ON UPDATE NO ACTION ON DELETE NO ACTION, + CONSTRAINT "fk_user_projects" FOREIGN KEY ("user_id") REFERENCES "public"."user" ("id") ON UPDATE NO ACTION ON DELETE NO ACTION +); +-- Create index "idx_project_member_deleted_at" to table: "project_member" +CREATE INDEX "idx_project_member_deleted_at" ON "public"."project_member" ("deleted_at"); diff --git 
diff --git a/apps/jan-api-gateway/application/app/infrastructure/database/migrationsqls/000002.sql b/apps/jan-api-gateway/application/app/infrastructure/database/migrationsqls/000002.sql
new file mode 100644
index 00000000..f7d443c8
--- /dev/null
+++ b/apps/jan-api-gateway/application/app/infrastructure/database/migrationsqls/000002.sql
@@ -0,0 +1,106 @@
+-- Drop index "idx_api_key_owner_public_id" from table: "api_key"
+DROP INDEX "public"."idx_api_key_owner_public_id";
+-- Create index "idx_conversation_is_private" to table: "conversation"
+CREATE INDEX "idx_conversation_is_private" ON "public"."conversation" ("is_private");
+-- Create index "idx_conversation_status" to table: "conversation"
+CREATE INDEX "idx_conversation_status" ON "public"."conversation" ("status");
+-- Drop index "idx_organization_name" from table: "organization"
+DROP INDEX "public"."idx_organization_name";
+-- Modify "organization_member" table
+ALTER TABLE "public"."organization_member" ADD COLUMN "is_primary" boolean NULL DEFAULT false;
+-- Drop index "idx_project_name" from table: "project"
+DROP INDEX "public"."idx_project_name";
+-- Create "invite" table
+CREATE TABLE "public"."invite" (
+  "id" bigserial NOT NULL,
+  "created_at" timestamptz NULL,
+  "updated_at" timestamptz NULL,
+  "deleted_at" timestamptz NULL,
+  "public_id" character varying(64) NOT NULL,
+  "email" character varying(128) NOT NULL,
+  "role" character varying(20) NOT NULL,
+  "status" character varying(20) NOT NULL,
+  "invited_at" timestamptz NULL,
+  "expires_at" timestamptz NULL,
+  "accepted_at" timestamptz NULL,
+  "secrets" text NULL,
+  "projects" jsonb NULL,
+  "organization_id" bigint NOT NULL,
+  PRIMARY KEY ("id")
+);
+-- Create index "idx_invite_deleted_at" to table: "invite"
+CREATE INDEX "idx_invite_deleted_at" ON "public"."invite" ("deleted_at");
+-- Create index "idx_invite_organization_id" to table: "invite"
+CREATE INDEX "idx_invite_organization_id" ON "public"."invite" ("organization_id");
+-- Create index "idx_invite_public_id" to table: "invite"
+CREATE UNIQUE INDEX "idx_invite_public_id" ON "public"."invite" ("public_id");
+-- Create index "idx_invite_status" to table: "invite"
+CREATE INDEX "idx_invite_status" ON "public"."invite" ("status");
+-- Modify "user" table
+ALTER TABLE "public"."user" ADD COLUMN "is_guest" boolean NULL;
+-- Create "responses" table
+CREATE TABLE "public"."responses" (
+  "id" bigserial NOT NULL,
+  "created_at" timestamptz NULL,
+  "updated_at" timestamptz NULL,
+  "deleted_at" timestamptz NULL,
+  "public_id" character varying(255) NOT NULL,
+  "user_id" bigint NOT NULL,
+  "conversation_id" bigint NULL,
+  "previous_response_id" character varying(255) NULL,
+  "model" character varying(255) NOT NULL,
+  "status" character varying(50) NOT NULL DEFAULT 'pending',
+  "input" text NOT NULL,
+  "output" text NULL,
+  "system_prompt" text NULL,
+  "max_tokens" bigint NULL,
+  "temperature" numeric NULL,
+  "top_p" numeric NULL,
+  "top_k" bigint NULL,
+  "repetition_penalty" numeric NULL,
+  "seed" bigint NULL,
+  "stop" text NULL,
+  "presence_penalty" numeric NULL,
+  "frequency_penalty" numeric NULL,
+  "logit_bias" text NULL,
+  "response_format" text NULL,
+  "tools" text NULL,
+  "tool_choice" text NULL,
+  "metadata" text NULL,
+  "stream" boolean NULL,
+  "background" boolean NULL,
+  "timeout" bigint NULL,
+  "user" character varying(255) NULL,
+  "usage" text NULL,
+  "error" text NULL,
+  "completed_at" timestamptz NULL,
+  "cancelled_at" timestamptz NULL,
+  "failed_at" timestamptz NULL,
+  PRIMARY KEY ("id"),
+  CONSTRAINT "fk_responses_conversation" FOREIGN KEY ("conversation_id") REFERENCES "public"."conversation" ("id") ON UPDATE NO ACTION ON DELETE NO ACTION,
+  CONSTRAINT "fk_responses_user_entity" FOREIGN KEY ("user_id") REFERENCES "public"."user" ("id") ON UPDATE NO ACTION ON DELETE NO ACTION
+);
+-- Create index "idx_responses_conversation_id" to table: "responses"
+CREATE INDEX "idx_responses_conversation_id" ON "public"."responses" ("conversation_id");
+-- Create index "idx_responses_deleted_at" to table: "responses"
+CREATE INDEX "idx_responses_deleted_at" ON "public"."responses" ("deleted_at");
+-- Create index "idx_responses_model" to table: "responses"
+CREATE INDEX "idx_responses_model" ON "public"."responses" ("model");
+-- Create index "idx_responses_previous_response_id" to table: "responses"
+CREATE INDEX "idx_responses_previous_response_id" ON "public"."responses" ("previous_response_id");
+-- Create index "idx_responses_public_id" to table: "responses"
+CREATE UNIQUE INDEX "idx_responses_public_id" ON "public"."responses" ("public_id");
+-- Create index "idx_responses_status" to table: "responses"
+CREATE INDEX "idx_responses_status" ON "public"."responses" ("status");
+-- Create index "idx_responses_user_id" to table: "responses"
+CREATE INDEX "idx_responses_user_id" ON "public"."responses" ("user_id");
+-- Modify "item" table
+ALTER TABLE "public"."item" ALTER COLUMN "incomplete_at" TYPE timestamp, ALTER COLUMN "completed_at" TYPE timestamp, ADD COLUMN "response_id" bigint NULL, ADD CONSTRAINT "fk_responses_items" FOREIGN KEY ("response_id") REFERENCES "public"."responses" ("id") ON UPDATE NO ACTION ON DELETE NO ACTION;
+-- Create index "idx_item_response_id" to table: "item"
+CREATE INDEX "idx_item_response_id" ON "public"."item" ("response_id");
+-- Create index "idx_item_role" to table: "item"
+CREATE INDEX "idx_item_role" ON "public"."item" ("role");
+-- Create index "idx_item_status" to table: "item"
+CREATE INDEX "idx_item_status" ON "public"."item" ("status");
+-- Create index "idx_item_type" to table: "item"
+CREATE INDEX "idx_item_type" ON "public"."item" ("type");
diff --git a/apps/jan-api-gateway/application/cmd/codegen/dbmigration/dbmigration.go b/apps/jan-api-gateway/application/cmd/codegen/dbmigration/dbmigration.go
index fae83b64..c4c7b4dc 100644
--- a/apps/jan-api-gateway/application/cmd/codegen/dbmigration/dbmigration.go
+++ b/apps/jan-api-gateway/application/cmd/codegen/dbmigration/dbmigration.go
@@ -31,6 +31,7 @@ func generateHcl(branchName string) {
 		log.Fatalf("failed to create schema: %v", err)
 		return
 	}
+	db.AutoMigrate(database.DatabaseMigration{})
 	for _, model := range database.SchemaRegistry {
 		err = db.AutoMigrate(model)
 		if err != nil {
diff --git a/apps/jan-api-gateway/application/cmd/server/server.go b/apps/jan-api-gateway/application/cmd/server/server.go
index c17f18f9..c17968de 100644
--- a/apps/jan-api-gateway/application/cmd/server/server.go
+++ b/apps/jan-api-gateway/application/cmd/server/server.go
@@ -9,6 +9,7 @@ import (
 	"github.com/mileusna/crontab"
 
 	"menlo.ai/jan-api-gateway/app/domain/healthcheck"
+	"menlo.ai/jan-api-gateway/app/infrastructure/database"
 	apphttp "menlo.ai/jan-api-gateway/app/interfaces/http"
 	janinference "menlo.ai/jan-api-gateway/app/utils/httpclients/jan_inference"
 	"menlo.ai/jan-api-gateway/app/utils/httpclients/serper"
@@ -62,5 +63,9 @@ func main() {
 	if err != nil {
 		panic(err)
 	}
+	err = database.Migration()
+	if err != nil {
+		panic(err)
+	}
 	application.Start()
 }