@@ -102,7 +102,7 @@ def get_existing_object_id(db, *, host: str, bucket: str, key: str) -> int | None
return db.run_query(
"""
SELECT id
-FROM storage.object
+FROM storage.objects
WHERE scheme = 's3'
AND host = :host
AND bucket = :bucket
@@ -123,7 +123,7 @@ def insert_storage_object(
) -> int:
db.run_sql(
"""
-INSERT INTO storage.object
+INSERT INTO storage.objects
(scheme, host, bucket, key, source, sha256_hash, mime_type)
VALUES
('s3', :host, :bucket, :key, :source, :sha256, :mime_type)
@@ -139,7 +139,7 @@ def insert_storage_object(
)
object_id = get_existing_object_id(db, host=host, bucket=bucket, key=key)
if object_id is None:
-raise RuntimeError("Failed to retrieve storage.object id after insert")
+raise RuntimeError("Failed to retrieve storage.objects id after insert")
return object_id


@@ -187,7 +187,7 @@ def staging_upload_dir(
ingest_process_id: int,
) -> dict:
"""
-Upload local files to S3 via MinIO and register them in storage.object.
+Upload local files to S3 via MinIO and register them in storage.objects.
Always uploads a single zip archive for the provided data_path.
"""
if ingest_process_id is None:
@@ -278,7 +278,7 @@ def get_objects_for_slug(db, *, host: str, bucket: str, slug: str) -> list[dict]
db.run_query(
"""
SELECT id, key
-FROM storage.object
+FROM storage.objects
WHERE scheme = 's3'
AND host = :host
AND bucket = :bucket
@@ -309,7 +309,7 @@ def unlink_object_from_ingests(db, *, object_id: int) -> None:
def delete_storage_object(db, *, object_id: int) -> None:
db.run_sql(
"""
-DELETE FROM storage.object
+DELETE FROM storage.objects
WHERE id = :id
""",
dict(id=object_id),
@@ -409,7 +409,7 @@ def staging_list_dir(
db.run_query(
"""
SELECT id, key
-FROM storage.object
+FROM storage.objects
WHERE scheme = 's3'
AND host = :host
AND bucket = :bucket
@@ -460,7 +460,7 @@ def staging_download_dir(db, slug: str, dest_path: Path) -> dict:
db.run_query(
"""
SELECT id, key
-FROM storage.object
+FROM storage.objects
WHERE scheme = 's3'
AND host = :host
AND bucket = :bucket
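A minimal usage sketch of the two helpers touched above, assuming a db wrapper exposing run_query/run_sql as in the diff; the host, bucket, key, and any keyword-argument names not visible in the hunks are illustrative, not taken from the PR.

    # Hypothetical call sequence; values are placeholders, not from the PR.
    object_id = get_existing_object_id(
        db, host="minio.example.internal", bucket="map-staging", key="my-map/data.zip"
    )
    if object_id is None:
        # insert_storage_object writes the row into storage.objects and then
        # re-queries get_existing_object_id to return the new id.
        object_id = insert_storage_object(
            db,
            host="minio.example.internal",
            bucket="map-staging",
            key="my-map/data.zip",
            source={"tool": "staging_upload_dir"},      # assumed shape
            sha256="<sha256 of the uploaded archive>",  # assumed parameter name
            mime_type="application/zip",
        )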
2 changes: 1 addition & 1 deletion py-modules/map-integration/tests/test_map_staging.py
@@ -111,7 +111,7 @@ def test_map_staging(test_db, test_maps):
)

# object_group_id is a foreign key into the storage schema where the curr user postgres does not have access to.
-# the storage.sql ALTER TABLE storage.object OWNER TO macrostrat is switching the owner.
+# the storage.sql ALTER TABLE storage.objects OWNER TO macrostrat is switching the owner.
# we are temporarily using macrostrat to run the query below
object_group_id = db.run_query(
"INSERT INTO storage.object_group DEFAULT VALUES RETURNING id"
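One way the temporary ownership workaround described in the comment above could look, sketched against the same db fixture; the SET ROLE/RESET ROLE calls are an assumption, and the actual test may instead open a separate connection as macrostrat.

    # Hypothetical sketch only: run the insert as macrostrat, since the
    # storage schema is owned by macrostrat and the default postgres user
    # cannot write to it.
    db.run_sql("SET ROLE macrostrat")
    object_group_id = db.run_query(
        "INSERT INTO storage.object_group DEFAULT VALUES RETURNING id"
    )
    db.run_sql("RESET ROLE")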
4 changes: 2 additions & 2 deletions schema/_migrations/api_v3/08-storage.sql
@@ -13,7 +13,7 @@ CREATE TYPE storage.scheme AS ENUM (

ALTER TYPE storage.scheme OWNER TO macrostrat;

-CREATE TABLE IF NOT EXISTS storage.object (
+CREATE TABLE IF NOT EXISTS storage.objects (
id integer GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
scheme storage.scheme NOT NULL,
host character varying(255) NOT NULL,
@@ -28,7 +28,7 @@ CREATE TABLE IF NOT EXISTS storage.object (
);


-ALTER TABLE storage.object OWNER TO macrostrat;
+ALTER TABLE storage.objects OWNER TO macrostrat;

GRANT ALL ON DATABASE macrostrat TO postgrest;

2 changes: 1 addition & 1 deletion schema/_migrations/file_storage_updates/__init__.py
@@ -17,7 +17,7 @@ class FileStorageUpdates(Migration):
exists("maps_metadata", "ingest_process"),
]
postconditions = [
-# storage.object no longer has object_group_id
+# storage.objects no longer has object_group_id
has_columns(
"storage",
"object",
@@ -1,4 +1,4 @@
-ALTER TABLE storage.object DROP COLUMN IF EXISTS object_group_id;
+ALTER TABLE storage.objects DROP COLUMN IF EXISTS object_group_id;
DROP TABLE IF EXISTS storage.object_group CASCADE;
CREATE TABLE IF NOT EXISTS maps_metadata.map_files (
id serial PRIMARY KEY,
4 changes: 2 additions & 2 deletions schema/_migrations/storage_schema.py
@@ -16,9 +16,9 @@ def apply(self, db: Database):
ALTER TYPE storage.scheme ADD VALUE 'http' AFTER 'https';

-- Lock the table to prevent concurrent updates
-LOCK TABLE storage.object IN ACCESS EXCLUSIVE MODE;
+LOCK TABLE storage.objects IN ACCESS EXCLUSIVE MODE;

-ALTER TABLE storage.object
+ALTER TABLE storage.objects
ALTER COLUMN scheme
TYPE storage.scheme USING scheme::text::storage.scheme;

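A hypothetical post-migration check, not part of the PR: after the locked ALTER above, the scheme column should report the storage.scheme enum type. The query uses the standard Postgres information_schema catalog.

    # Hypothetical verification query; not part of the migration itself.
    db.run_query(
        """
        SELECT udt_schema, udt_name
        FROM information_schema.columns
        WHERE table_schema = 'storage'
          AND table_name = 'objects'
          AND column_name = 'scheme'
        """
    )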
10 changes: 5 additions & 5 deletions schema/core/0002-storage.sql
@@ -11,7 +11,7 @@ ALTER TYPE storage.scheme OWNER TO macrostrat;
SET default_tablespace = '';
SET default_table_access_method = heap;

-CREATE TABLE storage.object (
+CREATE TABLE storage.objects (
id integer NOT NULL,
scheme storage.scheme NOT NULL,
host character varying(255) NOT NULL,
@@ -24,7 +24,7 @@ CREATE TABLE storage.object (
updated_on timestamp with time zone DEFAULT now() NOT NULL,
deleted_on timestamp with time zone
);
-ALTER TABLE storage.object OWNER TO macrostrat;
+ALTER TABLE storage.objects OWNER TO macrostrat;

CREATE SEQUENCE storage.object_id_seq
AS integer
@@ -37,11 +37,11 @@ ALTER TABLE storage.object_id_seq OWNER TO macrostrat;

ALTER SEQUENCE storage.object_id_seq OWNED BY storage.object.id;

-ALTER TABLE ONLY storage.object ALTER COLUMN id SET DEFAULT nextval('storage.object_id_seq'::regclass);
+ALTER TABLE ONLY storage.objects ALTER COLUMN id SET DEFAULT nextval('storage.objects _id_seq'::regclass);

-ALTER TABLE ONLY storage.object
+ALTER TABLE ONLY storage.objects
ADD CONSTRAINT object_pkey PRIMARY KEY (id);

-ALTER TABLE ONLY storage.object
+ALTER TABLE ONLY storage.objects
ADD CONSTRAINT unique_file UNIQUE (scheme, host, bucket, key);

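A hypothetical sanity check, not part of the PR: the rename keeps the existing constraint names (object_pkey, unique_file) on the renamed table, which can be confirmed against the standard pg_constraint catalog.

    # Hypothetical check that storage.objects still carries the original
    # constraint names after the rename.
    db.run_query(
        """
        SELECT conname, contype
        FROM pg_constraint
        WHERE conrelid = 'storage.objects'::regclass
        """
    )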