Skip to content

Commit f2d19d1

Browse files
committed
tweaking some of the object workflows like pagination, table names, and further todos
1 parent 246ba52 commit f2d19d1

File tree

9 files changed

+85
-289
lines changed

9 files changed

+85
-289
lines changed

py-modules/map-integration/macrostrat/map_integration/utils/s3_file_management.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ def get_existing_object_id(db, *, host: str, bucket: str, key: str) -> int | Non
102102
return db.run_query(
103103
"""
104104
SELECT id
105-
FROM storage.object
105+
FROM storage.objects
106106
WHERE scheme = 's3'
107107
AND host = :host
108108
AND bucket = :bucket
@@ -123,7 +123,7 @@ def insert_storage_object(
123123
) -> int:
124124
db.run_sql(
125125
"""
126-
INSERT INTO storage.object
126+
INSERT INTO storage.objects
127127
(scheme, host, bucket, key, source, sha256_hash, mime_type)
128128
VALUES
129129
('s3', :host, :bucket, :key, :source, :sha256, :mime_type)
@@ -139,7 +139,7 @@ def insert_storage_object(
139139
)
140140
object_id = get_existing_object_id(db, host=host, bucket=bucket, key=key)
141141
if object_id is None:
142-
raise RuntimeError("Failed to retrieve storage.object id after insert")
142+
raise RuntimeError("Failed to retrieve storage.objects id after insert")
143143
return object_id
144144

145145

@@ -187,7 +187,7 @@ def staging_upload_dir(
187187
ingest_process_id: int,
188188
) -> dict:
189189
"""
190-
Upload local files to S3 via MinIO and register them in storage.object.
190+
Upload local files to S3 via MinIO and register them in storage.objects.
191191
Always uploads a single zip archive for the provided data_path.
192192
"""
193193
if ingest_process_id is None:
@@ -278,7 +278,7 @@ def get_objects_for_slug(db, *, host: str, bucket: str, slug: str) -> list[dict]
278278
db.run_query(
279279
"""
280280
SELECT id, key
281-
FROM storage.object
281+
FROM storage.objects
282282
WHERE scheme = 's3'
283283
AND host = :host
284284
AND bucket = :bucket
@@ -309,7 +309,7 @@ def unlink_object_from_ingests(db, *, object_id: int) -> None:
309309
def delete_storage_object(db, *, object_id: int) -> None:
310310
db.run_sql(
311311
"""
312-
DELETE FROM storage.object
312+
DELETE FROM storage.objects
313313
WHERE id = :id
314314
""",
315315
dict(id=object_id),
@@ -409,7 +409,7 @@ def staging_list_dir(
409409
db.run_query(
410410
"""
411411
SELECT id, key
412-
FROM storage.object
412+
FROM storage.objects
413413
WHERE scheme = 's3'
414414
AND host = :host
415415
AND bucket = :bucket
@@ -460,7 +460,7 @@ def staging_download_dir(db, slug: str, dest_path: Path) -> dict:
460460
db.run_query(
461461
"""
462462
SELECT id, key
463-
FROM storage.object
463+
FROM storage.objects
464464
WHERE scheme = 's3'
465465
AND host = :host
466466
AND bucket = :bucket

py-modules/map-integration/tests/test_map_staging.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -111,7 +111,7 @@ def test_map_staging(test_db, test_maps):
111111
)
112112

113113
# object_group_id is a foreign key into the storage schema, which the current user (postgres) does not have access to.
114-
# the storage.sql ALTER TABLE storage.object OWNER TO macrostrat is switching the owner.
114+
# the storage.sql ALTER TABLE storage.objects OWNER TO macrostrat is switching the owner.
115115
# we are temporarily using macrostrat to run the query below
116116
object_group_id = db.run_query(
117117
"INSERT INTO storage.object_group DEFAULT VALUES RETURNING id"

schema/_migrations/api_v3/08-storage.sql

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ CREATE TYPE storage.scheme AS ENUM (
1313

1414
ALTER TYPE storage.scheme OWNER TO macrostrat;
1515

16-
CREATE TABLE IF NOT EXISTS storage.object (
16+
CREATE TABLE IF NOT EXISTS storage.objects (
1717
id integer GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
1818
scheme storage.scheme NOT NULL,
1919
host character varying(255) NOT NULL,
@@ -28,7 +28,7 @@ CREATE TABLE IF NOT EXISTS storage.object (
2828
);
2929

3030

31-
ALTER TABLE storage.object OWNER TO macrostrat;
31+
ALTER TABLE storage.objects OWNER TO macrostrat;
3232

3333
GRANT ALL ON DATABASE macrostrat TO postgrest;
3434

schema/_migrations/file_storage_updates/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ class FileStorageUpdates(Migration):
1717
exists("maps_metadata", "ingest_process"),
1818
]
1919
postconditions = [
20-
# storage.object no longer has object_group_id
20+
# storage.objects no longer has object_group_id
2121
has_columns(
2222
"storage",
2323
"objects",

schema/_migrations/file_storage_updates/map-files-table.sql

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
ALTER TABLE storage.object DROP COLUMN IF EXISTS object_group_id;
1+
ALTER TABLE storage.objects DROP COLUMN IF EXISTS object_group_id;
22
DROP TABLE IF EXISTS storage.object_group CASCADE;
33
CREATE TABLE IF NOT EXISTS maps_metadata.map_files (
44
id serial PRIMARY KEY,

schema/_migrations/storage_schema.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,9 @@ def apply(self, db: Database):
1616
ALTER TYPE storage.scheme ADD VALUE 'http' AFTER 'https';
1717
1818
-- Lock the table to prevent concurrent updates
19-
LOCK TABLE storage.object IN ACCESS EXCLUSIVE MODE;
19+
LOCK TABLE storage.objects IN ACCESS EXCLUSIVE MODE;
2020
21-
ALTER TABLE storage.object
21+
ALTER TABLE storage.objects
2222
ALTER COLUMN scheme
2323
TYPE storage.scheme USING scheme::text::storage.scheme;
2424

schema/core/0002-storage.sql

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ ALTER TYPE storage.scheme OWNER TO macrostrat;
1111
SET default_tablespace = '';
1212
SET default_table_access_method = heap;
1313

14-
CREATE TABLE storage.object (
14+
CREATE TABLE storage.objects (
1515
id integer NOT NULL,
1616
scheme storage.scheme NOT NULL,
1717
host character varying(255) NOT NULL,
@@ -24,7 +24,7 @@ CREATE TABLE storage.object (
2424
updated_on timestamp with time zone DEFAULT now() NOT NULL,
2525
deleted_on timestamp with time zone
2626
);
27-
ALTER TABLE storage.object OWNER TO macrostrat;
27+
ALTER TABLE storage.objects OWNER TO macrostrat;
2828

2929
CREATE SEQUENCE storage.object_id_seq
3030
AS integer
@@ -37,11 +37,11 @@ ALTER TABLE storage.object_id_seq OWNER TO macrostrat;
3737

3838
ALTER SEQUENCE storage.object_id_seq OWNED BY storage.objects.id;
3939

40-
ALTER TABLE ONLY storage.object ALTER COLUMN id SET DEFAULT nextval('storage.object_id_seq'::regclass);
40+
ALTER TABLE ONLY storage.objects ALTER COLUMN id SET DEFAULT nextval('storage.object_id_seq'::regclass);
4141

42-
ALTER TABLE ONLY storage.object
42+
ALTER TABLE ONLY storage.objects
4343
ADD CONSTRAINT object_pkey PRIMARY KEY (id);
4444

45-
ALTER TABLE ONLY storage.object
45+
ALTER TABLE ONLY storage.objects
4646
ADD CONSTRAINT unique_file UNIQUE (scheme, host, bucket, key);
4747

0 commit comments

Comments
 (0)