Commit 17cc174

Updates Case Study notebooks to make sure collections work as needed.
1 parent fff2072 commit 17cc174

7 files changed: +487 −128 lines changed
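The substantive change in both notebooks is the collection identifier passed to the PySTAC search ("OPERA_L3_DSWX-HLS_V1" becomes "OPERA_L3_DSWX-HLS_V1_1.0"), along with removal of a redundant Client.open call. For orientation, a minimal sketch of the search pattern these notebooks rely on after this commit; it assumes pystac-client and shapely are installed, and the STAC_URL value, coordinates, and date range below are illustrative placeholders defined outside the hunks shown here:

from datetime import datetime

from pystac_client import Client
from shapely.geometry import Point

# Assumed endpoint: NASA's CMR-STAC root; the notebooks define STAC_URL elsewhere.
STAC_URL = 'https://cmr.earthdata.nasa.gov/stac'

# POCLOUD refers to the PO.DAAC cloud provider catalog that hosts the OPERA DSWx granules.
catalog = Client.open(f'{STAC_URL}/POCLOUD/')

livingston_tx_lonlat = (-95.0, 30.7)  # placeholder coordinates, for illustration only

search_opts = {
    'bbox': Point(*livingston_tx_lonlat).buffer(0.01).bounds,
    'collections': ["OPERA_L3_DSWX-HLS_V1_1.0"],  # updated collection ID from this commit
    'datetime': (datetime(2024, 4, 1), datetime(2024, 5, 31)),  # placeholder date range
}
results = list(catalog.search(**search_opts).items())
print(f"{len(results)} granules found")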

04_Case_Studies/1a-Flooding-case-study.ipynb

+73 −19
@@ -18,7 +18,11 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "from warnings import filterwarnings\n",
@@ -55,7 +59,11 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "# Study location\n",
@@ -69,7 +77,11 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "# Visualize location of area of study\n",
@@ -92,9 +104,14 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
+ "%%time\n",
  "# The flooding event primarily happened during 04/30 - 05/02\n",
  "# We will search for data before and after the event\n",
  "start_date = datetime(year=2024, month=4, day=1)\n",
@@ -106,12 +123,8 @@
  "\n",
  "# Setup PySTAC client\n",
  "# POCLOUD refers to the PO DAAC cloud environment that hosts earth observation data\n",
- "catalog = Client.open(f'{STAC_URL}/POCLOUD/') \n",
- "\n",
- "# Setup PySTAC client\n",
- "provider_cat = Client.open(STAC_URL)\n",
  "catalog = Client.open(f'{STAC_URL}/POCLOUD/')\n",
- "collections = [\"OPERA_L3_DSWX-HLS_V1\"]\n",
+ "collections = [\"OPERA_L3_DSWX-HLS_V1_1.0\"]\n",
  "\n",
  "search_opts = {\n",
  " 'bbox' : Point(*livingston_tx_lonlat).buffer(0.01).bounds, \n",
@@ -127,7 +140,11 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "def search_to_df(results, layer_name):\n",
@@ -150,16 +167,24 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
- "granules = search_to_df(results=results, layer_name='0_B01_WTR')"
+ "%time granules = search_to_df(results=results, layer_name='0_B01_WTR')"
  ]
  },
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "# We now filter the dataframe to restrict our results to a single tile_id\n",
@@ -171,7 +196,11 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "def urls_to_dataset(granule_dataframe):\n",
@@ -225,16 +254,37 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
- "dataset= urls_to_dataset(granules)"
+ "%time dataset= urls_to_dataset(granules)"
  ]
  },
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
+ "outputs": [],
+ "source": [
+ "dataset # Examine the attributes of the dataset object"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "# Define a colormap\n",
@@ -247,7 +297,11 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "img = dataset.hvplot.image(title = 'DSWx data for May 2024 Texas floods',\n",
@@ -280,7 +334,7 @@
  "name": "python",
  "nbconvert_exporter": "python",
  "pygments_lexer": "ipython3",
- "version": "3.12.3"
+ "version": "3.12.4"
  }
  },
  "nbformat": 4,

04_Case_Studies/1b-BhakraNangal-Reservoir.ipynb

+57 −19
@@ -17,7 +17,11 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "from warnings import filterwarnings\n",
@@ -57,7 +61,11 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "bhakra_dam = (76.46, 31.42)\n",
@@ -70,7 +78,11 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "bhakra_dam_gv = gv.Points([bhakra_dam])\n",
@@ -92,9 +104,14 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
+ "%%time\n",
  "# We will query the DSWx product record to understand variations in water levels in the reservoir\n",
  "start_date = datetime(year=2023, month=4, day=1)\n",
  "stop_date = datetime(year=2024, month=4, day=1)\n",
@@ -105,12 +122,8 @@
  "\n",
  "# Setup PySTAC client\n",
  "# POCLOUD refers to the PO DAAC cloud environment that hosts earth observation data\n",
- "catalog = Client.open(f'{STAC_URL}/POCLOUD/') \n",
- "\n",
- "# Setup PySTAC client\n",
- "provider_cat = Client.open(STAC_URL)\n",
  "catalog = Client.open(f'{STAC_URL}/POCLOUD/')\n",
- "collections = [\"OPERA_L3_DSWX-HLS_V1\"]\n",
+ "collections = [\"OPERA_L3_DSWX-HLS_V1_1.0\"]\n",
  "\n",
  "# Setup search options\n",
  "opts = {\n",
@@ -128,7 +141,11 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "def filter_search_by_cc(results, cloud_threshold=10):\n",
@@ -160,9 +177,14 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
+ "%%time\n",
  "# let's filter our results so that only scenes with less than 10% cloud cover are returned\n",
  "results = filter_search_by_cc(results)\n",
  "\n",
@@ -172,7 +194,11 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "# Load results into dataframe\n",
@@ -182,16 +208,24 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
- "dataset= urls_to_dataset(granules)"
+ "%time dataset= urls_to_dataset(granules)"
  ]
  },
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "# Define a colormap\n",
@@ -204,7 +238,11 @@
  {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {},
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
  "outputs": [],
  "source": [
  "img = dataset.hvplot.image(title = 'Bhakra Nangal Dam, India - water extent over a year',\n",
@@ -225,7 +263,7 @@
  ],
  "metadata": {
  "kernelspec": {
- "display_name": "climaterisk",
+ "display_name": "Python 3 (ipykernel)",
  "language": "python",
  "name": "python3"
  },
@@ -239,9 +277,9 @@
  "name": "python",
  "nbconvert_exporter": "python",
  "pygments_lexer": "ipython3",
- "version": "3.12.3"
+ "version": "3.12.4"
  }
  },
  "nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
  }
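Apart from the collection fix, most of the added lines in both files are the same jupyter / source_hidden flag written into every code cell's metadata, which collapses the cell source by default in JupyterLab. A sketch of how a repeated metadata edit like this could be applied in one pass with nbformat (this script is not part of the commit; it assumes the nbformat package is installed):

import nbformat

def hide_code_cell_sources(path):
    nb = nbformat.read(path, as_version=4)
    for cell in nb.cells:
        if cell.cell_type == 'code':
            # Collapse the cell's source by default in JupyterLab.
            cell.metadata.setdefault('jupyter', {})['source_hidden'] = True
    nbformat.write(nb, path)

for nb_path in ('04_Case_Studies/1a-Flooding-case-study.ipynb',
                '04_Case_Studies/1b-BhakraNangal-Reservoir.ipynb'):
    hide_code_cell_sources(nb_path)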
