|
16 | 16 | "metadata": {}, |
17 | 17 | "outputs": [], |
18 | 18 | "source": [ |
19 | | - "from collections import Counter\n", |
20 | | - "import fsspec\n", |
21 | 19 | "import json\n", |
22 | | - "import os\n", |
23 | 20 | "\n", |
24 | | - "import matplotlib.pyplot as plt\n", |
| 21 | + "import fsspec\n", |
25 | 22 | "import numpy as np\n", |
26 | 23 | "import pandas as pd\n", |
27 | 24 | "import seaborn as sns\n", |
28 | | - "from sklearn.metrics import mean_squared_error\n", |
29 | | - "\n", |
30 | | - "from carbonplan_forest_offsets.analysis.project_crediting_error import get_slag_to_total_scalar\n", |
| 25 | + "from carbonplan_forest_offsets.analysis.project_crediting_error import (\n", |
| 26 | + " get_slag_to_total_scalar,\n", |
| 27 | + ")\n", |
31 | 28 | "from carbonplan_forest_offsets.data import cat\n", |
32 | | - "from carbonplan_forest_offsets.load.issuance import load_issuance_table, ifm_opr_ids" |
| 29 | + "from carbonplan_forest_offsets.load.issuance import ifm_opr_ids, load_issuance_table\n", |
| 30 | + "from sklearn.metrics import mean_squared_error" |
33 | 31 | ] |
34 | 32 | }, |
35 | 33 | { |
|
133 | 131 | "subsets = {\n", |
134 | 132 | " \"all\": np.tile(True, len(df)),\n", |
135 | 133 | " \"all_forest\": df[\"project_type\"] == \"forest\",\n", |
136 | | - " \"compliance_ifm\": (df[\"opr_id\"].isin(ifm_opr_ids)) & (df[\"Early Action/ Compliance\"] == \"COP\"),\n", |
| 134 | + " \"compliance_ifm\": (df[\"opr_id\"].isin(ifm_opr_ids))\n", |
| 135 | + " & (df[\"Early Action/ Compliance\"] == \"COP\"),\n", |
137 | 136 | " \"non_graduated_compliance_ifms\": (df[\"opr_id\"].isin(compliance_opr_ids))\n", |
138 | 137 | " & (df[\"Early Action/ Compliance\"] == \"COP\"),\n", |
139 | 138 | " \"upfront_ifm\": (df[\"opr_id\"].isin(upfront_opr_ids)) & (df[\"arb_rp_id\"].isin([\"A\"])),\n", |
|
202 | 201 | "source": [ |
203 | 202 | "sc_data = cat.rfia_all(assessment_area_id=297).read()\n", |
204 | 203 | "\n", |
205 | | - "sc_data = sc_data[sc_data[\"YEAR\"] == 2010].copy() # use 2010 because comporable to CP data\n", |
| 204 | + "sc_data = sc_data[\n", |
| 205 | + " sc_data[\"YEAR\"] == 2010\n", |
| 206 | + "].copy() # use 2010 because comporable to CP data\n", |
206 | 207 | "\n", |
207 | 208 | "\n", |
208 | 209 | "sc_data[\"CARB_ACRE\"] = sc_data[\"CARB_ACRE\"] * 44 / 12 * 0.907185" |
|
237 | 238 | "source": [ |
238 | 239 | "standing_carbon = {}\n", |
239 | 240 | "for k, v in fortyps_of_interest.items():\n", |
240 | | - " standing_carbon[k] = round(sc_data.loc[sc_data[\"FORTYPCD\"] == v, \"CARB_ACRE\"].item(), 1)\n", |
| 241 | + " standing_carbon[k] = round(\n", |
| 242 | + " sc_data.loc[sc_data[\"FORTYPCD\"] == v, \"CARB_ACRE\"].item(), 1\n", |
| 243 | + " )\n", |
241 | 244 | "display(standing_carbon)" |
242 | 245 | ] |
243 | 246 | }, |
|
258 | 261 | "source": [ |
259 | 262 | "# ak has three assessment areas but lets summarize across all to report inline value\n", |
260 | 263 | "ak_assessment_areas = [285, 286, 287]\n", |
261 | | - "ak_all = pd.concat([cat.rfia_all(assessment_area_id=aa_id).read() for aa_id in ak_assessment_areas])\n", |
| 264 | + "ak_all = pd.concat(\n", |
| 265 | + " [cat.rfia_all(assessment_area_id=aa_id).read() for aa_id in ak_assessment_areas]\n", |
| 266 | + ")\n", |
262 | 267 | "\n", |
263 | 268 | "ak_all = ak_all[ak_all[\"YEAR\"] == 2013].copy() # 2013 to match what used in CP\n", |
264 | 269 | "\n", |
|
340 | 345 | " ).round(1)\n", |
341 | 346 | "\n", |
342 | 347 | " as_frac = crediting_error / project[\"arbocs\"][\"calculated\"]\n", |
343 | | - " print(f\"{project['opr_id']} has a {crediting_error[1]} crediting error ({as_frac[1].round(3)})\")\n", |
| 348 | + " print(\n", |
| 349 | + " f\"{project['opr_id']} has a {crediting_error[1]} crediting error ({as_frac[1].round(3)})\"\n", |
| 350 | + " )\n", |
344 | 351 | " print(f\"CI: {crediting_error[0]}, {crediting_error[2]}\")\n", |
345 | 352 | " print(f\"% CI: {as_frac[0].round(3)}, {as_frac[2].round(3)}\")" |
346 | 353 | ] |
|
886 | 893 | "projects = [\n", |
887 | 894 | " x\n", |
888 | 895 | " for x in db\n", |
889 | | - " if x[\"carbon\"][\"initial_carbon_stock\"][\"value\"] > x[\"carbon\"][\"common_practice\"][\"value\"]\n", |
| 896 | + " if x[\"carbon\"][\"initial_carbon_stock\"][\"value\"]\n", |
| 897 | + " > x[\"carbon\"][\"common_practice\"][\"value\"]\n", |
890 | 898 | "]" |
891 | 899 | ] |
892 | 900 | }, |
|
937 | 945 | } |
938 | 946 | ], |
939 | 947 | "source": [ |
940 | | - "sum((cp_df[\"baseline\"] <= cp_df[\"cp\"] * 1.05)) / len(cp_df.dropna())" |
| 948 | + "sum(cp_df[\"baseline\"] <= cp_df[\"cp\"] * 1.05) / len(cp_df.dropna())" |
941 | 949 | ] |
942 | 950 | }, |
943 | 951 | { |
|
955 | 963 | "metadata": {}, |
956 | 964 | "outputs": [], |
957 | 965 | "source": [ |
958 | | - "fn = f\"https://carbonplan.blob.core.windows.net/carbonplan-forests/offsets/archive/results/common-practice-verification.json\"\n", |
| 966 | + "fn = \"https://carbonplan.blob.core.windows.net/carbonplan-forests/offsets/archive/results/common-practice-verification.json\"\n", |
959 | 967 | "with fsspec.open(fn, mode=\"r\") as f:\n", |
960 | 968 | " cp_verification = json.load(f)" |
961 | 969 | ] |
|
1000 | 1008 | } |
1001 | 1009 | ], |
1002 | 1010 | "source": [ |
1003 | | - "data = pd.DataFrame(cp_verification[\"projects\"])[[\"opr_id\", \"recalculated\", \"project_reported\"]]\n", |
| 1011 | + "data = pd.DataFrame(cp_verification[\"projects\"])[\n", |
| 1012 | + " [\"opr_id\", \"recalculated\", \"project_reported\"]\n", |
| 1013 | + "]\n", |
1004 | 1014 | "mean_squared_error(data[\"recalculated\"], data[\"project_reported\"]) ** 0.5" |
1005 | 1015 | ] |
1006 | 1016 | }, |
|
1011 | 1021 | "metadata": {}, |
1012 | 1022 | "outputs": [], |
1013 | 1023 | "source": [ |
1014 | | - "data[\"diff\"] = (data[\"recalculated\"] - data[\"project_reported\"]) / (data[\"project_reported\"])\n", |
| 1024 | + "data[\"diff\"] = (data[\"recalculated\"] - data[\"project_reported\"]) / (\n", |
| 1025 | + " data[\"project_reported\"]\n", |
| 1026 | + ")\n", |
1015 | 1027 | "data = data[np.isfinite(data[\"diff\"])] # CAR1186 = infite bc original CP = 0." |
1016 | 1028 | ] |
1017 | 1029 | }, |
|
1188 | 1200 | "\n", |
1189 | 1201 | "crediting_df = pd.DataFrame({k: v[\"delta_arbocs\"] for k, v in crediting_error.items()})\n", |
1190 | 1202 | "\n", |
1191 | | - "median_crediting_error = {k: np.median(v[\"delta_arbocs\"]) for k, v in crediting_error.items()}" |
| 1203 | + "median_crediting_error = {\n", |
| 1204 | + " k: np.median(v[\"delta_arbocs\"]) for k, v in crediting_error.items()\n", |
| 1205 | + "}" |
1192 | 1206 | ] |
1193 | 1207 | }, |
1194 | 1208 | { |
|
1199 | 1213 | "outputs": [], |
1200 | 1214 | "source": [ |
1201 | 1215 | "tp = pd.concat(\n", |
1202 | | - " [pd.Series(median_crediting_error).rename(\"crediting_error\"), error_cp0.rename(\"cp\")],\n", |
| 1216 | + " [\n", |
| 1217 | + " pd.Series(median_crediting_error).rename(\"crediting_error\"),\n", |
| 1218 | + " error_cp0.rename(\"cp\"),\n", |
| 1219 | + " ],\n", |
1203 | 1220 | " axis=1,\n", |
1204 | 1221 | ")" |
1205 | 1222 | ] |
|
1332 | 1349 | "outputs": [], |
1333 | 1350 | "source": [ |
1334 | 1351 | "ak_assessment_areas = [285, 286, 287]\n", |
1335 | | - "ak_all = pd.concat([cat.rfia_all(assessment_area_id=aa_id).read() for aa_id in ak_assessment_areas])" |
| 1352 | + "ak_all = pd.concat(\n", |
| 1353 | + " [cat.rfia_all(assessment_area_id=aa_id).read() for aa_id in ak_assessment_areas]\n", |
| 1354 | + ")" |
1336 | 1355 | ] |
1337 | 1356 | }, |
1338 | 1357 | { |
|
1388 | 1407 | } |
1389 | 1408 | ], |
1390 | 1409 | "source": [ |
1391 | | - "sum([project[\"arbocs\"][\"issuance\"] for project in db if 287 in project[\"supersection_ids\"]])" |
| 1410 | + "sum(\n", |
| 1411 | + " [\n", |
| 1412 | + " project[\"arbocs\"][\"issuance\"]\n", |
| 1413 | + " for project in db\n", |
| 1414 | + " if 287 in project[\"supersection_ids\"]\n", |
| 1415 | + " ]\n", |
| 1416 | + ")" |
1392 | 1417 | ] |
1393 | 1418 | }, |
1394 | 1419 | { |
|