[AUTO] Format files using DocumentFormat #23

Open · wants to merge 1 commit into main
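
For context, the [AUTO] formatting pass behind this PR is produced by running DocumentFormat.jl over the package sources and committing the rewritten files. A minimal sketch of that workflow is shown below; it assumes DocumentFormat's format_text (String in, formatted String out) entry point and the four files touched by this diff, so the actual automation may differ.

# Sketch only: reproduce a formatting pass like this PR with DocumentFormat.jl.
# Assumes `format_text(::String)::String` is the formatting entry point.
using DocumentFormat

for path in ["src/MimiRFFSPs.jl",
             "src/components/RegionAggregatorSum.jl",
             "src/components/SPs.jl",
             "test/runtests.jl"]
    source = read(path, String)
    write(path, format_text(source))  # overwrite each file with its formatted source
end
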
9 changes: 4 additions & 5 deletions src/MimiRFFSPs.jl
@@ -31,9 +31,9 @@ function get_model()

set_dimension!(m, :time, 1750:2300)

add_comp!(m, SPs, :rffsp, first = 2020, last = 2300)
add_comp!(m, SPs, :rffsp, first=2020, last=2300)

all_countries = CSVFiles.load(joinpath(@__DIR__, "..", "data", "keys", "MimiRFFSPs_ISO3.csv")) |> DataFrame
all_countries = CSVFiles.load(joinpath(@__DIR__, "..", "data", "keys", "MimiRFFSPs_ISO3.csv")) |> DataFrame

set_dimension!(m, :country, all_countries.ISO3)

@@ -42,10 +42,9 @@ function get_model()
return m
end

function get_mcs(sampling_ids::Union{Vector{Int}, Nothing} = nothing)
function get_mcs(sampling_ids::Union{Vector{Int},Nothing}=nothing)
# define the Monte Carlo Simulation and add some simple random variables
mcs = @defsim begin
end
mcs = @defsim begin end

distrib = isnothing(sampling_ids) ? Mimi.EmpiricalDistribution(collect(1:10_000)) : Mimi.SampleStore(sampling_ids)
Mimi.add_RV!(mcs, :socio_id_rv, distrib)
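
get_model and get_mcs, both touched above, are the package's main entry points. The following usage sketch is not part of this diff; it assumes the functions are called through the MimiRFFSPs module and that Mimi's standard run is used.

# Sketch: build and run the default RFF SPs model, then define the Monte Carlo simulation.
using Mimi, MimiRFFSPs

m = MimiRFFSPs.get_model()            # Mimi model with the :rffsp component over 1750:2300
run(m)                                # standard Mimi run

mcs = MimiRFFSPs.get_mcs([1, 2, 3])   # pin the socioeconomic sample ids instead of drawing from 1:10_000
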
8 changes: 4 additions & 4 deletions src/components/RegionAggregatorSum.jl
@@ -1,5 +1,5 @@
@defcomp RegionAggregatorSum begin

inputregions = Index()
outputregions = Index()

@@ -12,17 +12,17 @@
input = Parameter(index=[time, inputregions])
output = Variable(index=[time, outputregions])

function init(p,v,d)
function init(p, v, d)
idxs = indexin(p.input_output_mapping, p.output_region_names)
!isnothing(findfirst(i -> isnothing(i), idxs)) ? error("All provided region names in the RegionAggregatorSum's input_output_mapping Parameter must exist in the output_region_names Parameter.") : nothing
v.input_output_mapping_int[:] = idxs
end

function run_timestep(p,v,d,t)
function run_timestep(p, v, d, t)
v.output[t, :] .= 0.

for i in d.inputregions
v.output[t, v.input_output_mapping_int[i]] += p.input[t,i]
v.output[t, v.input_output_mapping_int[i]] += p.input[t, i]
end
end
end
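
Outside of Mimi, the aggregation this component performs reduces to indexin plus an accumulation loop. A standalone sketch with made-up region names (illustration only, not package code):

# Sketch of the RegionAggregatorSum logic in plain Julia.
output_region_names  = ["NorthAmerica", "Europe"]
input_output_mapping = ["NorthAmerica", "NorthAmerica", "NorthAmerica", "Europe", "Europe"]

# init: resolve each input region's output-region index, erroring on unmapped names.
idxs = indexin(input_output_mapping, output_region_names)
any(isnothing, idxs) && error("all mapped names must exist in output_region_names")

# run_timestep (one timestep): sum the inputs into their output regions.
input  = [1.0, 2.0, 3.0, 4.0, 5.0]    # one value per input region
output = zeros(length(output_region_names))
for i in eachindex(input)
    output[idxs[i]] += input[i]
end
output == [6.0, 9.0]                  # first three inputs -> NorthAmerica, last two -> Europe
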
109 changes: 55 additions & 54 deletions src/components/SPs.jl
@@ -1,5 +1,5 @@
# (10/25/2021) BEA Table 1.1.9, line 1 GDP annual values as linked here: https://apps.bea.gov/iTable/iTable.cfm?reqid=19&step=3&isuri=1&select_all_years=0&nipa_table_list=13&series=a&first_year=2005&last_year=2020&scale=-99&categories=survey&thetable=
const pricelevel_2011_to_2005 = 87.504/98.164
const pricelevel_2011_to_2005 = 87.504 / 98.164

function fill_socioeconomics!(source_Year, source_Country, source_Pop, source_GDP, population, gdp, country_lookup, start_year, end_year)
for i in 1:length(source_Year)
@@ -27,8 +27,9 @@ function fill_deathrates!(source_Year, source_ISO3, source_DeathRate, deathrate,
end

function fill_emissions!(source_year, source_value, emissions_var, start_year, end_year)
for (t,v) in zip(source_year, source_value)
if start_year <= t <= end_year
for (t, v) in zip(source_year, source_value)
if start_year <= t
end_year
year_index = TimestepIndex(t - start_year + 1)
emissions_var[year_index] = v
end
Expand Down Expand Up @@ -58,50 +59,50 @@ end
end_year = Parameter{Int}(default=Int(2300)) # year (annual) data should end
country_names = Parameter{String}(index=[country]) # need the names of the countries from the dimension
id = Parameter{Int64}(default=Int(6546)) # the sample (out of 10,000) to be used
population = Variable(index=[time, country], unit="million")
population_global = Variable(index=[time], unit="million")
deathrate = Variable(index=[time, country], unit="deaths/1000 persons/yr")
gdp = Variable(index=[time, country], unit="billion US\$2005/yr")
gdp_global = Variable(index=[time], unit="billion US\$2005/yr")
population1990 = Variable(index=[country], unit = "million")
gdp1990 = Variable(index=[country], unit = unit="billion US\$2005/yr")
co2_emissions = Variable(index=[time], unit="GtC/yr")
ch4_emissions = Variable(index=[time], unit="MtCH4/yr")
n2o_emissions = Variable(index=[time], unit="MtN2/yr")

function init(p,v,d)

population = Variable(index=[time, country], unit="million")
population_global = Variable(index=[time], unit="million")
deathrate = Variable(index=[time, country], unit="deaths/1000 persons/yr")
gdp = Variable(index=[time, country], unit="billion US\$2005/yr")
gdp_global = Variable(index=[time], unit="billion US\$2005/yr")

population1990 = Variable(index=[country], unit="million")
gdp1990 = Variable(index=[country], unit=unit = "billion US\$2005/yr")

co2_emissions = Variable(index=[time], unit="GtC/yr")
ch4_emissions = Variable(index=[time], unit="MtCH4/yr")
n2o_emissions = Variable(index=[time], unit="MtN2/yr")

function init(p, v, d)

# add countries to a dictionary where each country key has a value holding its
# index in country_names
country_lookup = Dict{String,Int}(name=>i for (i,name) in enumerate(p.country_names))
country_lookup = Dict{String,Int}(name => i for (i, name) in enumerate(p.country_names))
country_indices = d.country::Vector{Int} # helper for type stable country indices

# ----------------------------------------------------------------------
# Socioeconomic Data
# population in millions of individuals
# GDP in billions of $2005 USD

# Load Feather File
t = Arrow.Table(joinpath(datadep"rffsps_v5", "pop_income", "rffsp_pop_income_run_$(p.id).feather"))
fill_socioeconomics!(t.Year, t.Country, t.Pop, t.GDP, v.population, v.gdp, country_lookup, p.start_year, p.end_year)

for year in p.start_year:5:p.end_year-5, country in country_indices
year_as_timestep = TimestepIndex(year - p.start_year + 1)
pop_interpolator = LinearInterpolation(Float64[year, year+5], [log(v.population[year_as_timestep,country]), log(v.population[year_as_timestep+5,country])])
gdp_interpolator = LinearInterpolation(Float64[year, year+5], [log(v.gdp[year_as_timestep,country]), log(v.gdp[year_as_timestep+5,country])])
pop_interpolator = LinearInterpolation(Float64[year, year+5], [log(v.population[year_as_timestep, country]), log(v.population[year_as_timestep+5, country])])
gdp_interpolator = LinearInterpolation(Float64[year, year+5], [log(v.gdp[year_as_timestep, country]), log(v.gdp[year_as_timestep+5, country])])
for year2 in year+1:year+4
year2_as_timestep = TimestepIndex(year2 - p.start_year + 1)
v.population[year2_as_timestep,country] = exp(pop_interpolator[year2])
v.gdp[year2_as_timestep,country] = exp(gdp_interpolator[year2])
v.population[year2_as_timestep, country] = exp(pop_interpolator[year2])
v.gdp[year2_as_timestep, country] = exp(gdp_interpolator[year2])
end
end

# add global data for future accessibility and quality control
v.gdp_global[:,:] = sum(v.gdp[:,:], dims = 2) # sum across countries, which are the second dimension
v.population_global[:,:] = sum(v.population[:,:], dims = 2) # sum across countries, which are the second dimension
v.gdp_global[:, :] = sum(v.gdp[:, :], dims=2) # sum across countries, which are the second dimension
v.population_global[:, :] = sum(v.population[:, :], dims=2) # sum across countries, which are the second dimension

# ----------------------------------------------------------------------
# Death Rate Data
@@ -113,7 +114,7 @@ end
g_datasets[:pop_trajectory_key] = (load(joinpath(datadep"rffsps_v5", "sample_numbers", "sampled_pop_trajectory_numbers.csv")) |> DataFrame).x
end
deathrate_trajectory_id = convert(Int64, g_datasets[:pop_trajectory_key][p.id])

# Load Feather File
t = Arrow.Table(joinpath(datadep"rffsps_v5", "death_rates", "rffsp_death_rates_run_$(deathrate_trajectory_id).feather"))
fill_deathrates!(t.Year, t.ISO3, t.DeathRate, v.deathrate, country_lookup, p.start_year, p.end_year)
@@ -124,28 +125,28 @@ end
# carbon dioxide emissions in GtC
# nitrous oxide emissions in MtN2
# methane emissions in MtCH4

# add data to the global dataset if it's not there
if !haskey(g_datasets, :ch4)
g_datasets[:ch4] = load(joinpath(datadep"rffsps_v5", "emissions", "rffsp_ch4_emissions.csv")) |>
@groupby(_.sample) |>
@orderby(key(_)) |>
@map(DataFrame(year=_.year, value=_.value)) |>
collect
g_datasets[:ch4] = load(joinpath(datadep"rffsps_v5", "emissions", "rffsp_ch4_emissions.csv")) |>
@groupby(_.sample) |>
@orderby(key(_)) |>
@map(DataFrame(year=_.year, value=_.value)) |>
collect
end
if !haskey(g_datasets, :n2o)
g_datasets[:n2o] = load(joinpath(datadep"rffsps_v5", "emissions", "rffsp_n2o_emissions.csv")) |>
@groupby(_.sample) |>
@orderby(key(_)) |>
@map(DataFrame(year=_.year, value=_.value)) |>
collect
g_datasets[:n2o] = load(joinpath(datadep"rffsps_v5", "emissions", "rffsp_n2o_emissions.csv")) |>
@groupby(_.sample) |>
@orderby(key(_)) |>
@map(DataFrame(year=_.year, value=_.value)) |>
collect
end
if !haskey(g_datasets, :co2)
g_datasets[:co2] = load(joinpath(datadep"rffsps_v5", "emissions", "rffsp_co2_emissions.csv")) |>
@groupby(_.sample) |>
@orderby(key(_)) |>
@map(DataFrame(year=_.year, value=_.value)) |>
collect
g_datasets[:co2] = load(joinpath(datadep"rffsps_v5", "emissions", "rffsp_co2_emissions.csv")) |>
@groupby(_.sample) |>
@orderby(key(_)) |>
@map(DataFrame(year=_.year, value=_.value)) |>
collect
end

# fill in the variables
@@ -160,15 +161,15 @@ end
# Population and GDP 1990 Values

if !haskey(g_datasets, :ypc1990)
g_datasets[:ypc1990] = load(joinpath(datadep"rffsps_v5", "ypc1990", "rffsp_ypc1990.csv")) |>
DataFrame |>
i -> insertcols!(i, :sample => 1:10_000) |>
i -> DataFrames.stack(i, Not(:sample)) |>
i -> rename!(i, [:sample, :country, :value]) |>
@groupby(_.sample) |>
@orderby(key(_)) |>
@map(DataFrame(country=_.country, value=_.value)) |>
collect
g_datasets[:ypc1990] = load(joinpath(datadep"rffsps_v5", "ypc1990", "rffsp_ypc1990.csv")) |>
DataFrame |>
i -> insertcols!(i, :sample => 1:10_000) |>
i -> DataFrames.stack(i, Not(:sample)) |>
i -> rename!(i, [:sample, :country, :value]) |>
@groupby(_.sample) |>
@orderby(key(_)) |>
@map(DataFrame(country=_.country, value=_.value)) |>
collect
end
if !haskey(g_datasets, :pop1990)
g_datasets[:pop1990] = load(joinpath(@__DIR__, "..", "..", "data/population1990.csv")) |> DataFrame
@@ -182,7 +183,7 @@ end

end

function run_timestep(p,v,d,t)
function run_timestep(p, v, d, t)

if !(gettime(t) in p.start_year:p.end_year)
error("Cannot run SP component in year $(gettime(t)), SP data is not available for this model and year.")
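
The init block above fills five-yearly population and GDP samples and then interpolates them to annual values in log space. Below is a self-contained sketch of that interpolation step, with illustrative numbers and plain Julia in place of the component's LinearInterpolation call.

# Sketch: fill the years between two 5-year data points by interpolating log values,
# mirroring the loop over `year in p.start_year:5:p.end_year-5` in SPs.init.
year0, year1 = 2020, 2025
pop0, pop1   = 331.0, 338.0           # hypothetical population (millions) at the known years

logpop(y) = log(pop0) + (y - year0) / (year1 - year0) * (log(pop1) - log(pop0))

annual = [exp(logpop(y)) for y in year0:year1]
# annual[1] ≈ pop0, annual[end] ≈ pop1, and intermediate years grow at a constant rate.
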
2 changes: 1 addition & 1 deletion test/runtests.jl
@@ -15,4 +15,4 @@ end

@testset "API" begin
include("test_API.jl")
end
end
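
Since this PR is formatting-only, the existing test suite should pass unchanged. A quick way to confirm that, assuming MimiRFFSPs is available in the active Julia environment:

# Sketch: run the package tests after applying the formatting changes.
using Pkg
Pkg.test("MimiRFFSPs")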