From ff9932ef511de2808e6fee52d9f89b91860f2c41 Mon Sep 17 00:00:00 2001 From: Nicole Misek Date: Fri, 4 Apr 2025 15:49:53 -0400 Subject: [PATCH 1/2] Adds Nextmvified notebook for Gurobi price optimization --- .nextmv/workflow-configuration.yml | 6 + python-gurobi-price-optimization/LICENSE | 201 ++++ python-gurobi-price-optimization/README.md | 15 + python-gurobi-price-optimization/main.ipynb | 1184 +++++++++++++++++++ 4 files changed, 1406 insertions(+) create mode 100644 python-gurobi-price-optimization/LICENSE create mode 100644 python-gurobi-price-optimization/README.md create mode 100644 python-gurobi-price-optimization/main.ipynb diff --git a/.nextmv/workflow-configuration.yml b/.nextmv/workflow-configuration.yml index 06afda18..9baacb68 100644 --- a/.nextmv/workflow-configuration.yml +++ b/.nextmv/workflow-configuration.yml @@ -101,6 +101,12 @@ apps: marketplace_app_id: marketplace_major_version: description: Use the Python Nextmv & scikit-learn integration - diabetes. + - name: python-gurobi-price-optimization + type: python + app_id: + marketplace_app_id: + marketplace_major_version: + description: Use a Nextmvified Gurobi notebook for price optimization. - name: python-nextroute type: python app_id: diff --git a/python-gurobi-price-optimization/LICENSE b/python-gurobi-price-optimization/LICENSE new file mode 100644 index 00000000..2c27ec72 --- /dev/null +++ b/python-gurobi-price-optimization/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2022-2024 nextmv.io inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/python-gurobi-price-optimization/README.md b/python-gurobi-price-optimization/README.md new file mode 100644 index 00000000..a051d8c9 --- /dev/null +++ b/python-gurobi-price-optimization/README.md @@ -0,0 +1,15 @@ +# Nextmv & Gurobi Price Optimization + +This community app contains a Jupyter Notebook that demonstrates how to +Nextmvify the [Gurobi Avocado Price Optimization notebook][original]. + +## Next steps + +* Visit our [docs][docs] and [blog][blog]. Need more assistance? + [Contact][contact] us! 
+ +[original]: + https://colab.research.google.com/github/Gurobi/modeling-examples/blob/master/price_optimization/price_optimization.ipynb +[docs]: https://docs.nextmv.io +[blog]: https://www.nextmv.io/blog +[contact]: https://www.nextmv.io/contact diff --git a/python-gurobi-price-optimization/main.ipynb b/python-gurobi-price-optimization/main.ipynb new file mode 100644 index 00000000..82ceecbf --- /dev/null +++ b/python-gurobi-price-optimization/main.ipynb @@ -0,0 +1,1184 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "7f7d69f9", + "metadata": { + "id": "7f7d69f9" + }, + "source": [ + "# Deploying the Gurobi Avocado Price Optimization Workflow in Nextmv\n", + "\n", + "Gurobi has published a notebook on Avocado price and supply optimization [here](https://colab.research.google.com/github/Gurobi/modeling-examples/blob/master/price_optimization/price_optimization.ipynb). They demonstrate how to use an Ordinary Least Squares (OLS) linear regression model to establish the relationship between price and demand based on data from the Hass Avocado Board. They use this fitted OLS model as input to a price and supply optimization model to optimize the supply and price of avocados by region.\n", + "\n", + "At Nextmv, we have taken their notebook, and adapted it to \"Nextmv-ify\" their solution to this problem. By integrating with Nextmv, we can deploy and operate models on the platform. Following, we unlock automation, collaboration, scalability, and streamlined decision workflows.\n", + "\n", + "While we're demonstrating this process from a Jupyter notebook for ease of exploration, it's important to note that notebooks are best suited for prototyping rather than production use. In a real-world production setting, you would typically develop and refine your model in a notebook before transitioning to a structured Python project within a managed repository for better version control, collaboration, and operational stability.\n", + "\n", + "Now, let's dive in!" 
+ ] + }, + { + "cell_type": "markdown", + "id": "RKuDjLr2oaAK", + "metadata": { + "id": "RKuDjLr2oaAK" + }, + "source": [ + "## Getting Started: Connect this notebook to your Nextmv Account 🐰\n", + "\n", + "In order to run this notebook end to end, you will need to create a secret called `NEXTMV_API_KEY` in this colab notebook. You can do this by running through the following steps:\n", + "\n", + "* Visit https://cloud.nextmv.io\n", + "* Navigate to `Settings` in top nav bar\n", + "* Navigate to `API Keys` in the left nav bar\n", + "* Copy your API key\n", + "* Return to this Colab notebook\n", + "* In the left nav, click on the key icon to expand the Secrets menu\n", + "* Click ` + Add new secret`\n", + "* Type in `NEXTMV_API_KEY` in to “Name” field and copy your API key into “Value”.\n", + "* Then click the “Notebook Access” toggle. It should turn blue when the connection is active\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "id": "d1eda52c", + "metadata": { + "id": "d1eda52c" + }, + "source": [ + "## Part I: Deploy the ML Regressor Model to Nextmv 🐰\n", + "\n", + "First, we need to install some dependencies, like the Nextmv Python SDK." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "52e1a95e", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "collapsed": true, + "id": "52e1a95e", + "outputId": "18f7eba3-f579-4143-f8ad-364a738c1dce" + }, + "outputs": [], + "source": [ + "%pip install --upgrade \"nextmv[all]\"\n", + "%pip install --upgrade \"nextmv-gurobipy\"\n", + "%pip install statsmodels\n", + "%pip install gurobipy\n", + "%pip install pandas" + ] + }, + { + "cell_type": "markdown", + "id": "966b4d7f", + "metadata": { + "id": "966b4d7f" + }, + "source": [ + "We import the required packages." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "39c4f7bf", + "metadata": { + "id": "39c4f7bf" + }, + "outputs": [], + "source": [ + "import nextmv\n", + "from nextmv import cloud\n", + "import nextmv_gurobipy as ngp\n", + "import json\n", + "import os\n", + "import statsmodels.api as sm\n", + "import statsmodels.formula.api as smf\n", + "import plotly.express as px\n", + "import plotly.graph_objects as go\n", + "from sklearn.model_selection import train_test_split\n", + "from sklearn.metrics import r2_score\n", + "from google.colab import userdata\n", + "import pandas as pd\n", + "import gurobipy as gp\n", + "import uuid\n", + "from gurobipy import GRB" + ] + }, + { + "cell_type": "markdown", + "id": "Ts5bbJA_-myO", + "metadata": { + "id": "Ts5bbJA_-myO" + }, + "source": [ + "Let's copy over the data processing code Gurobi included in their original notebook." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "mYL_EvbP98U_", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "mYL_EvbP98U_", + "outputId": "1fb493c6-03e7-41b4-db73-a037ceb39295" + }, + "outputs": [], + "source": [ + "avocado = pd.read_csv('https://raw.githubusercontent.com/Gurobi/modeling-examples/master/price_optimization/HABdata_2019_2022.csv') # dataset downloaded directly from HAB\n", + "avocado_old = pd.read_csv('https://raw.githubusercontent.com/Gurobi/modeling-examples/master/price_optimization/kaggledata_till2018.csv') # dataset downloaded from Kaggle\n", + "avocado = pd.concat([avocado, avocado_old], ignore_index=True)\n", + "\n", + "# Add the index for each year from 2015 through 2022\n", + "avocado['date'] = pd.to_datetime(avocado['date'])\n", + "avocado['year'] = pd.DatetimeIndex(avocado['date']).year\n", + "avocado['year_index'] = avocado['year'] - 2015\n", + "avocado = avocado.sort_values(by='date')\n", + "\n", + "# Define the peak season\n", + "avocado['month'] = pd.DatetimeIndex(avocado['date']).month\n", + 
"peak_months = range(2,8)\n", + "def peak_season(row):\n", + " return 1 if int(row['month']) in peak_months else 0\n", + "\n", + "avocado['peak'] = avocado.apply(lambda row: peak_season(row), axis=1)\n", + "\n", + "# Scale the number of avocados to millions\n", + "avocado['units_sold'] = avocado['units_sold']/1000000\n", + "\n", + "# Select only conventional avocados\n", + "avocado = avocado[avocado['type'] == 'Conventional']\n", + "\n", + "avocado = avocado[['date','units_sold','price','region','year','month','year_index','peak']].reset_index(drop = True)\n", + "regions = ['Great_Lakes','Midsouth','Northeast','Northern_New_England','SouthCentral','Southeast','West','Plains']\n", + "df = avocado[avocado.region.isin(regions)]\n", + "\n", + "for col in df.select_dtypes(include=['datetime']).columns:\n", + " df[col] = df[col].astype(str)" + ] + }, + { + "cell_type": "markdown", + "id": "b525fdd4", + "metadata": { + "id": "b525fdd4" + }, + "source": [ + "Here is where we make very minor modifications to support deployment on the Nextmv Platform.\n", + "\n", + "- We follow the Nextmv convention and define a decision model using the `nextmv.Model` class.\n", + "- We define a `solve` function on this class which conforms to the expected signature: reading in a `nextmv.Input` and writing a `nextmv.Output`.\n", + "\n", + "Following this convention unlocks the ability to push up a model object directly from your notebook environment.\n", + "\n", + "Define the `MLRegressorModel` class. All we're doing here is copying code from above and \"Nextmv-ifying\" it." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "82I-fJs_NGXk", + "metadata": { + "id": "82I-fJs_NGXk" + }, + "outputs": [], + "source": [ + "# >>>>>>>>>>>> Start Nextmv-ifying\n", + "class MLRegressorModel(nextmv.Model):\n", + " def solve(self, input: nextmv.Input) -> nextmv.Output:\n", + " nextmv.redirect_stdout()\n", + " df = pd.DataFrame(input.data)\n", + "# <<<<<<<<<<<<< Stop Nextmv-ifying\n", + " train, test = train_test_split(df, train_size=0.8, random_state=1)\n", + " df_train = pd.DataFrame(train, columns=df.columns)\n", + " df_test = pd.DataFrame(test, columns=df.columns)\n", + "\n", + " # Train the model\n", + " formula = 'units_sold ~ price + year_index + C(region)+ peak'\n", + " mod = smf.ols(formula,data=df_train)\n", + " result = mod.fit()\n", + " result.summary()\n", + "\n", + " # Get R^2 from test data\n", + " y_true = df_test['units_sold']\n", + " y_pred = result.predict(df_test)\n", + "\n", + " formula = 'units_sold ~ price + year_index + C(region)+ peak'\n", + " mod_full = smf.ols(formula,data=df)\n", + " result_full = mod_full.fit()\n", + "\n", + " y_true_full = df['units_sold']\n", + " y_pred_full = result_full.predict(df)\n", + "\n", + " # Get the weights and store it\n", + " coef_dict = result_full.params.to_dict()\n", + " coef_dict['C(region)[T.Great_Lakes]'] = 0\n", + "\n", + " # >>>>>>>>>>>> Start Nextmv-ifying\n", + " statistics = nextmv.Statistics(\n", + " result=nextmv.ResultStatistics(\n", + " custom={\n", + " \"r2_test\": r2_score(y_true, y_pred),\n", + " \"r2_full\": r2_score(y_true_full, y_pred_full),\n", + " },\n", + " ),\n", + " )\n", + "\n", + " return nextmv.Output(\n", + " options=nextmv.Options(),\n", + " solution=coef_dict,\n", + " statistics=statistics,\n", + " )\n", + " # <<<<<<<<<<<<< Stop Nextmv-ifying\n" + ] + }, + { + "cell_type": "markdown", + "id": "2ezSl-eERQIg", + "metadata": { + "id": "2ezSl-eERQIg" + }, + "source": [ + "We can run this model locally, before deploying it to the Nextmv 
Platform. Since our model takes in a `nextmv.Input`, we can create it by passing in our `df` and some default `nextmv.Options`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "_DdjKp6cRS48", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "_DdjKp6cRS48", + "outputId": "b4dc171b-5ec9-4431-af9f-55979f1b28dc" + }, + "outputs": [], + "source": [ + "model = MLRegressorModel()\n", + "input = nextmv.Input(data=df.to_dict(), options=nextmv.Options())\n", + "output = model.solve(input)\n", + "nextmv.write_local(output)" + ] + }, + { + "cell_type": "markdown", + "id": "qp_6yGN5r1km", + "metadata": { + "id": "qp_6yGN5r1km" + }, + "source": [ + "Now that we've confirmed everything works locally, we're ready to deploy to the Nextmv Platform. We do this by creating a Nextmv `cloud.Client` and configuring it with the Nextmv API key (step mentioned in the prerequisites).\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5J52AZVttiMk", + "metadata": { + "id": "5J52AZVttiMk" + }, + "outputs": [], + "source": [ + "client = cloud.Client(api_key=userdata.get('NEXTMV_API_KEY'))" + ] + }, + { + "cell_type": "markdown", + "id": "SuGO6YGctpZp", + "metadata": { + "id": "SuGO6YGctpZp" + }, + "source": [ + "Once we create the client, we can:\n", + "* Specify the app we want to push up to. In this case, we're pushing up to the `avocado-ml-regressor` app.\n", + "* Define the model configuration.\n", + "* Create an app manifest based on that configuration.\n", + "* Push the local model up to the `avocado-ml-regressor` app in Nextmv Cloud. This step may take a few minutes depending on your network speed." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "thITwf8iY_Gi", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "thITwf8iY_Gi", + "outputId": "ad86fc14-a97f-4689-ffc6-55c2ace3bc5c" + }, + "outputs": [], + "source": [ + "reg_app_name = \"avocado-ml-regressor\"\n", + "if cloud.Application.exists(client, id=reg_app_name):\n", + " regressor_app = cloud.Application(client=client, id=reg_app_name)\n", + "else:\n", + " regressor_app = cloud.Application.new(client=client, id=reg_app_name, name=reg_app_name)\n", + "\n", + "model_configuration = nextmv.ModelConfiguration(\n", + " name=reg_app_name,\n", + " requirements=[\n", + " \"nextmv==0.20.1\",\n", + " \"statsmodels==0.14.4\",\n", + " \"scikit-learn==1.6.1\",\n", + " \"pandas==2.2.2\"\n", + " ],\n", + " options=None,\n", + ")\n", + "manifest = cloud.Manifest.from_model_configuration(model_configuration)\n", + "\n", + "regressor_app.push(\n", + " manifest=manifest,\n", + " model=model,\n", + " model_configuration=model_configuration,\n", + " verbose=True,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "kbPxQiWQtGd5", + "metadata": { + "id": "kbPxQiWQtGd5" + }, + "source": [ + "The app is deployed, let’s create a remote run on the Nextmv Platform and print the results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "39cKfTcRbDow", + "metadata": { + "id": "39cKfTcRbDow" + }, + "outputs": [], + "source": [ + "regressor_result = regressor_app.new_run_with_result(input=df.to_dict())\n", + "print(json.dumps(regressor_result.output, indent=2))" + ] + }, + { + "cell_type": "markdown", + "id": "6DnX1PmStLrB", + "metadata": { + "id": "6DnX1PmStLrB" + }, + "source": [ + "And that's it 🚀! We've got our fitted regression model running as a standalone application on the Nextmv Platform." 
+ ] + }, + { + "cell_type": "markdown", + "id": "Iy7x3jt1Bk0j", + "metadata": { + "id": "Iy7x3jt1Bk0j" + }, + "source": [ + "## Part II: Deploy the Price and Supply Optimization Model to Nextmv 🐰\n", + "\n", + "Let's try this again with the optimization model. Just as with the ML regressor, we'll:\n", + "\n", + "* Copy the notebook model code into a class, which inherits from `nextmv.Model`.\n", + "* Make minor modifications to \"Nextmv-ify\" it.\n", + "\n", + "Additionally, we are going to:\n", + "* Expose an option called `supply`, so we can set the total amount of avocado supply external to the model.\n", + "* Define a custom visualization on the app.\n", + "\n", + " Nextmv works with `Chart.js`, `Plotly`, and `GeoJSON` visualization assets. We've refactored the original Gurobi visualization of the solution to work with `Plotly`, and we've returned this visualization as a custom asset on the run. Custom assets make it possible for you to manage your visualization alongside your model code!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "be5a5c52", + "metadata": { + "id": "be5a5c52" + }, + "outputs": [], + "source": [ + "# >>>>>>>>>>>> Start Nextmv-ifying\n", + "class AvocadoPriceDecisionModel(nextmv.Model):\n", + " def solve(self, input: nextmv.Input) -> nextmv.Output:\n", + " data = input.data\n", + " B = input.options.supply # total amount of avocado supply\n", + "\n", + " m = ngp.Model(input.options)\n", + "# <<<<<<<<<<<<< Stop Nextmv-ifying\n", + "\n", + " # Sets and parameters\n", + " R = data[\"regions\"] # set of all regions\n", + "\n", + " peak_or_not = data[\"peak\"] # 1 if it is the peak season; 1 if isn't\n", + " year = data[\"year\"]\n", + "\n", + " c_waste = data[\"cost_per_wasted_product\"] # the cost ($) of wasting an avocado\n", + " c_transport = data[\"transport_costs\"] # the cost of transporting an avocado\n", + "\n", + " # Get the lower and upper bounds from the dataset for the price and the number of products to be stocked\n", + " a_min = data[\"minimum_product_price\"] # minimum avocado price in each region\n", + " a_max = data[\"maximum_product_price\"] # maximum avocado price in each region\n", + " b_min = data[\"minimum_product_allocations\"] # minimum number of avocados allocated to each region\n", + " b_max = data[\"maximum_product_allocations\"] # maximum number of avocados allocated to each region\n", + "\n", + " p = m.addVars(R,name=\"p\",lb=a_min, ub=a_max) # price of avocados in each region\n", + " x = m.addVars(R,name=\"x\",lb=b_min,ub=b_max) # quantity supplied to each region\n", + " s = m.addVars(R,name=\"s\",lb=0) # predicted amount of sales in each region for the given price\n", + " w = m.addVars(R,name=\"w\",lb=0) # excess wasteage in each region\n", + "\n", + " d = {r: (data[\"coefficients\"]['Intercept']+data[\"coefficients\"]['price']*p[r] + data[\"coefficients\"]['C(region)[T.%s]'%r] + data[\"coefficients\"]['year_index']*(year-2015) + data[\"coefficients\"]['peak']*peak_or_not) for r 
in R}\n", + "\n", + " m.setObjective(sum(p[r]*s[r] - c_waste*w[r] - c_transport[r]*x[r] for r in R))\n", + " m.ModelSense = GRB.MAXIMIZE\n", + "\n", + " m.addConstr(sum(x[r] for r in R) == B)\n", + " m.addConstrs((s[r] <= x[r] for r in R))\n", + " m.addConstrs((s[r] <= d[r] for r in R))\n", + " m.addConstrs((w[r] == x[r]-s[r] for r in R))\n", + " m.Params.NonConvex = 2\n", + " m.optimize()\n", + "\n", + " solution = pd.DataFrame()\n", + " solution['Region'] = R\n", + " solution['Price'] = [p[r].X for r in R]\n", + " solution['Allocated'] = [round(x[r].X,8) for r in R]\n", + " solution['Sold'] = [round(s[r].X,8) for r in R]\n", + " solution['Wasted'] = [round(w[r].X,8) for r in R]\n", + " solution['Pred_demand'] = [(data[\"coefficients\"]['Intercept']+data[\"coefficients\"]['price']*p[r].X + data[\"coefficients\"]['C(region)[T.%s]'%r] + data[\"coefficients\"]['year_index']*(year-2015) + data[\"coefficients\"]['peak']*peak_or_not) for r in R]\n", + "\n", + " fig = px.scatter(\n", + " solution,\n", + " x=\"Price\",\n", + " y=\"Sold\",\n", + " color=\"Region\",\n", + " size=\"Sold\", # Size based on sold quantity\n", + " size_max=15, # Adjust for desired size of markers\n", + " title=\"Avocado Sales and Waste by Region\",\n", + " labels={\"Price\": \"Price per avocado ($)\", \"Sold\": \"Number of avocados sold (millions)\"},\n", + " )\n", + "\n", + " colors = px.colors.qualitative.Plotly # Use a color palette from Plotly\n", + " num_regions = len(solution[\"Region\"].unique())\n", + " region_colors = {region: colors[i % len(colors)] for i, region in enumerate(solution[\"Region\"].unique())}\n", + "\n", + "\n", + " fig.add_trace(\n", + " go.Scatter(\n", + " x=solution[\"Price\"],\n", + " y=solution[\"Wasted\"],\n", + " mode=\"markers\",\n", + " marker=dict(symbol=\"x\", size=10, color=[region_colors[region] for region in solution[\"Region\"]]), # Assign colors based on region\n", + " name=\"Wasted\",\n", + " showlegend=False, # Hide legend for wasted points\n", + " 
)\n", + " )\n", + "\n", + " fig.update_layout(\n", + " yaxis_range=[0, 5],\n", + " xaxis_range=[1, 2.2],\n", + " legend=dict(x=1.25, y=0.5), # Adjust legend position\n", + " )\n", + "\n", + " json_plot = fig.to_json()\n", + "\n", + "# >>>>>>>>>>>> Start Nextmv-ifying\n", + " statistics = ngp.ModelStatistics(m)\n", + " statistics.result.custom = {\n", + " \"variables\": m.NumVars,\n", + " \"constraints\": m.NumConstrs,\n", + " \"total_waste\": sum(w[r].X for r in R),\n", + " }\n", + "\n", + " asset = nextmv.Asset(\n", + " name=\"Pricing Charts\",\n", + " content_type=\"json\",\n", + " visual=nextmv.Visual(\n", + " visual_schema=nextmv.VisualSchema(value=\"plotly\"),\n", + " label=\"Pricing Charts\",\n", + " visual_type=\"custom-tab\",\n", + " ),\n", + " content=[json.loads(json_plot)],\n", + " )\n", + "\n", + " return nextmv.Output(\n", + " options=input.options,\n", + " solution=solution.to_dict(),\n", + " statistics=statistics,\n", + " assets=[asset]\n", + " )\n", + "# <<<<<<<<<<<<< Stop Nextmv-ifying\n" + ] + }, + { + "cell_type": "markdown", + "id": "10432242", + "metadata": { + "id": "10432242" + }, + "source": [ + "Let's run this optimization model locally, as before. You can define the data to use when making the run. Again, we pulled this data from the original Gurobi notebook and just stored it in a `dict` here. Note how we use the result of the ML regressor to populate this data `dict`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "Rw21_NXxpTW2", + "metadata": { + "id": "Rw21_NXxpTW2" + }, + "outputs": [], + "source": [ + "data = {\n", + " \"regions\": [\n", + " \"Great_Lakes\",\n", + " \"Midsouth\",\n", + " \"Northeast\",\n", + " \"Northern_New_England\",\n", + " \"SouthCentral\",\n", + " \"Southeast\",\n", + " \"West\",\n", + " \"Plains\"\n", + " ],\n", + " \"total_amount_of_supply\": 30,\n", + " \"cost_per_wasted_product\": 0.1,\n", + " \"peak\": 1,\n", + " \"transport_costs\": {\n", + " 'Great_Lakes': .3,\n", + " 'Midsouth':.1,\n", + " 'Northeast':.4,\n", + " 'Northern_New_England':.5,\n", + " 'SouthCentral':.3,\n", + " 'Southeast':.2,\n", + " 'West':.2,\n", + " 'Plains':.2\n", + " },\n", + " \"year\": 2022,\n", + " \"minimum_product_price\": 0,\n", + " \"maximum_product_price\": 2,\n", + " \"minimum_product_allocations\": dict(df.groupby('region')['units_sold'].min()),\n", + " \"maximum_product_allocations\": dict(df.groupby('region')['units_sold'].max()),\n", + " \"coefficients\": regressor_result.output.get('solution'),\n", + "}\n" + ] + }, + { + "cell_type": "markdown", + "id": "D065XdQ6t8hW", + "metadata": { + "id": "D065XdQ6t8hW" + }, + "source": [ + "Let's define our options. In this case, the model expects an option called `supply` which specifies the total amount of avocado supply. We are merging all the supported Gurobi options with this custom option to create the final set of options for our app." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f5b3a3e1", + "metadata": { + "id": "f5b3a3e1" + }, + "outputs": [], + "source": [ + "gp_opt = ngp.ModelOptions().to_nextmv()\n", + "nm_opt = nextmv.Options(\n", + " nextmv.Parameter(name=\"supply\", param_type=int, default=30, description=\"Total amount of avocado supply.\", required=False),\n", + ")\n", + "options = nm_opt.merge(gp_opt)" + ] + }, + { + "cell_type": "markdown", + "id": "tDI60xYwA0TG", + "metadata": { + "id": "tDI60xYwA0TG" + }, + "source": [ + "To run locally, we instantiate the `nextmv.Input`, the decision model, and solve." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1kSVsVxpAxjC", + "metadata": { + "collapsed": true, + "id": "1kSVsVxpAxjC" + }, + "outputs": [], + "source": [ + "input = nextmv.Input(data=data, options=options)\n", + "model = AvocadoPriceDecisionModel()\n", + "output = model.solve(input)\n", + "nextmv.write_local(output)" + ] + }, + { + "cell_type": "markdown", + "id": "423da633", + "metadata": { + "id": "423da633" + }, + "source": [ + "All went well locally, now let's push this second model object up to Nextmv Cloud. As before, we need to specify a model configuration and app manifest before pushing. This time, we additionally demonstrate how to cut a version and assign that version to a managed instance called `staging`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fcb3b48e", + "metadata": { + "id": "fcb3b48e" + }, + "outputs": [], + "source": [ + "optimizer_app_name = \"avocado-price-optimizer\"\n", + "if cloud.Application.exists(client, id=optimizer_app_name):\n", + " optimizer_app = cloud.Application(client=client, id=optimizer_app_name)\n", + "else:\n", + " optimizer_app = cloud.Application.new(client=client, id=optimizer_app_name, name=optimizer_app_name)\n", + "\n", + "model_configuration = nextmv.ModelConfiguration(\n", + " name=\"avocado-price-optimizer\",\n", + " requirements=[\n", + " \"nextmv==0.20.1\",\n", + " \"nextmv-gurobipy==0.2.1\",\n", + " \"plotly==6.0.0\"\n", + " ],\n", + " options=options,\n", + ")\n", + "manifest = nextmv.cloud.Manifest.from_model_configuration(model_configuration)\n", + "\n", + "optimizer_app.push(\n", + " manifest=manifest,\n", + " model=model,\n", + " model_configuration=model_configuration,\n", + " verbose=True,\n", + ")\n", + "\n", + "version = str(uuid.uuid4())\n", + "optimizer_app.new_version(id=version, name=version)\n", + "optimizer_app.new_instance(\n", + " version_id=version,\n", + " id=\"staging\",\n", + " name=\"staging\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "a18fe8e4", + "metadata": { + "id": "a18fe8e4" + }, + "source": [ + "Now, let's try running it remotely on the Nextmv Platform." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ed651205", + "metadata": { + "collapsed": true, + "id": "ed651205" + }, + "outputs": [], + "source": [ + "optimization_result = optimizer_app.new_run_with_result(input=data)\n", + "print(json.dumps(optimization_result.output, indent=2))" + ] + }, + { + "cell_type": "markdown", + "id": "0ybz3Cb2ulem", + "metadata": { + "id": "0ybz3Cb2ulem" + }, + "source": [ + "## Part III: Create a Nextmv Workflow to Chain the Apps Together 🐰\n", + "\n", + "Up to this point, we have two distinctly managed Nextmv Applications:\n", + "* `avocado-ml-regressor`\n", + "* `avocado-price-optimizer`\n", + "\n", + "Now, we will create a third Nextmv Application to run a workflow which chains these executions together:\n", + "\n", + "Fit the regressor ➡️ Send fitted results ➡️ Price optimization\n", + "\n", + "Let's set up that workflow using Nextmv and `nextpipe`!" + ] + }, + { + "cell_type": "markdown", + "id": "mwBD4UmtvQzW", + "metadata": { + "id": "mwBD4UmtvQzW" + }, + "source": [ + "\n", + "\n", + "---\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "id": "36X32iLKsVTG", + "metadata": { + "id": "36X32iLKsVTG" + }, + "source": [ + "Write the data to the local filesystem, under the `input` folder." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4GAxHZTXmiI0", + "metadata": { + "id": "4GAxHZTXmiI0" + }, + "outputs": [], + "source": [ + "import pandas as pd\n", + "avocado = pd.read_csv('https://raw.githubusercontent.com/Gurobi/modeling-examples/master/price_optimization/HABdata_2019_2022.csv') # dataset downloaded directly from HAB\n", + "avocado_old = pd.read_csv('https://raw.githubusercontent.com/Gurobi/modeling-examples/master/price_optimization/kaggledata_till2018.csv') # dataset downloaded from Kaggle\n", + "avocado = pd.concat([avocado, avocado_old], ignore_index=True)\n", + "if not os.path.exists('input'):\n", + " os.mkdir('input')\n", + "\n", + "avocado.to_csv('input/avocado_input.csv', index=False)" + ] + }, + { + "cell_type": "markdown", + "id": "XwPJ95dVsmzr", + "metadata": { + "id": "XwPJ95dVsmzr" + }, + "source": [ + "Install `nextpipe`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "C3RSJ9a6vh8m", + "metadata": { + "collapsed": true, + "id": "C3RSJ9a6vh8m" + }, + "outputs": [], + "source": [ + "%pip install --upgrade nextpipe" + ] + }, + { + "cell_type": "markdown", + "id": "qjXMNbmMXY_i", + "metadata": { + "id": "qjXMNbmMXY_i" + }, + "source": [ + "We're ready to construct our workflow. The workflow is intended to run as a Nextmv Application.\n", + "\n", + "These are the steps of the workflow. Note how each step specifies the predecessors, chaining the logic together.\n", + "\n", + "| Step | Needs | Description |\n", + "| :-: | :- | :- |\n", + "| `load` | | Load CSV data and transform it into a `pd.DataFrame`. |\n", + "| ⬇️ | | |\n", + "| `prepare` | `load` | Receive the `pd.DataFrame`, do all the data preparation from earlier, and return a `dict`. |\n", + "| ⬇️ | | |\n", + "| `regress` | `prepare` | Call the deployed Nextmv Application to fit the ML regressor model. We do this by leveraging the `@app` decorator from `nextpipe`. 
|\n", + "| ⬇️ | | |\n", + "| `join` | `prepare`, `regress` | Join the output from the `regress` step with the optimization input data. |\n", + "| ⬇️ | | |\n", + "| `optimize` | `join` | Call the deployed Nextmv Application to optimize the price and supply of avocados per region using the `@app` decorator from `nextpipe`. |\n", + "| ⬇️ | | |\n", + "| `postprocess` | `optimize` | Transform the optimization output to a `nextmv.Output`. | " + ] + }, + { + "cell_type": "markdown", + "id": "AlDyyxijvNgw", + "metadata": { + "id": "AlDyyxijvNgw" + }, + "source": [ + "\n", + "\n", + "---\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "id": "2CzTiu6us5Uf", + "metadata": { + "id": "2CzTiu6us5Uf" + }, + "source": [ + "We are going to create a new `workflow` folder, and store the Nextmv Application files in it:\n", + "* `main.py` ➡️ File with the Nextmv Application code.\n", + "* `requirements.txt` ➡️ File that specifies the dependencies the app needs.\n", + "\n", + "The previous applications were created from a `nextmv.Model`, but in this case, we are creating an app based on files." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "hkc4eCi3r1kg", + "metadata": { + "id": "hkc4eCi3r1kg" + }, + "outputs": [], + "source": [ + "if not os.path.exists('workflow'):\n", + " os.mkdir('workflow')" + ] + }, + { + "cell_type": "markdown", + "id": "FZOnvAl4uQ8R", + "metadata": { + "id": "FZOnvAl4uQ8R" + }, + "source": [ + "Write the `main.py` file to the `workflow` folder." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "zXoWZOumvQO3", + "metadata": { + "id": "zXoWZOumvQO3" + }, + "outputs": [], + "source": [ + "%%writefile workflow/main.py\n", + "import os\n", + "\n", + "import nextmv\n", + "import pandas as pd\n", + "from nextmv import cloud\n", + "from nextpipe import FlowSpec, app, needs, step\n", + "\n", + "options = nextmv.Options(\n", + " nextmv.Parameter(\n", + " name=\"input\",\n", + " param_type=str,\n", + " default=\"input\",\n", + " description=\"Path to the input data.\",\n", + " required=False,\n", + " ),\n", + " nextmv.Parameter(\n", + " name=\"supply\",\n", + " param_type=int,\n", + " default=30,\n", + " description=\"Total amount of avocado supply.\",\n", + " required=False,\n", + " ),\n", + ")\n", + "\n", + "\n", + "class Flow(FlowSpec):\n", + " @step\n", + " def load(_):\n", + " \"\"\"Loads the data.\"\"\"\n", + " csv_file = [f for f in os.listdir(options.input) if f.endswith(\".csv\")][0]\n", + " avocado = pd.read_csv(os.path.join(options.input, csv_file))\n", + " return avocado\n", + "\n", + " @needs(predecessors=[load])\n", + " @step\n", + " def prepare(avocado: pd.DataFrame):\n", + " \"\"\"Prepares the data.\"\"\"\n", + " # Add the index for each year from 2015 through 2022\n", + " avocado[\"date\"] = pd.to_datetime(avocado[\"date\"])\n", + " avocado[\"year\"] = pd.DatetimeIndex(avocado[\"date\"]).year\n", + " avocado[\"year_index\"] = avocado[\"year\"] - 2015\n", + " avocado = avocado.sort_values(by=\"date\")\n", + "\n", + " # Define the peak season\n", + " avocado[\"month\"] = pd.DatetimeIndex(avocado[\"date\"]).month\n", + " peak_months = range(2, 8)\n", + "\n", + " def peak_season(row):\n", + " return 1 if int(row[\"month\"]) in peak_months else 0\n", + "\n", + " avocado[\"peak\"] = avocado.apply(lambda row: peak_season(row), axis=1)\n", + "\n", + " # Scale the number of avocados to millions\n", + " avocado[\"units_sold\"] = avocado[\"units_sold\"] / 1000000\n", + "\n", + " # Select 
only conventional avocados\n", + " avocado = avocado[avocado[\"type\"] == \"Conventional\"]\n", + "\n", + " avocado = avocado[\n", + " [\n", + " \"date\",\n", + " \"units_sold\",\n", + " \"price\",\n", + " \"region\",\n", + " \"year\",\n", + " \"month\",\n", + " \"year_index\",\n", + " \"peak\",\n", + " ]\n", + " ].reset_index(drop=True)\n", + " regions = [\n", + " \"Great_Lakes\",\n", + " \"Midsouth\",\n", + " \"Northeast\",\n", + " \"Northern_New_England\",\n", + " \"SouthCentral\",\n", + " \"Southeast\",\n", + " \"West\",\n", + " \"Plains\",\n", + " ]\n", + " df = avocado[avocado.region.isin(regions)]\n", + "\n", + " for col in df.select_dtypes(include=[\"datetime\"]).columns:\n", + " df[col] = df[col].astype(str)\n", + "\n", + " return df.to_dict()\n", + "\n", + " @app(app_id=\"avocado-ml-regressor\", instance_id=\"devint\")\n", + " @needs(predecessors=[prepare])\n", + " @step\n", + " def regress():\n", + " \"\"\"Fits the ML regressor model.\"\"\"\n", + " pass\n", + "\n", + " @needs(predecessors=[prepare, regress])\n", + " @step\n", + " def join(df: dict, coef_dict: dict):\n", + " \"\"\"Joins the ML regressor results with the optimization input data.\"\"\"\n", + " df = pd.DataFrame(df)\n", + " data = {\n", + " \"regions\": [\n", + " \"Great_Lakes\",\n", + " \"Midsouth\",\n", + " \"Northeast\",\n", + " \"Northern_New_England\",\n", + " \"SouthCentral\",\n", + " \"Southeast\",\n", + " \"West\",\n", + " \"Plains\",\n", + " ],\n", + " \"total_amount_of_supply\": 30,\n", + " \"cost_per_wasted_product\": 0.1,\n", + " \"peak\": 1,\n", + " \"transport_costs\": {\n", + " \"Great_Lakes\": 0.3,\n", + " \"Midsouth\": 0.1,\n", + " \"Northeast\": 0.4,\n", + " \"Northern_New_England\": 0.5,\n", + " \"SouthCentral\": 0.3,\n", + " \"Southeast\": 0.2,\n", + " \"West\": 0.2,\n", + " \"Plains\": 0.2,\n", + " },\n", + " \"year\": 2022,\n", + " \"minimum_product_price\": 0,\n", + " \"maximum_product_price\": 2,\n", + " \"minimum_product_allocations\": dict(\n", + " 
df.groupby(\"region\")[\"units_sold\"].min()\n", + " ),\n", + " \"maximum_product_allocations\": dict(\n", + " df.groupby(\"region\")[\"units_sold\"].max()\n", + " ),\n", + " \"coefficients\": coef_dict.get(\"solution\"),\n", + " }\n", + "\n", + " return data\n", + "\n", + " @app(\n", + " app_id=\"avocado-price-optimizer\",\n", + " instance_id=\"staging\",\n", + " parameters={\"supply\": options.supply},\n", + " )\n", + " @needs(predecessors=[join])\n", + " @step\n", + " def optimize():\n", + " \"\"\"Optimizes the price and supply of avocados per region.\"\"\"\n", + " pass\n", + "\n", + " @needs(predecessors=[optimize])\n", + " @step\n", + " def postprocess(result: dict):\n", + " \"\"\"Postprocesses the results.\"\"\"\n", + " tabular = pd.DataFrame(result.get(\"solution\"))\n", + " output = nextmv.Output(\n", + " output_format=nextmv.OutputFormat.CSV_ARCHIVE,\n", + " options=result.get(\"solution\", {}).get(\"options\"),\n", + " solution={\"solution\": tabular.to_dict(orient=\"records\")},\n", + " statistics=result.get(\"statistics\", {}),\n", + " assets=result.get(\"assets\", []),\n", + " )\n", + " return output\n", + "\n", + "\n", + "def main() -> None:\n", + " \"\"\"Main entrypoint for the program.\"\"\"\n", + "\n", + " client = cloud.Client(api_key=os.getenv(\"NEXTMV_API_KEY\"))\n", + " flow = Flow(name=\"DecisionFlow\", input=None, conf=None, client=client)\n", + " flow.run()\n", + " output = flow.get_result(flow.postprocess)\n", + " nextmv.write_local(output)\n", + "\n", + "if __name__ == \"__main__\":\n", + " main()" + ] + }, + { + "cell_type": "markdown", + "id": "bqgBCuCUiCb_", + "metadata": { + "id": "bqgBCuCUiCb_" + }, + "source": [ + "Write the `requirements.txt` file to the `workflow` folder." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d-sQSia8V515", + "metadata": { + "id": "d-sQSia8V515" + }, + "outputs": [], + "source": [ + "%%writefile workflow/requirements.txt\n", + "pandas==2.2.2\n", + "nextmv==0.20.1\n", + "nextpipe==0.1.3" + ] + }, + { + "cell_type": "markdown", + "id": "bZAHiYHivmy9", + "metadata": { + "id": "bZAHiYHivmy9" + }, + "source": [ + "Lastly, and similar to the previous applications, we create our `workflow` Nextmv Application. For this application, we are also:\n", + "\n", + "* Creating a secrets collection with the value of our `NEXTMV_API_KEY`\n", + "* Cutting a version of our executable\n", + "* Creating a managed instance to make runs against\n", + "* Configuring the instance to use the newly cut version and secrets collection" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "P6i5zq16il2s", + "metadata": { + "id": "P6i5zq16il2s" + }, + "outputs": [], + "source": [ + "workflow_app_name = \"workflow\"\n", + "if cloud.Application.exists(client, id=workflow_app_name):\n", + " workflow_app = cloud.Application(client=client, id=workflow_app_name)\n", + "else:\n", + " workflow_app = cloud.Application.new(client=client, id=workflow_app_name, name=workflow_app_name, is_workflow=True)\n", + "\n", + "# This manifest ends up as an `app.yaml` file in the app.\n", + "manifest = cloud.Manifest(\n", + " type=cloud.ManifestType.PYTHON,\n", + " runtime=cloud.ManifestRuntime.PYTHON,\n", + " files=[\"main.py\"],\n", + " python=cloud.ManifestPython(\n", + " pip_requirements=\"requirements.txt\"\n", + " )\n", + ")\n", + "\n", + "workflow_app.push(\n", + " manifest=manifest,\n", + " app_dir=\"workflow\",\n", + " verbose=True,\n", + ")\n", + "\n", + "secrets = workflow_app.new_secrets_collection(\n", + " secrets=[\n", + " cloud.Secret(\n", + " secret_type=cloud.SecretType.ENV,\n", + " location=\"NEXTMV_API_KEY\",\n", + " value=userdata.get(\"NEXTMV_API_KEY\"),\n", + " ),\n", + " ],\n", + 
id=\"workflow-secrets\",\n", + " name=\"workflow-secrets\",\n", + ")\n", + "\n", + "version = str(uuid.uuid4())\n", + "workflow_app.new_version(id=version, name=version)\n", + "workflow_app.new_instance(\n", + " version_id=version,\n", + " id=\"staging\",\n", + " name=\"staging\",\n", + " configuration=cloud.InstanceConfiguration(\n", + " secrets_collection_id=\"workflow-secrets\"\n", + " ),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "nIEWuV9bqAlz", + "metadata": { + "id": "nIEWuV9bqAlz" + }, + "source": [ + "Congrats, you're all done 🚀!\n", + "\n", + "You now have 3 apps deployed in your Nextmv Account, go [here](https://cloud.nextmv.io/) to experiment!" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 340585ddbaa2187224941be074ffc64483a6c229 Mon Sep 17 00:00:00 2001 From: Marius Merschformann Date: Fri, 4 Apr 2025 23:47:31 +0200 Subject: [PATCH 2/2] Ignore linting for colab notebook --- ruff.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ruff.toml b/ruff.toml index 46fb29fd..3bcbfd55 100644 --- a/ruff.toml +++ b/ruff.toml @@ -11,6 +11,12 @@ lint.select = [ "UP", # pyupgrade ] +# Ignore specific directories +exclude = [ + "python-gurobi-price-optimization", +] + + # Rule configuration. line-length = 120