diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/Code/digit_identification.py b/how-to-use-azureml/machine-learning-pipelines/parallel-run/Code/digit_identification.py
index ff187551a..280f77cc1 100644
--- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/Code/digit_identification.py
+++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/Code/digit_identification.py
@@ -11,14 +11,17 @@ def init():
     global g_tf_sess
+
+    # Disable eager execution
+    tf.compat.v1.disable_eager_execution()
 
     # pull down model from workspace
     model_path = Model.get_model_path("mnist-prs")
 
     # construct graph to execute
-    tf.reset_default_graph()
-    saver = tf.train.import_meta_graph(os.path.join(model_path, 'mnist-tf.model.meta'))
-    g_tf_sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))
+    tf.compat.v1.reset_default_graph()
+    saver = tf.compat.v1.train.import_meta_graph(os.path.join(model_path, 'mnist-tf.model.meta'))
+    g_tf_sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(device_count={'GPU': 0}))
     saver.restore(g_tf_sess, os.path.join(model_path, 'mnist-tf.model'))
@@ -33,7 +36,7 @@ def run(mini_batch):
         data = Image.open(image)
         np_im = np.array(data).reshape((1, 784))
         # perform inference
-        inference_result = output.eval(feed_dict={in_tensor: np_im}, session=g_tf_sess)
+        inference_result = g_tf_sess.run(output, feed_dict={in_tensor: np_im})
         # find best probability, and add to result list
         best_result = np.argmax(inference_result)
         resultList.append("{}: {}".format(os.path.basename(image), best_result))
diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/Code/iris_score.py b/how-to-use-azureml/machine-learning-pipelines/parallel-run/Code/iris_score.py
index 5b1b89c05..6dc0c7cad 100644
--- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/Code/iris_score.py
+++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/Code/iris_score.py
@@ -1,10 +1,6 @@
-import io
 import pickle
 import argparse
-import numpy as np
-
 from azureml.core.model import Model
-from sklearn.linear_model import LogisticRegression
 
 from azureml_user.parallel_run import EntryScript
diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb
index 267d97268..42142b165 100644
--- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb
+++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb
@@ -306,7 +306,7 @@
     "#### An entry script\n",
     "This script accepts requests, scores the requests by using the model, and returns the results.\n",
     "- __init()__ - Typically this function loads the model into a global object. This function is run only once at the start of batch processing per worker node/process. The init method can make use of the following environment variables (ParallelRunStep input):\n",
-    " 1.\tAZUREML_BI_OUTPUT_PATH \u00e2\u20ac\u201c output folder path\n",
+    " 1.\tAZUREML_BI_OUTPUT_PATH - output folder path\n",
     "- __run(mini_batch)__ - The method to be parallelized. Each invocation will receive one mini-batch.<br>\n",
\n", "__mini_batch__: Batch inference will invoke run method and pass either a list or Pandas DataFrame as an argument to the method. Each entry in min_batch will be - a filepath if input is a FileDataset, a Pandas DataFrame if input is a TabularDataset.
\n", "__run__ method response: run() method should return a Pandas DataFrame or an array. For append_row output_action, these returned elements are appended into the common output file. For summary_only, the contents of the elements are ignored. For all output actions, each returned output element indicates one successful inference of input element in the input mini-batch.\n", @@ -359,9 +359,9 @@ "from azureml.core import Environment\n", "from azureml.core.runconfig import CondaDependencies, DEFAULT_CPU_IMAGE\n", "\n", - "batch_conda_deps = CondaDependencies.create(python_version=\"3.7\",\n", + "batch_conda_deps = CondaDependencies.create(python_version=\"3.8\",\n", " conda_packages=['pip==20.2.4'],\n", - " pip_packages=[\"tensorflow==1.15.2\", \"pillow\", \"protobuf==3.20.1\",\n", + " pip_packages=[\"tensorflow==2.13.0\", \"pillow\", \"protobuf==4.23.3\",\n", " \"azureml-core\", \"azureml-dataset-runtime[fuse]\"])\n", "batch_env = Environment(name=\"batch_environment\")\n", "batch_env.python.conda_dependencies = batch_conda_deps\n", @@ -615,7 +615,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.9" + "version": "3.8.16" }, "tags": [ "Batch Inferencing", diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-partition-per-folder.ipynb b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-partition-per-folder.ipynb index 98922dc82..cc793a0d8 100644 --- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-partition-per-folder.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-partition-per-folder.ipynb @@ -390,7 +390,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.9" + "version": "3.8.16" } }, "nbformat": 4, diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb index 103fd61a8..bafa19e52 100644 --- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb @@ -252,7 +252,7 @@ "#### An entry script\n", "This script accepts requests, scores the requests by using the model, and returns the results.\n", "- __init()__ - Typically this function loads the model into a global object. This function is run only once at the start of batch processing per worker node/process. init method can make use of following environment variables (ParallelRunStep input):\n", - " 1.\tAZUREML_BI_OUTPUT_PATH \u00e2\u20ac\u201c output folder path\n", + " 1.\tAZUREML_BI_OUTPUT_PATH - output folder path\n", "- __run(mini_batch)__ - The method to be parallelized. Each invocation will have one minibatch.
\n", "__mini_batch__: Batch inference will invoke run method and pass either a list or Pandas DataFrame as an argument to the method. Each entry in min_batch will be - a filepath if input is a FileDataset, a Pandas DataFrame if input is a TabularDataset.
\n", "__run__ method response: run() method should return a Pandas DataFrame or an array. For append_row output_action, these returned elements are appended into the common output file. For summary_only, the contents of the elements are ignored. For all output actions, each returned output element indicates one successful inference of input element in the input mini-batch.\n", @@ -308,10 +308,10 @@ "from azureml.core import Environment\n", "from azureml.core.runconfig import CondaDependencies\n", "\n", - "predict_conda_deps = CondaDependencies.create(python_version=\"3.7\", \n", + "predict_conda_deps = CondaDependencies.create(python_version=\"3.8\", \n", " conda_packages=['pip==20.2.4'],\n", - " pip_packages=[\"scikit-learn==0.20.3\",\n", - " \"azureml-core\", \"azureml-dataset-runtime[pandas,fuse]\"])\n", + " pip_packages=[\"numpy==1.19.5\", \"pandas==1.4.4\", \"scikit-learn==0.22.2\",\n", + " \"azureml-core\", \"azureml-dataset-runtime[fuse]\"])\n", "\n", "predict_env = Environment(name=\"predict_environment\")\n", "predict_env.python.conda_dependencies = predict_conda_deps\n", @@ -531,4 +531,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-partition-per-column.ipynb b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-partition-per-column.ipynb index 79e355122..224f3d812 100644 --- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-partition-per-column.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-partition-per-column.ipynb @@ -413,9 +413,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.13" + "version": "3.8.16" } }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +}