# This script generates the scoring and schema files
# necessary to operationalize your model.
from azureml.api.schema.dataTypes import DataTypes
from azureml.api.schema.sampleDefinition import SampleDefinition
from azureml.api.realtime.services import generate_schema
from keras.models import load_model
import numpy as np
import os
import base64
import json
import pickle
# Prepare the web service definition by authoring
# init() and run() functions. Test the functions
# before deploying the web service.
def init():
    # Load the trained Keras model once, when the service starts.
    global model
    model = load_model("outputs/my_model.h5")
def run(input_bytes):
    # The payload is a base64-encoded, pickled NumPy array.
    input_bytes = base64.b64decode(bytes(input_bytes, 'utf-8'))
    # pickle.loads replaces np.loads, which was removed in NumPy 1.17.
    img = pickle.loads(input_bytes)
    prediction = model.predict(x=img)
    index = np.argmax(prediction)
    return json.dumps({"index": str(index)})
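# A minimal local smoke test for init()/run() (a sketch, not part of the
# deployed service): it assumes outputs/my_model.h5 exists, and the
# (1, 28, 28, 1) input shape below is a hypothetical placeholder for your
# model's actual input shape.
def test_run_locally():
    init()
    img = np.zeros((1, 28, 28, 1), dtype=np.float32)  # hypothetical shape
    payload = base64.b64encode(pickle.dumps(img)).decode('utf-8')
    print(run(payload))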
def generate_api_schema():
    print("create schema")
    sample_input = "bytestring_representing_image"
    inputs = {"input_bytes": SampleDefinition(DataTypes.STANDARD, sample_input)}
    os.makedirs('outputs', exist_ok=True)
    print(generate_schema(inputs=inputs, filepath=os.path.join("outputs", "schema.json"), run_func=run))
# Test code to run in an IDE or Azure ML Workbench.
if __name__ == '__main__':
    # Generate the API schema file.
    generate_api_schema()
    # If you needed the model (a large file) in a Workbench run, you would load
    # it from a shared directory: files written to the outputs folder are
    # attached to their own run and are not available in the next run. In this
    # case, however, the model file is not needed in Workbench runs; they exist
    # only to create the schema.json file.
    # When deploying the service:
    # - In the CLI, pass the model file as an argument when creating the
    #   service image. The file is available under outputs/ after the training
    #   run returns it.
    # - The model file is copied into the container image under the same path,
    #   so the service can load it from outputs/<filename>.h5; see init().
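    # A sketch of the corresponding CLI call (classic Azure ML Workbench CLI;
    # the service name "myservice" is a placeholder and the exact flags may
    # differ in your CLI version):
    #   az ml service create realtime -f score.py \
    #       --model-file outputs/my_model.h5 -s outputs/schema.json \
    #       -n myservice -r python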