diff --git a/sdk/batch/azure-batch/MANIFEST.in b/sdk/batch/azure-batch/MANIFEST.in
index b722c082fa15..cb1e2b1128cb 100644
--- a/sdk/batch/azure-batch/MANIFEST.in
+++ b/sdk/batch/azure-batch/MANIFEST.in
@@ -1,5 +1,6 @@
 include *.md
 include LICENSE
-include azure/__init__.py
-recursive-include tests *.py
 include azure/batch/py.typed
+recursive-include tests *.py
+recursive-include samples *.py *.md
+include azure/__init__.py
\ No newline at end of file
diff --git a/sdk/batch/azure-batch/_meta.json b/sdk/batch/azure-batch/_meta.json
new file mode 100644
index 000000000000..f19c0a56cdec
--- /dev/null
+++ b/sdk/batch/azure-batch/_meta.json
@@ -0,0 +1,6 @@
+{
+  "commit": "e8136c11848f05e79597bab310539c506b4af9df",
+  "repository_url": "https://github.com/test-repo-billy/azure-rest-api-specs",
+  "typespec_src": "specification/batch/Azure.Batch",
+  "@azure-tools/typespec-python": "0.29.0"
+}
\ No newline at end of file
diff --git a/sdk/batch/azure-batch/azure/__init__.py b/sdk/batch/azure-batch/azure/__init__.py
index 69e3be50dac4..d55ccad1f573 100644
--- a/sdk/batch/azure-batch/azure/__init__.py
+++ b/sdk/batch/azure-batch/azure/__init__.py
@@ -1 +1 @@
-__path__ = __import__('pkgutil').extend_path(__path__, __name__)
+__path__ = __import__("pkgutil").extend_path(__path__, __name__)  # type: ignore
diff --git a/sdk/batch/azure-batch/azure/batch/__init__.py b/sdk/batch/azure-batch/azure/batch/__init__.py
index 78e5cafcdeff..13f52e134024 100644
--- a/sdk/batch/azure-batch/azure/batch/__init__.py
+++ b/sdk/batch/azure-batch/azure/batch/__init__.py
@@ -1,19 +1,26 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
-from ._configuration import BatchServiceClientConfiguration
-from ._batch_service_client import BatchServiceClient
-__all__ = ['BatchServiceClient', 'BatchServiceClientConfiguration']
-
+from ._client import BatchClient
 from ._version import VERSION

 __version__ = VERSION

+try:
+    from ._patch import __all__ as _patch_all
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+except ImportError:
+    _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "BatchClient",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])
+
+_patch_sdk()
diff --git a/sdk/batch/azure-batch/azure/batch/_batch_service_client.py b/sdk/batch/azure-batch/azure/batch/_batch_service_client.py
deleted file mode 100644
index 794335ffedad..000000000000
--- a/sdk/batch/azure-batch/azure/batch/_batch_service_client.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
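[Editorial aside, not part of the diff] The new azure/batch/__init__.py above exports BatchClient in place of the msrest-based BatchServiceClient whose deletion begins here. A rough sketch of what that means for callers; the two halves target the old and the new package version respectively, and the credential classes shown (SharedKeyCredentials from the legacy batch_auth module, DefaultAzureCredential from azure-identity) are assumptions, not part of this change:

# Old (msrest-based client, deleted in this change):
from azure.batch import BatchServiceClient
from azure.batch.batch_auth import SharedKeyCredentials  # assumed legacy helper, not shown in this diff

old_client = BatchServiceClient(
    SharedKeyCredentials("<account-name>", "<account-key>"),
    batch_url="https://<account>.<region>.batch.azure.com",
)

# New (azure-core based client, added in this change):
from azure.batch import BatchClient
from azure.identity import DefaultAzureCredential  # assumed; any azure.core TokenCredential works

new_client = BatchClient(
    endpoint="https://<account>.<region>.batch.azure.com",
    credential=DefaultAzureCredential(),
)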
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.service_client import SDKClient -from msrest import Serializer, Deserializer - -from ._configuration import BatchServiceClientConfiguration -from .operations import ApplicationOperations -from .operations import PoolOperations -from .operations import AccountOperations -from .operations import CertificateOperations -from .operations import FileOperations -from .operations import JobScheduleOperations -from .operations import JobOperations -from .operations import TaskOperations -from .operations import ComputeNodeOperations -from .operations import ComputeNodeExtensionOperations -from . import models -from .custom.patch import patch_client - -class BatchServiceClient(SDKClient): - """A client for issuing REST requests to the Azure Batch service. - - :ivar config: Configuration for client. - :vartype config: BatchServiceClientConfiguration - - :ivar application: Application operations - :vartype application: azure.batch.operations.ApplicationOperations - :ivar pool: Pool operations - :vartype pool: azure.batch.operations.PoolOperations - :ivar account: Account operations - :vartype account: azure.batch.operations.AccountOperations - :ivar certificate: Certificate operations - :vartype certificate: azure.batch.operations.CertificateOperations - :ivar file: File operations - :vartype file: azure.batch.operations.FileOperations - :ivar job_schedule: JobSchedule operations - :vartype job_schedule: azure.batch.operations.JobScheduleOperations - :ivar job: Job operations - :vartype job: azure.batch.operations.JobOperations - :ivar task: Task operations - :vartype task: azure.batch.operations.TaskOperations - :ivar compute_node: ComputeNode operations - :vartype compute_node: azure.batch.operations.ComputeNodeOperations - :ivar compute_node_extension: ComputeNodeExtension operations - :vartype compute_node_extension: azure.batch.operations.ComputeNodeExtensionOperations - - :param credentials: Credentials needed for the client to connect to Azure. - :type credentials: :mod:`A msrestazure Credentials - object` - :param batch_url: The base URL for all Azure Batch service requests. 
- :type batch_url: str - """ - - def __init__( - self, credentials, batch_url): - - self.config = BatchServiceClientConfiguration(credentials, batch_url) - super(BatchServiceClient, self).__init__(self.config.credentials, self.config) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2024-02-01.19.0' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.application = ApplicationOperations( - self._client, self.config, self._serialize, self._deserialize) - self.pool = PoolOperations( - self._client, self.config, self._serialize, self._deserialize) - self.account = AccountOperations( - self._client, self.config, self._serialize, self._deserialize) - self.certificate = CertificateOperations( - self._client, self.config, self._serialize, self._deserialize) - self.file = FileOperations( - self._client, self.config, self._serialize, self._deserialize) - self.job_schedule = JobScheduleOperations( - self._client, self.config, self._serialize, self._deserialize) - self.job = JobOperations( - self._client, self.config, self._serialize, self._deserialize) - self.task = TaskOperations( - self._client, self.config, self._serialize, self._deserialize) - self.compute_node = ComputeNodeOperations( - self._client, self.config, self._serialize, self._deserialize) - self.compute_node_extension = ComputeNodeExtensionOperations( - self._client, self.config, self._serialize, self._deserialize) - -patch_client() diff --git a/sdk/batch/azure-batch/azure/batch/_client.py b/sdk/batch/azure-batch/azure/batch/_client.py new file mode 100644 index 000000000000..d251ec03ac97 --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/_client.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, TYPE_CHECKING +from typing_extensions import Self + +from azure.core import PipelineClient +from azure.core.pipeline import policies +from azure.core.rest import HttpRequest, HttpResponse + +from ._configuration import BatchClientConfiguration +from ._operations import BatchClientOperationsMixin +from ._serialization import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + + +class BatchClient(BatchClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """BatchClient. + + :param endpoint: Batch account endpoint (for example: + https://batchaccount.eastus2.batch.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-02-01.19.0". Note that overriding this default value may result in unsupported behavior. 
+ :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: "TokenCredential", **kwargs: Any) -> None: + _endpoint = "{endpoint}" + self._config = BatchClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + kwargs["request_id_header_name"] = "client-request-id" + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> Self: + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/sdk/batch/azure-batch/azure/batch/_configuration.py b/sdk/batch/azure-batch/azure/batch/_configuration.py index 2ea006b67cb6..cbf6f1d338ef 100644 --- a/sdk/batch/azure-batch/azure/batch/_configuration.py +++ b/sdk/batch/azure-batch/azure/batch/_configuration.py @@ -1,46 +1,65 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
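[Editorial aside, not part of the diff] The generated _client.py above wires up the azure.core pipeline and exposes send_request for raw calls. A minimal usage sketch under stated assumptions: DefaultAzureCredential comes from azure-identity (any TokenCredential works), the endpoint is a placeholder, and the api-version query parameter only needs to be supplied explicitly on hand-built requests like this one:

from azure.core.rest import HttpRequest
from azure.identity import DefaultAzureCredential  # assumption: azure-identity is installed
from azure.batch import BatchClient

# Placeholder endpoint; use your own Batch account endpoint.
endpoint = "https://<account>.<region>.batch.azure.com"

with BatchClient(endpoint=endpoint, credential=DefaultAzureCredential()) as client:
    # Relative URLs are resolved against the {endpoint} base URL inside send_request().
    request = HttpRequest("GET", "/applications", params={"api-version": "2024-02-01.19.0"})
    response = client.send_request(request)
    response.raise_for_status()
    print(response.json())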
# -------------------------------------------------------------------------- -from msrestazure import AzureConfiguration + +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline import policies from ._version import VERSION +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + + +class BatchClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for BatchClient. -class BatchServiceClientConfiguration(AzureConfiguration): - """Configuration for BatchServiceClient Note that all parameters used to create this instance are saved as instance attributes. - :param credentials: Credentials needed for the client to connect to Azure. - :type credentials: :mod:`A msrestazure Credentials - object` - :param batch_url: The base URL for all Azure Batch service requests. - :type batch_url: str + :param endpoint: Batch account endpoint (for example: + https://batchaccount.eastus2.batch.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-02-01.19.0". Note that overriding this default value may result in unsupported behavior. + :paramtype api_version: str """ - def __init__( - self, credentials, batch_url): - - if credentials is None: - raise ValueError("Parameter 'credentials' must not be None.") - if batch_url is None: - raise ValueError("Parameter 'batch_url' must not be None.") - base_url = '{batchUrl}' - - super(BatchServiceClientConfiguration, self).__init__(base_url) + def __init__(self, endpoint: str, credential: "TokenCredential", **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "2024-02-01.19.0") - # Starting Autorest.Python 4.0.64, make connection pool activated by default - self.keep_alive = True + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") - self.add_user_agent('azure-batch/{}'.format(VERSION)) - self.add_user_agent('Azure-SDK-For-Python') + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://batch.core.windows.net//.default"]) + kwargs.setdefault("sdk_moniker", "batch/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) - self.credentials = credentials - self.batch_url = batch_url + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential 
and not self.authentication_policy: + self.authentication_policy = policies.BearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/batch/azure-batch/azure/batch/_model_base.py b/sdk/batch/azure-batch/azure/batch/_model_base.py new file mode 100644 index 000000000000..c4b1008c1e85 --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/_model_base.py @@ -0,0 +1,895 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=protected-access, arguments-differ, signature-differs, broad-except + +import copy +import calendar +import decimal +import functools +import sys +import logging +import base64 +import re +import typing +import enum +import email.utils +from datetime import datetime, date, time, timedelta, timezone +from json import JSONEncoder +from typing_extensions import Self +import isodate +from azure.core.exceptions import DeserializationError +from azure.core import CaseInsensitiveEnumMeta +from azure.core.pipeline import PipelineResponse +from azure.core.serialization import _Null + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping + +_LOGGER = logging.getLogger(__name__) + +__all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"] + +TZ_UTC = timezone.utc +_T = typing.TypeVar("_T") + + +def _timedelta_as_isostr(td: timedelta) -> str: + """Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 'P4DT12H30M05S' + + Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython + + :param timedelta td: The timedelta to convert + :rtype: str + :return: ISO8601 version of this timedelta + """ + + # Split seconds to larger units + seconds = td.total_seconds() + minutes, seconds = divmod(seconds, 60) + hours, minutes = divmod(minutes, 60) + days, hours = divmod(hours, 24) + + days, hours, minutes = list(map(int, (days, hours, minutes))) + seconds = round(seconds, 6) + + # Build date + date_str = "" + if days: + date_str = "%sD" % days + + if hours or minutes or seconds: + # Build time + time_str = "T" + + # Hours + bigger_exists = date_str or hours + if bigger_exists: + time_str += "{:02}H".format(hours) + + # Minutes + bigger_exists = bigger_exists or minutes + if bigger_exists: + time_str += "{:02}M".format(minutes) + + # Seconds + try: + if seconds.is_integer(): + seconds_string = "{:02}".format(int(seconds)) + else: + # 9 chars long w/ leading 0, 6 digits after decimal + seconds_string = "%09.6f" % seconds + # Remove trailing zeros + seconds_string = seconds_string.rstrip("0") + except AttributeError: # int.is_integer() raises + seconds_string = "{:02}".format(seconds) + + time_str += "{}S".format(seconds_string) + else: + time_str = "" + + return "P" + date_str + time_str + + +def _serialize_bytes(o, format: typing.Optional[str] = None) -> str: + encoded = base64.b64encode(o).decode() + if format == "base64url": + return encoded.strip("=").replace("+", "-").replace("/", "_") + return encoded + + +def _serialize_datetime(o, format: typing.Optional[str] = None): + if hasattr(o, "year") and hasattr(o, "hour"): + if format == "rfc7231": + return email.utils.format_datetime(o, usegmt=True) + if format == 
"unix-timestamp": + return int(calendar.timegm(o.utctimetuple())) + + # astimezone() fails for naive times in Python 2.7, so make make sure o is aware (tzinfo is set) + if not o.tzinfo: + iso_formatted = o.replace(tzinfo=TZ_UTC).isoformat() + else: + iso_formatted = o.astimezone(TZ_UTC).isoformat() + # Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt) + return iso_formatted.replace("+00:00", "Z") + # Next try datetime.date or datetime.time + return o.isoformat() + + +def _is_readonly(p): + try: + return p._visibility == ["read"] # pylint: disable=protected-access + except AttributeError: + return False + + +class SdkJSONEncoder(JSONEncoder): + """A JSON encoder that's capable of serializing datetime objects and bytes.""" + + def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs): + super().__init__(*args, **kwargs) + self.exclude_readonly = exclude_readonly + self.format = format + + def default(self, o): # pylint: disable=too-many-return-statements + if _is_model(o): + if self.exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + return {k: v for k, v in o.items() if k not in readonly_props} + return dict(o.items()) + try: + return super(SdkJSONEncoder, self).default(o) + except TypeError: + if isinstance(o, _Null): + return None + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, self.format) + try: + # First try datetime.datetime + return _serialize_datetime(o, self.format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return super(SdkJSONEncoder, self).default(o) + + +_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") +_VALID_RFC7231 = re.compile( + r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s" + r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT" +) + + +def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + attr = attr.upper() + match = _VALID_DATE.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + return date_obj + + +def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize RFC7231 formatted string into Datetime object. + + :param str attr: response string to be deserialized. 
+ :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + match = _VALID_RFC7231.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + return email.utils.parsedate_to_datetime(attr) + + +def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime: + """Deserialize unix timestamp into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + return datetime.fromtimestamp(attr, TZ_UTC) + + +def _deserialize_date(attr: typing.Union[str, date]) -> date: + """Deserialize ISO-8601 formatted string into Date object. + :param str attr: response string to be deserialized. + :rtype: date + :returns: The date object from that input + """ + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + if isinstance(attr, date): + return attr + return isodate.parse_date(attr, defaultmonth=None, defaultday=None) # type: ignore + + +def _deserialize_time(attr: typing.Union[str, time]) -> time: + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :returns: The time object from that input + """ + if isinstance(attr, time): + return attr + return isodate.parse_time(attr) + + +def _deserialize_bytes(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + return bytes(base64.b64decode(attr)) + + +def _deserialize_bytes_base64(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return bytes(base64.b64decode(encoded)) + + +def _deserialize_duration(attr): + if isinstance(attr, timedelta): + return attr + return isodate.parse_duration(attr) + + +def _deserialize_decimal(attr): + if isinstance(attr, decimal.Decimal): + return attr + return decimal.Decimal(str(attr)) + + +_DESERIALIZE_MAPPING = { + datetime: _deserialize_datetime, + date: _deserialize_date, + time: _deserialize_time, + bytes: _deserialize_bytes, + bytearray: _deserialize_bytes, + timedelta: _deserialize_duration, + typing.Any: lambda x: x, + decimal.Decimal: _deserialize_decimal, +} + +_DESERIALIZE_MAPPING_WITHFORMAT = { + "rfc3339": _deserialize_datetime, + "rfc7231": _deserialize_datetime_rfc7231, + "unix-timestamp": _deserialize_datetime_unix_timestamp, + "base64": _deserialize_bytes, + "base64url": _deserialize_bytes_base64, +} + + +def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None): + if rf and rf._format: + return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) + return _DESERIALIZE_MAPPING.get(annotation) + + +def _get_type_alias_type(module_name: str, alias_name: str): + types = { + k: v + for k, v in sys.modules[module_name].__dict__.items() + if isinstance(v, typing._GenericAlias) # type: ignore + } + if alias_name not in types: + return alias_name + return types[alias_name] + + +def _get_model(module_name: str, model_name: str): + models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)} + module_end = module_name.rsplit(".", 1)[0] + models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)}) + if 
isinstance(model_name, str): + model_name = model_name.split(".")[-1] + if model_name not in models: + return model_name + return models[model_name] + + +_UNSET = object() + + +class _MyMutableMapping(MutableMapping[str, typing.Any]): # pylint: disable=unsubscriptable-object + def __init__(self, data: typing.Dict[str, typing.Any]) -> None: + self._data = data + + def __contains__(self, key: typing.Any) -> bool: + return key in self._data + + def __getitem__(self, key: str) -> typing.Any: + return self._data.__getitem__(key) + + def __setitem__(self, key: str, value: typing.Any) -> None: + self._data.__setitem__(key, value) + + def __delitem__(self, key: str) -> None: + self._data.__delitem__(key) + + def __iter__(self) -> typing.Iterator[typing.Any]: + return self._data.__iter__() + + def __len__(self) -> int: + return self._data.__len__() + + def __ne__(self, other: typing.Any) -> bool: + return not self.__eq__(other) + + def keys(self) -> typing.KeysView[str]: + return self._data.keys() + + def values(self) -> typing.ValuesView[typing.Any]: + return self._data.values() + + def items(self) -> typing.ItemsView[str, typing.Any]: + return self._data.items() + + def get(self, key: str, default: typing.Any = None) -> typing.Any: + try: + return self[key] + except KeyError: + return default + + @typing.overload + def pop(self, key: str) -> typing.Any: ... + + @typing.overload + def pop(self, key: str, default: _T) -> _T: ... + + @typing.overload + def pop(self, key: str, default: typing.Any) -> typing.Any: ... + + def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + if default is _UNSET: + return self._data.pop(key) + return self._data.pop(key, default) + + def popitem(self) -> typing.Tuple[str, typing.Any]: + return self._data.popitem() + + def clear(self) -> None: + self._data.clear() + + def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: + self._data.update(*args, **kwargs) + + @typing.overload + def setdefault(self, key: str, default: None = None) -> None: ... + + @typing.overload + def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... 
+ + def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + if default is _UNSET: + return self._data.setdefault(key) + return self._data.setdefault(key, default) + + def __eq__(self, other: typing.Any) -> bool: + try: + other_model = self.__class__(other) + except Exception: + return False + return self._data == other_model._data + + def __repr__(self) -> str: + return str(self._data) + + +def _is_model(obj: typing.Any) -> bool: + return getattr(obj, "_is_model", False) + + +def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements + if isinstance(o, list): + return [_serialize(x, format) for x in o] + if isinstance(o, dict): + return {k: _serialize(v, format) for k, v in o.items()} + if isinstance(o, set): + return {_serialize(x, format) for x in o} + if isinstance(o, tuple): + return tuple(_serialize(x, format) for x in o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, format) + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, enum.Enum): + return o.value + try: + # First try datetime.datetime + return _serialize_datetime(o, format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return o + + +def _get_rest_field( + attr_to_rest_field: typing.Dict[str, "_RestField"], rest_name: str +) -> typing.Optional["_RestField"]: + try: + return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name) + except StopIteration: + return None + + +def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any: + if not rf: + return _serialize(value, None) + if rf._is_multipart_file_input: + return value + if rf._is_model: + return _deserialize(rf._type, value) + return _serialize(value, rf._format) + + +class Model(_MyMutableMapping): + _is_model = True + # label whether current class's _attr_to_rest_field has been calculated + # could not see _attr_to_rest_field directly because subclass inherits it from parent class + _calculated: typing.Set[str] = set() + + def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: + class_name = self.__class__.__name__ + if len(args) > 1: + raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given") + dict_to_pass = { + rest_field._rest_name: rest_field._default + for rest_field in self._attr_to_rest_field.values() + if rest_field._default is not _UNSET + } + if args: + dict_to_pass.update( + {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()} + ) + else: + non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field] + if non_attr_kwargs: + # actual type errors only throw the first wrong keyword arg they see, so following that. 
+ raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'") + dict_to_pass.update( + { + self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v) + for k, v in kwargs.items() + if v is not None + } + ) + super().__init__(dict_to_pass) + + def copy(self) -> "Model": + return Model(self.__dict__) + + def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: # pylint: disable=unused-argument + if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated: + # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping', + # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object' + mros = cls.__mro__[:-9][::-1] # ignore parents, and reverse the mro order + attr_to_rest_field: typing.Dict[str, _RestField] = { # map attribute name to rest_field property + k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type") + } + annotations = { + k: v + for mro_class in mros + if hasattr(mro_class, "__annotations__") # pylint: disable=no-member + for k, v in mro_class.__annotations__.items() # pylint: disable=no-member + } + for attr, rf in attr_to_rest_field.items(): + rf._module = cls.__module__ + if not rf._type: + rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None)) + if not rf._rest_name_input: + rf._rest_name_input = attr + cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) + cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") + + return super().__new__(cls) # pylint: disable=no-value-for-parameter + + def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: + for base in cls.__bases__: + if hasattr(base, "__mapping__"): # pylint: disable=no-member + base.__mapping__[discriminator or cls.__name__] = cls # type: ignore # pylint: disable=no-member + + @classmethod + def _get_discriminator(cls, exist_discriminators) -> typing.Optional[str]: + for v in cls.__dict__.values(): + if ( + isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators + ): # pylint: disable=protected-access + return v._rest_name # pylint: disable=protected-access + return None + + @classmethod + def _deserialize(cls, data, exist_discriminators): + if not hasattr(cls, "__mapping__"): # pylint: disable=no-member + return cls(data) + discriminator = cls._get_discriminator(exist_discriminators) + exist_discriminators.append(discriminator) + mapped_cls = cls.__mapping__.get(data.get(discriminator), cls) # pyright: ignore # pylint: disable=no-member + if mapped_cls == cls: + return cls(data) + return mapped_cls._deserialize(data, exist_discriminators) # pylint: disable=protected-access + + def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: + """Return a dict that can be JSONify using json.dump. + + :keyword bool exclude_readonly: Whether to remove the readonly properties. 
+ :returns: A dict JSON compatible object + :rtype: dict + """ + + result = {} + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)] + for k, v in self.items(): + if exclude_readonly and k in readonly_props: # pyright: ignore + continue + is_multipart_file_input = False + try: + is_multipart_file_input = next( + rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k + )._is_multipart_file_input + except StopIteration: + pass + result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly) + return result + + @staticmethod + def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any: + if v is None or isinstance(v, _Null): + return None + if isinstance(v, (list, tuple, set)): + return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v) + if isinstance(v, dict): + return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()} + return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v + + +def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj): + if _is_model(obj): + return obj + return _deserialize(model_deserializer, obj) + + +def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj): + if obj is None: + return obj + return _deserialize_with_callable(if_obj_deserializer, obj) + + +def _deserialize_with_union(deserializers, obj): + for deserializer in deserializers: + try: + return _deserialize(deserializer, obj) + except DeserializationError: + pass + raise DeserializationError() + + +def _deserialize_dict( + value_deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj: typing.Dict[typing.Any, typing.Any], +): + if obj is None: + return obj + return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()} + + +def _deserialize_multiple_sequence( + entry_deserializers: typing.List[typing.Optional[typing.Callable]], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) + + +def _deserialize_sequence( + deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) + + +def _sorted_annotations(types: typing.List[typing.Any]) -> typing.List[typing.Any]: + return sorted( + types, + key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), + ) + + +def _get_deserialize_callable_from_annotation( # pylint: disable=R0911, R0915, R0912 + annotation: typing.Any, + module: typing.Optional[str], + rf: typing.Optional["_RestField"] = None, +) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + if not annotation or annotation in [int, float]: + return None + + # is it a type alias? + if isinstance(annotation, str): + if module is not None: + annotation = _get_type_alias_type(module, annotation) + + # is it a forward ref / in quotes? 
+ if isinstance(annotation, (str, typing.ForwardRef)): + try: + model_name = annotation.__forward_arg__ # type: ignore + except AttributeError: + model_name = annotation + if module is not None: + annotation = _get_model(module, model_name) + + try: + if module and _is_model(annotation): + if rf: + rf._is_model = True + + return functools.partial(_deserialize_model, annotation) # pyright: ignore + except Exception: + pass + + # is it a literal? + try: + if annotation.__origin__ is typing.Literal: # pyright: ignore + return None + except AttributeError: + pass + + # is it optional? + try: + if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore + if len(annotation.__args__) <= 2: # pyright: ignore + if_obj_deserializer = _get_deserialize_callable_from_annotation( + next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_with_optional, if_obj_deserializer) + # the type is Optional[Union[...]], we need to remove the None type from the Union + annotation_copy = copy.copy(annotation) + annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)] # pyright: ignore + return _get_deserialize_callable_from_annotation(annotation_copy, module, rf) + except AttributeError: + pass + + # is it union? + if getattr(annotation, "__origin__", None) is typing.Union: + # initial ordering is we make `string` the last deserialization option, because it is often them most generic + deserializers = [ + _get_deserialize_callable_from_annotation(arg, module, rf) + for arg in _sorted_annotations(annotation.__args__) # pyright: ignore + ] + + return functools.partial(_deserialize_with_union, deserializers) + + try: + if annotation._name == "Dict": # pyright: ignore + value_deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[1], module, rf # pyright: ignore + ) + + return functools.partial( + _deserialize_dict, + value_deserializer, + module, + ) + except (AttributeError, IndexError): + pass + try: + if annotation._name in ["List", "Set", "Tuple", "Sequence"]: # pyright: ignore + if len(annotation.__args__) > 1: # pyright: ignore + + entry_deserializers = [ + _get_deserialize_callable_from_annotation(dt, module, rf) + for dt in annotation.__args__ # pyright: ignore + ] + return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module) + deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[0], module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_sequence, deserializer, module) + except (TypeError, IndexError, AttributeError, SyntaxError): + pass + + def _deserialize_default( + deserializer, + obj, + ): + if obj is None: + return obj + try: + return _deserialize_with_callable(deserializer, obj) + except Exception: + pass + return obj + + if get_deserializer(annotation, rf): + return functools.partial(_deserialize_default, get_deserializer(annotation, rf)) + + return functools.partial(_deserialize_default, annotation) + + +def _deserialize_with_callable( + deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]], + value: typing.Any, +): + try: + if value is None or isinstance(value, _Null): + return None + if deserializer is None: + return value + if isinstance(deserializer, CaseInsensitiveEnumMeta): + try: + return deserializer(value) + except ValueError: + # for unknown value, return raw value + return value + if isinstance(deserializer, type) and issubclass(deserializer, Model): + 
return deserializer._deserialize(value, []) + return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) + except Exception as e: + raise DeserializationError() from e + + +def _deserialize( + deserializer: typing.Any, + value: typing.Any, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + if isinstance(value, PipelineResponse): + value = value.http_response.json() + if rf is None and format: + rf = _RestField(format=format) + if not isinstance(deserializer, functools.partial): + deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf) + return _deserialize_with_callable(deserializer, value) + + +class _RestField: + def __init__( + self, + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + is_discriminator: bool = False, + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + ): + self._type = type + self._rest_name_input = name + self._module: typing.Optional[str] = None + self._is_discriminator = is_discriminator + self._visibility = visibility + self._is_model = False + self._default = default + self._format = format + self._is_multipart_file_input = is_multipart_file_input + + @property + def _class_type(self) -> typing.Any: + return getattr(self._type, "args", [None])[0] + + @property + def _rest_name(self) -> str: + if self._rest_name_input is None: + raise ValueError("Rest name was never set") + return self._rest_name_input + + def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin + # by this point, type and rest_name will have a value bc we default + # them in __new__ of the Model class + item = obj.get(self._rest_name) + if item is None: + return item + if self._is_model: + return item + return _deserialize(self._type, _serialize(item, self._format), rf=self) + + def __set__(self, obj: Model, value) -> None: + if value is None: + # we want to wipe out entries if users set attr to None + try: + obj.__delitem__(self._rest_name) + except KeyError: + pass + return + if self._is_model: + if not _is_model(value): + value = _deserialize(self._type, value) + obj.__setitem__(self._rest_name, value) + return + obj.__setitem__(self._rest_name, _serialize(value, self._format)) + + def _get_deserialize_callable_from_annotation( + self, annotation: typing.Any + ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + return _get_deserialize_callable_from_annotation(annotation, self._module, self) + + +def rest_field( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, +) -> typing.Any: + return _RestField( + name=name, + type=type, + visibility=visibility, + default=default, + format=format, + is_multipart_file_input=is_multipart_file_input, + ) + + +def rest_discriminator( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, +) -> typing.Any: + return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility) diff --git 
a/sdk/batch/azure-batch/azure/batch/_operations/__init__.py b/sdk/batch/azure-batch/azure/batch/_operations/__init__.py new file mode 100644 index 000000000000..7897453a2980 --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/_operations/__init__.py @@ -0,0 +1,19 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._operations import BatchClientOperationsMixin + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "BatchClientOperationsMixin", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/sdk/batch/azure-batch/azure/batch/_operations/_operations.py b/sdk/batch/azure-batch/azure/batch/_operations/_operations.py new file mode 100644 index 000000000000..d3651df3b52a --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/_operations/_operations.py @@ -0,0 +1,10422 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +import json +import sys +from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Type, TypeVar +import urllib.parse + +from azure.core import MatchConditions +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceModifiedError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. 
import models as _models +from .._model_base import SdkJSONEncoder, _deserialize +from .._serialization import Serializer +from .._vendor import BatchClientMixinABC, prep_if_match, prep_if_none_match + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_batch_list_applications_request( + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/applications" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_get_application_request( + application_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/applications/{applicationId}" + path_format_arguments = { + "applicationId": _SERIALIZER.url("application_id", application_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_list_pool_usage_metrics_request( # pylint: disable=name-too-long + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + starttime: Optional[datetime.datetime] = None, + endtime: Optional[datetime.datetime] = None, + filter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct 
URL + _url = "/poolusagemetrics" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") + if starttime is not None: + _params["startTime"] = _SERIALIZER.query("starttime", starttime, "iso-8601") + if endtime is not None: + _params["endtime"] = _SERIALIZER.query("endtime", endtime, "iso-8601") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", filter, "str") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_create_pool_request( + *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_list_pools_request( + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", filter, "str") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + if expand is not None: + _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def 
build_batch_delete_pool_request( + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_pool_exists_request( + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, 
match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="HEAD", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_get_pool_request( + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + if expand is not None: + _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_update_pool_request( + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # 
Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_disable_pool_auto_scale_request( # pylint: disable=name-too-long + pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/disableautoscale" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_enable_pool_auto_scale_request( # pylint: disable=name-too-long + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/enableautoscale" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", 
timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_evaluate_pool_auto_scale_request( # pylint: disable=name-too-long + pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/evaluateautoscale" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_resize_pool_request( + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/resize" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = 
_SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_stop_pool_resize_request( + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/stopresize" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_replace_pool_properties_request( # pylint: disable=name-too-long + pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/updateproperties" + path_format_arguments = { + "poolId": 
_SERIALIZER.url("pool_id", pool_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_remove_nodes_request( + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/removenodes" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_list_supported_images_request( # pylint: disable=name-too-long + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/supportedimages" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + 
_params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", filter, "str") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_list_pool_node_counts_request( # pylint: disable=name-too-long + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/nodecounts" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", filter, "str") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_delete_job_request( + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = 
prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_get_job_request( + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + if expand is not None: + _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_update_job_request( + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] 
= _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_replace_job_request( + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_disable_job_request( + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/disable" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_enable_job_request( + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/enable" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = 
_SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_terminate_job_request( + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/terminate" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_create_job_request( + *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_list_jobs_request( + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + 
select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", filter, "str") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + if expand is not None: + _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_list_jobs_from_schedule_request( # pylint: disable=name-too-long + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobschedules/{jobScheduleId}/jobs" + path_format_arguments = { + "jobScheduleId": _SERIALIZER.url("job_schedule_id", job_schedule_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", filter, "str") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + if expand is not None: + _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_list_job_preparation_and_release_task_status_request( # pylint: disable=name-too-long + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/jobpreparationandreleasetaskstatus" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", filter, "str") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_get_job_task_counts_request( + job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/taskcounts" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_job_schedule_exists_request( + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobschedules/{jobScheduleId}" + path_format_arguments = { + "jobScheduleId": _SERIALIZER.url("job_schedule_id", job_schedule_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + 
_headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="HEAD", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_delete_job_schedule_request( + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobschedules/{jobScheduleId}" + path_format_arguments = { + "jobScheduleId": _SERIALIZER.url("job_schedule_id", job_schedule_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_get_job_schedule_request( + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", 
"2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobschedules/{jobScheduleId}" + path_format_arguments = { + "jobScheduleId": _SERIALIZER.url("job_schedule_id", job_schedule_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + if expand is not None: + _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_update_job_schedule_request( + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobschedules/{jobScheduleId}" + path_format_arguments = { + "jobScheduleId": _SERIALIZER.url("job_schedule_id", job_schedule_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, 
match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_replace_job_schedule_request( + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobschedules/{jobScheduleId}" + path_format_arguments = { + "jobScheduleId": _SERIALIZER.url("job_schedule_id", job_schedule_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_disable_job_schedule_request( + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobschedules/{jobScheduleId}/disable" + path_format_arguments = { + "jobScheduleId": _SERIALIZER.url("job_schedule_id", job_schedule_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is 
not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_enable_job_schedule_request( + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobschedules/{jobScheduleId}/enable" + path_format_arguments = { + "jobScheduleId": _SERIALIZER.url("job_schedule_id", job_schedule_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_terminate_job_schedule_request( # pylint: disable=name-too-long + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + 
accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobschedules/{jobScheduleId}/terminate" + path_format_arguments = { + "jobScheduleId": _SERIALIZER.url("job_schedule_id", job_schedule_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_create_job_schedule_request( + *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobschedules" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_list_job_schedules_request( + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobschedules" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", 
filter, "str") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + if expand is not None: + _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_create_task_request( + job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/tasks" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_list_tasks_request( + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/tasks" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", filter, "str") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + if expand is not None: + _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_create_task_collection_request( # pylint: 
disable=name-too-long + job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/addtaskcollection" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_delete_task_request( + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/tasks/{taskId}" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + "taskId": _SERIALIZER.url("task_id", task_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_get_task_request( + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + 
if_unmodified_since: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/tasks/{taskId}" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + "taskId": _SERIALIZER.url("task_id", task_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + if expand is not None: + _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_replace_task_request( + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/tasks/{taskId}" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + "taskId": _SERIALIZER.url("task_id", task_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + if if_modified_since is not 
None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_list_sub_tasks_request( + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/tasks/{taskId}/subtasksinfo" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + "taskId": _SERIALIZER.url("task_id", task_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_terminate_task_request( + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/tasks/{taskId}/terminate" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + "taskId": _SERIALIZER.url("task_id", task_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, 
"rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_reactivate_task_request( + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/tasks/{taskId}/reactivate" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + "taskId": _SERIALIZER.url("task_id", task_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_delete_task_file_request( + job_id: str, + task_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + recursive: Optional[bool] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/tasks/{taskId}/files/{filePath}" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + "taskId": _SERIALIZER.url("task_id", task_id, "str"), + "filePath": _SERIALIZER.url("file_path", file_path, "str"), + } + + _url: str = 
_url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if recursive is not None: + _params["recursive"] = _SERIALIZER.query("recursive", recursive, "bool") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_get_task_file_request( + job_id: str, + task_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + ocp_range: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/octet-stream") + + # Construct URL + _url = "/jobs/{jobId}/tasks/{taskId}/files/{filePath}" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + "taskId": _SERIALIZER.url("task_id", task_id, "str"), + "filePath": _SERIALIZER.url("file_path", file_path, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if ocp_range is not None: + _headers["ocp-range"] = _SERIALIZER.header("ocp_range", ocp_range, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_get_task_file_properties_request( # pylint: disable=name-too-long + job_id: str, + task_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/tasks/{taskId}/files/{filePath}" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + "taskId": _SERIALIZER.url("task_id", task_id, "str"), + "filePath": _SERIALIZER.url("file_path", file_path, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct 
parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="HEAD", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_list_task_files_request( + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + recursive: Optional[bool] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/jobs/{jobId}/tasks/{taskId}/files" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + "taskId": _SERIALIZER.url("task_id", task_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", filter, "str") + if recursive is not None: + _params["recursive"] = _SERIALIZER.query("recursive", recursive, "bool") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_create_node_user_request( + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/users" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + 
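build_batch_list_task_files_request above only assembles the raw HttpRequest; the client method drives it through the pipeline and pages the results. A hedged usage sketch, assuming the mixin surfaces it as list_task_files and using placeholder endpoint and credential values:

from azure.batch import BatchClient
from azure.identity import DefaultAzureCredential

# Placeholder Batch account endpoint; substitute your own account URL.
client = BatchClient(
    endpoint="https://<account>.<region>.batch.azure.com",
    credential=DefaultAzureCredential(),
)

# recursive/filter map onto the 'recursive' and '$filter' query parameters built above.
for node_file in client.list_task_files("job-1", "task-1", recursive=True, filter="startswith(name,'wd')"):
    print(node_file.name, node_file.is_directory)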
_headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_delete_node_user_request( + pool_id: str, + node_id: str, + user_name: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/users/{userName}" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + "userName": _SERIALIZER.url("user_name", user_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_replace_node_user_request( + pool_id: str, + node_id: str, + user_name: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/users/{userName}" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + "userName": _SERIALIZER.url("user_name", user_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_get_node_request( + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", 
"application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_reboot_node_request( + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/reboot" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_disable_node_scheduling_request( # pylint: disable=name-too-long + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/disablescheduling" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + 
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_enable_node_scheduling_request( # pylint: disable=name-too-long + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/enablescheduling" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_get_node_remote_login_settings_request( # pylint: disable=name-too-long + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/remoteloginsettings" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_upload_node_logs_request( + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + } + + _url: str = 
_url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_list_nodes_request( + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", filter, "str") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_get_node_extension_request( + pool_id: str, + node_id: str, + extension_name: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/extensions/{extensionName}" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + "extensionName": _SERIALIZER.url("extension_name", extension_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + 
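build_batch_list_nodes_request, like the other list builders, combines a per-page limit (maxresults) with OData query options. A hedged example of filtering a pool down to its idle nodes, again assuming the client method mirrors the builder name:

# Assumes `client` is the BatchClient constructed in the earlier sketch.
for node in client.list_nodes(
    "pool-1",
    filter="state eq 'idle'",
    select=["id", "state", "ipAddress"],
    max_results=100,  # page size; the returned iterator follows odata.nextLink across pages
):
    print(node.id, node.ip_address)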
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_list_node_extensions_request( + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + select: Optional[List[str]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/extensions" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_delete_node_file_request( + pool_id: str, + node_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + recursive: Optional[bool] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/files/{filePath}" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + "filePath": _SERIALIZER.url("file_path", file_path, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if recursive is not None: + _params["recursive"] = _SERIALIZER.query("recursive", recursive, "bool") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_get_node_file_request( + pool_id: str, + node_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + ocp_range: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + 
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/octet-stream") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/files/{filePath}" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + "filePath": _SERIALIZER.url("file_path", file_path, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if ocp_range is not None: + _headers["ocp-range"] = _SERIALIZER.header("ocp_range", ocp_range, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_get_node_file_properties_request( # pylint: disable=name-too-long + pool_id: str, + node_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/files/{filePath}" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + "filePath": _SERIALIZER.url("file_path", file_path, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="HEAD", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_list_node_files_request( + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + recursive: 
Optional[bool] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/files" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", filter, "str") + if recursive is not None: + _params["recursive"] = _SERIALIZER.query("recursive", recursive, "bool") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class BatchClientOperationsMixin(BatchClientMixinABC): # pylint: disable=too-many-public-methods + + @distributed_trace + def list_applications( + self, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + **kwargs: Any + ) -> Iterable["_models.BatchApplication"]: + """Lists all of the applications available in the specified Account. + + This operation returns only Applications and versions that are available for + use on Compute Nodes; that is, that can be used in a Package reference. For + administrator information about applications and versions that are not yet + available to Compute Nodes, use the Azure portal or the Azure Resource Manager + API. + + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead. Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. 
+ :paramtype max_results: int + :return: An iterator like instance of BatchApplication + :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchApplication] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchApplication]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_applications_request( + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchApplication], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def get_application( + self, + application_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> _models.BatchApplication: + """Gets information about the specified Application. + + This operation returns only Applications and versions that are available for + use on Compute Nodes; that is, that can be used in an Package reference. For + administrator information about Applications and versions that are not yet + available to Compute Nodes, use the Azure portal or the Azure Resource Manager + API. + + :param application_id: The ID of the Application. Required. 
+ :type application_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: BatchApplication. The BatchApplication is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchApplication + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchApplication] = kwargs.pop("cls", None) + + _request = build_batch_get_application_request( + application_id=application_id, + timeout=timeout, + ocpdate=ocpdate, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchApplication, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_pool_usage_metrics( + self, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + starttime: Optional[datetime.datetime] = None, + endtime: Optional[datetime.datetime] = None, + filter: Optional[str] = None, + **kwargs: Any + ) -> Iterable["_models.BatchPoolUsageMetrics"]: + """Lists the usage metrics, aggregated by Pool across individual time intervals, + for the specified Account. + + If you do not specify a $filter clause including a poolId, the response + includes all Pools that existed in the Account in the time range of the + returned aggregation intervals. 
If you do not specify a $filter clause + including a startTime or endTime these filters default to the start and end + times of the last aggregation interval currently available; that is, only the + last aggregation interval is returned. + + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword starttime: The earliest time from which to include metrics. This must be at least two + and + a half hours before the current time. If not specified this defaults to the + start time of the last aggregation interval currently available. Default value is None. + :paramtype starttime: ~datetime.datetime + :keyword endtime: The latest time from which to include metrics. This must be at least two + hours + before the current time. If not specified this defaults to the end time of the + last aggregation interval currently available. Default value is None. + :paramtype endtime: ~datetime.datetime + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + Default value is None. + :paramtype filter: str + :return: An iterator like instance of BatchPoolUsageMetrics + :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchPoolUsageMetrics] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchPoolUsageMetrics]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_pool_usage_metrics_request( + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + starttime=starttime, + endtime=endtime, + filter=filter, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", 
self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchPoolUsageMetrics], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def create_pool( # pylint: disable=inconsistent-return-statements + self, + pool: _models.BatchPoolCreateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Creates a Pool to the specified Account. + + When naming Pools, avoid including sensitive information such as user names or + secret project names. This information may appear in telemetry logs accessible + to Microsoft Support engineers. + + :param pool: The Pool to be created. Required. + :type pool: ~azure.batch.models.BatchPoolCreateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. 
+ :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(pool, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_create_pool_request( + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def list_pools( + self, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any + ) -> Iterable["_models.BatchPool"]: + """Lists all of the Pools in the specified Account. + + Lists all of the Pools in the specified Account. + + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. 
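# Illustrative usage sketch (not part of the generated client). create_pool serializes the
# BatchPoolCreateContent body to JSON and returns None on a 201 response. Reuses the
# `client` from the get_application sketch; the model field names below are assumptions
# not confirmed by this diff, and a real pool also needs an image/node agent configuration.
from azure.batch import models

pool = models.BatchPoolCreateContent(
    id="my-pool",
    vm_size="standard_d2s_v3",
    target_dedicated_nodes=2,
)
client.create_pool(pool)  # raises HttpResponseError if the service does not return 201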
For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + Default value is None. + :paramtype filter: str + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. + :paramtype expand: list[str] + :return: An iterator like instance of BatchPool + :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchPool] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchPool]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_pools_request( + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + select=select, + expand=expand, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchPool], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def delete_pool( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + 
**kwargs: Any + ) -> None: + """Deletes a Pool from the specified Account. + + When you request that a Pool be deleted, the following actions occur: the Pool + state is set to deleting; any ongoing resize operation on the Pool is stopped; + the Batch service starts resizing the Pool to zero Compute Nodes; any Tasks + running on existing Compute Nodes are terminated and requeued (as if a resize + Pool operation had been requested with the default requeue option); finally, + the Pool is removed from the system. Because running Tasks are requeued, the + user can rerun these Tasks by updating their Job to target a different Pool. + The Tasks can then run on the new Pool. If you want to override the requeue + behavior, then you should call resize Pool explicitly to shrink the Pool to + zero size before deleting the Pool. If you call an Update, Patch or Delete API + on a Pool in the deleting state, it will fail with HTTP status code 409 with + error code PoolBeingDeleted. + + :param pool_id: The ID of the Pool to delete. Required. + :type pool_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead. Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: An ETag value used to check whether the resource has changed. Set to None to skip the ETag check. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_delete_pool_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def pool_exists( + self, + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> bool: + """Gets basic properties of a Pool. + + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. 
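# Illustrative usage sketch (not part of the generated client). delete_pool supports
# optimistic concurrency: passing etag with MatchConditions.IfNotModified makes a 412
# response surface as ResourceModifiedError (see the error_map handling above). Reuses
# the `client` from the get_application sketch; the e_tag attribute name on BatchPool
# is an assumption.
from azure.core import MatchConditions
from azure.core.exceptions import ResourceModifiedError

current = client.get_pool("my-pool")
try:
    client.delete_pool("my-pool", etag=current.e_tag, match_condition=MatchConditions.IfNotModified)
except ResourceModifiedError:
    print("Pool changed since it was read; re-read it and retry the delete.")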
The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: bool + :rtype: bool + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_pool_exists_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + return 200 <= response.status_code <= 299 + + @distributed_trace + def get_pool( + self, + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.BatchPool: + """Gets information about the specified Pool. + + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. 
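# Illustrative usage sketch (not part of the generated client). pool_exists accepts both
# 200 and 404 as non-error status codes and returns a bool, so it works as a cheap
# existence probe. Reuses the `client` from the get_application sketch.
if not client.pool_exists("my-pool"):
    print("Pool 'my-pool' does not exist yet")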
If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. + :paramtype expand: list[str] + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: BatchPool. The BatchPool is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchPool + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchPool] = kwargs.pop("cls", None) + + _request = build_batch_get_pool_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + select=select, + expand=expand, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = 
self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchPool, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def update_pool( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + pool: _models.BatchPoolUpdateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Updates the properties of the specified Pool. + + This only replaces the Pool properties specified in the request. For example, + if the Pool has a StartTask associated with it, and a request does not specify + a StartTask element, then the Pool keeps the existing StartTask. + + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :param pool: The pool properties to update. Required. + :type pool: ~azure.batch.models.BatchPoolUpdateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(pool, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_update_pool_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statements + self, pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + ) -> None: + """Disables automatic scaling for a Pool. + + Disables automatic scaling for a Pool. + + :param pool_id: The ID of the Pool on which to disable automatic scaling. Required. + :type pool_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. 
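# Illustrative usage sketch (not part of the generated client). get_pool plus update_pool
# with an ETag guard: update_pool only replaces the properties present in the request body.
# Reuses the `client` from the get_application sketch; the $select property names, the
# metadata field shape on BatchPoolUpdateContent, and the e_tag attribute are assumptions.
from azure.core import MatchConditions
from azure.batch import models

pool = client.get_pool("my-pool", select=["id", "eTag", "metadata"])  # $select names assumed
update = models.BatchPoolUpdateContent(metadata=[{"name": "owner", "value": "team-a"}])  # shape assumed
client.update_pool("my-pool", update, etag=pool.e_tag, match_condition=MatchConditions.IfNotModified)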
Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_disable_pool_auto_scale_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + content: _models.BatchPoolEnableAutoScaleContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Enables automatic scaling for a Pool. + + You cannot enable automatic scaling on a Pool if a resize operation is in + progress on the Pool. If automatic scaling of the Pool is currently disabled, + you must specify a valid autoscale formula as part of the request. If automatic + scaling of the Pool is already enabled, you may specify a new autoscale formula + and/or a new evaluation interval. You cannot call this API for the same Pool + more than once every 30 seconds. + + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :param content: The options to use for enabling automatic scaling. Required. + :type content: ~azure.batch.models.BatchPoolEnableAutoScaleContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". 
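# Illustrative usage sketch (not part of the generated client). disable_pool_auto_scale
# returns None; the response headers (ETag, DataServiceId, ...) are only surfaced if you
# supply a `cls` callback, as the code above shows. Reuses the `client` from the
# get_application sketch.
headers = client.disable_pool_auto_scale(
    "my-pool",
    cls=lambda pipeline_response, deserialized, response_headers: response_headers,
)
print(headers.get("ETag"))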
Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_enable_pool_auto_scale_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", 
response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def evaluate_pool_auto_scale( + self, + pool_id: str, + content: _models.BatchPoolEvaluateAutoScaleContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> _models.AutoScaleRun: + """Gets the result of evaluating an automatic scaling formula on the Pool. + + This API is primarily for validating an autoscale formula, as it simply returns + the result without applying the formula to the Pool. The Pool must have auto + scaling enabled in order to evaluate a formula. + + :param pool_id: The ID of the Pool on which to evaluate the automatic scaling formula. + Required. + :type pool_id: str + :param content: The options to use for evaluating the automatic scaling formula. Required. + :type content: ~azure.batch.models.BatchPoolEvaluateAutoScaleContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: AutoScaleRun. The AutoScaleRun is compatible with MutableMapping + :rtype: ~azure.batch.models.AutoScaleRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[_models.AutoScaleRun] = kwargs.pop("cls", None) + + _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_evaluate_pool_auto_scale_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise 
HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AutoScaleRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def resize_pool( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + content: _models.BatchPoolResizeContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Changes the number of Compute Nodes that are assigned to a Pool. + + You can only resize a Pool when its allocation state is steady. If the Pool is + already resizing, the request fails with status code 409. When you resize a + Pool, the Pool's allocation state changes from steady to resizing. You cannot + resize Pools which are configured for automatic scaling. If you try to do this, + the Batch service returns an error 409. If you resize a Pool downwards, the + Batch service chooses which Compute Nodes to remove. To remove specific Compute + Nodes, use the Pool remove Compute Nodes API instead. + + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :param content: The options to use for resizing the pool. Required. + :type content: ~azure.batch.models.BatchPoolResizeContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
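# Illustrative usage sketch (not part of the generated client). enable_pool_auto_scale
# applies a formula (at most once per 30 seconds per Pool); evaluate_pool_auto_scale then
# checks a candidate formula without applying it. Reuses the `client` from the
# get_application sketch; the auto_scale_formula field name, the AutoScaleRun.results
# attribute, and the formula text itself are assumptions.
from azure.batch import models

formula = "$TargetDedicatedNodes = min(10, $PendingTasks.GetSample(TimeInterval_Minute * 5, 0));"
client.enable_pool_auto_scale("my-pool", models.BatchPoolEnableAutoScaleContent(auto_scale_formula=formula))
run = client.evaluate_pool_auto_scale("my-pool", models.BatchPoolEvaluateAutoScaleContent(auto_scale_formula=formula))
print(run.results)  # evaluated variable assignments; attribute name assumed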
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_resize_pool_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def stop_pool_resize( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Stops an ongoing resize operation on the Pool. + + This does not restore the Pool to its previous state before the resize + operation: it only stops any further changes being made, and the Pool maintains + its current state. After stopping, the Pool stabilizes at the number of Compute + Nodes it was at when the stop operation was done. 
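# Illustrative usage sketch (not part of the generated client). resize_pool is only valid
# while the Pool's allocation state is steady and is rejected with 409 for autoscale-enabled
# Pools. Reuses the `client` from the get_application sketch; the field names on
# BatchPoolResizeContent are assumptions.
from azure.batch import models

client.resize_pool(
    "my-pool",
    models.BatchPoolResizeContent(target_dedicated_nodes=5, target_low_priority_nodes=0),
)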
During the stop operation, + the Pool allocation state changes first to stopping and then to steady. A + resize operation need not be an explicit resize Pool request; this API can also + be used to halt the initial sizing of the Pool when it is created. + + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_stop_pool_resize_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = 
self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def replace_pool_properties( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + pool: _models.BatchPoolReplaceContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Updates the properties of the specified Pool. + + This fully replaces all the updatable properties of the Pool. For example, if + the Pool has a StartTask associated with it and if StartTask is not specified + with this request, then the Batch service will remove the existing StartTask. + + :param pool_id: The ID of the Pool to update. Required. + :type pool_id: str + :param pool: The options to use for replacing properties on the pool. Required. + :type pool: ~azure.batch.models.BatchPoolReplaceContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. 
+ :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(pool, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_replace_pool_properties_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def remove_nodes( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + content: _models.BatchNodeRemoveContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Removes Compute Nodes from the specified Pool. + + This operation can only run when the allocation state of the Pool is steady. + When this operation runs, the allocation state changes from steady to resizing. + Each request may remove up to 100 nodes. + + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :param content: The options to use for removing the node. Required. + :type content: ~azure.batch.models.BatchNodeRemoveContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. 
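# Illustrative usage sketch (not part of the generated client). Unlike update_pool,
# replace_pool_properties overwrites all updatable properties, so anything omitted (for
# example an existing StartTask) is removed. Reuses the `client` from the get_application
# sketch; the field names passed to BatchPoolReplaceContent are assumptions.
from azure.batch import models

client.replace_pool_properties(
    "my-pool",
    models.BatchPoolReplaceContent(
        certificate_references=[],
        application_package_references=[],
        metadata=[{"name": "owner", "value": "team-a"}],
    ),
)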
Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_remove_nodes_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", 
response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def list_supported_images( + self, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + **kwargs: Any + ) -> Iterable["_models.BatchSupportedImage"]: + """Lists all Virtual Machine Images supported by the Azure Batch service. + + Lists all Virtual Machine Images supported by the Azure Batch service. + + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + Default value is None. + :paramtype filter: str + :return: An iterator like instance of BatchSupportedImage + :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchSupportedImage] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchSupportedImage]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_supported_images_request( + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = 
_deserialize(List[_models.BatchSupportedImage], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def list_pool_node_counts( + self, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + **kwargs: Any + ) -> Iterable["_models.BatchPoolNodeCounts"]: + """Gets the number of Compute Nodes in each state, grouped by Pool. Note that the + numbers returned may not always be up to date. If you need exact node counts, + use a list query. + + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + Default value is None. 
+ :paramtype filter: str + :return: An iterator like instance of BatchPoolNodeCounts + :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchPoolNodeCounts] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchPoolNodeCounts]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_pool_node_counts_request( + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchPoolNodeCounts], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def delete_job( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a Job. + + Deleting a Job also deletes all Tasks that are part of that Job, and all Job + statistics. 
This also overrides the retention period for Task data; that is, if + the Job contains Tasks which are still retained on Compute Nodes, the Batch + services deletes those Tasks' working directories and all their contents. When + a Delete Job request is received, the Batch service sets the Job to the + deleting state. All update operations on a Job that is in deleting state will + fail with status code 409 (Conflict), with additional information indicating + that the Job is being deleted. + + :param job_id: The ID of the Job to delete. Required. + :type job_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_delete_job_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def get_job( + self, + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.BatchJob: + """Gets information about the specified Job. + + Gets information about the specified Job. + + :param job_id: The ID of the Job. Required. + :type job_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. 
+ :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. + :paramtype expand: list[str] + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: BatchJob. The BatchJob is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchJob + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchJob] = kwargs.pop("cls", None) + + _request = build_batch_get_job_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + select=select, + expand=expand, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchJob, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + 
@distributed_trace + def update_job( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + job: _models.BatchJobUpdateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Updates the properties of the specified Job. + + This replaces only the Job properties specified in the request. For example, if + the Job has constraints, and a request does not specify the constraints + element, then the Job keeps the existing constraints. + + :param job_id: The ID of the Job whose properties you want to update. Required. + :type job_id: str + :param job: The options to use for updating the Job. Required. + :type job: ~azure.batch.models.BatchJobUpdateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(job, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_update_job_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def replace_job( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + job: _models.BatchJob, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Updates the properties of the specified Job. + + This fully replaces all the updatable properties of the Job. For example, if + the Job has constraints associated with it and if constraints is not specified + with this request, then the Batch service will remove the existing constraints. + + :param job_id: The ID of the Job whose properties you want to update. Required. 
+ :type job_id: str + :param job: A job with updated properties. Required. + :type job: ~azure.batch.models.BatchJob + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(job, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_replace_job_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + 
response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def disable_job( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + content: _models.BatchJobDisableContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Disables the specified Job, preventing new Tasks from running. + + The Batch Service immediately moves the Job to the disabling state. Batch then + uses the disableTasks parameter to determine what to do with the currently + running Tasks of the Job. The Job remains in the disabling state until the + disable operation is completed and all Tasks have been dealt with according to + the disableTasks option; the Job then moves to the disabled state. No new Tasks + are started under the Job until it moves back to active state. If you try to + disable a Job that is in any state other than active, disabling, or disabled, + the request fails with status code 409. + + :param job_id: The ID of the Job to disable. Required. + :type job_id: str + :param content: The options to use for disabling the Job. Required. + :type content: ~azure.batch.models.BatchJobDisableContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_disable_job_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def enable_job( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Enables the specified Job, allowing new Tasks to run. + + When you call this API, the Batch service sets a disabled Job to the enabling + state. After the this operation is completed, the Job moves to the active + state, and scheduling of new Tasks under the Job resumes. The Batch service + does not allow a Task to remain in the active state for more than 180 days. 
+ Therefore, if you enable a Job containing active Tasks which were added more + than 180 days ago, those Tasks will not run. + + :param job_id: The ID of the Job to enable. Required. + :type job_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_enable_job_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) 
+ response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def terminate_job( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + parameters: Optional[_models.BatchJobTerminateContent] = None, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Terminates the specified Job, marking it as completed. + + When a Terminate Job request is received, the Batch service sets the Job to the + terminating state. The Batch service then terminates any running Tasks + associated with the Job and runs any required Job release Tasks. Then the Job + moves into the completed state. If there are any Tasks in the Job in the active + state, they will remain in the active state. Once a Job is terminated, new + Tasks cannot be added and any remaining active Tasks will not be scheduled. + + :param job_id: The ID of the Job to terminate. Required. + :type job_id: str + :param parameters: The options to use for terminating the Job. Default value is None. + :type parameters: ~azure.batch.models.BatchJobTerminateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + if parameters is not None: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + else: + _content = None + + _request = build_batch_terminate_job_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def create_job( # pylint: disable=inconsistent-return-statements + self, + job: _models.BatchJobCreateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Creates a Job to the specified Account. + + The Batch service supports two ways to control the work done as part of a Job. + In the first approach, the user specifies a Job Manager Task. The Batch service + launches this Task when it is ready to start the Job. The Job Manager Task + controls all other Tasks that run under this Job, by using the Task APIs. In + the second approach, the user directly controls the execution of Tasks under an + active Job, by using the Task APIs. 
Also note: when naming Jobs, avoid + including sensitive information such as user names or secret project names. + This information may appear in telemetry logs accessible to Microsoft Support + engineers. + + :param job: The Job to be created. Required. + :type job: ~azure.batch.models.BatchJobCreateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(job, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_create_job_request( + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def list_jobs( + self, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any + ) -> Iterable["_models.BatchJob"]: + """Lists all of the Jobs in the specified Account. + + Lists all of the Jobs in the specified Account. 
+ + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + Default value is None. + :paramtype filter: str + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. + :paramtype expand: list[str] + :return: An iterator like instance of BatchJob + :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchJob] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchJob]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_jobs_request( + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + select=select, + expand=expand, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + 
if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def list_jobs_from_schedule( + self, + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any + ) -> Iterable["_models.BatchJob"]: + """Lists the Jobs that have been created under the specified Job Schedule. + + Lists the Jobs that have been created under the specified Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule from which you want to get a list of Jobs. + Required. + :type job_schedule_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + Default value is None. + :paramtype filter: str + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. 
+ :paramtype expand: list[str] + :return: An iterator like instance of BatchJob + :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchJob] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchJob]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_jobs_from_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + select=select, + expand=expand, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def list_job_preparation_and_release_task_status( # pylint: disable=name-too-long + self, + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + **kwargs: Any + ) -> Iterable["_models.BatchJobPreparationAndReleaseTaskStatus"]: + """Lists the execution status of the Job Preparation and Job Release Task for the + specified Job across the Compute Nodes where the Job has run. + + This API returns the Job Preparation and Job Release Task status on all Compute + Nodes that have run the Job Preparation or Job Release Task. 
This includes + Compute Nodes which have since been removed from the Pool. If this API is + invoked on a Job which has no Job Preparation or Job Release Task, the Batch + service returns HTTP status code 409 (Conflict) with an error code of + JobPreparationTaskNotSpecified. + + :param job_id: The ID of the Job. Required. + :type job_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + Default value is None. + :paramtype filter: str + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :return: An iterator like instance of BatchJobPreparationAndReleaseTaskStatus + :rtype: + ~azure.core.paging.ItemPaged[~azure.batch.models.BatchJobPreparationAndReleaseTaskStatus] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchJobPreparationAndReleaseTaskStatus]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_job_preparation_and_release_task_status_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + select=select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchJobPreparationAndReleaseTaskStatus], 
deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def get_job_task_counts( + self, job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + ) -> _models.BatchTaskCountsResult: + """Gets the Task counts for the specified Job. + + Task counts provide a count of the Tasks by active, running or completed Task + state, and a count of Tasks which succeeded or failed. Tasks in the preparing + state are counted as running. Note that the numbers returned may not always be + up to date. If you need exact task counts, use a list query. + + :param job_id: The ID of the Job. Required. + :type job_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: BatchTaskCountsResult. 
The BatchTaskCountsResult is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchTaskCountsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchTaskCountsResult] = kwargs.pop("cls", None) + + _request = build_batch_get_job_task_counts_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchTaskCountsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def job_schedule_exists( + self, + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> bool: + """Checks the specified Job Schedule exists. + + Checks the specified Job Schedule exists. + + :param job_schedule_id: The ID of the Job Schedule which you want to check. Required. + :type job_schedule_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. 
The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: bool + :rtype: bool + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_job_schedule_exists_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + return 200 <= response.status_code <= 299 + + @distributed_trace + def delete_job_schedule( # pylint: disable=inconsistent-return-statements + self, + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, 
+ **kwargs: Any + ) -> None: + """Deletes a Job Schedule from the specified Account. + + When you delete a Job Schedule, this also deletes all Jobs and Tasks under that + schedule. When Tasks are deleted, all the files in their working directories on + the Compute Nodes are also deleted (the retention period is ignored). The Job + Schedule statistics are no longer accessible once the Job Schedule is deleted, + though they are still counted towards Account lifetime statistics. + + :param job_schedule_id: The ID of the Job Schedule to delete. Required. + :type job_schedule_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
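Illustrative usage sketch (not part of the generated diff) for the job_schedule_exists and delete_job_schedule operations shown here. It assumes `client` is an already-configured azure.batch client and "my-schedule-id" is a placeholder Job Schedule ID.

    from azure.core.exceptions import HttpResponseError

    schedule_id = "my-schedule-id"  # placeholder ID

    try:
        # job_schedule_exists maps a 200 response to True and a 404 to False
        if client.job_schedule_exists(schedule_id):
            # Deleting a schedule also deletes its Jobs and Tasks
            # (the retention period is ignored), per the docstring above.
            client.delete_job_schedule(schedule_id)
    except HttpResponseError as err:
        # Other service failures surface as HttpResponseError carrying a BatchError model
        print(f"Delete failed: {err.message}")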
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_delete_job_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def get_job_schedule( + self, + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.BatchJobSchedule: + """Gets information about the specified Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule to get. Required. + :type job_schedule_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. 
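For reference, a minimal sketch of calling the get_job_schedule operation being documented here. `client` is assumed to be a configured azure.batch client; the property names passed to $select and read back from the returned model are assumptions, not confirmed by this diff.

    # Retrieve only selected properties of the schedule (OData $select).
    schedule = client.get_job_schedule(
        "my-schedule-id",            # placeholder ID
        select=["id", "state"],      # assumed REST property names
        timeout=30,
    )
    # The returned BatchJobSchedule is MutableMapping-compatible, so it can be indexed.
    print(schedule["id"], schedule.get("state"))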
+ :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. + :paramtype expand: list[str] + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: BatchJobSchedule. The BatchJobSchedule is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchJobSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchJobSchedule] = kwargs.pop("cls", None) + + _request = build_batch_get_job_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + select=select, + expand=expand, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchJobSchedule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, 
response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def update_job_schedule( # pylint: disable=inconsistent-return-statements + self, + job_schedule_id: str, + job_schedule: _models.BatchJobScheduleUpdateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Updates the properties of the specified Job Schedule. + + This replaces only the Job Schedule properties specified in the request. For + example, if the schedule property is not specified with this request, then the + Batch service will keep the existing schedule. Changes to a Job Schedule only + impact Jobs created by the schedule after the update has taken place; currently + running Jobs are unaffected. + + :param job_schedule_id: The ID of the Job Schedule to update. Required. + :type job_schedule_id: str + :param job_schedule: The options to use for updating the Job Schedule. Required. + :type job_schedule: ~azure.batch.models.BatchJobScheduleUpdateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
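A sketch of the partial-update semantics described above for update_job_schedule: only the properties present in the request body are replaced, and omitted properties keep their current values. The `metadata` field name and dict shape are assumptions for illustration; consult the generated BatchJobScheduleUpdateContent model for the actual fields.

    from azure.batch import models as batch_models

    # Only the fields set here change; e.g. the schedule property is omitted,
    # so the Batch service keeps the existing schedule.
    update = batch_models.BatchJobScheduleUpdateContent(
        metadata=[{"name": "owner", "value": "analytics-team"}],  # assumed field name/shape
    )
    client.update_job_schedule("my-schedule-id", update)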
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(job_schedule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_update_job_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def replace_job_schedule( # pylint: disable=inconsistent-return-statements + self, + job_schedule_id: str, + job_schedule: _models.BatchJobSchedule, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Updates the properties of the specified Job Schedule. + + This fully replaces all the updatable properties of the Job Schedule. For + example, if the schedule property is not specified with this request, then the + Batch service will remove the existing schedule. 
Changes to a Job Schedule only + impact Jobs created by the schedule after the update has taken place; currently + running Jobs are unaffected. + + :param job_schedule_id: The ID of the Job Schedule to update. Required. + :type job_schedule_id: str + :param job_schedule: A Job Schedule with updated properties. Required. + :type job_schedule: ~azure.batch.models.BatchJobSchedule + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(job_schedule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_replace_job_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + 
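For context, a sketch of the full-replacement flow for replace_job_schedule with optimistic concurrency via the etag/match_condition keywords documented above. `client` is assumed to be a configured azure.batch client, and the "eTag" property name on the returned schedule is an assumption based on the Batch REST resource.

    from azure.core import MatchConditions
    from azure.core.exceptions import HttpResponseError

    # Read the current schedule, then write the full definition back.
    # Because this is a full replacement, omitted updatable properties are removed.
    schedule = client.get_job_schedule("my-schedule-id")
    try:
        client.replace_job_schedule(
            "my-schedule-id",
            schedule,
            etag=schedule.get("eTag"),                       # assumed property name
            match_condition=MatchConditions.IfNotModified,   # reject (412) if changed since read
        )
    except HttpResponseError as err:
        print(f"Replace rejected or failed: {err.status_code}")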
+ response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def disable_job_schedule( # pylint: disable=inconsistent-return-statements + self, + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Disables a Job Schedule. + + No new Jobs will be created until the Job Schedule is enabled again. + + :param job_schedule_id: The ID of the Job Schedule to disable. Required. + :type job_schedule_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
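A short sketch pairing the disable and enable Job Schedule operations (illustrative only; `client` is an assumed, already-configured azure.batch client).

    # Pause new Job creation from the schedule; existing Jobs keep running.
    client.disable_job_schedule("my-schedule-id")
    # ... later, resume Job creation. Both calls return None and raise
    # HttpResponseError on failure.
    client.enable_job_schedule("my-schedule-id")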
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_disable_job_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def enable_job_schedule( # pylint: disable=inconsistent-return-statements + self, + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Enables a Job Schedule. + + Enables a Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule to enable. Required. + :type job_schedule_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. 
+ :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_enable_job_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def terminate_job_schedule( # pylint: disable=inconsistent-return-statements + self, + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + 
if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Terminates a Job Schedule. + + Terminates a Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule to terminates. Required. + :type job_schedule_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_terminate_job_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, 
model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def create_job_schedule( # pylint: disable=inconsistent-return-statements + self, + job_schedule: _models.BatchJobScheduleCreateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Creates a Job Schedule to the specified Account. + + Creates a Job Schedule to the specified Account. + + :param job_schedule: The Job Schedule to be created. Required. + :type job_schedule: ~azure.batch.models.BatchJobScheduleCreateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(job_schedule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_create_job_schedule_request( + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", 
response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def list_job_schedules( + self, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any + ) -> Iterable["_models.BatchJobSchedule"]: + """Lists all of the Job Schedules in the specified Account. + + Lists all of the Job Schedules in the specified Account. + + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + Default value is None. + :paramtype filter: str + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. 
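A usage sketch for list_job_schedules with an OData filter; the ItemPaged iterator transparently follows odata.nextLink across pages. The property names used in $filter/$select are assumptions based on the Batch REST API.

    # Iterate every active schedule in the Account.
    for schedule in client.list_job_schedules(
        filter="state eq 'active'",   # OData $filter (see the linked filter docs)
        select=["id", "state"],       # assumed REST property names
        max_results=100,              # page size; the service caps this at 1000
    ):
        print(schedule["id"])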
+ :paramtype expand: list[str] + :return: An iterator like instance of BatchJobSchedule + :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchJobSchedule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchJobSchedule]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_job_schedules_request( + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + select=select, + expand=expand, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchJobSchedule], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def create_task( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + task: _models.BatchTaskCreateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Creates a Task to the specified Job. + + The maximum lifetime of a Task from addition to completion is 180 days. If a + Task has not completed within 180 days of being added it will be terminated by + the Batch service and left in whatever state it was in at that time. + + :param job_id: The ID of the Job to which the Task is to be created. Required. + :type job_id: str + :param task: The Task to be created. Required. 
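An illustrative sketch for the create_task operation documented here. The field names on BatchTaskCreateContent (`id`, `command_line`) are assumptions based on the Batch REST API, and `client` is an assumed, configured azure.batch client.

    from azure.batch import models as batch_models

    # Add a single Task to an existing Job.
    task = batch_models.BatchTaskCreateContent(
        id="task-1",                                        # must be unique within the Job
        command_line="/bin/bash -c 'echo hello from Batch'",
    )
    client.create_task("my-job-id", task)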
+ :type task: ~azure.batch.models.BatchTaskCreateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(task, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_create_task_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def list_tasks( + self, + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any + ) -> Iterable["_models.BatchTask"]: + """Lists all of the Tasks that are associated with the specified Job. + + For multi-instance Tasks, information such as affinityId, executionInfo and + nodeInfo refer to the primary Task. Use the list subtasks API to retrieve + information about subtasks. + + :param job_id: The ID of the Job. Required. 
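A paging sketch for list_tasks, filtering to completed Tasks; the $filter/$select property names are assumptions based on the Batch REST API.

    # Page through the completed Tasks of a Job, selecting only a few properties.
    for task in client.list_tasks(
        "my-job-id",
        filter="state eq 'completed'",            # OData $filter
        select=["id", "state", "executionInfo"],  # assumed REST property names
    ):
        print(task["id"], task.get("state"))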
+ :type job_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + Default value is None. + :paramtype filter: str + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. + :paramtype expand: list[str] + :return: An iterator like instance of BatchTask + :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchTask] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchTask]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_tasks_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + select=select, + expand=expand, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchTask], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + 
response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def create_task_collection( + self, + job_id: str, + task_collection: _models.BatchTaskGroup, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> _models.BatchTaskAddCollectionResult: + """Adds a collection of Tasks to the specified Job. + + Note that each Task must have a unique ID. The Batch service may not return the + results for each Task in the same order the Tasks were submitted in this + request. If the server times out or the connection is closed during the + request, the request may have been partially or fully processed, or not at all. + In such cases, the user should re-issue the request. Note that it is up to the + user to correctly handle failures when re-issuing a request. For example, you + should use the same Task IDs during a retry so that if the prior operation + succeeded, the retry will not create extra Tasks unexpectedly. If the response + contains any Tasks which failed to add, a client can retry the request. In a + retry, it is most efficient to resubmit only Tasks that failed to add, and to + omit Tasks that were successfully added on the first attempt. The maximum + lifetime of a Task from addition to completion is 180 days. If a Task has not + completed within 180 days of being added it will be terminated by the Batch + service and left in whatever state it was in at that time. + + :param job_id: The ID of the Job to which the Task collection is to be added. Required. + :type job_id: str + :param task_collection: The Tasks to be added. Required. + :type task_collection: ~azure.batch.models.BatchTaskGroup + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: BatchTaskAddCollectionResult. 
The BatchTaskAddCollectionResult is compatible with + MutableMapping + :rtype: ~azure.batch.models.BatchTaskAddCollectionResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[_models.BatchTaskAddCollectionResult] = kwargs.pop("cls", None) + + _content = json.dumps(task_collection, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_create_task_collection_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchTaskAddCollectionResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_task( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a Task from the specified Job. + + When a Task is deleted, all of the files in its directory on the Compute Node + where it ran are also deleted (regardless of the retention time). For + multi-instance Tasks, the delete Task operation applies synchronously to the + primary task; subtasks and their files are then deleted asynchronously in the + background. + + :param job_id: The ID of the Job from which to delete the Task. Required. 
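# --- Illustrative usage sketch (editorial aside, not part of the generated diff) ---
# Follows the retry guidance in the create_task_collection docstring: reuse the
# same Task IDs and resubmit only the Tasks that failed to add. Assumes an
# authenticated `client`; the BatchTaskGroup/result field names (value, status,
# task_id) and the "success" status value are assumptions based on the models
# referenced above.
from azure.batch import models as batch_models

tasks = [
    batch_models.BatchTaskCreateContent(id=f"task-{i}", command_line="/bin/true")
    for i in range(5)
]
result = client.create_task_collection(
    job_id="job-1",
    task_collection=batch_models.BatchTaskGroup(value=tasks),
)
failed_ids = {r.task_id for r in (result.value or []) if r.status != "success"}
if failed_ids:
    retry_group = batch_models.BatchTaskGroup(value=[t for t in tasks if t.id in failed_ids])
    client.create_task_collection(job_id="job-1", task_collection=retry_group)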
+ :type job_id: str + :param task_id: The ID of the Task to delete. Required. + :type task_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_delete_task_request( + job_id=job_id, + task_id=task_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, 
response_headers) # type: ignore + + @distributed_trace + def get_task( + self, + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.BatchTask: + """Gets information about the specified Task. + + For multi-instance Tasks, information such as affinityId, executionInfo and + nodeInfo refer to the primary Task. Use the list subtasks API to retrieve + information about subtasks. + + :param job_id: The ID of the Job that contains the Task. Required. + :type job_id: str + :param task_id: The ID of the Task to get information about. Required. + :type task_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. + :paramtype expand: list[str] + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: BatchTask. 
The BatchTask is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchTask + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchTask] = kwargs.pop("cls", None) + + _request = build_batch_get_task_request( + job_id=job_id, + task_id=task_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + select=select, + expand=expand, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchTask, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def replace_task( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + task_id: str, + task: _models.BatchTask, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Updates the properties of the specified Task. + + :param job_id: The ID of the Job containing the Task. Required. + :type job_id: str + :param task_id: The ID of the Task to update. Required. + :type task_id: str + :param task: The Task to update. Required. 
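# --- Illustrative usage sketch (editorial aside, not part of the generated diff) ---
# Optimistic-concurrency pattern using the etag/match_condition keywords shown in
# the get_task/replace_task docstrings: replace the Task only if it has not
# changed on the service since it was read. Assumes the BatchTask model exposes
# the entity tag as `e_tag`; that attribute name is an assumption.
from azure.core import MatchConditions

task = client.get_task(job_id="job-1", task_id="task-1")
client.replace_task(
    job_id="job-1",
    task_id="task-1",
    task=task,
    etag=task.e_tag,
    match_condition=MatchConditions.IfNotModified,  # HTTP 412 surfaces as ResourceModifiedError
)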
+ :type task: ~azure.batch.models.BatchTask + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(task, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_replace_task_request( + job_id=job_id, + task_id=task_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + 
response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def list_sub_tasks( + self, + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + **kwargs: Any + ) -> Iterable["_models.BatchSubtask"]: + """Lists all of the subtasks that are associated with the specified multi-instance + Task. + + If the Task is not a multi-instance Task then this returns an empty collection. + + :param job_id: The ID of the Job. Required. + :type job_id: str + :param task_id: The ID of the Task. Required. + :type task_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :return: An iterator like instance of BatchSubtask + :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchSubtask] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchSubtask]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_sub_tasks_request( + job_id=job_id, + task_id=task_id, + timeout=timeout, + ocpdate=ocpdate, + select=select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, 
**path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchSubtask], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def terminate_task( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Terminates the specified Task. + + When the Task has been terminated, it moves to the completed state. For + multi-instance Tasks, the terminate Task operation applies synchronously to the + primary task; subtasks are then terminated asynchronously in the background. + + :param job_id: The ID of the Job containing the Task. Required. + :type job_id: str + :param task_id: The ID of the Task to terminate. Required. + :type task_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
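# --- Illustrative usage sketch (editorial aside, not part of the generated diff) ---
# list_sub_tasks only yields data for multi-instance Tasks; for a regular Task the
# iterator is simply empty. The BatchSubtask attribute names (id, state) are
# assumptions based on the model referenced above.
for subtask in client.list_sub_tasks(job_id="job-1", task_id="mpi-task"):
    print(subtask.id, subtask.state)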
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_terminate_task_request( + job_id=job_id, + task_id=task_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def reactivate_task( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Reactivates a Task, allowing it to run again even if its retry count has been + exhausted. + + Reactivation makes a Task eligible to be retried again up to its maximum retry + count. The Task's state is changed to active. As the Task is no longer in the + completed state, any previous exit code or failure information is no longer + available after reactivation. Each time a Task is reactivated, its retry count + is reset to 0. Reactivation will fail for Tasks that are not completed or that + previously completed successfully (with an exit code of 0). Additionally, it + will fail if the Job has completed (or is terminating or deleting). 
+ + :param job_id: The ID of the Job containing the Task. Required. + :type job_id: str + :param task_id: The ID of the Task to reactivate. Required. + :type task_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_reactivate_task_request( + job_id=job_id, + task_id=task_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + 
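# --- Illustrative usage sketch (editorial aside, not part of the generated diff) ---
# terminate_task moves the Task to the completed state; reactivate_task resets its
# retry count to 0 and returns it to the active state, as described in the
# docstrings above. Both return None and surface failures as HttpResponseError
# with the deserialized BatchError attached as `model`.
client.terminate_task(job_id="job-1", task_id="task-1")
# ... later, run the same Task again even if its retries were exhausted:
client.reactivate_task(job_id="job-1", task_id="task-1")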
response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def delete_task_file( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + task_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + recursive: Optional[bool] = None, + **kwargs: Any + ) -> None: + """Deletes the specified Task file from the Compute Node where the Task ran. + + Deletes the specified Task file from the Compute Node where the Task ran. + + :param job_id: The ID of the Job that contains the Task. Required. + :type job_id: str + :param task_id: The ID of the Task whose file you want to retrieve. Required. + :type task_id: str + :param file_path: The path to the Task file that you want to get the content of. Required. + :type file_path: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword recursive: Whether to delete children of a directory. If the filePath parameter + represents + a directory instead of a file, you can set recursive to true to delete the + directory and all of the files and subdirectories in it. If recursive is false + then the directory must be empty or deletion will fail. Default value is None. 
+ :paramtype recursive: bool + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_delete_task_file_request( + job_id=job_id, + task_id=task_id, + file_path=file_path, + timeout=timeout, + ocpdate=ocpdate, + recursive=recursive, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def get_task_file( + self, + job_id: str, + task_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + ocp_range: Optional[str] = None, + **kwargs: Any + ) -> Iterator[bytes]: + """Returns the content of the specified Task file. + + :param job_id: The ID of the Job that contains the Task. Required. + :type job_id: str + :param task_id: The ID of the Task whose file you want to retrieve. Required. + :type task_id: str + :param file_path: The path to the Task file that you want to get the content of. Required. + :type file_path: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. 
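# --- Illustrative usage sketch (editorial aside, not part of the generated diff) ---
# Deleting a directory under a Task's working directory requires recursive=True;
# without it, deleting a non-empty directory fails, per the docstring above. The
# file_path value is an assumption used only for illustration.
client.delete_task_file(
    job_id="job-1",
    task_id="task-1",
    file_path="wd/output",
    recursive=True,
)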
+ :paramtype if_unmodified_since: ~datetime.datetime + :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. + The + format is bytes=startRange-endRange. Default value is None. + :paramtype ocp_range: str + :return: Iterator[bytes] + :rtype: Iterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_batch_get_task_file_request( + job_id=job_id, + task_id=task_id, + file_path=file_path, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + ocp_range=ocp_range, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", True) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["ocp-batch-file-isdirectory"] = self._deserialize( + "bool", response.headers.get("ocp-batch-file-isdirectory") + ) + response_headers["ocp-batch-file-mode"] = self._deserialize("str", response.headers.get("ocp-batch-file-mode")) + response_headers["ocp-batch-file-url"] = self._deserialize("str", response.headers.get("ocp-batch-file-url")) + response_headers["ocp-creation-time"] = self._deserialize("rfc-1123", response.headers.get("ocp-creation-time")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + response_headers["content-type"] = self._deserialize("str", response.headers.get("content-type")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_task_file_properties( + self, + job_id: str, + task_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> bool: + """Gets 
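# --- Illustrative usage sketch (editorial aside, not part of the generated diff) ---
# get_task_file streams the body as Iterator[bytes]; ocp_range limits the download
# using the "bytes=startRange-endRange" format given in the docstring above.
chunks = client.get_task_file(
    job_id="job-1",
    task_id="task-1",
    file_path="stdout.txt",
    ocp_range="bytes=0-1023",  # fetch only the first 1 KiB
)
with open("stdout-head.txt", "wb") as fh:
    for chunk in chunks:
        fh.write(chunk)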
the properties of the specified Task file. + + :param job_id: The ID of the Job that contains the Task. Required. + :type job_id: str + :param task_id: The ID of the Task whose file you want to retrieve. Required. + :type task_id: str + :param file_path: The path to the Task file that you want to get the content of. Required. + :type file_path: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :return: bool + :rtype: bool + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_get_task_file_properties_request( + job_id=job_id, + task_id=task_id, + file_path=file_path, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["ocp-batch-file-isdirectory"] = self._deserialize( + "bool", response.headers.get("ocp-batch-file-isdirectory") + ) + response_headers["ocp-batch-file-mode"] = 
self._deserialize("str", response.headers.get("ocp-batch-file-mode")) + response_headers["ocp-batch-file-url"] = self._deserialize("str", response.headers.get("ocp-batch-file-url")) + response_headers["ocp-creation-time"] = self._deserialize("rfc-1123", response.headers.get("ocp-creation-time")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + return 200 <= response.status_code <= 299 + + @distributed_trace + def list_task_files( + self, + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + recursive: Optional[bool] = None, + **kwargs: Any + ) -> Iterable["_models.BatchNodeFile"]: + """Lists the files in a Task's directory on its Compute Node. + + Lists the files in a Task's directory on its Compute Node. + + :param job_id: The ID of the Job that contains the Task. Required. + :type job_id: str + :param task_id: The ID of the Task whose files you want to list. Required. + :type task_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + Default value is None. + :paramtype filter: str + :keyword recursive: Whether to list children of the Task directory. This parameter can be used + in + combination with the filter parameter to list specific type of files. Default value is None. 
+ :paramtype recursive: bool + :return: An iterator like instance of BatchNodeFile + :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchNodeFile] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchNodeFile]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_task_files_request( + job_id=job_id, + task_id=task_id, + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + recursive=recursive, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def create_node_user( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + user: _models.BatchNodeUserCreateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Adds a user Account to the specified Compute Node. + + You can add a user Account to a Compute Node only when it is in the idle or + running state. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the machine on which you want to create a user Account. Required. + :type node_id: str + :param user: The options to use for creating the user. 
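# --- Illustrative usage sketch (editorial aside, not part of the generated diff) ---
# list_task_files pages through BatchNodeFile entries; recursive=True also walks
# subdirectories of the Task directory. The attribute names (name, is_directory)
# are assumptions based on the model referenced above.
for node_file in client.list_task_files(job_id="job-1", task_id="task-1", recursive=True):
    if not node_file.is_directory:
        print(node_file.name)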
Required. + :type user: ~azure.batch.models.BatchNodeUserCreateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(user, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_create_node_user_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def delete_node_user( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + user_name: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Deletes a user Account from the specified Compute Node. + + You can delete a user Account to a Compute Node only when it is in the idle or + running state. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the machine on which you want to delete a user Account. Required. + :type node_id: str + :param user_name: The name of the user Account to delete. Required. 
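# --- Illustrative usage sketch (editorial aside, not part of the generated diff) ---
# Adds an admin user Account to an idle or running Compute Node. The
# BatchNodeUserCreateContent field names (name, password, is_admin) are
# assumptions based on the model referenced in the docstring above.
from azure.batch import models as batch_models

user = batch_models.BatchNodeUserCreateContent(
    name="node-admin",
    password="<placeholder-password>",
    is_admin=True,
)
client.create_node_user(pool_id="pool-1", node_id="node-1", user=user)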
+ :type user_name: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_delete_node_user_request( + pool_id=pool_id, + node_id=node_id, + user_name=user_name, + timeout=timeout, + ocpdate=ocpdate, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def replace_node_user( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + user_name: str, + content: _models.BatchNodeUserUpdateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Updates the password and expiration time of a user Account on the specified Compute Node. + + This operation replaces of all the updatable properties of the Account. For + example, if the expiryTime element is not specified, the current value is + replaced with the default value, not left unmodified. You can update a user + Account on a Compute Node only when it is in the idle or running state. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the machine on which you want to update a user Account. Required. + :type node_id: str + :param user_name: The name of the user Account to update. Required. + :type user_name: str + :param content: The options to use for updating the user. Required. + :type content: ~azure.batch.models.BatchNodeUserUpdateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_replace_node_user_request( + pool_id=pool_id, + node_id=node_id, + user_name=user_name, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def get_node( + self, + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + **kwargs: Any + ) -> _models.BatchNode: + """Gets information about the specified Compute Node. + + Gets information about the specified Compute Node. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to get information about. Required. + :type node_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :return: BatchNode. The BatchNode is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchNode + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchNode] = kwargs.pop("cls", None) + + _request = build_batch_get_node_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + select=select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchNode, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def reboot_node( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + parameters: Optional[_models.BatchNodeRebootContent] = None, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Restarts the specified Compute Node. + + You can restart a Compute Node only if it is in an idle or running state. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to restart. Required. + :type node_id: str + :param parameters: The options to use for rebooting the Compute Node. Default value is None. + :type parameters: ~azure.batch.models.BatchNodeRebootContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + if parameters is not None: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + else: + _content = None + + _request = build_batch_reboot_node_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def disable_node_scheduling( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + parameters: Optional[_models.BatchNodeDisableSchedulingContent] = None, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Disables Task scheduling on the specified Compute Node. + + You can disable Task scheduling on a Compute Node only if its current + scheduling state is enabled. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node on which you want to disable Task scheduling. + Required. + :type node_id: str + :param parameters: The options to use for disabling scheduling on the Compute Node. Default + value is None. 
+ :type parameters: ~azure.batch.models.BatchNodeDisableSchedulingContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + if parameters is not None: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + else: + _content = None + + _request = build_batch_disable_node_scheduling_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def enable_node_scheduling( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Enables Task scheduling on the specified Compute Node. + + You can enable Task scheduling on a Compute Node only if its current scheduling + state is disabled. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node on which you want to enable Task scheduling. + Required. 
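
A short sketch of draining and recycling a Compute Node with the scheduling and reboot operations shown here, assuming a constructed azure.batch.BatchClient; passing parameters=None keeps the service defaults for the optional request bodies.

def recycle_node(client, pool_id: str, node_id: str) -> None:
    # Sketch only: `client` is assumed to be a constructed azure.batch.BatchClient.
    client.disable_node_scheduling(pool_id, node_id, parameters=None)  # stop scheduling new Tasks
    client.reboot_node(pool_id, node_id, parameters=None)              # accepted asynchronously (202)
    # Real code would wait for the reboot to finish before re-enabling scheduling.
    client.enable_node_scheduling(pool_id, node_id)
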
+ :type node_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_enable_node_scheduling_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def get_node_remote_login_settings( + self, + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> _models.BatchNodeRemoteLoginSettings: + """Gets the settings required for remote login to a Compute Node. + + Before you can remotely login to a Compute Node using the remote login + settings, you must create a user Account on the Compute Node. This API can be + invoked only on Pools created with the virtual machine configuration property. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node for which to obtain the remote login settings. + Required. + :type node_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. 
+ :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: BatchNodeRemoteLoginSettings. The BatchNodeRemoteLoginSettings is compatible with + MutableMapping + :rtype: ~azure.batch.models.BatchNodeRemoteLoginSettings + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchNodeRemoteLoginSettings] = kwargs.pop("cls", None) + + _request = build_batch_get_node_remote_login_settings_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchNodeRemoteLoginSettings, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def upload_node_logs( + self, + pool_id: str, + node_id: str, + content: _models.UploadBatchServiceLogsContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> _models.UploadBatchServiceLogsResult: + """Upload Azure Batch service log files from the specified Compute Node to Azure + Blob Storage. + + This is for gathering Azure Batch service log files in an automated fashion + from Compute Nodes if you are experiencing an error and wish to escalate to + Azure support. The Azure Batch service log files should be shared with Azure + support to aid in debugging issues with the Batch service. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. 
+ :type pool_id: str + :param node_id: The ID of the Compute Node for which you want to get the Remote Desktop + Protocol file. Required. + :type node_id: str + :param content: The Azure Batch service log files upload options. Required. + :type content: ~azure.batch.models.UploadBatchServiceLogsContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: UploadBatchServiceLogsResult. The UploadBatchServiceLogsResult is compatible with + MutableMapping + :rtype: ~azure.batch.models.UploadBatchServiceLogsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[_models.UploadBatchServiceLogsResult] = kwargs.pop("cls", None) + + _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_upload_node_logs_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.UploadBatchServiceLogsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_nodes( + self, + pool_id: 
str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + **kwargs: Any + ) -> Iterable["_models.BatchNode"]: + """Lists the Compute Nodes in the specified Pool. + + Lists the Compute Nodes in the specified Pool. + + :param pool_id: The ID of the Pool from which you want to list Compute Nodes. Required. + :type pool_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + Default value is None. + :paramtype filter: str + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :return: An iterator like instance of BatchNode + :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchNode] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchNode]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_nodes_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + select=select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchNode], deserialized["value"]) + if cls: + list_of_elem = 
cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def get_node_extension( + self, + pool_id: str, + node_id: str, + extension_name: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + **kwargs: Any + ) -> _models.BatchNodeVMExtension: + """Gets information about the specified Compute Node Extension. + + Gets information about the specified Compute Node Extension. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node that contains the extensions. Required. + :type node_id: str + :param extension_name: The name of the Compute Node Extension that you want to get information + about. Required. + :type extension_name: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :return: BatchNodeVMExtension. 
The BatchNodeVMExtension is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchNodeVMExtension + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchNodeVMExtension] = kwargs.pop("cls", None) + + _request = build_batch_get_node_extension_request( + pool_id=pool_id, + node_id=node_id, + extension_name=extension_name, + timeout=timeout, + ocpdate=ocpdate, + select=select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchNodeVMExtension, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_node_extensions( + self, + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + select: Optional[List[str]] = None, + **kwargs: Any + ) -> Iterable["_models.BatchNodeVMExtension"]: + """Lists the Compute Nodes Extensions in the specified Pool. + + Lists the Compute Nodes Extensions in the specified Pool. + + :param pool_id: The ID of the Pool that contains Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to list extensions. Required. + :type node_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. 
+ :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :return: An iterator like instance of BatchNodeVMExtension + :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchNodeVMExtension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchNodeVMExtension]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_node_extensions_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + select=select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchNodeVMExtension], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def delete_node_file( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + recursive: Optional[bool] = None, + **kwargs: Any + ) -> None: + """Deletes the specified file from the Compute Node. + + Deletes the specified file from the Compute Node. 
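
Both list operations above return ItemPaged, so callers simply iterate and paging follows odata.nextLink transparently. A small sketch, assuming a constructed azure.batch.BatchClient; the OData $filter expression and the id/state attribute names are illustrative assumptions.

def print_idle_nodes(client, pool_id: str) -> None:
    # Sketch only: `client` is assumed to be a constructed azure.batch.BatchClient.
    # The filter string and the id/state attributes are assumptions for illustration.
    nodes = client.list_nodes(
        pool_id,
        filter="state eq 'idle'",
        select=["id", "state"],
    )
    for node in nodes:  # ItemPaged follows odata.nextLink automatically
        print(node.id, node.state)
        for extension in client.list_node_extensions(pool_id, node.id):
            print("  extension:", extension)
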
+ + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node. Required. + :type node_id: str + :param file_path: The path to the file or directory. Required. + :type file_path: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword recursive: Whether to delete children of a directory. If the filePath parameter + represents + a directory instead of a file, you can set recursive to true to delete the + directory and all of the files and subdirectories in it. If recursive is false + then the directory must be empty or deletion will fail. Default value is None. + :paramtype recursive: bool + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_delete_node_file_request( + pool_id=pool_id, + node_id=node_id, + file_path=file_path, + timeout=timeout, + ocpdate=ocpdate, + recursive=recursive, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def get_node_file( + self, + pool_id: str, + node_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + ocp_range: Optional[str] = None, + **kwargs: Any + ) -> Iterator[bytes]: + """Returns the content of the specified Compute Node file. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node. Required. + :type node_id: str + :param file_path: The path to the file or directory. Required. 
+ :type file_path: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. + The + format is bytes=startRange-endRange. Default value is None. + :paramtype ocp_range: str + :return: Iterator[bytes] + :rtype: Iterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_batch_get_node_file_request( + pool_id=pool_id, + node_id=node_id, + file_path=file_path, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + ocp_range=ocp_range, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", True) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["ocp-batch-file-isdirectory"] = self._deserialize( + "bool", 
response.headers.get("ocp-batch-file-isdirectory") + ) + response_headers["ocp-batch-file-mode"] = self._deserialize("str", response.headers.get("ocp-batch-file-mode")) + response_headers["ocp-batch-file-url"] = self._deserialize("str", response.headers.get("ocp-batch-file-url")) + response_headers["ocp-creation-time"] = self._deserialize("rfc-1123", response.headers.get("ocp-creation-time")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + response_headers["content-type"] = self._deserialize("str", response.headers.get("content-type")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_node_file_properties( + self, + pool_id: str, + node_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> bool: + """Gets the properties of the specified Compute Node file. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node. Required. + :type node_id: str + :param file_path: The path to the file or directory. Required. + :type file_path: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. 
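
get_node_file streams by default (stream defaults to True above), so the returned Iterator[bytes] can be written to disk chunk by chunk. A short sketch, assuming a constructed azure.batch.BatchClient:

def download_node_file(client, pool_id: str, node_id: str, file_path: str, local_path: str) -> None:
    # Sketch only: `client` is assumed to be a constructed azure.batch.BatchClient.
    chunks = client.get_node_file(pool_id, node_id, file_path)  # Iterator[bytes]
    with open(local_path, "wb") as handle:
        for chunk in chunks:
            handle.write(chunk)
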
+ :paramtype if_unmodified_since: ~datetime.datetime + :return: bool + :rtype: bool + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_get_node_file_properties_request( + pool_id=pool_id, + node_id=node_id, + file_path=file_path, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["ocp-batch-file-isdirectory"] = self._deserialize( + "bool", response.headers.get("ocp-batch-file-isdirectory") + ) + response_headers["ocp-batch-file-mode"] = self._deserialize("str", response.headers.get("ocp-batch-file-mode")) + response_headers["ocp-batch-file-url"] = self._deserialize("str", response.headers.get("ocp-batch-file-url")) + response_headers["ocp-creation-time"] = self._deserialize("rfc-1123", response.headers.get("ocp-creation-time")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + return 200 <= response.status_code <= 299 + + @distributed_trace + def list_node_files( + self, + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + recursive: Optional[bool] = None, + **kwargs: Any + ) -> Iterable["_models.BatchNodeFile"]: + """Lists all of the files in Task directories on the specified Compute Node. + + Lists all of the files in Task directories on the specified Compute Node. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node whose files you want to list. Required. + :type node_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". 
Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + Default value is None. + :paramtype filter: str + :keyword recursive: Whether to list children of a directory. Default value is None. + :paramtype recursive: bool + :return: An iterator like instance of BatchNodeFile + :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchNodeFile] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchNodeFile]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_node_files_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + recursive=recursive, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise 
HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) diff --git a/sdk/batch/azure-batch/azure/batch/_operations/_patch.py b/sdk/batch/azure-batch/azure/batch/_operations/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/_operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/batch/azure-batch/azure/batch/_patch.py b/sdk/batch/azure-batch/azure/batch/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/batch/azure-batch/azure/batch/_serialization.py b/sdk/batch/azure-batch/azure/batch/_serialization.py new file mode 100644 index 000000000000..8139854b97bb --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/_serialization.py @@ -0,0 +1,2000 @@ +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
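
The _patch.py modules added above are the supported hook for hand-written customizations: per the comment in the generated file, anything listed in their __all__ is made publicly available at the package level. A hypothetical example of what such a customization could look like; the helper name and the BatchNode.state comparison are assumptions, not code from azure-batch.

from typing import List

def node_is_idle(client, pool_id: str, node_id: str) -> bool:
    """Hypothetical helper a user might add in _patch.py, built on the generated get_node."""
    node = client.get_node(pool_id, node_id, select=["state"])
    return node.state == "idle"  # assumes BatchNode exposes a `state` attribute

__all__: List[str] = ["node_is_idle"]  # surfaces the helper at the package level
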
+# +# -------------------------------------------------------------------------- + +# pylint: skip-file +# pyright: reportUnnecessaryTypeIgnoreComment=false + +from base64 import b64decode, b64encode +import calendar +import datetime +import decimal +import email +from enum import Enum +import json +import logging +import re +import sys +import codecs +from typing import ( + Dict, + Any, + cast, + Optional, + Union, + AnyStr, + IO, + Mapping, + Callable, + TypeVar, + MutableMapping, + Type, + List, + Mapping, +) + +try: + from urllib import quote # type: ignore +except ImportError: + from urllib.parse import quote +import xml.etree.ElementTree as ET + +import isodate # type: ignore + +from azure.core.exceptions import DeserializationError, SerializationError +from azure.core.serialization import NULL as CoreNull + +_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") + +ModelType = TypeVar("ModelType", bound="Model") +JSON = MutableMapping[str, Any] + + +class RawDeserializer: + + # Accept "text" because we're open minded people... + JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") + + # Name used in context + CONTEXT_NAME = "deserialized_data" + + @classmethod + def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: + """Decode data according to content-type. + + Accept a stream of data as well, but will be load at once in memory for now. + + If no content-type, will return the string version (not bytes, not stream) + + :param data: Input, could be bytes or stream (will be decoded with UTF8) or text + :type data: str or bytes or IO + :param str content_type: The content type. + """ + if hasattr(data, "read"): + # Assume a stream + data = cast(IO, data).read() + + if isinstance(data, bytes): + data_as_str = data.decode(encoding="utf-8-sig") + else: + # Explain to mypy the correct type. + data_as_str = cast(str, data) + + # Remove Byte Order Mark if present in string + data_as_str = data_as_str.lstrip(_BOM) + + if content_type is None: + return data + + if cls.JSON_REGEXP.match(content_type): + try: + return json.loads(data_as_str) + except ValueError as err: + raise DeserializationError("JSON is invalid: {}".format(err), err) + elif "xml" in (content_type or []): + try: + + try: + if isinstance(data, unicode): # type: ignore + # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string + data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore + except NameError: + pass + + return ET.fromstring(data_as_str) # nosec + except ET.ParseError as err: + # It might be because the server has an issue, and returned JSON with + # content-type XML.... + # So let's try a JSON load, and if it's still broken + # let's flow the initial exception + def _json_attemp(data): + try: + return True, json.loads(data) + except ValueError: + return False, None # Don't care about this one + + success, json_result = _json_attemp(data) + if success: + return json_result + # If i'm here, it's not JSON, it's not XML, let's scream + # and raise the last context in this block (the XML exception) + # The function hack is because Py2.7 messes up with exception + # context otherwise. 
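The `JSON_REGEXP` check above is what lets `deserialize_from_text` accept both plain and vendor-suffixed JSON media types; here is a standalone sketch of that branch (illustrative only, not an import of the private module):

```python
import codecs
import json
import re

JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$")
_BOM = codecs.BOM_UTF8.decode("utf-8")

def decode(body: bytes, content_type: str):
    text = body.decode("utf-8-sig").lstrip(_BOM)   # strip a BOM if the server sent one
    if JSON_REGEXP.match(content_type.split(";")[0].strip().lower()):
        return json.loads(text)
    return text                                    # non-JSON falls through as text

print(decode(b'{"value": []}', "application/json"))        # {'value': []}
print(decode(b'{"value": []}', "application/odata+json"))  # vendor "+json" types also match
```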
+ _LOGGER.critical("Wasn't XML not JSON, failing") + raise DeserializationError("XML is invalid") from err + elif content_type.startswith("text/"): + return data_as_str + raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) + + @classmethod + def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any: + """Deserialize from HTTP response. + + Use bytes and headers to NOT use any requests/aiohttp or whatever + specific implementation. + Headers will tested for "content-type" + """ + # Try to use content-type from headers if available + content_type = None + if "content-type" in headers: + content_type = headers["content-type"].split(";")[0].strip().lower() + # Ouch, this server did not declare what it sent... + # Let's guess it's JSON... + # Also, since Autorest was considering that an empty body was a valid JSON, + # need that test as well.... + else: + content_type = "application/json" + + if body_bytes: + return cls.deserialize_from_text(body_bytes, content_type) + return None + + +_LOGGER = logging.getLogger(__name__) + +try: + _long_type = long # type: ignore +except NameError: + _long_type = int + + +class UTC(datetime.tzinfo): + """Time Zone info for handling UTC""" + + def utcoffset(self, dt): + """UTF offset for UTC is 0.""" + return datetime.timedelta(0) + + def tzname(self, dt): + """Timestamp representation.""" + return "Z" + + def dst(self, dt): + """No daylight saving for UTC.""" + return datetime.timedelta(hours=1) + + +try: + from datetime import timezone as _FixedOffset # type: ignore +except ImportError: # Python 2.7 + + class _FixedOffset(datetime.tzinfo): # type: ignore + """Fixed offset in minutes east from UTC. + Copy/pasted from Python doc + :param datetime.timedelta offset: offset in timedelta format + """ + + def __init__(self, offset): + self.__offset = offset + + def utcoffset(self, dt): + return self.__offset + + def tzname(self, dt): + return str(self.__offset.total_seconds() / 3600) + + def __repr__(self): + return "".format(self.tzname(None)) + + def dst(self, dt): + return datetime.timedelta(0) + + def __getinitargs__(self): + return (self.__offset,) + + +try: + from datetime import timezone + + TZ_UTC = timezone.utc +except ImportError: + TZ_UTC = UTC() # type: ignore + +_FLATTEN = re.compile(r"(? 
None: + self.additional_properties: Optional[Dict[str, Any]] = {} + for k in kwargs: + if k not in self._attribute_map: + _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) + elif k in self._validation and self._validation[k].get("readonly", False): + _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__) + else: + setattr(self, k, kwargs[k]) + + def __eq__(self, other: Any) -> bool: + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other: Any) -> bool: + """Compare objects by comparing all attributes.""" + return not self.__eq__(other) + + def __str__(self) -> str: + return str(self.__dict__) + + @classmethod + def enable_additional_properties_sending(cls) -> None: + cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"} + + @classmethod + def is_xml_model(cls) -> bool: + try: + cls._xml_map # type: ignore + except AttributeError: + return False + return True + + @classmethod + def _create_xml_node(cls): + """Create XML node.""" + try: + xml_map = cls._xml_map # type: ignore + except AttributeError: + xml_map = {} + + return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None)) + + def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: + """Return the JSON that would be sent to server from this model. + + This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`. + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize(self, keep_readonly=keep_readonly, **kwargs) # type: ignore + + def as_dict( + self, + keep_readonly: bool = True, + key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer, + **kwargs: Any + ) -> JSON: + """Return a dict that can be serialized using json.dump. + + Advanced usage might optionally use a callback as parameter: + + .. code::python + + def my_key_transformer(key, attr_desc, value): + return key + + Key is the attribute name used in Python. Attr_desc + is a dict of metadata. Currently contains 'type' with the + msrest type and 'key' with the RestAPI encoded key. + Value is the current value in this object. + + The string returned will be used to serialize the key. + If the return type is a list, this is considered hierarchical + result dict. + + See the three examples in this file: + + - attribute_transformer + - full_restapi_key_transformer + - last_restapi_key_transformer + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param function key_transformer: A key transformer function. 
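To make the `as_dict` callback contract described above concrete, the snippet below shows a minimal key transformer and the arguments it would receive for one attribute; the attribute name and metadata are hypothetical.

```python
def upper_key_transformer(key, attr_desc, value):
    """key is the Python attribute name; attr_desc holds the REST 'key' and 'type'."""
    return attr_desc["key"].upper()

# What the serializer would pass for a single (hypothetical) attribute:
print(upper_key_transformer("display_name", {"key": "displayName", "type": "str"}, "demo"))
# -> DISPLAYNAME; the returned string becomes the key in the emitted dict
```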
+ :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize(self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs) # type: ignore + + @classmethod + def _infer_class_models(cls): + try: + str_models = cls.__module__.rsplit(".", 1)[0] + models = sys.modules[str_models] + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + if cls.__name__ not in client_models: + raise ValueError("Not Autorest generated code") + except Exception: + # Assume it's not Autorest generated (tests?). Add ourselves as dependencies. + client_models = {cls.__name__: cls} + return client_models + + @classmethod + def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = None) -> ModelType: + """Parse a str using the RestAPI syntax and return a model. + + :param str data: A str using RestAPI structure. JSON by default. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises: DeserializationError if something went wrong + """ + deserializer = Deserializer(cls._infer_class_models()) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def from_dict( + cls: Type[ModelType], + data: Any, + key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, + content_type: Optional[str] = None, + ) -> ModelType: + """Parse a dict using given key extractor return a model. + + By default consider key + extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor + and last_rest_key_case_insensitive_extractor) + + :param dict data: A dict using RestAPI structure + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises: DeserializationError if something went wrong + """ + deserializer = Deserializer(cls._infer_class_models()) + deserializer.key_extractors = ( # type: ignore + [ # type: ignore + attribute_key_case_insensitive_extractor, + rest_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + if key_extractors is None + else key_extractors + ) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def _flatten_subtype(cls, key, objects): + if "_subtype_map" not in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result.update(objects[valuetype]._flatten_subtype(key, objects)) + return result + + @classmethod + def _classify(cls, response, objects): + """Check the class _subtype_map for any child classes. + We want to ignore any inherited _subtype_maps. + Remove the polymorphic key from the initial data. + """ + for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): + subtype_value = None + + if not isinstance(response, ET.Element): + rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] + subtype_value = response.pop(rest_api_response_key, None) or response.pop(subtype_key, None) + else: + subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) + if subtype_value: + # Try to match base class. 
Can be class name only + # (bug to fix in Autorest to support x-ms-discriminator-name) + if cls.__name__ == subtype_value: + return cls + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + try: + return objects[flatten_mapping_type[subtype_value]] # type: ignore + except KeyError: + _LOGGER.warning( + "Subtype value %s has no mapping, use base class %s.", + subtype_value, + cls.__name__, + ) + break + else: + _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) + break + return cls + + @classmethod + def _get_rest_key_parts(cls, attr_key): + """Get the RestAPI key of this attr, split it and decode part + :param str attr_key: Attribute key must be in attribute_map. + :returns: A list of RestAPI part + :rtype: list + """ + rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) + return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] + + +def _decode_attribute_map_key(key): + """This decode a key in an _attribute_map to the actual key we want to look at + inside the received data. + + :param str key: A key string from the generated code + """ + return key.replace("\\.", ".") + + +class Serializer(object): + """Request object model serializer.""" + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} + days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} + months = { + 1: "Jan", + 2: "Feb", + 3: "Mar", + 4: "Apr", + 5: "May", + 6: "Jun", + 7: "Jul", + 8: "Aug", + 9: "Sep", + 10: "Oct", + 11: "Nov", + 12: "Dec", + } + validation = { + "min_length": lambda x, y: len(x) < y, + "max_length": lambda x, y: len(x) > y, + "minimum": lambda x, y: x < y, + "maximum": lambda x, y: x > y, + "minimum_ex": lambda x, y: x <= y, + "maximum_ex": lambda x, y: x >= y, + "min_items": lambda x, y: len(x) < y, + "max_items": lambda x, y: len(x) > y, + "pattern": lambda x, y: not re.match(y, x, re.UNICODE), + "unique": lambda x, y: len(x) != len(set(x)), + "multiple": lambda x, y: x % y != 0, + } + + def __init__(self, classes: Optional[Mapping[str, type]] = None): + self.serialize_type = { + "iso-8601": Serializer.serialize_iso, + "rfc-1123": Serializer.serialize_rfc, + "unix-time": Serializer.serialize_unix, + "duration": Serializer.serialize_duration, + "date": Serializer.serialize_date, + "time": Serializer.serialize_time, + "decimal": Serializer.serialize_decimal, + "long": Serializer.serialize_long, + "bytearray": Serializer.serialize_bytearray, + "base64": Serializer.serialize_base64, + "object": self.serialize_object, + "[]": self.serialize_iter, + "{}": self.serialize_dict, + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True + + def _serialize(self, target_obj, data_type=None, **kwargs): + """Serialize data into a string according to type. + + :param target_obj: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, dict + :raises: SerializationError if serialization fails. 
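Each entry in the `validation` table above is a predicate that returns True when a value violates its constraint; a small standalone check built the same way:

```python
import re

validation = {
    "min_length": lambda x, y: len(x) < y,
    "max_length": lambda x, y: len(x) > y,
    "pattern": lambda x, y: not re.match(y, x, re.UNICODE),
}

def violations(value, constraints):
    # Collect the names of every constraint the value fails.
    return [rule for rule, limit in constraints.items() if validation[rule](value, limit)]

print(violations("pool-1", {"min_length": 1, "max_length": 64}))  # []
print(violations("", {"min_length": 1}))                          # ['min_length']
```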
+ """ + key_transformer = kwargs.get("key_transformer", self.key_transformer) + keep_readonly = kwargs.get("keep_readonly", False) + if target_obj is None: + return None + + attr_name = None + class_name = target_obj.__class__.__name__ + + if data_type: + return self.serialize_data(target_obj, data_type, **kwargs) + + if not hasattr(target_obj, "_attribute_map"): + data_type = type(target_obj).__name__ + if data_type in self.basic_types.values(): + return self.serialize_data(target_obj, data_type, **kwargs) + + # Force "is_xml" kwargs if we detect a XML model + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) + + serialized = {} + if is_xml_model_serialization: + serialized = target_obj._create_xml_node() + try: + attributes = target_obj._attribute_map + for attr, attr_desc in attributes.items(): + attr_name = attr + if not keep_readonly and target_obj._validation.get(attr_name, {}).get("readonly", False): + continue + + if attr_name == "additional_properties" and attr_desc["key"] == "": + if target_obj.additional_properties is not None: + serialized.update(target_obj.additional_properties) + continue + try: + + orig_attr = getattr(target_obj, attr) + if is_xml_model_serialization: + pass # Don't provide "transformer" for XML for now. Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) # type: ignore + continue + if xml_desc.get("text", False): + serialized.text = new_attr # type: ignore + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) # type: ignore + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, we MUST replace the tag with the local tag. But keeping the namespaces. 
+ if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) # type: ignore + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = str(new_attr) + serialized.append(local_node) # type: ignore + else: # JSON + for k in reversed(keys): # type: ignore + new_attr = {k: new_attr} + + _new_attr = new_attr + _serialized = serialized + for k in keys: # type: ignore + if k not in _serialized: + _serialized.update(_new_attr) # type: ignore + _new_attr = _new_attr[k] # type: ignore + _serialized = _serialized[k] + except ValueError as err: + if isinstance(err, SerializationError): + raise + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise SerializationError(msg) from err + else: + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises: SerializationError if serialization fails. + :raises: ValueError if data is None + """ + + # Just in case this is a dict + internal_data_type_str = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type_str, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. + deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ # type: ignore + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) + except DeserializationError as err: + raise SerializationError("Unable to build a model: " + str(err)) from err + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + output = output.replace("{", quote("{")).replace("}", quote("}")) + else: + output = quote(str(output), safe="") + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. 
+ + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :keyword bool skip_quote: Whether to skip quote the serialized result. + Defaults to False. + :rtype: str, list + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + do_quote = not kwargs.get("skip_quote", False) + return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :param bool required: Whether it's essential that the data not be + empty or None + :raises: AttributeError if required data is None. + :raises: ValueError if data is None + :raises: SerializationError if serialization fails. + """ + if data is None: + raise ValueError("No value for given attribute") + + try: + if data is CoreNull: + return None + if data_type in self.basic_types.values(): + return self.serialize_basic(data, data_type, **kwargs) + + elif data_type in self.serialize_type: + return self.serialize_type[data_type](data, **kwargs) + + # If dependencies is empty, try with current data class + # It has to be a subclass of Enum anyway + enum_type = self.dependencies.get(data_type, data.__class__) + if issubclass(enum_type, Enum): + return Serializer.serialize_enum(data, enum_obj=enum_type) + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.serialize_type: + return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) + + except (ValueError, TypeError) as err: + msg = "Unable to serialize value: {!r} as type: {!r}." + raise SerializationError(msg.format(data, data_type)) from err + else: + return self._serialize(data, **kwargs) + + @classmethod + def _get_custom_serializers(cls, data_type, **kwargs): + custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) + if custom_serializer: + return custom_serializer + if kwargs.get("is_xml", False): + return cls._xml_basic_types_serializers.get(data_type) + + @classmethod + def serialize_basic(cls, data, data_type, **kwargs): + """Serialize basic builting data type. + Serializes objects to str, int, float or bool. 
+ + Possible kwargs: + - basic_types_serializers dict[str, callable] : If set, use the callable as serializer + - is_xml bool : If set, use xml_basic_types_serializers + + :param data: Object to be serialized. + :param str data_type: Type of object in the iterable. + """ + custom_serializer = cls._get_custom_serializers(data_type, **kwargs) + if custom_serializer: + return custom_serializer(data) + if data_type == "str": + return cls.serialize_unicode(data) + return eval(data_type)(data) # nosec + + @classmethod + def serialize_unicode(cls, data): + """Special handling for serializing unicode strings in Py2. + Encode to UTF-8 if unicode, otherwise handle as a str. + + :param data: Object to be serialized. + :rtype: str + """ + try: # If I received an enum, return its value + return data.value + except AttributeError: + pass + + try: + if isinstance(data, unicode): # type: ignore + # Don't change it, JSON and XML ElementTree are totally able + # to serialize correctly u'' strings + return data + except NameError: + return str(data) + else: + return str(data) + + def serialize_iter(self, data, iter_type, div=None, **kwargs): + """Serialize iterable. + + Supported kwargs: + - serialization_ctxt dict : The current entry of _attribute_map, or same format. + serialization_ctxt['type'] should be same as data_type. + - is_xml bool : If set, serialize as XML + + :param list attr: Object to be serialized. + :param str iter_type: Type of object in the iterable. + :param bool required: Whether the objects in the iterable must + not be None or empty. + :param str div: If set, this str will be used to combine the elements + in the iterable into a combined string. Default is 'None'. + :keyword bool do_quote: Whether to quote the serialized result of each iterable element. + Defaults to False. + :rtype: list, str + """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") + + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + is_xml = kwargs.get("is_xml", False) + + serialized = [] + for d in data: + try: + serialized.append(self.serialize_data(d, iter_type, **kwargs)) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized.append(None) + + if kwargs.get("do_quote", False): + serialized = ["" if s is None else quote(str(s), safe="") for s in serialized] + + if div: + serialized = ["" if s is None else str(s) for s in serialized] + serialized = div.join(serialized) + + if "xml" in serialization_ctxt or is_xml: + # XML serialization is more complicated + xml_desc = serialization_ctxt.get("xml", {}) + xml_name = xml_desc.get("name") + if not xml_name: + xml_name = serialization_ctxt["key"] + + # Create a wrap node if necessary (use the fact that Element and list have "append") + is_wrapped = xml_desc.get("wrapped", False) + node_name = xml_desc.get("itemsName", xml_name) + if is_wrapped: + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + else: + final_result = [] + # All list elements to "local_node" + for el in serialized: + if isinstance(el, ET.Element): + el_node = el + else: + el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + if el is not None: # Otherwise it writes "None" :-p + el_node.text = str(el) + final_result.append(el_node) + return final_result + return serialized + + def serialize_dict(self, attr, dict_type, **kwargs): + """Serialize a dictionary of objects. + + :param dict attr: Object to be serialized. 
+ :param str dict_type: Type of object in the dictionary. + :param bool required: Whether the objects in the dictionary must + not be None or empty. + :rtype: dict + """ + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized[self.serialize_unicode(key)] = None + + if "xml" in serialization_ctxt: + # XML serialization is more complicated + xml_desc = serialization_ctxt["xml"] + xml_name = xml_desc["name"] + + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + for key, value in serialized.items(): + ET.SubElement(final_result, key).text = value + return final_result + + return serialized + + def serialize_object(self, attr, **kwargs): + """Serialize a generic object. + This will be handled as a dictionary. If object passed in is not + a basic type (str, int, float, dict, list) it will simply be + cast to str. + + :param dict attr: Object to be serialized. + :rtype: dict or str + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + return attr + obj_type = type(attr) + if obj_type in self.basic_types: + return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) + if obj_type is _long_type: + return self.serialize_long(attr) + if obj_type is str: + return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) + + # If it's a model or I know this dependency, serialize as a Model + elif obj_type in self.dependencies.values() or isinstance(attr, Model): + return self._serialize(attr) + + if obj_type == dict: + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + return serialized + + if obj_type == list: + serialized = [] + for obj in attr: + try: + serialized.append(self.serialize_object(obj, **kwargs)) + except ValueError: + pass + return serialized + return str(attr) + + @staticmethod + def serialize_enum(attr, enum_obj=None): + try: + result = attr.value + except AttributeError: + result = attr + try: + enum_obj(result) # type: ignore + return result + except ValueError: + for enum_value in enum_obj: # type: ignore + if enum_value.value.lower() == str(attr).lower(): + return enum_value.value + error = "{!r} is not valid value for enum {!r}" + raise SerializationError(error.format(attr, enum_obj)) + + @staticmethod + def serialize_bytearray(attr, **kwargs): + """Serialize bytearray into base-64 string. + + :param attr: Object to be serialized. + :rtype: str + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): + """Serialize str into base-64 string. + + :param attr: Object to be serialized. + :rtype: str + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): + """Serialize Decimal object to float. 
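`serialize_base64` above emits URL-safe base64 with the padding stripped; an equivalent standalone helper (the input bytes are arbitrary sample data):

```python
from base64 import b64encode

def to_base64url(data: bytes) -> str:
    return b64encode(data).decode("ascii").strip("=").replace("+", "-").replace("/", "_")

print(to_base64url(b"\xfb\xef\xbe"))   # '----' : '+' and '/' swapped, no '=' padding
```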
+ + :param attr: Object to be serialized. + :rtype: float + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): + """Serialize long (Py2) or int (Py3). + + :param attr: Object to be serialized. + :rtype: int/long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_time(attr) + t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) + if attr.microsecond: + t += ".{:02}".format(attr.microsecond) + return t + + @staticmethod + def serialize_duration(attr, **kwargs): + """Serialize TimeDelta object into ISO-8601 formatted string. + + :param TimeDelta attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_duration(attr) + return isodate.duration_isoformat(attr) + + @staticmethod + def serialize_rfc(attr, **kwargs): + """Serialize Datetime object into RFC-1123 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: TypeError if format invalid. + """ + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + except AttributeError: + raise TypeError("RFC1123 object must be valid Datetime object.") + + return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( + Serializer.days[utc.tm_wday], + utc.tm_mday, + Serializer.months[utc.tm_mon], + utc.tm_year, + utc.tm_hour, + utc.tm_min, + utc.tm_sec, + ) + + @staticmethod + def serialize_iso(attr, **kwargs): + """Serialize Datetime object into ISO-8601 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: SerializationError if format invalid. + """ + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") + if microseconds: + microseconds = "." + microseconds + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec + ) + return date + microseconds + "Z" + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise SerializationError(msg) from err + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise TypeError(msg) from err + + @staticmethod + def serialize_unix(attr, **kwargs): + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param Datetime attr: Object to be serialized. 
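`serialize_iso`, `serialize_rfc`, and `serialize_unix` above target three different wire formats, all treating naive datetimes as UTC; the sketch below reproduces their outputs for one sample timestamp (the fixed English day/month names mirror the serializer's locale-independent tables).

```python
import calendar
from datetime import datetime, timezone

dt = datetime(2024, 5, 1, 13, 30, 15, tzinfo=timezone.utc)
utc = dt.utctimetuple()

# iso-8601 (fractional seconds padded to at least three digits)
print(dt.strftime("%Y-%m-%dT%H:%M:%S") + ".000Z")             # 2024-05-01T13:30:15.000Z

# rfc-1123, using fixed English names rather than the process locale
days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
print("{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
    days[utc.tm_wday], utc.tm_mday, months[utc.tm_mon - 1], utc.tm_year,
    utc.tm_hour, utc.tm_min, utc.tm_sec))                      # Wed, 01 May 2024 13:30:15 GMT

# unix-time: whole seconds since the epoch
print(calendar.timegm(utc))                                    # 1714570215
```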
+ :rtype: int + :raises: SerializationError if format invalid + """ + if isinstance(attr, int): + return attr + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + return int(calendar.timegm(attr.utctimetuple())) + except AttributeError: + raise TypeError("Unix time object must be valid Datetime object.") + + +def rest_key_extractor(attr, attr_desc, data): + key = attr_desc["key"] + working_data = data + + while "." in key: + # Need the cast, as for some reasons "split" is typed as list[str | Any] + dict_keys = cast(List[str], _FLATTEN.split(key)) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = working_data.get(working_key, data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + return working_data.get(key) + + +def rest_key_case_insensitive_extractor(attr, attr_desc, data): + key = attr_desc["key"] + working_data = data + + while "." in key: + dict_keys = _FLATTEN.split(key) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + if working_data: + return attribute_key_case_insensitive_extractor(key, None, working_data) + + +def last_rest_key_extractor(attr, attr_desc, data): + """Extract the attribute in "data" based on the last part of the JSON path key.""" + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_extractor(dict_keys[-1], None, data) + + +def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): + """Extract the attribute in "data" based on the last part of the JSON path key. + + This is the case insensitive version of "last_rest_key_extractor" + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) + + +def attribute_key_extractor(attr, _, data): + return data.get(attr) + + +def attribute_key_case_insensitive_extractor(attr, _, data): + found_key = None + lower_attr = attr.lower() + for key in data: + if lower_attr == key.lower(): + found_key = key + break + + return data.get(found_key) + + +def _extract_name_from_internal_type(internal_type): + """Given an internal type XML description, extract correct XML name with namespace. 
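`rest_key_extractor` above walks a dotted key through nested dictionaries; the simplified sketch below shows the idea (the real function splits with `_FLATTEN` so escaped dots inside a key segment are not treated as separators).

```python
def extract(key: str, data: dict):
    parts = key.split(".")           # simplification: no escaped-dot handling here
    for part in parts[:-1]:
        data = data.get(part)
        if data is None:             # a missing intermediate node means the leaf is absent too
            return None
    return data.get(parts[-1])

payload = {"properties": {"displayName": "my pool"}}
print(extract("properties.displayName", payload))   # my pool
print(extract("properties.missing", payload))       # None
```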
+ + :param dict internal_type: An model type + :rtype: tuple + :returns: A tuple XML name + namespace dict + """ + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + xml_name = internal_type_xml_map.get("name", internal_type.__name__) + xml_ns = internal_type_xml_map.get("ns", None) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + return xml_name + + +def xml_key_extractor(attr, attr_desc, data): + if isinstance(data, dict): + return None + + # Test if this model is XML ready first + if not isinstance(data, ET.Element): + return None + + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + + # Look for a children + is_iter_type = attr_desc["type"].startswith("[") + is_wrapped = xml_desc.get("wrapped", False) + internal_type = attr_desc.get("internalType", None) + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + + # Integrate namespace if necessary + xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None)) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + + # If it's an attribute, that's simple + if xml_desc.get("attr", False): + return data.get(xml_name) + + # If it's x-ms-text, that's simple too + if xml_desc.get("text", False): + return data.text + + # Scenario where I take the local name: + # - Wrapped node + # - Internal type is an enum (considered basic types) + # - Internal type has no XML/Name node + if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)): + children = data.findall(xml_name) + # If internal type has a local name and it's not a list, I use that name + elif not is_iter_type and internal_type and "name" in internal_type_xml_map: + xml_name = _extract_name_from_internal_type(internal_type) + children = data.findall(xml_name) + # That's an array + else: + if internal_type: # Complex type, ignore itemsName and use the complex type name + items_name = _extract_name_from_internal_type(internal_type) + else: + items_name = xml_desc.get("itemsName", xml_name) + children = data.findall(items_name) + + if len(children) == 0: + if is_iter_type: + if is_wrapped: + return None # is_wrapped no node, we want None + else: + return [] # not wrapped, assume empty list + return None # Assume it's not there, maybe an optional node. + + # If is_iter_type and not wrapped, return all found children + if is_iter_type: + if not is_wrapped: + return children + else: # Iter and wrapped, should have found one node only (the wrap one) + if len(children) != 1: + raise DeserializationError( + "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( + xml_name + ) + ) + return list(children[0]) # Might be empty list and that's ok. + + # Here it's not a itertype, we should have found one element only or empty + if len(children) > 1: + raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name)) + return children[0] + + +class Deserializer(object): + """Response object model deserializer. + + :param dict classes: Class type dictionary for deserializing complex types. + :ivar list key_extractors: Ordered list of extractors to be used by this deserializer. 
+ """ + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + + def __init__(self, classes: Optional[Mapping[str, type]] = None): + self.deserialize_type = { + "iso-8601": Deserializer.deserialize_iso, + "rfc-1123": Deserializer.deserialize_rfc, + "unix-time": Deserializer.deserialize_unix, + "duration": Deserializer.deserialize_duration, + "date": Deserializer.deserialize_date, + "time": Deserializer.deserialize_time, + "decimal": Deserializer.deserialize_decimal, + "long": Deserializer.deserialize_long, + "bytearray": Deserializer.deserialize_bytearray, + "base64": Deserializer.deserialize_base64, + "object": self.deserialize_object, + "[]": self.deserialize_iter, + "{}": self.deserialize_dict, + } + self.deserialize_expected_types = { + "duration": (isodate.Duration, datetime.timedelta), + "iso-8601": (datetime.datetime), + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_extractors = [rest_key_extractor, xml_key_extractor] + # Additional properties only works if the "rest_key_extractor" is used to + # extract the keys. Making it to work whatever the key extractor is too much + # complicated, with no real scenario for now. + # So adding a flag to disable additional properties detection. This flag should be + # used if your expect the deserialization to NOT come from a JSON REST syntax. + # Otherwise, result are unexpected + self.additional_properties_detection = True + + def __call__(self, target_obj, response_data, content_type=None): + """Call the deserializer to process a REST response. + + :param str target_obj: Target data type to deserialize to. + :param requests.Response response_data: REST response object. + :param str content_type: Swagger "produces" if available. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + """ + data = self._unpack_content(response_data, content_type) + return self._deserialize(target_obj, data) + + def _deserialize(self, target_obj, data): + """Call the deserializer on a model. + + Data needs to be already deserialized as JSON or XML ElementTree + + :param str target_obj: Target data type to deserialize to. + :param object data: Object to deserialize. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, str): + return self.deserialize_data(data, response) + elif isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None or data is CoreNull: + return data + try: + attributes = response._attribute_map # type: ignore + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. 
If it's not empty, someone has a real "additionalProperties"... + if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name # type: ignore + raise DeserializationError(msg) from err + else: + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. + + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + """ + if target is None: + return None, None + + if isinstance(target, str): + try: + target = self.dependencies[target] + except KeyError: + return target, target + + try: + target = target._classify(data, self.dependencies) # type: ignore + except AttributeError: + pass # Target is not a Model, no classify + return target, target.__class__.__name__ # type: ignore + + def failsafe_deserialize(self, target_obj, data, content_type=None): + """Ignores any errors encountered in deserialization, + and falls back to not deserializing the object. Recommended + for use in error deserialization, as we want to return the + HttpResponseError to users, and not have them deal with + a deserialization error. + + :param str target_obj: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :param str content_type: Swagger "produces" if available. + """ + try: + return self(target_obj, data, content_type=content_type) + except: + _LOGGER.debug( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + @staticmethod + def _unpack_content(raw_data, content_type=None): + """Extract the correct structure for deserialization. 
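`_build_additional_properties` above keeps whatever the payload contains beyond the attribute map; a simplified standalone version (using only the first segment of each flattened key, which the real code obtains via `_FLATTEN`):

```python
attribute_map = {"display_name": {"key": "displayName", "type": "str"}}
payload = {"displayName": "my pool", "extraField": 42}

known = {desc["key"].split(".")[0] for desc in attribute_map.values() if desc["key"] != ""}
additional = {k: v for k, v in payload.items() if k not in known}
print(additional)   # {'extraField': 42} ends up in additional_properties
```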
+ + If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. + if we can't, raise. Your Pipeline should have a RawDeserializer. + + If not a pipeline response and raw_data is bytes or string, use content-type + to decode it. If no content-type, try JSON. + + If raw_data is something else, bypass all logic and return it directly. + + :param raw_data: Data to be processed. + :param content_type: How to parse if raw_data is a string/bytes. + :raises JSONDecodeError: If JSON is requested and parsing is impossible. + :raises UnicodeDecodeError: If bytes is not UTF8 + """ + # Assume this is enough to detect a Pipeline Response without importing it + context = getattr(raw_data, "context", {}) + if context: + if RawDeserializer.CONTEXT_NAME in context: + return context[RawDeserializer.CONTEXT_NAME] + raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") + + # Assume this is enough to recognize universal_http.ClientResponse without importing it + if hasattr(raw_data, "body"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) + + # Assume this enough to recognize requests.Response without importing it. + if hasattr(raw_data, "_content_consumed"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) + + if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"): + return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore + return raw_data + + def _instantiate_model(self, response, attrs, additional_properties=None): + """Instantiate a response model passing in deserialized args. + + :param response: The response model class. + :param d_attrs: The deserialized response attributes. + """ + if callable(response): + subtype = getattr(response, "_subtype_map", {}) + try: + readonly = [k for k, v in response._validation.items() if v.get("readonly")] + const = [k for k, v in response._validation.items() if v.get("constant")] + kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} + response_obj = response(**kwargs) + for attr in readonly: + setattr(response_obj, attr, attrs.get(attr)) + if additional_properties: + response_obj.additional_properties = additional_properties + return response_obj + except TypeError as err: + msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore + raise DeserializationError(msg + str(err)) + else: + try: + for attr, value in attrs.items(): + setattr(response, attr, value) + return response + except Exception as exp: + msg = "Unable to populate response model. " + msg += "Type: {}, Error: {}".format(type(response), exp) + raise DeserializationError(msg) + + def deserialize_data(self, data, data_type): + """Process data for deserialization according to data type. + + :param str data: The response string to be deserialized. + :param str data_type: The type to deserialize to. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. 
+ """ + if data is None: + return data + + try: + if not data_type: + return data + if data_type in self.basic_types.values(): + return self.deserialize_basic(data, data_type) + if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data + + is_a_text_parsing_type = lambda x: x not in ["object", "[]", r"{}"] + if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: + return None + data_val = self.deserialize_type[data_type](data) + return data_val + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.deserialize_type: + return self.deserialize_type[iter_type](data, data_type[1:-1]) + + obj_type = self.dependencies[data_type] + if issubclass(obj_type, Enum): + if isinstance(data, ET.Element): + data = data.text + return self.deserialize_enum(data, obj_type) + + except (ValueError, TypeError, AttributeError) as err: + msg = "Unable to deserialize response data." + msg += " Data: {}, {}".format(data, data_type) + raise DeserializationError(msg) from err + else: + return self._deserialize(obj_type, data) + + def deserialize_iter(self, attr, iter_type): + """Deserialize an iterable. + + :param list attr: Iterable to be deserialized. + :param str iter_type: The type of object in the iterable. + :rtype: list + """ + if attr is None: + return None + if isinstance(attr, ET.Element): # If I receive an element here, get the children + attr = list(attr) + if not isinstance(attr, (list, set)): + raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) + return [self.deserialize_data(a, iter_type) for a in attr] + + def deserialize_dict(self, attr, dict_type): + """Deserialize a dictionary. + + :param dict/list attr: Dictionary to be deserialized. Also accepts + a list of key, value pairs. + :param str dict_type: The object type of the items in the dictionary. + :rtype: dict + """ + if isinstance(attr, list): + return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} + + if isinstance(attr, ET.Element): + # Transform value into {"Key": "value"} + attr = {el.tag: el.text for el in attr} + return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} + + def deserialize_object(self, attr, **kwargs): + """Deserialize a generic object. + This will be handled as a dictionary. + + :param dict attr: Dictionary to be deserialized. + :rtype: dict + :raises: TypeError if non-builtin datatype encountered. + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + # Do no recurse on XML, just return the tree as-is + return attr + if isinstance(attr, str): + return self.deserialize_basic(attr, "str") + obj_type = type(attr) + if obj_type in self.basic_types: + return self.deserialize_basic(attr, self.basic_types[obj_type]) + if obj_type is _long_type: + return self.deserialize_long(attr) + + if obj_type == dict: + deserialized = {} + for key, value in attr.items(): + try: + deserialized[key] = self.deserialize_object(value, **kwargs) + except ValueError: + deserialized[key] = None + return deserialized + + if obj_type == list: + deserialized = [] + for obj in attr: + try: + deserialized.append(self.deserialize_object(obj, **kwargs)) + except ValueError: + pass + return deserialized + + else: + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) + + def deserialize_basic(self, attr, data_type): + """Deserialize basic builtin data type from string. 
+ Will attempt to convert to str, int, float and bool. + This function will also accept '1', '0', 'true' and 'false' as + valid bool values. + + :param str attr: response string to be deserialized. + :param str data_type: deserialization data type. + :rtype: str, int, float or bool + :raises: TypeError if string format is not valid. + """ + # If we're here, data is supposed to be a basic type. + # If it's still an XML node, take the text + if isinstance(attr, ET.Element): + attr = attr.text + if not attr: + if data_type == "str": + # None or '', node is empty string. + return "" + else: + # None or '', node with a strong type is None. + # Don't try to model "empty bool" or "empty int" + return None + + if data_type == "bool": + if attr in [True, False, 1, 0]: + return bool(attr) + elif isinstance(attr, str): + if attr.lower() in ["true", "1"]: + return True + elif attr.lower() in ["false", "0"]: + return False + raise TypeError("Invalid boolean value: {}".format(attr)) + + if data_type == "str": + return self.deserialize_unicode(attr) + return eval(data_type)(attr) # nosec + + @staticmethod + def deserialize_unicode(data): + """Preserve unicode objects in Python 2, otherwise return data + as a string. + + :param str data: response string to be deserialized. + :rtype: str or unicode + """ + # We might be here because we have an enum modeled as string, + # and we try to deserialize a partial dict with enum inside + if isinstance(data, Enum): + return data + + # Consider this is real string + try: + if isinstance(data, unicode): # type: ignore + return data + except NameError: + return str(data) + else: + return str(data) + + @staticmethod + def deserialize_enum(data, enum_obj): + """Deserialize string into enum object. + + If the string is not a valid enum value it will be returned as-is + and a warning will be logged. + + :param str data: Response string to be deserialized. If this value is + None or invalid it will be returned as-is. + :param Enum enum_obj: Enum object to deserialize to. + :rtype: Enum + """ + if isinstance(data, enum_obj) or data is None: + return data + if isinstance(data, Enum): + data = data.value + if isinstance(data, int): + # Workaround. We might consider remove it in the future. + try: + return list(enum_obj.__members__.values())[data] + except IndexError: + error = "{!r} is not a valid index for enum {!r}" + raise DeserializationError(error.format(data, enum_obj)) + try: + return enum_obj(str(data)) + except ValueError: + for enum_value in enum_obj: + if enum_value.value.lower() == str(data).lower(): + return enum_value + # We don't fail anymore for unknown value, we deserialize as a string + _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) + return Deserializer.deserialize_unicode(data) + + @staticmethod + def deserialize_bytearray(attr): + """Deserialize string into bytearray. + + :param str attr: response string to be deserialized. + :rtype: bytearray + :raises: TypeError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return bytearray(b64decode(attr)) # type: ignore + + @staticmethod + def deserialize_base64(attr): + """Deserialize base64 encoded string into string. + + :param str attr: response string to be deserialized. + :rtype: bytearray + :raises: TypeError if string format invalid. 
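The tolerant boolean handling in `deserialize_basic` above accepts '1', '0', 'true', and 'false' in any case; shown standalone:

```python
def parse_bool(attr):
    if attr in [True, False, 1, 0]:
        return bool(attr)
    if isinstance(attr, str):
        if attr.lower() in ["true", "1"]:
            return True
        if attr.lower() in ["false", "0"]:
            return False
    raise TypeError("Invalid boolean value: {}".format(attr))

print(parse_bool("TRUE"), parse_bool("0"), parse_bool(1))   # True False True
```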
+ """ + if isinstance(attr, ET.Element): + attr = attr.text + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return b64decode(encoded) + + @staticmethod + def deserialize_decimal(attr): + """Deserialize string into Decimal object. + + :param str attr: response string to be deserialized. + :rtype: Decimal + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + return decimal.Decimal(str(attr)) # type: ignore + except decimal.DecimalException as err: + msg = "Invalid decimal {}".format(attr) + raise DeserializationError(msg) from err + + @staticmethod + def deserialize_long(attr): + """Deserialize string into long (Py2) or int (Py3). + + :param str attr: response string to be deserialized. + :rtype: long or int + :raises: ValueError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return _long_type(attr) # type: ignore + + @staticmethod + def deserialize_duration(attr): + """Deserialize ISO-8601 formatted string into TimeDelta object. + + :param str attr: response string to be deserialized. + :rtype: TimeDelta + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + duration = isodate.parse_duration(attr) + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize duration object." + raise DeserializationError(msg) from err + else: + return duration + + @staticmethod + def deserialize_date(attr): + """Deserialize ISO-8601 formatted string into Date object. + + :param str attr: response string to be deserialized. + :rtype: Date + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + return isodate.parse_date(attr, defaultmonth=0, defaultday=0) + + @staticmethod + def deserialize_time(attr): + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + return isodate.parse_time(attr) + + @staticmethod + def deserialize_rfc(attr): + """Deserialize RFC-1123 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: Datetime + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + parsed_date = email.utils.parsedate_tz(attr) # type: ignore + date_obj = datetime.datetime( + *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) + ) + if not date_obj.tzinfo: + date_obj = date_obj.astimezone(tz=TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to rfc datetime object." + raise DeserializationError(msg) from err + else: + return date_obj + + @staticmethod + def deserialize_iso(attr): + """Deserialize ISO-8601 formatted string into Datetime object. 
+ + :param str attr: response string to be deserialized. + :rtype: Datetime + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + attr = attr.upper() # type: ignore + match = Deserializer.valid_date.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize datetime object." + raise DeserializationError(msg) from err + else: + return date_obj + + @staticmethod + def deserialize_unix(attr): + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param int attr: Object to be serialized. + :rtype: Datetime + :raises: DeserializationError if format invalid + """ + if isinstance(attr, ET.Element): + attr = int(attr.text) # type: ignore + try: + attr = int(attr) + date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to unix datetime object." + raise DeserializationError(msg) from err + else: + return date_obj diff --git a/sdk/batch/azure-batch/azure/batch/_vendor.py b/sdk/batch/azure-batch/azure/batch/_vendor.py new file mode 100644 index 000000000000..54bd8a902e0d --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/_vendor.py @@ -0,0 +1,58 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from abc import ABC +from typing import Optional, TYPE_CHECKING + +from azure.core import MatchConditions + +from ._configuration import BatchClientConfiguration + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core import PipelineClient + + from ._serialization import Deserializer, Serializer + + +class BatchClientMixinABC(ABC): + """DO NOT use this class. 
It is for internal typing use only.""" + + _client: "PipelineClient" + _config: BatchClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" + + +def quote_etag(etag: Optional[str]) -> Optional[str]: + if not etag or etag == "*": + return etag + if etag.startswith("W/"): + return etag + if etag.startswith('"') and etag.endswith('"'): + return etag + if etag.startswith("'") and etag.endswith("'"): + return etag + return '"' + etag + '"' + + +def prep_if_match(etag: Optional[str], match_condition: Optional[MatchConditions]) -> Optional[str]: + if match_condition == MatchConditions.IfNotModified: + if_match = quote_etag(etag) if etag else None + return if_match + if match_condition == MatchConditions.IfPresent: + return "*" + return None + + +def prep_if_none_match(etag: Optional[str], match_condition: Optional[MatchConditions]) -> Optional[str]: + if match_condition == MatchConditions.IfModified: + if_none_match = quote_etag(etag) if etag else None + return if_none_match + if match_condition == MatchConditions.IfMissing: + return "*" + return None diff --git a/sdk/batch/azure-batch/azure/batch/_version.py b/sdk/batch/azure-batch/azure/batch/_version.py index f94a6c795829..be71c81bd282 100644 --- a/sdk/batch/azure-batch/azure/batch/_version.py +++ b/sdk/batch/azure-batch/azure/batch/_version.py @@ -1,12 +1,9 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "14.2.0" +VERSION = "1.0.0b1" diff --git a/sdk/batch/azure-batch/azure/batch/aio/__init__.py b/sdk/batch/azure-batch/azure/batch/aio/__init__.py new file mode 100644 index 000000000000..8ebcd33efe11 --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/aio/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
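The `_vendor.py` helpers above translate an ETag plus a `MatchConditions` value into the raw `If-Match`/`If-None-Match` header values the service expects. A minimal sketch of that mapping (the return values follow directly from the code shown; the helpers are internal to the package and are imported here only for illustration):

from azure.core import MatchConditions
from azure.batch._vendor import quote_etag, prep_if_match, prep_if_none_match

quote_etag("abc123")                                        # '"abc123"'  (strong ETags get quoted)
quote_etag('W/"abc123"')                                    # weak ETags pass through unchanged
prep_if_match("abc123", MatchConditions.IfNotModified)      # '"abc123"'  -> sent as If-Match
prep_if_match(None, MatchConditions.IfPresent)              # '*'
prep_if_none_match("abc123", MatchConditions.IfModified)    # '"abc123"'  -> sent as If-None-Match
prep_if_none_match(None, MatchConditions.IfMissing)         # '*'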
+# -------------------------------------------------------------------------- + +from ._client import BatchClient + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "BatchClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/sdk/batch/azure-batch/azure/batch/aio/_client.py b/sdk/batch/azure-batch/azure/batch/aio/_client.py new file mode 100644 index 000000000000..ed1891564486 --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/aio/_client.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Awaitable, TYPE_CHECKING +from typing_extensions import Self + +from azure.core import AsyncPipelineClient +from azure.core.pipeline import policies +from azure.core.rest import AsyncHttpResponse, HttpRequest + +from .._serialization import Deserializer, Serializer +from ._configuration import BatchClientConfiguration +from ._operations import BatchClientOperationsMixin + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class BatchClient(BatchClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """BatchClient. + + :param endpoint: Batch account endpoint (for example: + https://batchaccount.eastus2.batch.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-02-01.19.0". Note that overriding this default value may result in unsupported behavior. 
+ :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: "AsyncTokenCredential", **kwargs: Any) -> None: + _endpoint = "{endpoint}" + self._config = BatchClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + kwargs["request_id_header_name"] = "client-request-id" + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> Self: + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/batch/azure-batch/azure/batch/aio/_configuration.py b/sdk/batch/azure-batch/azure/batch/aio/_configuration.py new file mode 100644 index 000000000000..c731f9655b43 --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/aio/_configuration.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
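The asynchronous client above is used like other track-2 Azure SDK clients. A minimal usage sketch, assuming azure-identity is installed and using the example endpoint from the docstring; `list_pools` is one of the mixin operations added later in this patch:

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.batch.aio import BatchClient

async def main() -> None:
    credential = DefaultAzureCredential()
    # The client is an async context manager (see __aenter__/__aexit__ above).
    async with BatchClient("https://batchaccount.eastus2.batch.azure.com", credential) as client:
        async for pool in client.list_pools():
            print(pool["id"])  # generated models are MutableMapping-compatible
    await credential.close()

asyncio.run(main())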
+# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline import policies + +from .._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class BatchClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for BatchClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Batch account endpoint (for example: + https://batchaccount.eastus2.batch.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-02-01.19.0". Note that overriding this default value may result in unsupported behavior. + :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: "AsyncTokenCredential", **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "2024-02-01.19.0") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://batch.core.windows.net//.default"]) + kwargs.setdefault("sdk_moniker", "batch/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/batch/azure-batch/azure/batch/aio/_operations/__init__.py b/sdk/batch/azure-batch/azure/batch/aio/_operations/__init__.py new file mode 100644 index 000000000000..7897453a2980 --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/aio/_operations/__init__.py @@ -0,0 +1,19 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
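Because `BatchClient` forwards its keyword arguments to this configuration, pipeline behaviour can be tuned at construction time. A small sketch under that assumption (the policy and scope values below are only examples, mirroring the defaults shown above):

from azure.core.pipeline.policies import AsyncRetryPolicy
from azure.identity.aio import DefaultAzureCredential
from azure.batch.aio import BatchClient

client = BatchClient(
    "https://batchaccount.eastus2.batch.azure.com",
    DefaultAzureCredential(),
    api_version="2024-02-01.19.0",                            # popped by the configuration above
    retry_policy=AsyncRetryPolicy(retry_total=10),            # replaces the default AsyncRetryPolicy
    credential_scopes=["https://batch.core.windows.net//.default"],
)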
+# -------------------------------------------------------------------------- + +from ._operations import BatchClientOperationsMixin + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "BatchClientOperationsMixin", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py b/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py new file mode 100644 index 000000000000..1f314be0e9fc --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py @@ -0,0 +1,7537 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +import json +import sys +from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, List, Optional, Type, TypeVar +import urllib.parse + +from azure.core import MatchConditions +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceModifiedError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... 
import models as _models +from ..._model_base import SdkJSONEncoder, _deserialize +from ..._operations._operations import ( + build_batch_create_job_request, + build_batch_create_job_schedule_request, + build_batch_create_node_user_request, + build_batch_create_pool_request, + build_batch_create_task_collection_request, + build_batch_create_task_request, + build_batch_delete_job_request, + build_batch_delete_job_schedule_request, + build_batch_delete_node_file_request, + build_batch_delete_node_user_request, + build_batch_delete_pool_request, + build_batch_delete_task_file_request, + build_batch_delete_task_request, + build_batch_disable_job_request, + build_batch_disable_job_schedule_request, + build_batch_disable_node_scheduling_request, + build_batch_disable_pool_auto_scale_request, + build_batch_enable_job_request, + build_batch_enable_job_schedule_request, + build_batch_enable_node_scheduling_request, + build_batch_enable_pool_auto_scale_request, + build_batch_evaluate_pool_auto_scale_request, + build_batch_get_application_request, + build_batch_get_job_request, + build_batch_get_job_schedule_request, + build_batch_get_job_task_counts_request, + build_batch_get_node_extension_request, + build_batch_get_node_file_properties_request, + build_batch_get_node_file_request, + build_batch_get_node_remote_login_settings_request, + build_batch_get_node_request, + build_batch_get_pool_request, + build_batch_get_task_file_properties_request, + build_batch_get_task_file_request, + build_batch_get_task_request, + build_batch_job_schedule_exists_request, + build_batch_list_applications_request, + build_batch_list_job_preparation_and_release_task_status_request, + build_batch_list_job_schedules_request, + build_batch_list_jobs_from_schedule_request, + build_batch_list_jobs_request, + build_batch_list_node_extensions_request, + build_batch_list_node_files_request, + build_batch_list_nodes_request, + build_batch_list_pool_node_counts_request, + build_batch_list_pool_usage_metrics_request, + build_batch_list_pools_request, + build_batch_list_sub_tasks_request, + build_batch_list_supported_images_request, + build_batch_list_task_files_request, + build_batch_list_tasks_request, + build_batch_pool_exists_request, + build_batch_reactivate_task_request, + build_batch_reboot_node_request, + build_batch_remove_nodes_request, + build_batch_replace_job_request, + build_batch_replace_job_schedule_request, + build_batch_replace_node_user_request, + build_batch_replace_pool_properties_request, + build_batch_replace_task_request, + build_batch_resize_pool_request, + build_batch_stop_pool_resize_request, + build_batch_terminate_job_request, + build_batch_terminate_job_schedule_request, + build_batch_terminate_task_request, + build_batch_update_job_request, + build_batch_update_job_schedule_request, + build_batch_update_pool_request, + build_batch_upload_node_logs_request, +) +from .._vendor import BatchClientMixinABC + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class BatchClientOperationsMixin(BatchClientMixinABC): # pylint: disable=too-many-public-methods + + @distributed_trace + def list_applications( + self, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + **kwargs: Any + ) -> 
AsyncIterable["_models.BatchApplication"]: + """Lists all of the applications available in the specified Account. + + This operation returns only Applications and versions that are available for + use on Compute Nodes; that is, that can be used in an Package reference. For + administrator information about applications and versions that are not yet + available to Compute Nodes, use the Azure portal or the Azure Resource Manager + API. + + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :return: An iterator like instance of BatchApplication + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchApplication] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchApplication]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_applications_request( + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchApplication], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + 
response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def get_application( + self, + application_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> _models.BatchApplication: + """Gets information about the specified Application. + + This operation returns only Applications and versions that are available for + use on Compute Nodes; that is, that can be used in an Package reference. For + administrator information about Applications and versions that are not yet + available to Compute Nodes, use the Azure portal or the Azure Resource Manager + API. + + :param application_id: The ID of the Application. Required. + :type application_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: BatchApplication. The BatchApplication is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchApplication + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchApplication] = kwargs.pop("cls", None) + + _request = build_batch_get_application_request( + application_id=application_id, + timeout=timeout, + ocpdate=ocpdate, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) 
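A short consumption sketch for the two application operations in this hunk; the application ID is hypothetical and `client` is an already-constructed `azure.batch.aio.BatchClient`:

async for app in client.list_applications(max_results=100):
    print(app["id"])                               # BatchApplication is MutableMapping-compatible

app = await client.get_application("my-app-id")    # hypothetical application ID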
+ response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchApplication, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_pool_usage_metrics( + self, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + starttime: Optional[datetime.datetime] = None, + endtime: Optional[datetime.datetime] = None, + filter: Optional[str] = None, + **kwargs: Any + ) -> AsyncIterable["_models.BatchPoolUsageMetrics"]: + """Lists the usage metrics, aggregated by Pool across individual time intervals, + for the specified Account. + + If you do not specify a $filter clause including a poolId, the response + includes all Pools that existed in the Account in the time range of the + returned aggregation intervals. If you do not specify a $filter clause + including a startTime or endTime these filters default to the start and end + times of the last aggregation interval currently available; that is, only the + last aggregation interval is returned. + + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword starttime: The earliest time from which to include metrics. This must be at least two + and + a half hours before the current time. If not specified this defaults to the + start time of the last aggregation interval currently available. Default value is None. + :paramtype starttime: ~datetime.datetime + :keyword endtime: The latest time from which to include metrics. This must be at least two + hours + before the current time. If not specified this defaults to the end time of the + last aggregation interval currently available. Default value is None. + :paramtype endtime: ~datetime.datetime + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + Default value is None. 
+ :paramtype filter: str + :return: An iterator like instance of BatchPoolUsageMetrics + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchPoolUsageMetrics] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchPoolUsageMetrics]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_pool_usage_metrics_request( + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + starttime=starttime, + endtime=endtime, + filter=filter, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchPoolUsageMetrics], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def create_pool( # pylint: disable=inconsistent-return-statements + self, + pool: _models.BatchPoolCreateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Creates a Pool to the specified Account. + + When naming Pools, avoid including sensitive information such as user names or + secret project names. This information may appear in telemetry logs accessible + to Microsoft Support engineers. + + :param pool: The Pool to be created. Required. 
+ :type pool: ~azure.batch.models.BatchPoolCreateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(pool, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_create_pool_request( + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def list_pools( + self, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any + ) -> AsyncIterable["_models.BatchPool"]: + """Lists all of the Pools in the specified Account. + + Lists all of the Pools in the specified Account. + + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + Default value is None. + :paramtype filter: str + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. + :paramtype expand: list[str] + :return: An iterator like instance of BatchPool + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchPool] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchPool]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_pools_request( + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + select=select, + expand=expand, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchPool], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + 
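The `filter`/`select` keywords documented above are passed through as OData query options on the list call. A hedged sketch (the filter expression and selected property names are assumptions, not taken from this patch):

async for pool in client.list_pools(
    filter="state eq 'active'",
    select=["id", "state"],
):
    print(pool["id"], pool["state"])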
return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def delete_pool( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a Pool from the specified Account. + + When you request that a Pool be deleted, the following actions occur: the Pool + state is set to deleting; any ongoing resize operation on the Pool are stopped; + the Batch service starts resizing the Pool to zero Compute Nodes; any Tasks + running on existing Compute Nodes are terminated and requeued (as if a resize + Pool operation had been requested with the default requeue option); finally, + the Pool is removed from the system. Because running Tasks are requeued, the + user can rerun these Tasks by updating their Job to target a different Pool. + The Tasks can then run on the new Pool. If you want to override the requeue + behavior, then you should call resize Pool explicitly to shrink the Pool to + zero size before deleting the Pool. If you call an Update, Patch or Delete API + on a Pool in the deleting state, it will fail with HTTP status code 409 with + error code PoolBeingDeleted. + + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_delete_pool_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def pool_exists( + self, + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> bool: + """Gets basic properties of a Pool. + + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. 
+ :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: bool + :rtype: bool + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_pool_exists_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + return 200 <= response.status_code <= 299 + + @distributed_trace_async + async def get_pool( + self, + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.BatchPool: + """Gets information about the specified Pool. 
+ + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. + :paramtype expand: list[str] + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: BatchPool. The BatchPool is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchPool + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchPool] = kwargs.pop("cls", None) + + _request = build_batch_get_pool_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + select=select, + expand=expand, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchPool, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def update_pool( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + pool: _models.BatchPoolUpdateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Updates the properties of the specified Pool. + + This only replaces the Pool properties specified in the request. For example, + if the Pool has a StartTask associated with it, and a request does not specify + a StartTask element, then the Pool keeps the existing StartTask. + + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :param pool: The pool properties to update. Required. + :type pool: ~azure.batch.models.BatchPoolUpdateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(pool, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_update_pool_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statements + self, pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + ) -> None: + """Disables automatic scaling for a Pool. + + Disables automatic scaling for a Pool. + + :param pool_id: The ID of the Pool on which to disable automatic scaling. Required. + :type pool_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_disable_pool_auto_scale_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + content: _models.BatchPoolEnableAutoScaleContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Enables automatic scaling for a Pool. + + You cannot enable automatic scaling on a Pool if a resize operation is in + progress on the Pool. If automatic scaling of the Pool is currently disabled, + you must specify a valid autoscale formula as part of the request. If automatic + scaling of the Pool is already enabled, you may specify a new autoscale formula + and/or a new evaluation interval. You cannot call this API for the same Pool + more than once every 30 seconds. + + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :param content: The options to use for enabling automatic scaling. Required. + :type content: ~azure.batch.models.BatchPoolEnableAutoScaleContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_enable_pool_auto_scale_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + 
response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def evaluate_pool_auto_scale( + self, + pool_id: str, + content: _models.BatchPoolEvaluateAutoScaleContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> _models.AutoScaleRun: + """Gets the result of evaluating an automatic scaling formula on the Pool. + + This API is primarily for validating an autoscale formula, as it simply returns + the result without applying the formula to the Pool. The Pool must have auto + scaling enabled in order to evaluate a formula. + + :param pool_id: The ID of the Pool on which to evaluate the automatic scaling formula. + Required. + :type pool_id: str + :param content: The options to use for evaluating the automatic scaling formula. Required. + :type content: ~azure.batch.models.BatchPoolEvaluateAutoScaleContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: AutoScaleRun. The AutoScaleRun is compatible with MutableMapping + :rtype: ~azure.batch.models.AutoScaleRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[_models.AutoScaleRun] = kwargs.pop("cls", None) + + _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_evaluate_pool_auto_scale_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AutoScaleRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def resize_pool( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + content: _models.BatchPoolResizeContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Changes the number of Compute Nodes that are assigned to a Pool. + + You can only resize a Pool when its allocation state is steady. If the Pool is + already resizing, the request fails with status code 409. When you resize a + Pool, the Pool's allocation state changes from steady to resizing. You cannot + resize Pools which are configured for automatic scaling. If you try to do this, + the Batch service returns an error 409. If you resize a Pool downwards, the + Batch service chooses which Compute Nodes to remove. To remove specific Compute + Nodes, use the Pool remove Compute Nodes API instead. + + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :param content: The options to use for resizing the pool. Required. + :type content: ~azure.batch.models.BatchPoolResizeContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. 
Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_resize_pool_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def stop_pool_resize( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Stops an ongoing resize operation on the Pool. + + This does not restore the Pool to its previous state before the resize + operation: it only stops any further changes being made, and the Pool maintains + its current state. After stopping, the Pool stabilizes at the number of Compute + Nodes it was at when the stop operation was done. 
During the stop operation, + the Pool allocation state changes first to stopping and then to steady. A + resize operation need not be an explicit resize Pool request; this API can also + be used to halt the initial sizing of the Pool when it is created. + + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_stop_pool_resize_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + 
response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def replace_pool_properties( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + pool: _models.BatchPoolReplaceContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Updates the properties of the specified Pool. + + This fully replaces all the updatable properties of the Pool. For example, if + the Pool has a StartTask associated with it and if StartTask is not specified + with this request, then the Batch service will remove the existing StartTask. + + :param pool_id: The ID of the Pool to update. Required. + :type pool_id: str + :param pool: The options to use for replacing properties on the pool. Required. + :type pool: ~azure.batch.models.BatchPoolReplaceContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. 
+ :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(pool, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_replace_pool_properties_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def remove_nodes( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + content: _models.BatchNodeRemoveContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Removes Compute Nodes from the specified Pool. + + This operation can only run when the allocation state of the Pool is steady. + When this operation runs, the allocation state changes from steady to resizing. + Each request may remove up to 100 nodes. + + :param pool_id: The ID of the Pool to get. Required. + :type pool_id: str + :param content: The options to use for removing the node. Required. + :type content: ~azure.batch.models.BatchNodeRemoveContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. 
+ :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_remove_nodes_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) 
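+ # Correlation and caching headers returned by the service (DataServiceId, ETag,
+ # Last-Modified, client-request-id, request-id) are collected into response_headers.
+ # A caller that needs them can supply a `cls` callback, for example (illustrative only):
+ # cls=lambda pipeline_response, deserialized, headers: headers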
+ response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def list_supported_images( + self, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + **kwargs: Any + ) -> AsyncIterable["_models.BatchSupportedImage"]: + """Lists all Virtual Machine Images supported by the Azure Batch service. + + Lists all Virtual Machine Images supported by the Azure Batch service. + + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + Default value is None. + :paramtype filter: str + :return: An iterator like instance of BatchSupportedImage + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchSupportedImage] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchSupportedImage]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_supported_images_request( + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def 
extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchSupportedImage], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace + def list_pool_node_counts( + self, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + **kwargs: Any + ) -> AsyncIterable["_models.BatchPoolNodeCounts"]: + """Gets the number of Compute Nodes in each state, grouped by Pool. Note that the + numbers returned may not always be up to date. If you need exact node counts, + use a list query. + + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + Default value is None. 
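+ For example, a clause such as ``poolId eq 'mypool'`` (an illustrative Pool ID)
+ restricts the counts to a single Pool.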
+ :paramtype filter: str + :return: An iterator like instance of BatchPoolNodeCounts + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchPoolNodeCounts] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchPoolNodeCounts]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_pool_node_counts_request( + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchPoolNodeCounts], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def delete_job( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a Job. + + Deleting a Job also deletes all Tasks that are part of that Job, and all Job + statistics. 
This also overrides the retention period for Task data; that is, if + the Job contains Tasks which are still retained on Compute Nodes, the Batch + services deletes those Tasks' working directories and all their contents. When + a Delete Job request is received, the Batch service sets the Job to the + deleting state. All update operations on a Job that is in deleting state will + fail with status code 409 (Conflict), with additional information indicating + that the Job is being deleted. + + :param job_id: The ID of the Job to delete. Required. + :type job_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_delete_job_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def get_job( + self, + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.BatchJob: + """Gets information about the specified Job. + + Gets information about the specified Job. + + :param job_id: The ID of the Job. Required. + :type job_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. 
+ :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. + :paramtype expand: list[str] + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: BatchJob. The BatchJob is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchJob + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchJob] = kwargs.pop("cls", None) + + _request = build_batch_get_job_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + select=select, + expand=expand, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchJob, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return 
deserialized # type: ignore + + @distributed_trace_async + async def update_job( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + job: _models.BatchJobUpdateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Updates the properties of the specified Job. + + This replaces only the Job properties specified in the request. For example, if + the Job has constraints, and a request does not specify the constraints + element, then the Job keeps the existing constraints. + + :param job_id: The ID of the Job whose properties you want to update. Required. + :type job_id: str + :param job: The options to use for updating the Job. Required. + :type job: ~azure.batch.models.BatchJobUpdateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(job, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_update_job_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def replace_job( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + job: _models.BatchJob, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Updates the properties of the specified Job. + + This fully replaces all the updatable properties of the Job. For example, if + the Job has constraints associated with it and if constraints is not specified + with this request, then the Batch service will remove the existing constraints. + + :param job_id: The ID of the Job whose properties you want to update. 
Required. + :type job_id: str + :param job: A job with updated properties. Required. + :type job: ~azure.batch.models.BatchJob + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(job, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_replace_job_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise 
HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def disable_job( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + content: _models.BatchJobDisableContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Disables the specified Job, preventing new Tasks from running. + + The Batch Service immediately moves the Job to the disabling state. Batch then + uses the disableTasks parameter to determine what to do with the currently + running Tasks of the Job. The Job remains in the disabling state until the + disable operation is completed and all Tasks have been dealt with according to + the disableTasks option; the Job then moves to the disabled state. No new Tasks + are started under the Job until it moves back to active state. If you try to + disable a Job that is in any state other than active, disabling, or disabled, + the request fails with status code 409. + + :param job_id: The ID of the Job to disable. Required. + :type job_id: str + :param content: The options to use for disabling the Job. Required. + :type content: ~azure.batch.models.BatchJobDisableContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_disable_job_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def enable_job( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Enables the specified Job, allowing new Tasks to run. + + When you call this API, the Batch service sets a disabled Job to the enabling + state. After the this operation is completed, the Job moves to the active + state, and scheduling of new Tasks under the Job resumes. The Batch service + does not allow a Task to remain in the active state for more than 180 days. 
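As a rough usage sketch of the disable/enable lifecycle described above: assuming `client` is an already-authenticated asynchronous BatchClient (the exact import path for the async client is an assumption), that a Job with ID "my-job" exists, and that `BatchJobDisableContent` accepts a `disable_tasks` keyword mirroring the REST `disableTasks` parameter (also an assumption), the calls might look like:

    from azure.batch import models

    async def pause_and_resume(client) -> None:
        # Move the Job to the disabling state; requeue any currently running Tasks.
        await client.disable_job(
            "my-job",
            models.BatchJobDisableContent(disable_tasks="requeue"),  # field name assumed from disableTasks
        )
        # Later, allow new Tasks to be scheduled under the Job again.
        await client.enable_job("my-job")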
+ Therefore, if you enable a Job containing active Tasks which were added more + than 180 days ago, those Tasks will not run. + + :param job_id: The ID of the Job to enable. Required. + :type job_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_enable_job_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", 
response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def terminate_job( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + parameters: Optional[_models.BatchJobTerminateContent] = None, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Terminates the specified Job, marking it as completed. + + When a Terminate Job request is received, the Batch service sets the Job to the + terminating state. The Batch service then terminates any running Tasks + associated with the Job and runs any required Job release Tasks. Then the Job + moves into the completed state. If there are any Tasks in the Job in the active + state, they will remain in the active state. Once a Job is terminated, new + Tasks cannot be added and any remaining active Tasks will not be scheduled. + + :param job_id: The ID of the Job to terminate. Required. + :type job_id: str + :param parameters: The options to use for terminating the Job. Default value is None. + :type parameters: ~azure.batch.models.BatchJobTerminateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + if parameters is not None: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + else: + _content = None + + _request = build_batch_terminate_job_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def create_job( # pylint: disable=inconsistent-return-statements + self, + job: _models.BatchJobCreateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Creates a Job to the specified Account. + + The Batch service supports two ways to control the work done as part of a Job. + In the first approach, the user specifies a Job Manager Task. The Batch service + launches this Task when it is ready to start the Job. The Job Manager Task + controls all other Tasks that run under this Job, by using the Task APIs. In + the second approach, the user directly controls the execution of Tasks under an + active Job, by using the Task APIs. 
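A minimal sketch of the terminate flow implemented just above, assuming `client` is an authenticated asynchronous BatchClient and that a Job with ID "my-job" exists; the `parameters` body is optional per the signature, so the simplest call omits it:

    # Moves the Job to the terminating state, terminates running Tasks,
    # runs any required Job Release Tasks, then marks the Job completed.
    await client.terminate_job("my-job")

A termination reason can be supplied by passing a `BatchJobTerminateContent` instance as the optional `parameters` argument; its field names are not shown in this hunk, so they are left out of the sketch.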
Also note: when naming Jobs, avoid + including sensitive information such as user names or secret project names. + This information may appear in telemetry logs accessible to Microsoft Support + engineers. + + :param job: The Job to be created. Required. + :type job: ~azure.batch.models.BatchJobCreateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(job, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_create_job_request( + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def list_jobs( + self, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any + ) -> AsyncIterable["_models.BatchJob"]: + """Lists all of the Jobs in the specified Account. + + Lists all of the Jobs in the specified Account. 
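A hedged sketch of creating a Job against an existing Pool, assuming `client` is an authenticated asynchronous BatchClient; the `id`, `pool_info`, and `pool_id` field names and the `BatchPoolInfo` model are assumptions based on the REST poolInfo shape, and "my-job"/"my-pool" are hypothetical IDs:

    from azure.batch import models

    job = models.BatchJobCreateContent(
        id="my-job",                                         # hypothetical Job ID
        pool_info=models.BatchPoolInfo(pool_id="my-pool"),   # assumed model and field names
    )
    # Returns None on success (HTTP 201); raises HttpResponseError otherwise.
    await client.create_job(job)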
+ + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + Default value is None. + :paramtype filter: str + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. + :paramtype expand: list[str] + :return: An iterator like instance of BatchJob + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchJob] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchJob]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_jobs_request( + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + select=select, + expand=expand, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) 
+ response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace + def list_jobs_from_schedule( + self, + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any + ) -> AsyncIterable["_models.BatchJob"]: + """Lists the Jobs that have been created under the specified Job Schedule. + + Lists the Jobs that have been created under the specified Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule from which you want to get a list of Jobs. + Required. + :type job_schedule_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + Default value is None. + :paramtype filter: str + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. 
+ :paramtype expand: list[str] + :return: An iterator like instance of BatchJob + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchJob] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchJob]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_jobs_from_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + select=select, + expand=expand, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchJob], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace + def list_job_preparation_and_release_task_status( # pylint: disable=name-too-long + self, + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + **kwargs: Any + ) -> AsyncIterable["_models.BatchJobPreparationAndReleaseTaskStatus"]: + """Lists the execution status of the Job Preparation and Job Release Task for the + specified Job across the Compute Nodes where the Job has run. 
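Both list operations above return an AsyncItemPaged of BatchJob, so callers iterate with `async for`. A rough sketch, assuming `client` is an authenticated asynchronous BatchClient; the OData filter string and the schedule ID are illustrative only:

    # Page through active Jobs, requesting only a couple of fields per item.
    async for job in client.list_jobs(filter="state eq 'active'", select=["id", "state"], max_results=100):
        print(job.id)

    # Jobs created by a specific Job Schedule.
    async for job in client.list_jobs_from_schedule("my-schedule"):
        print(job.id)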
+ + This API returns the Job Preparation and Job Release Task status on all Compute + Nodes that have run the Job Preparation or Job Release Task. This includes + Compute Nodes which have since been removed from the Pool. If this API is + invoked on a Job which has no Job Preparation or Job Release Task, the Batch + service returns HTTP status code 409 (Conflict) with an error code of + JobPreparationTaskNotSpecified. + + :param job_id: The ID of the Job. Required. + :type job_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + Default value is None. + :paramtype filter: str + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :return: An iterator like instance of BatchJobPreparationAndReleaseTaskStatus + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchJobPreparationAndReleaseTaskStatus] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchJobPreparationAndReleaseTaskStatus]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_job_preparation_and_release_task_status_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + select=select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def 
extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchJobPreparationAndReleaseTaskStatus], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def get_job_task_counts( + self, job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + ) -> _models.BatchTaskCountsResult: + """Gets the Task counts for the specified Job. + + Task counts provide a count of the Tasks by active, running or completed Task + state, and a count of Tasks which succeeded or failed. Tasks in the preparing + state are counted as running. Note that the numbers returned may not always be + up to date. If you need exact task counts, use a list query. + + :param job_id: The ID of the Job. Required. + :type job_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: BatchTaskCountsResult. 
The BatchTaskCountsResult is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchTaskCountsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchTaskCountsResult] = kwargs.pop("cls", None) + + _request = build_batch_get_job_task_counts_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchTaskCountsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def job_schedule_exists( + self, + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> bool: + """Checks the specified Job Schedule exists. + + Checks the specified Job Schedule exists. + + :param job_schedule_id: The ID of the Job Schedule which you want to check. Required. + :type job_schedule_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. 
+ :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: bool + :rtype: bool + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_job_schedule_exists_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + return 200 <= response.status_code <= 299 + + @distributed_trace_async + async def delete_job_schedule( # pylint: disable=inconsistent-return-statements + self, + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + 
if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a Job Schedule from the specified Account. + + When you delete a Job Schedule, this also deletes all Jobs and Tasks under that + schedule. When Tasks are deleted, all the files in their working directories on + the Compute Nodes are also deleted (the retention period is ignored). The Job + Schedule statistics are no longer accessible once the Job Schedule is deleted, + though they are still counted towards Account lifetime statistics. + + :param job_schedule_id: The ID of the Job Schedule to delete. Required. + :type job_schedule_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_delete_job_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def get_job_schedule( + self, + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.BatchJobSchedule: + """Gets information about the specified Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule to get. Required. + :type job_schedule_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. 
Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. + :paramtype expand: list[str] + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: BatchJobSchedule. The BatchJobSchedule is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchJobSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchJobSchedule] = kwargs.pop("cls", None) + + _request = build_batch_get_job_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + select=select, + expand=expand, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchJobSchedule, response.json()) + + if cls: + 
return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def update_job_schedule( # pylint: disable=inconsistent-return-statements + self, + job_schedule_id: str, + job_schedule: _models.BatchJobScheduleUpdateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Updates the properties of the specified Job Schedule. + + This replaces only the Job Schedule properties specified in the request. For + example, if the schedule property is not specified with this request, then the + Batch service will keep the existing schedule. Changes to a Job Schedule only + impact Jobs created by the schedule after the update has taken place; currently + running Jobs are unaffected. + + :param job_schedule_id: The ID of the Job Schedule to update. Required. + :type job_schedule_id: str + :param job_schedule: The options to use for updating the Job Schedule. Required. + :type job_schedule: ~azure.batch.models.BatchJobScheduleUpdateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
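Editor's note: a sketch of the optimistic-concurrency pattern the etag/match_condition keywords above support, pairing get_job_schedule with a partial update_job_schedule. Assumes `client` is an already-constructed async BatchClient; the `e_tag` attribute name is an assumption.

from azure.core import MatchConditions


async def update_if_unchanged(client, job_schedule_id, update):
    # 'update' is a BatchJobScheduleUpdateContent; only the properties it sets are changed.
    current = await client.get_job_schedule(job_schedule_id)
    await client.update_job_schedule(
        job_schedule_id,
        update,
        etag=current.e_tag,                             # attribute name is an assumption
        match_condition=MatchConditions.IfNotModified,  # service answers 412 if the schedule changed meanwhile
    )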
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(job_schedule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_update_job_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def replace_job_schedule( # pylint: disable=inconsistent-return-statements + self, + job_schedule_id: str, + job_schedule: _models.BatchJobSchedule, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Updates the properties of the specified Job Schedule. + + This fully replaces all the updatable properties of the Job Schedule. For + example, if the schedule property is not specified with this request, then the + Batch service will remove the existing schedule. 
Changes to a Job Schedule only + impact Jobs created by the schedule after the update has taken place; currently + running Jobs are unaffected. + + :param job_schedule_id: The ID of the Job Schedule to update. Required. + :type job_schedule_id: str + :param job_schedule: A Job Schedule with updated properties. Required. + :type job_schedule: ~azure.batch.models.BatchJobSchedule + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(job_schedule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_replace_job_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, 
stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def disable_job_schedule( # pylint: disable=inconsistent-return-statements + self, + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Disables a Job Schedule. + + No new Jobs will be created until the Job Schedule is enabled again. + + :param job_schedule_id: The ID of the Job Schedule to disable. Required. + :type job_schedule_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
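Editor's note: a sketch of the full-replace semantics described for replace_job_schedule above: read the schedule, modify it, and send the whole object back, since unspecified updatable properties are removed. `client` is assumed, and the MetadataItem model and `metadata` attribute names are assumptions.

from azure.batch import models as batch_models


async def replace_schedule(client, job_schedule_id: str) -> None:
    # Full replace: any updatable property not carried over is removed/reset,
    # so send back the complete object rather than a sparse patch.
    schedule = await client.get_job_schedule(job_schedule_id)
    schedule.metadata = [batch_models.MetadataItem(name="rotated", value="true")]  # names are assumptions
    await client.replace_job_schedule(job_schedule_id, schedule)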
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_disable_job_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def enable_job_schedule( # pylint: disable=inconsistent-return-statements + self, + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Enables a Job Schedule. + + Enables a Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule to enable. Required. + :type job_schedule_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. 
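Editor's note: a minimal sketch pairing the disable_job_schedule and enable_job_schedule operations above; `client` is assumed to be an existing async BatchClient.

async def pause_and_resume(client, job_schedule_id: str) -> None:
    # While disabled, the schedule creates no new Jobs; Jobs it already created keep running.
    await client.disable_job_schedule(job_schedule_id)
    # ... perform maintenance ...
    await client.enable_job_schedule(job_schedule_id)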
+ :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_enable_job_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def terminate_job_schedule( # pylint: disable=inconsistent-return-statements + self, + job_schedule_id: str, + *, + timeout: Optional[int] = None, + ocpdate: 
Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Terminates a Job Schedule. + + Terminates a Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule to terminates. Required. + :type job_schedule_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_terminate_job_schedule_request( + job_schedule_id=job_schedule_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, 
response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def create_job_schedule( # pylint: disable=inconsistent-return-statements + self, + job_schedule: _models.BatchJobScheduleCreateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Creates a Job Schedule to the specified Account. + + Creates a Job Schedule to the specified Account. + + :param job_schedule: The Job Schedule to be created. Required. + :type job_schedule: ~azure.batch.models.BatchJobScheduleCreateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(job_schedule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_create_job_schedule_request( + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", 
response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def list_job_schedules( + self, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any + ) -> AsyncIterable["_models.BatchJobSchedule"]: + """Lists all of the Job Schedules in the specified Account. + + Lists all of the Job Schedules in the specified Account. + + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + Default value is None. + :paramtype filter: str + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. 
+ :paramtype expand: list[str] + :return: An iterator like instance of BatchJobSchedule + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchJobSchedule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchJobSchedule]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_job_schedules_request( + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + select=select, + expand=expand, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchJobSchedule], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def create_task( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + task: _models.BatchTaskCreateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Creates a Task to the specified Job. + + The maximum lifetime of a Task from addition to completion is 180 days. If a + Task has not completed within 180 days of being added it will be terminated by + the Batch service and left in whatever state it was in at that time. + + :param job_id: The ID of the Job to which the Task is to be created. Required. 
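Editor's note: a sketch of adding a single Task with the create_task operation above; `client` is assumed, and the BatchTaskCreateContent field names are assumptions.

from azure.batch import models as batch_models


async def add_hello_task(client, job_id: str) -> None:
    task = batch_models.BatchTaskCreateContent(  # field names are assumptions
        id="hello-task",
        command_line="/bin/bash -c 'echo hello from Batch'",
    )
    # Returns None; a 201 response means the Task was created.
    await client.create_task(job_id, task)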
+ :type job_id: str + :param task: The Task to be created. Required. + :type task: ~azure.batch.models.BatchTaskCreateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(task, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_create_task_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def list_tasks( + self, + job_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + **kwargs: Any + ) -> AsyncIterable["_models.BatchTask"]: + """Lists all of the Tasks that are associated with the specified Job. + + For multi-instance Tasks, information such as affinityId, executionInfo and + nodeInfo refer to the primary Task. Use the list subtasks API to retrieve + information about subtasks. + + :param job_id: The ID of the Job. Required. 
+ :type job_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + Default value is None. + :paramtype filter: str + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. + :paramtype expand: list[str] + :return: An iterator like instance of BatchTask + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchTask] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchTask]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_tasks_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + select=select, + expand=expand, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchTask], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access 
+ _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def create_task_collection( + self, + job_id: str, + task_collection: _models.BatchTaskGroup, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> _models.BatchTaskAddCollectionResult: + """Adds a collection of Tasks to the specified Job. + + Note that each Task must have a unique ID. The Batch service may not return the + results for each Task in the same order the Tasks were submitted in this + request. If the server times out or the connection is closed during the + request, the request may have been partially or fully processed, or not at all. + In such cases, the user should re-issue the request. Note that it is up to the + user to correctly handle failures when re-issuing a request. For example, you + should use the same Task IDs during a retry so that if the prior operation + succeeded, the retry will not create extra Tasks unexpectedly. If the response + contains any Tasks which failed to add, a client can retry the request. In a + retry, it is most efficient to resubmit only Tasks that failed to add, and to + omit Tasks that were successfully added on the first attempt. The maximum + lifetime of a Task from addition to completion is 180 days. If a Task has not + completed within 180 days of being added it will be terminated by the Batch + service and left in whatever state it was in at that time. + + :param job_id: The ID of the Job to which the Task collection is to be added. Required. + :type job_id: str + :param task_collection: The Tasks to be added. Required. + :type task_collection: ~azure.batch.models.BatchTaskGroup + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: BatchTaskAddCollectionResult. 
The BatchTaskAddCollectionResult is compatible with + MutableMapping + :rtype: ~azure.batch.models.BatchTaskAddCollectionResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[_models.BatchTaskAddCollectionResult] = kwargs.pop("cls", None) + + _content = json.dumps(task_collection, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_create_task_collection_request( + job_id=job_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchTaskAddCollectionResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_task( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a Task from the specified Job. + + When a Task is deleted, all of the files in its directory on the Compute Node + where it ran are also deleted (regardless of the retention time). For + multi-instance Tasks, the delete Task operation applies synchronously to the + primary task; subtasks and their files are then deleted asynchronously in the + background. 
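Editor's note: the create_task_collection docstring above recommends re-issuing failed requests with the same Task IDs and resubmitting only the Tasks that failed to add; a rough sketch of that retry loop follows. `client` is assumed, and the BatchTaskGroup, BatchTaskAddCollectionResult, and BatchTaskAddStatus attribute/enum names are assumptions.

from azure.batch import models as batch_models


async def add_tasks_with_retry(client, job_id: str, tasks, max_attempts: int = 3) -> None:
    pending = list(tasks)  # BatchTaskCreateContent items, each with a unique id
    for _ in range(max_attempts):
        result = await client.create_task_collection(job_id, batch_models.BatchTaskGroup(value=pending))
        failed_ids = {
            r.task_id
            for r in (result.value or [])  # result/item attribute names are assumptions
            if r.status == batch_models.BatchTaskAddStatus.SERVER_ERROR
        }
        if not failed_ids:
            return
        # Keep the same Task IDs so a retry never creates duplicate Tasks.
        pending = [t for t in pending if t.id in failed_ids]
    raise RuntimeError(f"{len(pending)} task(s) still failing after {max_attempts} attempts")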
+ + :param job_id: The ID of the Job from which to delete the Task. Required. + :type job_id: str + :param task_id: The ID of the Task to delete. Required. + :type task_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_delete_task_request( + job_id=job_id, + task_id=task_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", 
response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def get_task( + self, + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + expand: Optional[List[str]] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.BatchTask: + """Gets information about the specified Task. + + For multi-instance Tasks, information such as affinityId, executionInfo and + nodeInfo refer to the primary Task. Use the list subtasks API to retrieve + information about subtasks. + + :param job_id: The ID of the Job that contains the Task. Required. + :type job_id: str + :param task_id: The ID of the Task to get information about. Required. + :type task_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :keyword expand: An OData $expand clause. Default value is None. + :paramtype expand: list[str] + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: BatchTask. 
The BatchTask is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchTask + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchTask] = kwargs.pop("cls", None) + + _request = build_batch_get_task_request( + job_id=job_id, + task_id=task_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + select=select, + expand=expand, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchTask, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def replace_task( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + task_id: str, + task: _models.BatchTask, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Updates the properties of the specified Task. + + :param job_id: The ID of the Job containing the Task. Required. + :type job_id: str + :param task_id: The ID of the Task to update. Required. 
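Editor's note: a sketch of the read-modify-write pattern that get_task and replace_task support, reusing the conditional-request keywords described above. `client` is assumed; the BatchTaskConstraints model, its field names, and the `e_tag`/`constraints` attributes are assumptions.

import datetime

from azure.core import MatchConditions
from azure.batch import models as batch_models


async def extend_retry_budget(client, job_id: str, task_id: str) -> None:
    task = await client.get_task(job_id, task_id)
    task.constraints = batch_models.BatchTaskConstraints(  # model/field names are assumptions
        max_wall_clock_time=datetime.timedelta(hours=2),
        max_task_retry_count=3,
    )
    # Send the Task back only if it has not changed since it was read.
    await client.replace_task(
        job_id, task_id, task,
        etag=task.e_tag,  # attribute name is an assumption
        match_condition=MatchConditions.IfNotModified,
    )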
+ :type task_id: str + :param task: The Task to update. Required. + :type task: ~azure.batch.models.BatchTask + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(task, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_replace_task_request( + job_id=job_id, + task_id=task_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise 
HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def list_sub_tasks( + self, + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + **kwargs: Any + ) -> AsyncIterable["_models.BatchSubtask"]: + """Lists all of the subtasks that are associated with the specified multi-instance + Task. + + If the Task is not a multi-instance Task then this returns an empty collection. + + :param job_id: The ID of the Job. Required. + :type job_id: str + :param task_id: The ID of the Task. Required. + :type task_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword select: An OData $select clause. Default value is None. 
+ :paramtype select: list[str] + :return: An iterator like instance of BatchSubtask + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchSubtask] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchSubtask]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_sub_tasks_request( + job_id=job_id, + task_id=task_id, + timeout=timeout, + ocpdate=ocpdate, + select=select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchSubtask], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def terminate_task( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Terminates the specified Task. + + When the Task has been terminated, it moves to the completed state. 
For + multi-instance Tasks, the terminate Task operation applies synchronously to the + primary task; subtasks are then terminated asynchronously in the background. + + :param job_id: The ID of the Job containing the Task. Required. + :type job_id: str + :param task_id: The ID of the Task to terminate. Required. + :type task_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_terminate_task_request( + job_id=job_id, + task_id=task_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + 
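+        # Editorial note: hypothetical usage sketch, not produced by the code generator.
+        # Assuming `client` is an authenticated async BatchClient (e.g. azure.batch.aio.BatchClient)
+        # and the job/task IDs below are placeholders, a caller might invoke this operation as:
+        #     await client.terminate_task("my-job", "my-task")
+        #     task = await client.get_task("my-job", "my-task")  # task.state should now be "completed"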
response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def reactivate_task( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Reactivates a Task, allowing it to run again even if its retry count has been + exhausted. + + Reactivation makes a Task eligible to be retried again up to its maximum retry + count. The Task's state is changed to active. As the Task is no longer in the + completed state, any previous exit code or failure information is no longer + available after reactivation. Each time a Task is reactivated, its retry count + is reset to 0. Reactivation will fail for Tasks that are not completed or that + previously completed successfully (with an exit code of 0). Additionally, it + will fail if the Job has completed (or is terminating or deleting). + + :param job_id: The ID of the Job containing the Task. Required. + :type job_id: str + :param task_id: The ID of the Task to reactivate. Required. + :type task_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_reactivate_task_request( + job_id=job_id, + task_id=task_id, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def delete_task_file( # pylint: disable=inconsistent-return-statements + self, + job_id: str, + task_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + recursive: Optional[bool] = None, + **kwargs: Any + ) -> None: + """Deletes the specified Task file from the Compute Node where the Task ran. + + Deletes the specified Task file from the Compute Node where the Task ran. + + :param job_id: The ID of the Job that contains the Task. Required. + :type job_id: str + :param task_id: The ID of the Task whose file you want to retrieve. Required. + :type task_id: str + :param file_path: The path to the Task file that you want to get the content of. Required. + :type file_path: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword recursive: Whether to delete children of a directory. If the filePath parameter + represents + a directory instead of a file, you can set recursive to true to delete the + directory and all of the files and subdirectories in it. If recursive is false + then the directory must be empty or deletion will fail. Default value is None. + :paramtype recursive: bool + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_delete_task_file_request( + job_id=job_id, + task_id=task_id, + file_path=file_path, + timeout=timeout, + ocpdate=ocpdate, + recursive=recursive, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def get_task_file( + self, + job_id: str, + task_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + ocp_range: Optional[str] = None, + **kwargs: Any + ) -> AsyncIterator[bytes]: + """Returns the content of the specified Task file. + + :param job_id: The ID of the Job that contains the Task. Required. + :type job_id: str + :param task_id: The ID of the Task whose file you want to retrieve. Required. + :type task_id: str + :param file_path: The path to the Task file that you want to get the content of. Required. + :type file_path: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. 
+ :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. + The + format is bytes=startRange-endRange. Default value is None. + :paramtype ocp_range: str + :return: AsyncIterator[bytes] + :rtype: AsyncIterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_batch_get_task_file_request( + job_id=job_id, + task_id=task_id, + file_path=file_path, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + ocp_range=ocp_range, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", True) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["ocp-batch-file-isdirectory"] = self._deserialize( + "bool", response.headers.get("ocp-batch-file-isdirectory") + ) + response_headers["ocp-batch-file-mode"] = self._deserialize("str", response.headers.get("ocp-batch-file-mode")) + response_headers["ocp-batch-file-url"] = self._deserialize("str", response.headers.get("ocp-batch-file-url")) + response_headers["ocp-creation-time"] = self._deserialize("rfc-1123", response.headers.get("ocp-creation-time")) + response_headers["request-id"] = self._deserialize("str", 
response.headers.get("request-id")) + response_headers["content-type"] = self._deserialize("str", response.headers.get("content-type")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_task_file_properties( + self, + job_id: str, + task_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> bool: + """Gets the properties of the specified Task file. + + :param job_id: The ID of the Job that contains the Task. Required. + :type job_id: str + :param task_id: The ID of the Task whose file you want to retrieve. Required. + :type task_id: str + :param file_path: The path to the Task file that you want to get the content of. Required. + :type file_path: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. 
+ :paramtype if_unmodified_since: ~datetime.datetime + :return: bool + :rtype: bool + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_get_task_file_properties_request( + job_id=job_id, + task_id=task_id, + file_path=file_path, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["ocp-batch-file-isdirectory"] = self._deserialize( + "bool", response.headers.get("ocp-batch-file-isdirectory") + ) + response_headers["ocp-batch-file-mode"] = self._deserialize("str", response.headers.get("ocp-batch-file-mode")) + response_headers["ocp-batch-file-url"] = self._deserialize("str", response.headers.get("ocp-batch-file-url")) + response_headers["ocp-creation-time"] = self._deserialize("rfc-1123", response.headers.get("ocp-creation-time")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + return 200 <= response.status_code <= 299 + + @distributed_trace + def list_task_files( + self, + job_id: str, + task_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + recursive: Optional[bool] = None, + **kwargs: Any + ) -> AsyncIterable["_models.BatchNodeFile"]: + """Lists the files in a Task's directory on its Compute Node. + + Lists the files in a Task's directory on its Compute Node. + + :param job_id: The ID of the Job that contains the Task. Required. + :type job_id: str + :param task_id: The ID of the Task whose files you want to list. Required. + :type task_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. 
+ :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + Default value is None. + :paramtype filter: str + :keyword recursive: Whether to list children of the Task directory. This parameter can be used + in + combination with the filter parameter to list specific type of files. Default value is None. + :paramtype recursive: bool + :return: An iterator like instance of BatchNodeFile + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchNodeFile] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchNodeFile]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_task_files_request( + job_id=job_id, + task_id=task_id, + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + recursive=recursive, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def create_node_user( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + user: _models.BatchNodeUserCreateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Adds a user Account to the specified Compute Node. + + You can add a user Account to a Compute Node only when it is in the idle or + running state. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the machine on which you want to create a user Account. Required. + :type node_id: str + :param user: The options to use for creating the user. Required. + :type user: ~azure.batch.models.BatchNodeUserCreateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(user, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_create_node_user_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + 
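+        # Editorial note: hypothetical usage sketch, not produced by the code generator.
+        # Assuming `client` is an authenticated async BatchClient and that
+        # azure.batch.models.BatchNodeUserCreateContent accepts the name/is_admin/password
+        # fields suggested by the REST schema, a caller might add a debug user to a node as:
+        #     user = BatchNodeUserCreateContent(name="debug-user", is_admin=True, password="<placeholder>")
+        #     await client.create_node_user("my-pool", "my-node-id", user)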
response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def delete_node_user( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + user_name: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Deletes a user Account from the specified Compute Node. + + You can delete a user Account to a Compute Node only when it is in the idle or + running state. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the machine on which you want to delete a user Account. Required. + :type node_id: str + :param user_name: The name of the user Account to delete. Required. + :type user_name: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_delete_node_user_request( + pool_id=pool_id, + node_id=node_id, + user_name=user_name, + timeout=timeout, + ocpdate=ocpdate, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def replace_node_user( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + user_name: str, + content: _models.BatchNodeUserUpdateContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, 
+ **kwargs: Any + ) -> None: + """Updates the password and expiration time of a user Account on the specified Compute Node. + + This operation replaces of all the updatable properties of the Account. For + example, if the expiryTime element is not specified, the current value is + replaced with the default value, not left unmodified. You can update a user + Account on a Compute Node only when it is in the idle or running state. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the machine on which you want to update a user Account. Required. + :type node_id: str + :param user_name: The name of the user Account to update. Required. + :type user_name: str + :param content: The options to use for updating the user. Required. + :type content: ~azure.batch.models.BatchNodeUserUpdateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_replace_node_user_request( + pool_id=pool_id, + node_id=node_id, + user_name=user_name, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = 
self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def get_node( + self, + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + **kwargs: Any + ) -> _models.BatchNode: + """Gets information about the specified Compute Node. + + Gets information about the specified Compute Node. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to get information about. Required. + :type node_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :return: BatchNode. The BatchNode is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchNode + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchNode] = kwargs.pop("cls", None) + + _request = build_batch_get_node_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + select=select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchNode, response.json()) + + if cls: + return 
cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def reboot_node( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + parameters: Optional[_models.BatchNodeRebootContent] = None, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Restarts the specified Compute Node. + + You can restart a Compute Node only if it is in an idle or running state. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to restart. Required. + :type node_id: str + :param parameters: The options to use for rebooting the Compute Node. Default value is None. + :type parameters: ~azure.batch.models.BatchNodeRebootContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + if parameters is not None: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + else: + _content = None + + _request = build_batch_reboot_node_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", 
response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def disable_node_scheduling( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + parameters: Optional[_models.BatchNodeDisableSchedulingContent] = None, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Disables Task scheduling on the specified Compute Node. + + You can disable Task scheduling on a Compute Node only if its current + scheduling state is enabled. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node on which you want to disable Task scheduling. + Required. + :type node_id: str + :param parameters: The options to use for disabling scheduling on the Compute Node. Default + value is None. + :type parameters: ~azure.batch.models.BatchNodeDisableSchedulingContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + if parameters is not None: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + else: + _content = None + + _request = build_batch_disable_node_scheduling_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = 
self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def enable_node_scheduling( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Enables Task scheduling on the specified Compute Node. + + You can enable Task scheduling on a Compute Node only if its current scheduling + state is disabled. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node on which you want to enable Task scheduling. + Required. + :type node_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_enable_node_scheduling_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return 
cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def get_node_remote_login_settings( + self, + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> _models.BatchNodeRemoteLoginSettings: + """Gets the settings required for remote login to a Compute Node. + + Before you can remotely login to a Compute Node using the remote login + settings, you must create a user Account on the Compute Node. This API can be + invoked only on Pools created with the virtual machine configuration property. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node for which to obtain the remote login settings. + Required. + :type node_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: BatchNodeRemoteLoginSettings. The BatchNodeRemoteLoginSettings is compatible with + MutableMapping + :rtype: ~azure.batch.models.BatchNodeRemoteLoginSettings + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchNodeRemoteLoginSettings] = kwargs.pop("cls", None) + + _request = build_batch_get_node_remote_login_settings_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = 
response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchNodeRemoteLoginSettings, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def upload_node_logs( + self, + pool_id: str, + node_id: str, + content: _models.UploadBatchServiceLogsContent, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> _models.UploadBatchServiceLogsResult: + """Upload Azure Batch service log files from the specified Compute Node to Azure + Blob Storage. + + This is for gathering Azure Batch service log files in an automated fashion + from Compute Nodes if you are experiencing an error and wish to escalate to + Azure support. The Azure Batch service log files should be shared with Azure + support to aid in debugging issues with the Batch service. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node for which you want to get the Remote Desktop + Protocol file. Required. + :type node_id: str + :param content: The Azure Batch service log files upload options. Required. + :type content: ~azure.batch.models.UploadBatchServiceLogsContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :return: UploadBatchServiceLogsResult. 
The UploadBatchServiceLogsResult is compatible with + MutableMapping + :rtype: ~azure.batch.models.UploadBatchServiceLogsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[_models.UploadBatchServiceLogsResult] = kwargs.pop("cls", None) + + _content = json.dumps(content, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_batch_upload_node_logs_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.UploadBatchServiceLogsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_nodes( + self, + pool_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + select: Optional[List[str]] = None, + **kwargs: Any + ) -> AsyncIterable["_models.BatchNode"]: + """Lists the Compute Nodes in the specified Pool. + + Lists the Compute Nodes in the specified Pool. + + :param pool_id: The ID of the Pool from which you want to list Compute Nodes. Required. + :type pool_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + Default value is None. + :paramtype filter: str + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :return: An iterator like instance of BatchNode + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchNode] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchNode]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_nodes_request( + pool_id=pool_id, + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + select=select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchNode], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + 
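# Illustrative usage sketch for the async node operations above: iterating the
# AsyncItemPaged returned by list_nodes and re-enabling Task scheduling on an
# offline node. The client entry point (azure.batch.aio.BatchClient), its
# constructor arguments, and the use of azure.identity.aio.DefaultAzureCredential
# are assumptions inferred from this diff, not confirmed documentation; the
# endpoint and pool ID are placeholders.
import asyncio

from azure.identity.aio import DefaultAzureCredential  # assumed credential type
from azure.batch.aio import BatchClient  # assumed async client location


async def main() -> None:
    endpoint = "https://<account>.<region>.batch.azure.com"  # placeholder
    async with DefaultAzureCredential() as credential:
        async with BatchClient(endpoint, credential) as client:
            # list_nodes returns an AsyncItemPaged; consume it with `async for`.
            async for node in client.list_nodes("my-pool", select=["id", "state"]):
                print(node.id, node.state)
                # enable_node_scheduling only succeeds when scheduling on the
                # node is currently disabled (node state "offline").
                if node.state == "offline":
                    await client.enable_node_scheduling("my-pool", node.id)


asyncio.run(main())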
@distributed_trace_async + async def get_node_extension( + self, + pool_id: str, + node_id: str, + extension_name: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + select: Optional[List[str]] = None, + **kwargs: Any + ) -> _models.BatchNodeVMExtension: + """Gets information about the specified Compute Node Extension. + + Gets information about the specified Compute Node Extension. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node that contains the extensions. Required. + :type node_id: str + :param extension_name: The name of the Compute Node Extension that you want to get information + about. Required. + :type extension_name: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :return: BatchNodeVMExtension. The BatchNodeVMExtension is compatible with MutableMapping + :rtype: ~azure.batch.models.BatchNodeVMExtension + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BatchNodeVMExtension] = kwargs.pop("cls", None) + + _request = build_batch_get_node_extension_request( + pool_id=pool_id, + node_id=node_id, + extension_name=extension_name, + timeout=timeout, + ocpdate=ocpdate, + select=select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if _stream: 
+ deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BatchNodeVMExtension, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_node_extensions( + self, + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + select: Optional[List[str]] = None, + **kwargs: Any + ) -> AsyncIterable["_models.BatchNodeVMExtension"]: + """Lists the Compute Nodes Extensions in the specified Pool. + + Lists the Compute Nodes Extensions in the specified Pool. + + :param pool_id: The ID of the Pool that contains Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to list extensions. Required. + :type node_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword select: An OData $select clause. Default value is None. + :paramtype select: list[str] + :return: An iterator like instance of BatchNodeVMExtension + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchNodeVMExtension] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchNodeVMExtension]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_node_extensions_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + select=select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, 
**path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchNodeVMExtension], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def delete_node_file( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + recursive: Optional[bool] = None, + **kwargs: Any + ) -> None: + """Deletes the specified file from the Compute Node. + + Deletes the specified file from the Compute Node. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node. Required. + :type node_id: str + :param file_path: The path to the file or directory. Required. + :type file_path: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword recursive: Whether to delete children of a directory. If the filePath parameter + represents + a directory instead of a file, you can set recursive to true to delete the + directory and all of the files and subdirectories in it. If recursive is false + then the directory must be empty or deletion will fail. Default value is None. 
+ :paramtype recursive: bool + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_delete_node_file_request( + pool_id=pool_id, + node_id=node_id, + file_path=file_path, + timeout=timeout, + ocpdate=ocpdate, + recursive=recursive, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def get_node_file( + self, + pool_id: str, + node_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + ocp_range: Optional[str] = None, + **kwargs: Any + ) -> AsyncIterator[bytes]: + """Returns the content of the specified Compute Node file. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node. Required. + :type node_id: str + :param file_path: The path to the file or directory. Required. + :type file_path: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. 
Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. + The + format is bytes=startRange-endRange. Default value is None. + :paramtype ocp_range: str + :return: AsyncIterator[bytes] + :rtype: AsyncIterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_batch_get_node_file_request( + pool_id=pool_id, + node_id=node_id, + file_path=file_path, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + ocp_range=ocp_range, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", True) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["ocp-batch-file-isdirectory"] = self._deserialize( + "bool", response.headers.get("ocp-batch-file-isdirectory") + ) + response_headers["ocp-batch-file-mode"] = self._deserialize("str", response.headers.get("ocp-batch-file-mode")) + response_headers["ocp-batch-file-url"] = self._deserialize("str", response.headers.get("ocp-batch-file-url")) + response_headers["ocp-creation-time"] = self._deserialize("rfc-1123", response.headers.get("ocp-creation-time")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + response_headers["content-type"] = self._deserialize("str", response.headers.get("content-type")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_node_file_properties( + self, + pool_id: str, + node_id: str, + file_path: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + if_modified_since: Optional[datetime.datetime] = None, + 
if_unmodified_since: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> bool: + """Gets the properties of the specified Compute Node file. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node. Required. + :type node_id: str + :param file_path: The path to the file or directory. Required. + :type file_path: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword if_modified_since: A timestamp indicating the last modified time of the resource known + to the + client. The operation will be performed only if the resource on the service has + been modified since the specified time. Default value is None. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource + known to the + client. The operation will be performed only if the resource on the service has + not been modified since the specified time. Default value is None. + :paramtype if_unmodified_since: ~datetime.datetime + :return: bool + :rtype: bool + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_get_node_file_properties_request( + pool_id=pool_id, + node_id=node_id, + file_path=file_path, + timeout=timeout, + ocpdate=ocpdate, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["ocp-batch-file-isdirectory"] = self._deserialize( + "bool", 
response.headers.get("ocp-batch-file-isdirectory") + ) + response_headers["ocp-batch-file-mode"] = self._deserialize("str", response.headers.get("ocp-batch-file-mode")) + response_headers["ocp-batch-file-url"] = self._deserialize("str", response.headers.get("ocp-batch-file-url")) + response_headers["ocp-creation-time"] = self._deserialize("rfc-1123", response.headers.get("ocp-creation-time")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + return 200 <= response.status_code <= 299 + + @distributed_trace + def list_node_files( + self, + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + max_results: Optional[int] = None, + filter: Optional[str] = None, + recursive: Optional[bool] = None, + **kwargs: Any + ) -> AsyncIterable["_models.BatchNodeFile"]: + """Lists all of the files in Task directories on the specified Compute Node. + + Lists all of the files in Task directories on the specified Compute Node. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node whose files you want to list. Required. + :type node_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. + :paramtype ocpdate: ~datetime.datetime + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 + applications can be returned. Default value is None. + :paramtype max_results: int + :keyword filter: An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + Default value is None. + :paramtype filter: str + :keyword recursive: Whether to list children of a directory. Default value is None. 
+ :paramtype recursive: bool + :return: An iterator like instance of BatchNodeFile + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchNodeFile] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BatchNodeFile]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_batch_list_node_files_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + max_results=max_results, + filter=filter, + recursive=recursive, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BatchNodeFile], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("odata.nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) diff --git a/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py b/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. 
+ +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/batch/azure-batch/azure/batch/aio/_patch.py b/sdk/batch/azure-batch/azure/batch/aio/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/aio/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/batch/azure-batch/azure/batch/aio/_vendor.py b/sdk/batch/azure-batch/azure/batch/aio/_vendor.py new file mode 100644 index 000000000000..9a400859182e --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/aio/_vendor.py @@ -0,0 +1,58 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from abc import ABC +from typing import Optional, TYPE_CHECKING + +from azure.core import MatchConditions + +from ._configuration import BatchClientConfiguration + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core import AsyncPipelineClient + + from .._serialization import Deserializer, Serializer + + +class BatchClientMixinABC(ABC): + """DO NOT use this class. 
It is for internal typing use only.""" + + _client: "AsyncPipelineClient" + _config: BatchClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" + + +def quote_etag(etag: Optional[str]) -> Optional[str]: + if not etag or etag == "*": + return etag + if etag.startswith("W/"): + return etag + if etag.startswith('"') and etag.endswith('"'): + return etag + if etag.startswith("'") and etag.endswith("'"): + return etag + return '"' + etag + '"' + + +def prep_if_match(etag: Optional[str], match_condition: Optional[MatchConditions]) -> Optional[str]: + if match_condition == MatchConditions.IfNotModified: + if_match = quote_etag(etag) if etag else None + return if_match + if match_condition == MatchConditions.IfPresent: + return "*" + return None + + +def prep_if_none_match(etag: Optional[str], match_condition: Optional[MatchConditions]) -> Optional[str]: + if match_condition == MatchConditions.IfModified: + if_none_match = quote_etag(etag) if etag else None + return if_none_match + if match_condition == MatchConditions.IfMissing: + return "*" + return None diff --git a/sdk/batch/azure-batch/azure/batch/batch_auth.py b/sdk/batch/azure-batch/azure/batch/batch_auth.py deleted file mode 100644 index 928a3c15f846..000000000000 --- a/sdk/batch/azure-batch/azure/batch/batch_auth.py +++ /dev/null @@ -1,125 +0,0 @@ -# coding=utf-8 -#------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -#-------------------------------------------------------------------------- - -import time -import ast -import base64 -import hmac -import hashlib -import datetime - -import requests -from requests.auth import AuthBase -from msrest.authentication import Authentication -from msrest import Serializer -from msrest.serialization import TZ_UTC - -try: - from urlparse import urlparse, parse_qs - -except ImportError: - from urllib.parse import urlparse, parse_qs - -class SharedKeyAuth(AuthBase): - - headers_to_sign = [ - 'content-encoding', - 'content-language', - 'content-length', - 'content-md5', - 'content-type', - 'date', - 'if-modified-since', - 'if-match', - 'if-none-match', - 'if-unmodified-since', - 'range'] - - def __init__(self, header, account_name, key): - self._header = header - self._account_name = account_name - self._key = key - - def __call__(self, request): - - if not request.headers.get('ocp-date'): - now = datetime.datetime.utcnow() - now = now.replace(tzinfo=TZ_UTC) - request.headers['ocp-date'] = Serializer.serialize_rfc(now) - - url = urlparse(request.url) - uri_path = url.path - - # method to sign - string_to_sign = request.method + '\n' - - # get headers to sign - request_header_dict = { - key.lower(): val for key, val in request.headers.items() if val} - - request_headers = [ - str(request_header_dict.get(x, '')) for x in self.headers_to_sign] - - string_to_sign += '\n'.join(request_headers) + '\n' - - # get ocp- header to sign - ocp_headers = [] - for name, value in request.headers.items(): - if 'ocp-' in name and value: - ocp_headers.append((name.lower(), value)) - - for name, value in sorted(ocp_headers): - string_to_sign += "{}:{}\n".format(name, value) - - # get account_name and uri path to sign - string_to_sign += "/{}{}".format(self._account_name, uri_path) - - # get query string to sign if it is not table service - query_to_sign = parse_qs(url.query) - - for name in sorted(query_to_sign.keys()): - 
value = query_to_sign[name][0] - if value: - string_to_sign += "\n{}:{}".format(name, value) - - # sign the request - auth_string = "SharedKey {}:{}".format( - self._account_name, self._sign_string(string_to_sign)) - - request.headers[self._header] = auth_string - - return request - - def _sign_string(self, string_to_sign): - - _key = self._key.encode('utf-8') - string_to_sign = string_to_sign.encode('utf-8') - - try: - key = base64.b64decode(_key) - except TypeError: - raise ValueError("Invalid key value: {}".format(self._key)) - - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - - return base64.b64encode(digest).decode('utf-8') - - -class SharedKeyCredentials(Authentication): - - def __init__(self, account_name, key): - super(SharedKeyCredentials, self).__init__() - self.auth = SharedKeyAuth(self.header, account_name, key) - - def signed_session(self, session=None): - - session = super(SharedKeyCredentials, self).signed_session(session=session) - session.auth = self.auth - - return session - diff --git a/sdk/batch/azure-batch/azure/batch/custom/__init__.py b/sdk/batch/azure-batch/azure/batch/custom/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/sdk/batch/azure-batch/azure/batch/custom/custom_errors.py b/sdk/batch/azure-batch/azure/batch/custom/custom_errors.py deleted file mode 100644 index f32e9cd3e7d5..000000000000 --- a/sdk/batch/azure-batch/azure/batch/custom/custom_errors.py +++ /dev/null @@ -1,42 +0,0 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# -------------------------------------------------------------------------------------------- - - -class CreateTasksErrorException(Exception): - """ Aggregate Exception containing details for any failures from a task add operation. - - :param str message: Error message describing exit reason - :param [~TaskAddParameter] pending_tasks: List of tasks remaining to be submitted. - :param [~TaskAddResult] failure_tasks: List of tasks which failed to add - :param [~Exception] errors: List of unknown errors forcing early termination - """ - def __init__(self, pending_tasks=None, failure_tasks=None, errors=None): - self.pending_tasks = list(pending_tasks) - self.failure_tasks = list(failure_tasks) - self.errors = list(errors) - if failure_tasks and errors: - self.message = \ - "Multiple errors encountered. Check the `failure_tasks` and " \ - "`errors` properties for additional details." - elif errors: - if len(errors) > 1: - self.message = \ - "Multiple errors occurred when submitting add_collection " \ - "requests. Check the `errors` property for the inner " \ - "exceptions." - else: - self.message = str(errors[0]) - elif failure_tasks: - if len(failure_tasks) > 1: - self.message = \ - "Multiple client side errors occurred when adding the " \ - "tasks. Check the `failure_tasks` property for details on" \ - " these tasks." 
- else: - result = failure_tasks[0] - self.message = \ - "Task with id `%s` failed due to client error - %s::%s" % \ - (result.task_id, result.error.code, result.error.message) - super(CreateTasksErrorException, self).__init__(self.message) diff --git a/sdk/batch/azure-batch/azure/batch/custom/patch.py b/sdk/batch/azure-batch/azure/batch/custom/patch.py deleted file mode 100644 index 44e2916865be..000000000000 --- a/sdk/batch/azure-batch/azure/batch/custom/patch.py +++ /dev/null @@ -1,311 +0,0 @@ -import collections -import importlib -import logging -import threading -import types -import sys - -from ..models import BatchErrorException, TaskAddCollectionResult, TaskAddStatus -from ..custom.custom_errors import CreateTasksErrorException -from ..operations._task_operations import TaskOperations - -MAX_TASKS_PER_REQUEST = 100 -_LOGGER = logging.getLogger(__name__) - -class _TaskWorkflowManager(object): - """Worker class for one add_collection request - - :param ~TaskOperations task_operations: Parent object which instantiated this - :param str job_id: The ID of the job to which the task collection is to be - added. - :param tasks_to_add: The collection of tasks to add. - :type tasks_to_add: list of :class:`TaskAddParameter - ` - :param task_add_collection_options: Additional parameters for the - operation - :type task_add_collection_options: :class:`TaskAddCollectionOptions - ` - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - """ - - def __init__( - self, - client, - original_add_collection, - job_id, - tasks_to_add, - task_add_collection_options=None, - custom_headers=None, - raw=False, - **kwargs): - # Append operations thread safe - Only read once all threads have completed - # List of tasks which failed to add due to a returned client error - self.failure_tasks = collections.deque() - # List of unknown exceptions which occurred during requests. - self.errors = collections.deque() - - # synchronized through lock variables - self._max_tasks_per_request = MAX_TASKS_PER_REQUEST - self.tasks_to_add = collections.deque(tasks_to_add) - - self._error_lock = threading.Lock() - self._max_tasks_lock = threading.Lock() - self._pending_queue_lock = threading.Lock() - - # Variables to be used for task add_collection requests - self._client = client - self._original_add_collection = original_add_collection - self._job_id = job_id - self._task_add_collection_options = task_add_collection_options - self._custom_headers = custom_headers - self._raw = raw - self._kwargs = dict(**kwargs) - - def _bulk_add_tasks(self, results_queue, chunk_tasks_to_add): - """Adds a chunk of tasks to the job - - Retry chunk if body exceeds the maximum request size and retry tasks - if failed due to server errors. 
- - :param results_queue: Queue to place the return value of the request - :type results_queue: collections.deque - :param chunk_tasks_to_add: Chunk of at most 100 tasks with retry details - :type chunk_tasks_to_add: list[~TrackedCloudTask] - """ - - try: - add_collection_response = self._original_add_collection( - self._client, - self._job_id, - chunk_tasks_to_add, - self._task_add_collection_options, - self._custom_headers, - self._raw) - except BatchErrorException as e: - # In case of a chunk exceeding the MaxMessageSize split chunk in half - # and resubmit smaller chunk requests - # TODO: Replace string with constant variable once available in SDK - if e.error.code == "RequestBodyTooLarge": # pylint: disable=no-member - # In this case the task is misbehaved and will not be able to be added due to: - # 1) The task exceeding the max message size - # 2) A single cell of the task exceeds the per-cell limit, or - # 3) Sum of all cells exceeds max row limit - if len(chunk_tasks_to_add) == 1: - failed_task = chunk_tasks_to_add.pop() - self.errors.appendleft(e) - _LOGGER.error("Failed to add task with ID %s due to the body" - " exceeding the maximum request size", failed_task.id) - else: - # Assumption: Tasks are relatively close in size therefore if one batch exceeds size limit - # we should decrease the initial task collection size to avoid repeating the error - # Midpoint is lower bounded by 1 due to above base case - midpoint = int(len(chunk_tasks_to_add) / 2) - # Restrict one thread at a time to do this compare and set, - # therefore forcing max_tasks_per_request to be strictly decreasing - with self._max_tasks_lock: - if midpoint < self._max_tasks_per_request: - self._max_tasks_per_request = midpoint - _LOGGER.info("Amount of tasks per request reduced from %s to %s due to the" - " request body being too large", str(self._max_tasks_per_request), - str(midpoint)) - - # Not the most efficient solution for all cases, but the goal of this is to handle this - # exception and have it work in all cases where tasks are well behaved - # Behavior retries as a smaller chunk and - # appends extra tasks to queue to be picked up by another thread . 
- self.tasks_to_add.extendleft(chunk_tasks_to_add[midpoint:]) - self._bulk_add_tasks(results_queue, chunk_tasks_to_add[:midpoint]) - # Retry server side errors - elif 500 <= e.response.status_code <= 599: - self.tasks_to_add.extendleft(chunk_tasks_to_add) - else: - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - except Exception as e: # pylint: disable=broad-except - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - else: - try: - add_collection_response = add_collection_response.output - except AttributeError: - pass - - for task_result in add_collection_response.value: # pylint: disable=no-member - if task_result.status == TaskAddStatus.server_error: - # Server error will be retried - with self._pending_queue_lock: - for task in chunk_tasks_to_add: - if task.id == task_result.task_id: - self.tasks_to_add.appendleft(task) - elif (task_result.status == TaskAddStatus.client_error - and not task_result.error.code == "TaskExists"): - # Client error will be recorded unless Task already exists - self.failure_tasks.appendleft(task_result) - else: - results_queue.appendleft(task_result) - - def task_collection_thread_handler(self, results_queue): - """Main method for worker to run - - Pops a chunk of tasks off the collection of pending tasks to be added and submits them to be added. - - :param collections.deque results_queue: Queue for worker to output results to - """ - # Add tasks until either we run out or we run into an unexpected error - while self.tasks_to_add and not self.errors: - max_tasks = self._max_tasks_per_request # local copy - chunk_tasks_to_add = [] - with self._pending_queue_lock: - while len(chunk_tasks_to_add) < max_tasks and self.tasks_to_add: - chunk_tasks_to_add.append(self.tasks_to_add.pop()) - - if chunk_tasks_to_add: - self._bulk_add_tasks(results_queue, chunk_tasks_to_add) - - -def _handle_output(results_queue): - """Scan output for exceptions - - If there is an output from an add task collection call add it to the results. - - :param results_queue: Queue containing results of attempted add_collection's - :type results_queue: collections.deque - :return: list of TaskAddResults - :rtype: list[~TaskAddResult] - """ - results = [] - while results_queue: - queue_item = results_queue.pop() - results.append(queue_item) - return results - - -def build_new_add_collection(original_add_collection): - def bulk_add_collection( - self, - job_id, - value, - task_add_collection_options=None, - custom_headers=None, - raw=False, - threads=0, - **operation_config): - """Adds a collection of tasks to the specified job. - - Note that each task must have a unique ID. The Batch service may not - return the results for each task in the same order the tasks were - submitted in this request. If the server times out or the connection is - closed during the request, the request may have been partially or fully - processed, or not at all. In such cases, the user should re-issue the - request. Note that it is up to the user to correctly handle failures - when re-issuing a request. For example, you should use the same task - IDs during a retry so that if the prior operation succeeded, the retry - will not create extra tasks unexpectedly. 
If the response contains any - tasks which failed to add, a client can retry the request. In a retry, - it is most efficient to resubmit only tasks that failed to add, and to - omit tasks that were successfully added on the first attempt. The - maximum lifetime of a task from addition to completion is 180 days. - If a task has not completed within 180 days of being added it will be - terminated by the Batch service and left in whatever state it was in at - that time. - - :param job_id: The ID of the job to which the task collection is to be - added. - :type job_id: str - :param value: The collection of tasks to add. The total serialized - size of this collection must be less than 4MB. If it is greater than - 4MB (for example if each task has 100's of resource files or - environment variables), the request will fail with code - 'RequestBodyTooLarge' and should be retried again with fewer tasks. - :type value: list of :class:`TaskAddParameter - ` - :param task_add_collection_options: Additional parameters for the - operation - :type task_add_collection_options: :class:`TaskAddCollectionOptions - ` - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param int threads: number of threads to use in parallel when adding tasks. If specified - and greater than 0, will start additional threads to submit requests and wait for them to finish. - Otherwise will submit add_collection requests sequentially on main thread - :return: :class:`TaskAddCollectionResult - ` or - :class:`ClientRawResponse` if - raw=true - :rtype: :class:`TaskAddCollectionResult - ` or - :class:`ClientRawResponse` - :raises: - :class:`CreateTasksErrorException` - """ - - results_queue = collections.deque() # deque operations(append/pop) are thread-safe - task_workflow_manager = _TaskWorkflowManager( - self, - original_add_collection, - job_id, - value, - task_add_collection_options, - custom_headers, - raw, - **operation_config) - - # multi-threaded behavior - if threads: - if threads < 0: - raise ValueError("Threads must be positive or 0") - - active_threads = [] - for i in range(threads): - active_threads.append(threading.Thread( - target=task_workflow_manager.task_collection_thread_handler, - args=(results_queue,))) - active_threads[-1].start() - for thread in active_threads: - thread.join() - # single-threaded behavior - else: - task_workflow_manager.task_collection_thread_handler(results_queue) - - # Only define error if all threads have finished and there were failures - if task_workflow_manager.failure_tasks or task_workflow_manager.errors: - raise CreateTasksErrorException( - task_workflow_manager.tasks_to_add, - task_workflow_manager.failure_tasks, - task_workflow_manager.errors) - else: - submitted_tasks = _handle_output(results_queue) - return TaskAddCollectionResult(value=submitted_tasks) - bulk_add_collection.metadata = {'url': '/jobs/{jobId}/addtaskcollection'} - return bulk_add_collection - - -def batch_error_exception_string(self): - ret = "Request encountered an exception.\nCode: {}\nMessage: {}\n".format( - self.error.code, - self.error.message) - if self.error.values: - for error_detail in self.error.values: - ret += "{}: {}\n".format(error_detail.key, error_detail.value) - return ret - - -def patch_client(): - try: - models = sys.modules['azure.batch.models'] - except KeyError: - models = importlib.import_module('azure.batch.models') - setattr(models, 'CreateTasksErrorException', 
CreateTasksErrorException) - sys.modules['azure.batch.models'] = models - - operations_modules = importlib.import_module('azure.batch.operations') - operations_modules.TaskOperations.add_collection = build_new_add_collection(operations_modules.TaskOperations.add_collection) - models = importlib.import_module('azure.batch.models') - models.BatchErrorException.__str__ = batch_error_exception_string \ No newline at end of file diff --git a/sdk/batch/azure-batch/azure/batch/models/__init__.py b/sdk/batch/azure-batch/azure/batch/models/__init__.py index c2d879d2efea..cfd9de7019c1 100644 --- a/sdk/batch/azure-batch/azure/batch/models/__init__.py +++ b/sdk/batch/azure-batch/azure/batch/models/__init__.py @@ -1,832 +1,395 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -try: - from ._models_py3 import AccountListPoolNodeCountsOptions - from ._models_py3 import AccountListSupportedImagesOptions - from ._models_py3 import AffinityInformation - from ._models_py3 import ApplicationGetOptions - from ._models_py3 import ApplicationListOptions - from ._models_py3 import ApplicationPackageReference - from ._models_py3 import ApplicationSummary - from ._models_py3 import AuthenticationTokenSettings - from ._models_py3 import AutomaticOSUpgradePolicy - from ._models_py3 import AutoPoolSpecification - from ._models_py3 import AutoScaleRun - from ._models_py3 import AutoScaleRunError - from ._models_py3 import AutoUserSpecification - from ._models_py3 import AzureBlobFileSystemConfiguration - from ._models_py3 import AzureFileShareConfiguration - from ._models_py3 import BatchError, BatchErrorException - from ._models_py3 import BatchErrorDetail - from ._models_py3 import BatchPoolIdentity - from ._models_py3 import Certificate - from ._models_py3 import CertificateAddOptions - from ._models_py3 import CertificateAddParameter - from ._models_py3 import CertificateCancelDeletionOptions - from ._models_py3 import CertificateDeleteOptions - from ._models_py3 import CertificateGetOptions - from ._models_py3 import CertificateListOptions - from ._models_py3 import CertificateReference - from ._models_py3 import CIFSMountConfiguration - from ._models_py3 import CloudJob - from ._models_py3 import CloudJobSchedule - from ._models_py3 import CloudPool - from ._models_py3 import CloudServiceConfiguration - from ._models_py3 import CloudTask - from ._models_py3 import CloudTaskListSubtasksResult - from ._models_py3 import ComputeNode - from ._models_py3 import ComputeNodeAddUserOptions - from ._models_py3 import ComputeNodeDeleteUserOptions - from ._models_py3 import ComputeNodeDisableSchedulingOptions - from ._models_py3 import ComputeNodeEnableSchedulingOptions - from ._models_py3 import ComputeNodeEndpointConfiguration - from ._models_py3 import ComputeNodeError - from ._models_py3 import ComputeNodeExtensionGetOptions - from ._models_py3 import 
ComputeNodeExtensionListOptions - from ._models_py3 import ComputeNodeGetOptions - from ._models_py3 import ComputeNodeGetRemoteDesktopOptions - from ._models_py3 import ComputeNodeGetRemoteLoginSettingsOptions - from ._models_py3 import ComputeNodeGetRemoteLoginSettingsResult - from ._models_py3 import ComputeNodeIdentityReference - from ._models_py3 import ComputeNodeInformation - from ._models_py3 import ComputeNodeListOptions - from ._models_py3 import ComputeNodeRebootOptions - from ._models_py3 import ComputeNodeReimageOptions - from ._models_py3 import ComputeNodeUpdateUserOptions - from ._models_py3 import ComputeNodeUploadBatchServiceLogsOptions - from ._models_py3 import ComputeNodeUser - from ._models_py3 import ContainerConfiguration - from ._models_py3 import ContainerRegistry - from ._models_py3 import DataDisk - from ._models_py3 import DeleteCertificateError - from ._models_py3 import DiffDiskSettings - from ._models_py3 import DiskEncryptionConfiguration - from ._models_py3 import EnvironmentSetting - from ._models_py3 import ErrorMessage - from ._models_py3 import ExitCodeMapping - from ._models_py3 import ExitCodeRangeMapping - from ._models_py3 import ExitConditions - from ._models_py3 import ExitOptions - from ._models_py3 import FileDeleteFromComputeNodeOptions - from ._models_py3 import FileDeleteFromTaskOptions - from ._models_py3 import FileGetFromComputeNodeOptions - from ._models_py3 import FileGetFromTaskOptions - from ._models_py3 import FileGetPropertiesFromComputeNodeOptions - from ._models_py3 import FileGetPropertiesFromTaskOptions - from ._models_py3 import FileListFromComputeNodeOptions - from ._models_py3 import FileListFromTaskOptions - from ._models_py3 import FileProperties - from ._models_py3 import HttpHeader - from ._models_py3 import ImageInformation - from ._models_py3 import ImageReference - from ._models_py3 import InboundEndpoint - from ._models_py3 import InboundNATPool - from ._models_py3 import InstanceViewStatus - from ._models_py3 import JobAddOptions - from ._models_py3 import JobAddParameter - from ._models_py3 import JobConstraints - from ._models_py3 import JobDeleteOptions - from ._models_py3 import JobDisableOptions - from ._models_py3 import JobDisableParameter - from ._models_py3 import JobEnableOptions - from ._models_py3 import JobExecutionInformation - from ._models_py3 import JobGetOptions - from ._models_py3 import JobGetTaskCountsOptions - from ._models_py3 import JobListFromJobScheduleOptions - from ._models_py3 import JobListOptions - from ._models_py3 import JobListPreparationAndReleaseTaskStatusOptions - from ._models_py3 import JobManagerTask - from ._models_py3 import JobNetworkConfiguration - from ._models_py3 import JobPatchOptions - from ._models_py3 import JobPatchParameter - from ._models_py3 import JobPreparationAndReleaseTaskExecutionInformation - from ._models_py3 import JobPreparationTask - from ._models_py3 import JobPreparationTaskExecutionInformation - from ._models_py3 import JobReleaseTask - from ._models_py3 import JobReleaseTaskExecutionInformation - from ._models_py3 import JobScheduleAddOptions - from ._models_py3 import JobScheduleAddParameter - from ._models_py3 import JobScheduleDeleteOptions - from ._models_py3 import JobScheduleDisableOptions - from ._models_py3 import JobScheduleEnableOptions - from ._models_py3 import JobScheduleExecutionInformation - from ._models_py3 import JobScheduleExistsOptions - from ._models_py3 import JobScheduleGetOptions - from ._models_py3 import 
JobScheduleListOptions - from ._models_py3 import JobSchedulePatchOptions - from ._models_py3 import JobSchedulePatchParameter - from ._models_py3 import JobScheduleStatistics - from ._models_py3 import JobScheduleTerminateOptions - from ._models_py3 import JobScheduleUpdateOptions - from ._models_py3 import JobScheduleUpdateParameter - from ._models_py3 import JobSchedulingError - from ._models_py3 import JobSpecification - from ._models_py3 import JobStatistics - from ._models_py3 import JobTerminateOptions - from ._models_py3 import JobTerminateParameter - from ._models_py3 import JobUpdateOptions - from ._models_py3 import JobUpdateParameter - from ._models_py3 import LinuxUserConfiguration - from ._models_py3 import ManagedDisk - from ._models_py3 import MetadataItem - from ._models_py3 import MountConfiguration - from ._models_py3 import MultiInstanceSettings - from ._models_py3 import NameValuePair - from ._models_py3 import NetworkConfiguration - from ._models_py3 import NetworkSecurityGroupRule - from ._models_py3 import NFSMountConfiguration - from ._models_py3 import NodeAgentInformation - from ._models_py3 import NodeCounts - from ._models_py3 import NodeDisableSchedulingParameter - from ._models_py3 import NodeFile - from ._models_py3 import NodePlacementConfiguration - from ._models_py3 import NodeRebootParameter - from ._models_py3 import NodeReimageParameter - from ._models_py3 import NodeRemoveParameter - from ._models_py3 import NodeUpdateUserParameter - from ._models_py3 import NodeVMExtension - from ._models_py3 import OSDisk - from ._models_py3 import OutputFile - from ._models_py3 import OutputFileBlobContainerDestination - from ._models_py3 import OutputFileDestination - from ._models_py3 import OutputFileUploadOptions - from ._models_py3 import PoolAddOptions - from ._models_py3 import PoolAddParameter - from ._models_py3 import PoolDeleteOptions - from ._models_py3 import PoolDisableAutoScaleOptions - from ._models_py3 import PoolEnableAutoScaleOptions - from ._models_py3 import PoolEnableAutoScaleParameter - from ._models_py3 import PoolEndpointConfiguration - from ._models_py3 import PoolEvaluateAutoScaleOptions - from ._models_py3 import PoolEvaluateAutoScaleParameter - from ._models_py3 import PoolExistsOptions - from ._models_py3 import PoolGetOptions - from ._models_py3 import PoolInformation - from ._models_py3 import PoolListOptions - from ._models_py3 import PoolListUsageMetricsOptions - from ._models_py3 import PoolNodeCounts - from ._models_py3 import PoolPatchOptions - from ._models_py3 import PoolPatchParameter - from ._models_py3 import PoolRemoveNodesOptions - from ._models_py3 import PoolResizeOptions - from ._models_py3 import PoolResizeParameter - from ._models_py3 import PoolSpecification - from ._models_py3 import PoolStatistics - from ._models_py3 import PoolStopResizeOptions - from ._models_py3 import PoolUpdatePropertiesOptions - from ._models_py3 import PoolUpdatePropertiesParameter - from ._models_py3 import PoolUsageMetrics - from ._models_py3 import PublicIPAddressConfiguration - from ._models_py3 import RecentJob - from ._models_py3 import ResizeError - from ._models_py3 import ResourceFile - from ._models_py3 import ResourceStatistics - from ._models_py3 import RollingUpgradePolicy - from ._models_py3 import Schedule - from ._models_py3 import SecurityProfile - from ._models_py3 import ServiceArtifactReference - from ._models_py3 import StartTask - from ._models_py3 import StartTaskInformation - from ._models_py3 import 
SubtaskInformation - from ._models_py3 import TaskAddCollectionOptions - from ._models_py3 import TaskAddCollectionParameter - from ._models_py3 import TaskAddCollectionResult - from ._models_py3 import TaskAddOptions - from ._models_py3 import TaskAddParameter - from ._models_py3 import TaskAddResult - from ._models_py3 import TaskConstraints - from ._models_py3 import TaskContainerExecutionInformation - from ._models_py3 import TaskContainerSettings - from ._models_py3 import TaskCounts - from ._models_py3 import TaskCountsResult - from ._models_py3 import TaskDeleteOptions - from ._models_py3 import TaskDependencies - from ._models_py3 import TaskExecutionInformation - from ._models_py3 import TaskFailureInformation - from ._models_py3 import TaskGetOptions - from ._models_py3 import TaskIdRange - from ._models_py3 import TaskInformation - from ._models_py3 import TaskListOptions - from ._models_py3 import TaskListSubtasksOptions - from ._models_py3 import TaskReactivateOptions - from ._models_py3 import TaskSchedulingPolicy - from ._models_py3 import TaskSlotCounts - from ._models_py3 import TaskStatistics - from ._models_py3 import TaskTerminateOptions - from ._models_py3 import TaskUpdateOptions - from ._models_py3 import TaskUpdateParameter - from ._models_py3 import UefiSettings - from ._models_py3 import UpgradePolicy - from ._models_py3 import UploadBatchServiceLogsConfiguration - from ._models_py3 import UploadBatchServiceLogsResult - from ._models_py3 import UsageStatistics - from ._models_py3 import UserAccount - from ._models_py3 import UserAssignedIdentity - from ._models_py3 import UserIdentity - from ._models_py3 import VirtualMachineConfiguration - from ._models_py3 import VirtualMachineInfo - from ._models_py3 import VMExtension - from ._models_py3 import VMExtensionInstanceView - from ._models_py3 import WindowsConfiguration - from ._models_py3 import WindowsUserConfiguration -except (SyntaxError, ImportError): - from ._models import AccountListPoolNodeCountsOptions - from ._models import AccountListSupportedImagesOptions - from ._models import AffinityInformation - from ._models import ApplicationGetOptions - from ._models import ApplicationListOptions - from ._models import ApplicationPackageReference - from ._models import ApplicationSummary - from ._models import AuthenticationTokenSettings - from ._models import AutomaticOSUpgradePolicy - from ._models import AutoPoolSpecification - from ._models import AutoScaleRun - from ._models import AutoScaleRunError - from ._models import AutoUserSpecification - from ._models import AzureBlobFileSystemConfiguration - from ._models import AzureFileShareConfiguration - from ._models import BatchError, BatchErrorException - from ._models import BatchErrorDetail - from ._models import BatchPoolIdentity - from ._models import Certificate - from ._models import CertificateAddOptions - from ._models import CertificateAddParameter - from ._models import CertificateCancelDeletionOptions - from ._models import CertificateDeleteOptions - from ._models import CertificateGetOptions - from ._models import CertificateListOptions - from ._models import CertificateReference - from ._models import CIFSMountConfiguration - from ._models import CloudJob - from ._models import CloudJobSchedule - from ._models import CloudPool - from ._models import CloudServiceConfiguration - from ._models import CloudTask - from ._models import CloudTaskListSubtasksResult - from ._models import ComputeNode - from ._models import ComputeNodeAddUserOptions - from 
._models import ComputeNodeDeleteUserOptions - from ._models import ComputeNodeDisableSchedulingOptions - from ._models import ComputeNodeEnableSchedulingOptions - from ._models import ComputeNodeEndpointConfiguration - from ._models import ComputeNodeError - from ._models import ComputeNodeExtensionGetOptions - from ._models import ComputeNodeExtensionListOptions - from ._models import ComputeNodeGetOptions - from ._models import ComputeNodeGetRemoteDesktopOptions - from ._models import ComputeNodeGetRemoteLoginSettingsOptions - from ._models import ComputeNodeGetRemoteLoginSettingsResult - from ._models import ComputeNodeIdentityReference - from ._models import ComputeNodeInformation - from ._models import ComputeNodeListOptions - from ._models import ComputeNodeRebootOptions - from ._models import ComputeNodeReimageOptions - from ._models import ComputeNodeUpdateUserOptions - from ._models import ComputeNodeUploadBatchServiceLogsOptions - from ._models import ComputeNodeUser - from ._models import ContainerConfiguration - from ._models import ContainerRegistry - from ._models import DataDisk - from ._models import DeleteCertificateError - from ._models import DiffDiskSettings - from ._models import DiskEncryptionConfiguration - from ._models import EnvironmentSetting - from ._models import ErrorMessage - from ._models import ExitCodeMapping - from ._models import ExitCodeRangeMapping - from ._models import ExitConditions - from ._models import ExitOptions - from ._models import FileDeleteFromComputeNodeOptions - from ._models import FileDeleteFromTaskOptions - from ._models import FileGetFromComputeNodeOptions - from ._models import FileGetFromTaskOptions - from ._models import FileGetPropertiesFromComputeNodeOptions - from ._models import FileGetPropertiesFromTaskOptions - from ._models import FileListFromComputeNodeOptions - from ._models import FileListFromTaskOptions - from ._models import FileProperties - from ._models import HttpHeader - from ._models import ImageInformation - from ._models import ImageReference - from ._models import InboundEndpoint - from ._models import InboundNATPool - from ._models import InstanceViewStatus - from ._models import JobAddOptions - from ._models import JobAddParameter - from ._models import JobConstraints - from ._models import JobDeleteOptions - from ._models import JobDisableOptions - from ._models import JobDisableParameter - from ._models import JobEnableOptions - from ._models import JobExecutionInformation - from ._models import JobGetOptions - from ._models import JobGetTaskCountsOptions - from ._models import JobListFromJobScheduleOptions - from ._models import JobListOptions - from ._models import JobListPreparationAndReleaseTaskStatusOptions - from ._models import JobManagerTask - from ._models import JobNetworkConfiguration - from ._models import JobPatchOptions - from ._models import JobPatchParameter - from ._models import JobPreparationAndReleaseTaskExecutionInformation - from ._models import JobPreparationTask - from ._models import JobPreparationTaskExecutionInformation - from ._models import JobReleaseTask - from ._models import JobReleaseTaskExecutionInformation - from ._models import JobScheduleAddOptions - from ._models import JobScheduleAddParameter - from ._models import JobScheduleDeleteOptions - from ._models import JobScheduleDisableOptions - from ._models import JobScheduleEnableOptions - from ._models import JobScheduleExecutionInformation - from ._models import JobScheduleExistsOptions - from ._models import 
JobScheduleGetOptions - from ._models import JobScheduleListOptions - from ._models import JobSchedulePatchOptions - from ._models import JobSchedulePatchParameter - from ._models import JobScheduleStatistics - from ._models import JobScheduleTerminateOptions - from ._models import JobScheduleUpdateOptions - from ._models import JobScheduleUpdateParameter - from ._models import JobSchedulingError - from ._models import JobSpecification - from ._models import JobStatistics - from ._models import JobTerminateOptions - from ._models import JobTerminateParameter - from ._models import JobUpdateOptions - from ._models import JobUpdateParameter - from ._models import LinuxUserConfiguration - from ._models import ManagedDisk - from ._models import MetadataItem - from ._models import MountConfiguration - from ._models import MultiInstanceSettings - from ._models import NameValuePair - from ._models import NetworkConfiguration - from ._models import NetworkSecurityGroupRule - from ._models import NFSMountConfiguration - from ._models import NodeAgentInformation - from ._models import NodeCounts - from ._models import NodeDisableSchedulingParameter - from ._models import NodeFile - from ._models import NodePlacementConfiguration - from ._models import NodeRebootParameter - from ._models import NodeReimageParameter - from ._models import NodeRemoveParameter - from ._models import NodeUpdateUserParameter - from ._models import NodeVMExtension - from ._models import OSDisk - from ._models import OutputFile - from ._models import OutputFileBlobContainerDestination - from ._models import OutputFileDestination - from ._models import OutputFileUploadOptions - from ._models import PoolAddOptions - from ._models import PoolAddParameter - from ._models import PoolDeleteOptions - from ._models import PoolDisableAutoScaleOptions - from ._models import PoolEnableAutoScaleOptions - from ._models import PoolEnableAutoScaleParameter - from ._models import PoolEndpointConfiguration - from ._models import PoolEvaluateAutoScaleOptions - from ._models import PoolEvaluateAutoScaleParameter - from ._models import PoolExistsOptions - from ._models import PoolGetOptions - from ._models import PoolInformation - from ._models import PoolListOptions - from ._models import PoolListUsageMetricsOptions - from ._models import PoolNodeCounts - from ._models import PoolPatchOptions - from ._models import PoolPatchParameter - from ._models import PoolRemoveNodesOptions - from ._models import PoolResizeOptions - from ._models import PoolResizeParameter - from ._models import PoolSpecification - from ._models import PoolStatistics - from ._models import PoolStopResizeOptions - from ._models import PoolUpdatePropertiesOptions - from ._models import PoolUpdatePropertiesParameter - from ._models import PoolUsageMetrics - from ._models import PublicIPAddressConfiguration - from ._models import RecentJob - from ._models import ResizeError - from ._models import ResourceFile - from ._models import ResourceStatistics - from ._models import RollingUpgradePolicy - from ._models import Schedule - from ._models import SecurityProfile - from ._models import ServiceArtifactReference - from ._models import StartTask - from ._models import StartTaskInformation - from ._models import SubtaskInformation - from ._models import TaskAddCollectionOptions - from ._models import TaskAddCollectionParameter - from ._models import TaskAddCollectionResult - from ._models import TaskAddOptions - from ._models import TaskAddParameter - from ._models import 
TaskAddResult - from ._models import TaskConstraints - from ._models import TaskContainerExecutionInformation - from ._models import TaskContainerSettings - from ._models import TaskCounts - from ._models import TaskCountsResult - from ._models import TaskDeleteOptions - from ._models import TaskDependencies - from ._models import TaskExecutionInformation - from ._models import TaskFailureInformation - from ._models import TaskGetOptions - from ._models import TaskIdRange - from ._models import TaskInformation - from ._models import TaskListOptions - from ._models import TaskListSubtasksOptions - from ._models import TaskReactivateOptions - from ._models import TaskSchedulingPolicy - from ._models import TaskSlotCounts - from ._models import TaskStatistics - from ._models import TaskTerminateOptions - from ._models import TaskUpdateOptions - from ._models import TaskUpdateParameter - from ._models import UefiSettings - from ._models import UpgradePolicy - from ._models import UploadBatchServiceLogsConfiguration - from ._models import UploadBatchServiceLogsResult - from ._models import UsageStatistics - from ._models import UserAccount - from ._models import UserAssignedIdentity - from ._models import UserIdentity - from ._models import VirtualMachineConfiguration - from ._models import VirtualMachineInfo - from ._models import VMExtension - from ._models import VMExtensionInstanceView - from ._models import WindowsConfiguration - from ._models import WindowsUserConfiguration -from ._paged_models import ApplicationSummaryPaged -from ._paged_models import CertificatePaged -from ._paged_models import CloudJobPaged -from ._paged_models import CloudJobSchedulePaged -from ._paged_models import CloudPoolPaged -from ._paged_models import CloudTaskPaged -from ._paged_models import ComputeNodePaged -from ._paged_models import ImageInformationPaged -from ._paged_models import JobPreparationAndReleaseTaskExecutionInformationPaged -from ._paged_models import NodeFilePaged -from ._paged_models import NodeVMExtensionPaged -from ._paged_models import PoolNodeCountsPaged -from ._paged_models import PoolUsageMetricsPaged -from ._batch_service_client_enums import ( - OSType, - VerificationType, - AccessScope, - CertificateState, - CertificateFormat, - ContainerWorkingDirectory, - JobAction, - DependencyAction, - AutoUserScope, - ElevationLevel, - LoginMode, - OutputFileUploadCondition, - ComputeNodeFillType, - CertificateStoreLocation, - CertificateVisibility, - CachingType, - StorageAccountType, - ContainerType, - DiskEncryptionTarget, - NodePlacementPolicyType, - DiffDiskPlacement, - SecurityTypes, - DynamicVNetAssignmentScope, - InboundEndpointProtocol, - NetworkSecurityGroupRuleAccess, - IPAddressProvisioningType, - NodeCommunicationMode, - UpgradeMode, - PoolLifetimeOption, - OnAllTasksComplete, - OnTaskFailure, - JobScheduleState, - ErrorCategory, - JobState, - JobPreparationTaskState, - TaskExecutionResult, - JobReleaseTaskState, - StatusLevelTypes, - PoolState, - AllocationState, - PoolIdentityType, - TaskState, - TaskAddStatus, - SubtaskState, - StartTaskState, - ComputeNodeState, - SchedulingState, - DisableJobOption, - ComputeNodeDeallocationOption, - ComputeNodeRebootOption, - ComputeNodeReimageOption, - DisableComputeNodeSchedulingOption, -) +from ._models import AffinityInfo +from ._models import AuthenticationTokenSettings +from ._models import AutoScaleRun +from ._models import AutoScaleRunError +from ._models import AutoUserSpecification +from ._models import AutomaticOsUpgradePolicy +from 
._models import AzureBlobFileSystemConfiguration +from ._models import AzureFileShareConfiguration +from ._models import BatchApplication +from ._models import BatchApplicationPackageReference +from ._models import BatchAutoPoolSpecification +from ._models import BatchError +from ._models import BatchErrorDetail +from ._models import BatchErrorMessage +from ._models import BatchJob +from ._models import BatchJobConstraints +from ._models import BatchJobCreateContent +from ._models import BatchJobDisableContent +from ._models import BatchJobExecutionInfo +from ._models import BatchJobManagerTask +from ._models import BatchJobNetworkConfiguration +from ._models import BatchJobPreparationAndReleaseTaskStatus +from ._models import BatchJobPreparationTask +from ._models import BatchJobPreparationTaskExecutionInfo +from ._models import BatchJobReleaseTask +from ._models import BatchJobReleaseTaskExecutionInfo +from ._models import BatchJobSchedule +from ._models import BatchJobScheduleConfiguration +from ._models import BatchJobScheduleCreateContent +from ._models import BatchJobScheduleExecutionInfo +from ._models import BatchJobScheduleStatistics +from ._models import BatchJobScheduleUpdateContent +from ._models import BatchJobSchedulingError +from ._models import BatchJobSpecification +from ._models import BatchJobStatistics +from ._models import BatchJobTerminateContent +from ._models import BatchJobUpdateContent +from ._models import BatchNode +from ._models import BatchNodeAgentInfo +from ._models import BatchNodeCounts +from ._models import BatchNodeDisableSchedulingContent +from ._models import BatchNodeEndpointConfiguration +from ._models import BatchNodeError +from ._models import BatchNodeFile +from ._models import BatchNodeIdentityReference +from ._models import BatchNodeInfo +from ._models import BatchNodePlacementConfiguration +from ._models import BatchNodeRebootContent +from ._models import BatchNodeRemoteLoginSettings +from ._models import BatchNodeRemoveContent +from ._models import BatchNodeUserCreateContent +from ._models import BatchNodeUserUpdateContent +from ._models import BatchNodeVMExtension +from ._models import BatchPool +from ._models import BatchPoolCreateContent +from ._models import BatchPoolEnableAutoScaleContent +from ._models import BatchPoolEndpointConfiguration +from ._models import BatchPoolEvaluateAutoScaleContent +from ._models import BatchPoolIdentity +from ._models import BatchPoolInfo +from ._models import BatchPoolNodeCounts +from ._models import BatchPoolReplaceContent +from ._models import BatchPoolResizeContent +from ._models import BatchPoolResourceStatistics +from ._models import BatchPoolSpecification +from ._models import BatchPoolStatistics +from ._models import BatchPoolUpdateContent +from ._models import BatchPoolUsageMetrics +from ._models import BatchPoolUsageStatistics +from ._models import BatchStartTask +from ._models import BatchStartTaskInfo +from ._models import BatchSubtask +from ._models import BatchSupportedImage +from ._models import BatchTask +from ._models import BatchTaskAddCollectionResult +from ._models import BatchTaskAddResult +from ._models import BatchTaskConstraints +from ._models import BatchTaskContainerExecutionInfo +from ._models import BatchTaskContainerSettings +from ._models import BatchTaskCounts +from ._models import BatchTaskCountsResult +from ._models import BatchTaskCreateContent +from ._models import BatchTaskDependencies +from ._models import BatchTaskExecutionInfo +from ._models import BatchTaskFailureInfo 
+from ._models import BatchTaskGroup +from ._models import BatchTaskIdRange +from ._models import BatchTaskInfo +from ._models import BatchTaskSchedulingPolicy +from ._models import BatchTaskSlotCounts +from ._models import BatchTaskStatistics +from ._models import CifsMountConfiguration +from ._models import ContainerConfiguration +from ._models import ContainerRegistryReference +from ._models import DataDisk +from ._models import DiffDiskSettings +from ._models import DiskEncryptionConfiguration +from ._models import EnvironmentSetting +from ._models import ExitCodeMapping +from ._models import ExitCodeRangeMapping +from ._models import ExitConditions +from ._models import ExitOptions +from ._models import FileProperties +from ._models import HttpHeader +from ._models import ImageReference +from ._models import InboundEndpoint +from ._models import InboundNatPool +from ._models import InstanceViewStatus +from ._models import LinuxUserConfiguration +from ._models import ManagedDisk +from ._models import MetadataItem +from ._models import MountConfiguration +from ._models import MultiInstanceSettings +from ._models import NameValuePair +from ._models import NetworkConfiguration +from ._models import NetworkSecurityGroupRule +from ._models import NfsMountConfiguration +from ._models import OSDisk +from ._models import OutputFile +from ._models import OutputFileBlobContainerDestination +from ._models import OutputFileDestination +from ._models import OutputFileUploadConfig +from ._models import PublicIpAddressConfiguration +from ._models import RecentBatchJob +from ._models import ResizeError +from ._models import ResourceFile +from ._models import RollingUpgradePolicy +from ._models import SecurityProfile +from ._models import ServiceArtifactReference +from ._models import UefiSettings +from ._models import UpgradePolicy +from ._models import UploadBatchServiceLogsContent +from ._models import UploadBatchServiceLogsResult +from ._models import UserAccount +from ._models import UserAssignedIdentity +from ._models import UserIdentity +from ._models import VMExtension +from ._models import VMExtensionInstanceView +from ._models import VirtualMachineConfiguration +from ._models import VirtualMachineInfo +from ._models import WindowsConfiguration +from ._models import WindowsUserConfiguration + +from ._enums import AccessScope +from ._enums import AllocationState +from ._enums import AutoUserScope +from ._enums import BatchJobAction +from ._enums import BatchJobPreparationTaskState +from ._enums import BatchJobReleaseTaskState +from ._enums import BatchJobScheduleState +from ._enums import BatchJobState +from ._enums import BatchNodeCommunicationMode +from ._enums import BatchNodeDeallocationOption +from ._enums import BatchNodeDisableSchedulingOption +from ._enums import BatchNodeFillType +from ._enums import BatchNodePlacementPolicyType +from ._enums import BatchNodeRebootOption +from ._enums import BatchNodeState +from ._enums import BatchPoolIdentityType +from ._enums import BatchPoolLifetimeOption +from ._enums import BatchPoolState +from ._enums import BatchStartTaskState +from ._enums import BatchSubtaskState +from ._enums import BatchTaskAddStatus +from ._enums import BatchTaskExecutionResult +from ._enums import BatchTaskState +from ._enums import CachingType +from ._enums import ContainerType +from ._enums import ContainerWorkingDirectory +from ._enums import DependencyAction +from ._enums import DiffDiskPlacement +from ._enums import DisableBatchJobOption +from ._enums import 
DiskEncryptionTarget +from ._enums import DynamicVNetAssignmentScope +from ._enums import ElevationLevel +from ._enums import ErrorCategory +from ._enums import ImageVerificationType +from ._enums import InboundEndpointProtocol +from ._enums import IpAddressProvisioningType +from ._enums import LoginMode +from ._enums import NetworkSecurityGroupRuleAccess +from ._enums import OSType +from ._enums import OnAllBatchTasksComplete +from ._enums import OnBatchTaskFailure +from ._enums import OutputFileUploadCondition +from ._enums import SchedulingState +from ._enums import SecurityTypes +from ._enums import StatusLevelTypes +from ._enums import StorageAccountType +from ._enums import UpgradeMode +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk __all__ = [ - 'AccountListPoolNodeCountsOptions', - 'AccountListSupportedImagesOptions', - 'AffinityInformation', - 'ApplicationGetOptions', - 'ApplicationListOptions', - 'ApplicationPackageReference', - 'ApplicationSummary', - 'AuthenticationTokenSettings', - 'AutomaticOSUpgradePolicy', - 'AutoPoolSpecification', - 'AutoScaleRun', - 'AutoScaleRunError', - 'AutoUserSpecification', - 'AzureBlobFileSystemConfiguration', - 'AzureFileShareConfiguration', - 'BatchError', 'BatchErrorException', - 'BatchErrorDetail', - 'BatchPoolIdentity', - 'Certificate', - 'CertificateAddOptions', - 'CertificateAddParameter', - 'CertificateCancelDeletionOptions', - 'CertificateDeleteOptions', - 'CertificateGetOptions', - 'CertificateListOptions', - 'CertificateReference', - 'CIFSMountConfiguration', - 'CloudJob', - 'CloudJobSchedule', - 'CloudPool', - 'CloudServiceConfiguration', - 'CloudTask', - 'CloudTaskListSubtasksResult', - 'ComputeNode', - 'ComputeNodeAddUserOptions', - 'ComputeNodeDeleteUserOptions', - 'ComputeNodeDisableSchedulingOptions', - 'ComputeNodeEnableSchedulingOptions', - 'ComputeNodeEndpointConfiguration', - 'ComputeNodeError', - 'ComputeNodeExtensionGetOptions', - 'ComputeNodeExtensionListOptions', - 'ComputeNodeGetOptions', - 'ComputeNodeGetRemoteDesktopOptions', - 'ComputeNodeGetRemoteLoginSettingsOptions', - 'ComputeNodeGetRemoteLoginSettingsResult', - 'ComputeNodeIdentityReference', - 'ComputeNodeInformation', - 'ComputeNodeListOptions', - 'ComputeNodeRebootOptions', - 'ComputeNodeReimageOptions', - 'ComputeNodeUpdateUserOptions', - 'ComputeNodeUploadBatchServiceLogsOptions', - 'ComputeNodeUser', - 'ContainerConfiguration', - 'ContainerRegistry', - 'DataDisk', - 'DeleteCertificateError', - 'DiffDiskSettings', - 'DiskEncryptionConfiguration', - 'EnvironmentSetting', - 'ErrorMessage', - 'ExitCodeMapping', - 'ExitCodeRangeMapping', - 'ExitConditions', - 'ExitOptions', - 'FileDeleteFromComputeNodeOptions', - 'FileDeleteFromTaskOptions', - 'FileGetFromComputeNodeOptions', - 'FileGetFromTaskOptions', - 'FileGetPropertiesFromComputeNodeOptions', - 'FileGetPropertiesFromTaskOptions', - 'FileListFromComputeNodeOptions', - 'FileListFromTaskOptions', - 'FileProperties', - 'HttpHeader', - 'ImageInformation', - 'ImageReference', - 'InboundEndpoint', - 'InboundNATPool', - 'InstanceViewStatus', - 'JobAddOptions', - 'JobAddParameter', - 'JobConstraints', - 'JobDeleteOptions', - 'JobDisableOptions', - 'JobDisableParameter', - 'JobEnableOptions', - 'JobExecutionInformation', - 'JobGetOptions', - 'JobGetTaskCountsOptions', - 'JobListFromJobScheduleOptions', - 'JobListOptions', - 'JobListPreparationAndReleaseTaskStatusOptions', - 'JobManagerTask', - 
'JobNetworkConfiguration', - 'JobPatchOptions', - 'JobPatchParameter', - 'JobPreparationAndReleaseTaskExecutionInformation', - 'JobPreparationTask', - 'JobPreparationTaskExecutionInformation', - 'JobReleaseTask', - 'JobReleaseTaskExecutionInformation', - 'JobScheduleAddOptions', - 'JobScheduleAddParameter', - 'JobScheduleDeleteOptions', - 'JobScheduleDisableOptions', - 'JobScheduleEnableOptions', - 'JobScheduleExecutionInformation', - 'JobScheduleExistsOptions', - 'JobScheduleGetOptions', - 'JobScheduleListOptions', - 'JobSchedulePatchOptions', - 'JobSchedulePatchParameter', - 'JobScheduleStatistics', - 'JobScheduleTerminateOptions', - 'JobScheduleUpdateOptions', - 'JobScheduleUpdateParameter', - 'JobSchedulingError', - 'JobSpecification', - 'JobStatistics', - 'JobTerminateOptions', - 'JobTerminateParameter', - 'JobUpdateOptions', - 'JobUpdateParameter', - 'LinuxUserConfiguration', - 'ManagedDisk', - 'MetadataItem', - 'MountConfiguration', - 'MultiInstanceSettings', - 'NameValuePair', - 'NetworkConfiguration', - 'NetworkSecurityGroupRule', - 'NFSMountConfiguration', - 'NodeAgentInformation', - 'NodeCounts', - 'NodeDisableSchedulingParameter', - 'NodeFile', - 'NodePlacementConfiguration', - 'NodeRebootParameter', - 'NodeReimageParameter', - 'NodeRemoveParameter', - 'NodeUpdateUserParameter', - 'NodeVMExtension', - 'OSDisk', - 'OutputFile', - 'OutputFileBlobContainerDestination', - 'OutputFileDestination', - 'OutputFileUploadOptions', - 'PoolAddOptions', - 'PoolAddParameter', - 'PoolDeleteOptions', - 'PoolDisableAutoScaleOptions', - 'PoolEnableAutoScaleOptions', - 'PoolEnableAutoScaleParameter', - 'PoolEndpointConfiguration', - 'PoolEvaluateAutoScaleOptions', - 'PoolEvaluateAutoScaleParameter', - 'PoolExistsOptions', - 'PoolGetOptions', - 'PoolInformation', - 'PoolListOptions', - 'PoolListUsageMetricsOptions', - 'PoolNodeCounts', - 'PoolPatchOptions', - 'PoolPatchParameter', - 'PoolRemoveNodesOptions', - 'PoolResizeOptions', - 'PoolResizeParameter', - 'PoolSpecification', - 'PoolStatistics', - 'PoolStopResizeOptions', - 'PoolUpdatePropertiesOptions', - 'PoolUpdatePropertiesParameter', - 'PoolUsageMetrics', - 'PublicIPAddressConfiguration', - 'RecentJob', - 'ResizeError', - 'ResourceFile', - 'ResourceStatistics', - 'RollingUpgradePolicy', - 'Schedule', - 'SecurityProfile', - 'ServiceArtifactReference', - 'StartTask', - 'StartTaskInformation', - 'SubtaskInformation', - 'TaskAddCollectionOptions', - 'TaskAddCollectionParameter', - 'TaskAddCollectionResult', - 'TaskAddOptions', - 'TaskAddParameter', - 'TaskAddResult', - 'TaskConstraints', - 'TaskContainerExecutionInformation', - 'TaskContainerSettings', - 'TaskCounts', - 'TaskCountsResult', - 'TaskDeleteOptions', - 'TaskDependencies', - 'TaskExecutionInformation', - 'TaskFailureInformation', - 'TaskGetOptions', - 'TaskIdRange', - 'TaskInformation', - 'TaskListOptions', - 'TaskListSubtasksOptions', - 'TaskReactivateOptions', - 'TaskSchedulingPolicy', - 'TaskSlotCounts', - 'TaskStatistics', - 'TaskTerminateOptions', - 'TaskUpdateOptions', - 'TaskUpdateParameter', - 'UefiSettings', - 'UpgradePolicy', - 'UploadBatchServiceLogsConfiguration', - 'UploadBatchServiceLogsResult', - 'UsageStatistics', - 'UserAccount', - 'UserAssignedIdentity', - 'UserIdentity', - 'VirtualMachineConfiguration', - 'VirtualMachineInfo', - 'VMExtension', - 'VMExtensionInstanceView', - 'WindowsConfiguration', - 'WindowsUserConfiguration', - 'ApplicationSummaryPaged', - 'PoolUsageMetricsPaged', - 'CloudPoolPaged', - 'ImageInformationPaged', - 'PoolNodeCountsPaged', - 
'CertificatePaged', - 'NodeFilePaged', - 'CloudJobSchedulePaged', - 'CloudJobPaged', - 'JobPreparationAndReleaseTaskExecutionInformationPaged', - 'CloudTaskPaged', - 'ComputeNodePaged', - 'NodeVMExtensionPaged', - 'OSType', - 'VerificationType', - 'AccessScope', - 'CertificateState', - 'CertificateFormat', - 'ContainerWorkingDirectory', - 'JobAction', - 'DependencyAction', - 'AutoUserScope', - 'ElevationLevel', - 'LoginMode', - 'OutputFileUploadCondition', - 'ComputeNodeFillType', - 'CertificateStoreLocation', - 'CertificateVisibility', - 'CachingType', - 'StorageAccountType', - 'ContainerType', - 'DiskEncryptionTarget', - 'NodePlacementPolicyType', - 'DiffDiskPlacement', - 'SecurityTypes', - 'DynamicVNetAssignmentScope', - 'InboundEndpointProtocol', - 'NetworkSecurityGroupRuleAccess', - 'IPAddressProvisioningType', - 'NodeCommunicationMode', - 'UpgradeMode', - 'PoolLifetimeOption', - 'OnAllTasksComplete', - 'OnTaskFailure', - 'JobScheduleState', - 'ErrorCategory', - 'JobState', - 'JobPreparationTaskState', - 'TaskExecutionResult', - 'JobReleaseTaskState', - 'StatusLevelTypes', - 'PoolState', - 'AllocationState', - 'PoolIdentityType', - 'TaskState', - 'TaskAddStatus', - 'SubtaskState', - 'StartTaskState', - 'ComputeNodeState', - 'SchedulingState', - 'DisableJobOption', - 'ComputeNodeDeallocationOption', - 'ComputeNodeRebootOption', - 'ComputeNodeReimageOption', - 'DisableComputeNodeSchedulingOption', + "AffinityInfo", + "AuthenticationTokenSettings", + "AutoScaleRun", + "AutoScaleRunError", + "AutoUserSpecification", + "AutomaticOsUpgradePolicy", + "AzureBlobFileSystemConfiguration", + "AzureFileShareConfiguration", + "BatchApplication", + "BatchApplicationPackageReference", + "BatchAutoPoolSpecification", + "BatchError", + "BatchErrorDetail", + "BatchErrorMessage", + "BatchJob", + "BatchJobConstraints", + "BatchJobCreateContent", + "BatchJobDisableContent", + "BatchJobExecutionInfo", + "BatchJobManagerTask", + "BatchJobNetworkConfiguration", + "BatchJobPreparationAndReleaseTaskStatus", + "BatchJobPreparationTask", + "BatchJobPreparationTaskExecutionInfo", + "BatchJobReleaseTask", + "BatchJobReleaseTaskExecutionInfo", + "BatchJobSchedule", + "BatchJobScheduleConfiguration", + "BatchJobScheduleCreateContent", + "BatchJobScheduleExecutionInfo", + "BatchJobScheduleStatistics", + "BatchJobScheduleUpdateContent", + "BatchJobSchedulingError", + "BatchJobSpecification", + "BatchJobStatistics", + "BatchJobTerminateContent", + "BatchJobUpdateContent", + "BatchNode", + "BatchNodeAgentInfo", + "BatchNodeCounts", + "BatchNodeDisableSchedulingContent", + "BatchNodeEndpointConfiguration", + "BatchNodeError", + "BatchNodeFile", + "BatchNodeIdentityReference", + "BatchNodeInfo", + "BatchNodePlacementConfiguration", + "BatchNodeRebootContent", + "BatchNodeRemoteLoginSettings", + "BatchNodeRemoveContent", + "BatchNodeUserCreateContent", + "BatchNodeUserUpdateContent", + "BatchNodeVMExtension", + "BatchPool", + "BatchPoolCreateContent", + "BatchPoolEnableAutoScaleContent", + "BatchPoolEndpointConfiguration", + "BatchPoolEvaluateAutoScaleContent", + "BatchPoolIdentity", + "BatchPoolInfo", + "BatchPoolNodeCounts", + "BatchPoolReplaceContent", + "BatchPoolResizeContent", + "BatchPoolResourceStatistics", + "BatchPoolSpecification", + "BatchPoolStatistics", + "BatchPoolUpdateContent", + "BatchPoolUsageMetrics", + "BatchPoolUsageStatistics", + "BatchStartTask", + "BatchStartTaskInfo", + "BatchSubtask", + "BatchSupportedImage", + "BatchTask", + "BatchTaskAddCollectionResult", + "BatchTaskAddResult", + 
"BatchTaskConstraints", + "BatchTaskContainerExecutionInfo", + "BatchTaskContainerSettings", + "BatchTaskCounts", + "BatchTaskCountsResult", + "BatchTaskCreateContent", + "BatchTaskDependencies", + "BatchTaskExecutionInfo", + "BatchTaskFailureInfo", + "BatchTaskGroup", + "BatchTaskIdRange", + "BatchTaskInfo", + "BatchTaskSchedulingPolicy", + "BatchTaskSlotCounts", + "BatchTaskStatistics", + "CifsMountConfiguration", + "ContainerConfiguration", + "ContainerRegistryReference", + "DataDisk", + "DiffDiskSettings", + "DiskEncryptionConfiguration", + "EnvironmentSetting", + "ExitCodeMapping", + "ExitCodeRangeMapping", + "ExitConditions", + "ExitOptions", + "FileProperties", + "HttpHeader", + "ImageReference", + "InboundEndpoint", + "InboundNatPool", + "InstanceViewStatus", + "LinuxUserConfiguration", + "ManagedDisk", + "MetadataItem", + "MountConfiguration", + "MultiInstanceSettings", + "NameValuePair", + "NetworkConfiguration", + "NetworkSecurityGroupRule", + "NfsMountConfiguration", + "OSDisk", + "OutputFile", + "OutputFileBlobContainerDestination", + "OutputFileDestination", + "OutputFileUploadConfig", + "PublicIpAddressConfiguration", + "RecentBatchJob", + "ResizeError", + "ResourceFile", + "RollingUpgradePolicy", + "SecurityProfile", + "ServiceArtifactReference", + "UefiSettings", + "UpgradePolicy", + "UploadBatchServiceLogsContent", + "UploadBatchServiceLogsResult", + "UserAccount", + "UserAssignedIdentity", + "UserIdentity", + "VMExtension", + "VMExtensionInstanceView", + "VirtualMachineConfiguration", + "VirtualMachineInfo", + "WindowsConfiguration", + "WindowsUserConfiguration", + "AccessScope", + "AllocationState", + "AutoUserScope", + "BatchJobAction", + "BatchJobPreparationTaskState", + "BatchJobReleaseTaskState", + "BatchJobScheduleState", + "BatchJobState", + "BatchNodeCommunicationMode", + "BatchNodeDeallocationOption", + "BatchNodeDisableSchedulingOption", + "BatchNodeFillType", + "BatchNodePlacementPolicyType", + "BatchNodeRebootOption", + "BatchNodeState", + "BatchPoolIdentityType", + "BatchPoolLifetimeOption", + "BatchPoolState", + "BatchStartTaskState", + "BatchSubtaskState", + "BatchTaskAddStatus", + "BatchTaskExecutionResult", + "BatchTaskState", + "CachingType", + "ContainerType", + "ContainerWorkingDirectory", + "DependencyAction", + "DiffDiskPlacement", + "DisableBatchJobOption", + "DiskEncryptionTarget", + "DynamicVNetAssignmentScope", + "ElevationLevel", + "ErrorCategory", + "ImageVerificationType", + "InboundEndpointProtocol", + "IpAddressProvisioningType", + "LoginMode", + "NetworkSecurityGroupRuleAccess", + "OSType", + "OnAllBatchTasksComplete", + "OnBatchTaskFailure", + "OutputFileUploadCondition", + "SchedulingState", + "SecurityTypes", + "StatusLevelTypes", + "StorageAccountType", + "UpgradeMode", ] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/sdk/batch/azure-batch/azure/batch/models/_batch_service_client_enums.py b/sdk/batch/azure-batch/azure/batch/models/_batch_service_client_enums.py deleted file mode 100644 index fe6ea6e84d02..000000000000 --- a/sdk/batch/azure-batch/azure/batch/models/_batch_service_client_enums.py +++ /dev/null @@ -1,364 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum - - -class OSType(str, Enum): - - linux = "linux" #: The Linux operating system. - windows = "windows" #: The Windows operating system. - - -class VerificationType(str, Enum): - - verified = "verified" #: The Image is guaranteed to be compatible with the associated Compute Node agent SKU and all Batch features have been confirmed to work as expected. - unverified = "unverified" #: The associated Compute Node agent SKU should have binary compatibility with the Image, but specific functionality has not been verified. - - -class AccessScope(str, Enum): - - job = "job" #: Grants access to perform all operations on the Job containing the Task. - - -class CertificateState(str, Enum): - - active = "active" #: The Certificate is available for use in Pools. - deleting = "deleting" #: The user has requested that the Certificate be deleted, but the delete operation has not yet completed. You may not reference the Certificate when creating or updating Pools. - delete_failed = "deletefailed" #: The user requested that the Certificate be deleted, but there are Pools that still have references to the Certificate, or it is still installed on one or more Nodes. (The latter can occur if the Certificate has been removed from the Pool, but the Compute Node has not yet restarted. Compute Nodes refresh their Certificates only when they restart.) You may use the cancel Certificate delete operation to cancel the delete, or the delete Certificate operation to retry the delete. - - -class CertificateFormat(str, Enum): - - pfx = "pfx" #: The Certificate is a PFX (PKCS#12) formatted Certificate or Certificate chain. - cer = "cer" #: The Certificate is a base64-encoded X.509 Certificate. - - -class ContainerWorkingDirectory(str, Enum): - - task_working_directory = "taskWorkingDirectory" #: Use the standard Batch service Task working directory, which will contain the Task Resource Files populated by Batch. - container_image_default = "containerImageDefault" #: Use the working directory defined in the container Image. Beware that this directory will not contain the Resource Files downloaded by Batch. - - -class JobAction(str, Enum): - - none = "none" #: Take no action. - disable = "disable" #: Disable the Job. This is equivalent to calling the disable Job API, with a disableTasks value of requeue. - terminate = "terminate" #: Terminate the Job. The terminateReason in the Job's executionInfo is set to "TaskFailed". - - -class DependencyAction(str, Enum): - - satisfy = "satisfy" #: Satisfy tasks waiting on this task; once all dependencies are satisfied, the task will be scheduled to run. - block = "block" #: Blocks tasks waiting on this task, preventing them from being scheduled. - - -class AutoUserScope(str, Enum): - - task = "task" #: Specifies that the service should create a new user for the Task. - pool = "pool" #: Specifies that the Task runs as the common auto user Account which is created on every Compute Node in a Pool. - - -class ElevationLevel(str, Enum): - - non_admin = "nonadmin" #: The user is a standard user without elevated access. - admin = "admin" #: The user is a user with elevated access and operates with full Administrator permissions. - - -class LoginMode(str, Enum): - - batch = "batch" #: The LOGON32_LOGON_BATCH Win32 login mode. The batch login mode is recommended for long running parallel processes. 
- interactive = "interactive" #: The LOGON32_LOGON_INTERACTIVE Win32 login mode. UAC is enabled on Windows VirtualMachineConfiguration Pools. If this option is used with an elevated user identity in a Windows VirtualMachineConfiguration Pool, the user session will not be elevated unless the application executed by the Task command line is configured to always require administrative privilege or to always require maximum privilege. - - -class OutputFileUploadCondition(str, Enum): - - task_success = "tasksuccess" #: Upload the file(s) only after the Task process exits with an exit code of 0. - task_failure = "taskfailure" #: Upload the file(s) only after the Task process exits with a nonzero exit code. - task_completion = "taskcompletion" #: Upload the file(s) after the Task process exits, no matter what the exit code was. - - -class ComputeNodeFillType(str, Enum): - - spread = "spread" #: Tasks should be assigned evenly across all Compute Nodes in the Pool. - pack = "pack" #: As many Tasks as possible (taskSlotsPerNode) should be assigned to each Compute Node in the Pool before any Tasks are assigned to the next Compute Node in the Pool. - - -class CertificateStoreLocation(str, Enum): - - current_user = "currentuser" #: Certificates should be installed to the CurrentUser Certificate store. - local_machine = "localmachine" #: Certificates should be installed to the LocalMachine Certificate store. - - -class CertificateVisibility(str, Enum): - - start_task = "starttask" #: The Certificate should be visible to the user account under which the StartTask is run. Note that if AutoUser Scope is Pool for both the StartTask and a Task, this certificate will be visible to the Task as well. - task = "task" #: The Certificate should be visible to the user accounts under which Job Tasks are run. - remote_user = "remoteuser" #: The Certificate should be visible to the user accounts under which users remotely access the Compute Node. - - -class CachingType(str, Enum): - - none = "none" #: The caching mode for the disk is not enabled. - read_only = "readonly" #: The caching mode for the disk is read only. - read_write = "readwrite" #: The caching mode for the disk is read and write. - - -class StorageAccountType(str, Enum): - - standard_lrs = "standard_lrs" #: The data disk / OS disk should use standard locally redundant storage. - premium_lrs = "premium_lrs" #: The data disk / OS disk should use premium locally redundant storage. - standard_ssdlrs = "standardssd_lrs" #: The data disk / OS disk should use standard SSD locally redundant storage. - - -class ContainerType(str, Enum): - - docker_compatible = "dockerCompatible" #: A Docker compatible container technology will be used to launch the containers. - cri_compatible = "criCompatible" #: A CRI based technology will be used to launch the containers. - - -class DiskEncryptionTarget(str, Enum): - - os_disk = "osdisk" #: The OS Disk on the compute node is encrypted. - temporary_disk = "temporarydisk" #: The temporary disk on the compute node is encrypted. On Linux this encryption applies to other partitions (such as those on mounted data disks) when encryption occurs at boot time. - - -class NodePlacementPolicyType(str, Enum): - - regional = "regional" #: All nodes in the pool will be allocated in the same region. - zonal = "zonal" #: Nodes in the pool will be spread across different availability zones with best effort balancing. - - -class DiffDiskPlacement(str, Enum): - - cache_disk = "CacheDisk" #: The Ephemeral OS Disk is stored on the VM cache. 
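[Editor's aside] Every enum in the deleted `_batch_service_client_enums.py` subclasses both `str` and `Enum`, so members compare equal to, and serialize as, their lowercase wire values such as "readonly" or "standard_lrs". The standalone re-creation below (values copied from the definitions above, for illustration only; it does not import the removed module) shows that behaviour.

```python
from enum import Enum

# Re-created from the deleted definitions above, for illustration only.
class CachingType(str, Enum):
    none = "none"             # caching not enabled for the disk
    read_only = "readonly"    # read-only caching
    read_write = "readwrite"  # read/write caching

class StorageAccountType(str, Enum):
    standard_lrs = "standard_lrs"
    premium_lrs = "premium_lrs"
    standard_ssdlrs = "standardssd_lrs"

# Because the classes also derive from str, members behave like their wire values.
assert CachingType.read_only == "readonly"
assert StorageAccountType.premium_lrs.upper() == "PREMIUM_LRS"
assert CachingType("readwrite") is CachingType.read_write  # lookup by value
```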
- - -class SecurityTypes(str, Enum): - - trusted_launch = "trustedLaunch" #: Trusted launch protects against advanced and persistent attack techniques. - - -class DynamicVNetAssignmentScope(str, Enum): - - none = "none" #: No dynamic VNet assignment is enabled. - job = "job" #: Dynamic VNet assignment is done per-job. - - -class InboundEndpointProtocol(str, Enum): - - tcp = "tcp" #: Use TCP for the endpoint. - udp = "udp" #: Use UDP for the endpoint. - - -class NetworkSecurityGroupRuleAccess(str, Enum): - - allow = "allow" #: Allow access. - deny = "deny" #: Deny access. - - -class IPAddressProvisioningType(str, Enum): - - batch_managed = "batchmanaged" #: A public IP will be created and managed by Batch. There may be multiple public IPs depending on the size of the Pool. - user_managed = "usermanaged" #: Public IPs are provided by the user and will be used to provision the Compute Nodes. - no_public_ip_addresses = "nopublicipaddresses" #: No public IP Address will be created. - - -class NodeCommunicationMode(str, Enum): - - default = "default" #: The node communication mode is automatically set by the Batch service. - classic = "classic" #: Nodes using the classic communication mode require inbound TCP communication on ports 29876 and 29877 from the "BatchNodeManagement.{region}" service tag and outbound TCP communication on port 443 to the "Storage.region" and "BatchNodeManagement.{region}" service tags. - simplified = "simplified" #: Nodes using the simplified communication mode require outbound TCP communication on port 443 to the "BatchNodeManagement.{region}" service tag. No open inbound ports are required. - - -class UpgradeMode(str, Enum): - - automatic = "automatic" #: All virtual machines in the scale set are automatically updated at the same time. - manual = "manual" #: You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action. - rolling = "rolling" #: The existing instances in a scale set are brought down in batches to be upgraded. Once the upgraded batch is complete, the instances will begin taking traffic again and the next batch will begin. This continues until all instances brought up-to-date. - - -class PoolLifetimeOption(str, Enum): - - job_schedule = "jobschedule" #: The Pool exists for the lifetime of the Job Schedule. The Batch Service creates the Pool when it creates the first Job on the schedule. You may apply this option only to Job Schedules, not to Jobs. - job = "job" #: The Pool exists for the lifetime of the Job to which it is dedicated. The Batch service creates the Pool when it creates the Job. If the 'job' option is applied to a Job Schedule, the Batch service creates a new auto Pool for every Job created on the schedule. - - -class OnAllTasksComplete(str, Enum): - - no_action = "noaction" #: Do nothing. The Job remains active unless terminated or disabled by some other means. - terminate_job = "terminatejob" #: Terminate the Job. The Job's terminateReason is set to 'AllTasksComplete'. - - -class OnTaskFailure(str, Enum): - - no_action = "noaction" #: Do nothing. The Job remains active unless terminated or disabled by some other means. - perform_exit_options_job_action = "performexitoptionsjobaction" #: Take the action associated with the Task exit condition in the Task's exitConditions collection. (This may still result in no action being taken, if that is what the Task specifies.) 
- - -class JobScheduleState(str, Enum): - - active = "active" #: The Job Schedule is active and will create Jobs as per its schedule. - completed = "completed" #: The Job Schedule has terminated, either by reaching its end time or by the user terminating it explicitly. - disabled = "disabled" #: The user has disabled the Job Schedule. The scheduler will not initiate any new Jobs will on this schedule, but any existing active Job will continue to run. - terminating = "terminating" #: The Job Schedule has no more work to do, or has been explicitly terminated by the user, but the termination operation is still in progress. The scheduler will not initiate any new Jobs for this Job Schedule, nor is any existing Job active. - deleting = "deleting" #: The user has requested that the Job Schedule be deleted, but the delete operation is still in progress. The scheduler will not initiate any new Jobs for this Job Schedule, and will delete any existing Jobs and Tasks under the Job Schedule, including any active Job. The Job Schedule will be deleted when all Jobs and Tasks under the Job Schedule have been deleted. - - -class ErrorCategory(str, Enum): - - user_error = "usererror" #: The error is due to a user issue, such as misconfiguration. - server_error = "servererror" #: The error is due to an internal server issue. - - -class JobState(str, Enum): - - active = "active" #: The Job is available to have Tasks scheduled. - disabling = "disabling" #: A user has requested that the Job be disabled, but the disable operation is still in progress (for example, waiting for Tasks to terminate). - disabled = "disabled" #: A user has disabled the Job. No Tasks are running, and no new Tasks will be scheduled. - enabling = "enabling" #: A user has requested that the Job be enabled, but the enable operation is still in progress. - terminating = "terminating" #: The Job is about to complete, either because a Job Manager Task has completed or because the user has terminated the Job, but the terminate operation is still in progress (for example, because Job Release Tasks are running). - completed = "completed" #: All Tasks have terminated, and the system will not accept any more Tasks or any further changes to the Job. - deleting = "deleting" #: A user has requested that the Job be deleted, but the delete operation is still in progress (for example, because the system is still terminating running Tasks). - - -class JobPreparationTaskState(str, Enum): - - running = "running" #: The Task is currently running (including retrying). - completed = "completed" #: The Task has exited with exit code 0, or the Task has exhausted its retry limit, or the Batch service was unable to start the Task due to Task preparation errors (such as resource file download failures). - - -class TaskExecutionResult(str, Enum): - - success = "success" #: The Task ran successfully. - failure = "failure" #: There was an error during processing of the Task. The failure may have occurred before the Task process was launched, while the Task process was executing, or after the Task process exited. - - -class JobReleaseTaskState(str, Enum): - - running = "running" #: The Task is currently running (including retrying). - completed = "completed" #: The Task has exited with exit code 0, or the Task has exhausted its retry limit, or the Batch service was unable to start the Task due to Task preparation errors (such as resource file download failures). 
- - -class StatusLevelTypes(str, Enum): - - error = "Error" - info = "Info" - warning = "Warning" - - -class PoolState(str, Enum): - - active = "active" #: The Pool is available to run Tasks subject to the availability of Compute Nodes. - deleting = "deleting" #: The user has requested that the Pool be deleted, but the delete operation has not yet completed. - - -class AllocationState(str, Enum): - - steady = "steady" #: The Pool is not resizing. There are no changes to the number of Compute Nodes in the Pool in progress. A Pool enters this state when it is created and when no operations are being performed on the Pool to change the number of Compute Nodes. - resizing = "resizing" #: The Pool is resizing; that is, Compute Nodes are being added to or removed from the Pool. - stopping = "stopping" #: The Pool was resizing, but the user has requested that the resize be stopped, but the stop request has not yet been completed. - - -class PoolIdentityType(str, Enum): - - user_assigned = "UserAssigned" #: Batch pool has user assigned identities with it. - none = "None" #: Batch pool has no identity associated with it. Setting `None` in update pool will remove existing identities. - - -class TaskState(str, Enum): - - active = "active" #: The Task is queued and able to run, but is not currently assigned to a Compute Node. A Task enters this state when it is created, when it is enabled after being disabled, or when it is awaiting a retry after a failed run. - preparing = "preparing" #: The Task has been assigned to a Compute Node, but is waiting for a required Job Preparation Task to complete on the Compute Node. If the Job Preparation Task succeeds, the Task will move to running. If the Job Preparation Task fails, the Task will return to active and will be eligible to be assigned to a different Compute Node. - running = "running" #: The Task is running on a Compute Node. This includes task-level preparation such as downloading resource files or deploying Packages specified on the Task - it does not necessarily mean that the Task command line has started executing. - completed = "completed" #: The Task is no longer eligible to run, usually because the Task has finished successfully, or the Task has finished unsuccessfully and has exhausted its retry limit. A Task is also marked as completed if an error occurred launching the Task, or when the Task has been terminated. - - -class TaskAddStatus(str, Enum): - - success = "success" #: The Task was added successfully. - client_error = "clienterror" #: The Task failed to add due to a client error and should not be retried without modifying the request as appropriate. - server_error = "servererror" #: Task failed to add due to a server error and can be retried without modification. - - -class SubtaskState(str, Enum): - - preparing = "preparing" #: The Task has been assigned to a Compute Node, but is waiting for a required Job Preparation Task to complete on the Compute Node. If the Job Preparation Task succeeds, the Task will move to running. If the Job Preparation Task fails, the Task will return to active and will be eligible to be assigned to a different Compute Node. - running = "running" #: The Task is running on a Compute Node. This includes task-level preparation such as downloading resource files or deploying Packages specified on the Task - it does not necessarily mean that the Task command line has started executing. 
- completed = "completed" #: The Task is no longer eligible to run, usually because the Task has finished successfully, or the Task has finished unsuccessfully and has exhausted its retry limit. A Task is also marked as completed if an error occurred launching the Task, or when the Task has been terminated. - - -class StartTaskState(str, Enum): - - running = "running" #: The StartTask is currently running. - completed = "completed" #: The StartTask has exited with exit code 0, or the StartTask has failed and the retry limit has reached, or the StartTask process did not run due to Task preparation errors (such as resource file download failures). - - -class ComputeNodeState(str, Enum): - - idle = "idle" #: The Compute Node is not currently running a Task. - rebooting = "rebooting" #: The Compute Node is rebooting. - reimaging = "reimaging" #: The Compute Node is reimaging. - running = "running" #: The Compute Node is running one or more Tasks (other than a StartTask). - unusable = "unusable" #: The Compute Node cannot be used for Task execution due to errors. - creating = "creating" #: The Batch service has obtained the underlying virtual machine from Azure Compute, but it has not yet started to join the Pool. - starting = "starting" #: The Batch service is starting on the underlying virtual machine. - waiting_for_start_task = "waitingforstarttask" #: The StartTask has started running on the Compute Node, but waitForSuccess is set and the StartTask has not yet completed. - start_task_failed = "starttaskfailed" #: The StartTask has failed on the Compute Node (and exhausted all retries), and waitForSuccess is set. The Compute Node is not usable for running Tasks. - unknown = "unknown" #: The Batch service has lost contact with the Compute Node, and does not know its true state. - leaving_pool = "leavingpool" #: The Compute Node is leaving the Pool, either because the user explicitly removed it or because the Pool is resizing or autoscaling down. - offline = "offline" #: The Compute Node is not currently running a Task, and scheduling of new Tasks to the Compute Node is disabled. - preempted = "preempted" #: The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. - upgrading_os = "upgradingos" #: The Compute Node is undergoing an OS upgrade operation. - - -class SchedulingState(str, Enum): - - enabled = "enabled" #: Tasks can be scheduled on the Compute Node. - disabled = "disabled" #: No new Tasks will be scheduled on the Compute Node. Tasks already running on the Compute Node may still run to completion. All Compute Nodes start with scheduling enabled. - - -class DisableJobOption(str, Enum): - - requeue = "requeue" #: Terminate running Tasks and requeue them. The Tasks will run again when the Job is enabled. - terminate = "terminate" #: Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. - wait = "wait" #: Allow currently running Tasks to complete. - - -class ComputeNodeDeallocationOption(str, Enum): - - requeue = "requeue" #: Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Remove Compute Nodes as soon as Tasks have been terminated. - terminate = "terminate" #: Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. 
Remove Compute Nodes as soon as Tasks have been terminated. - task_completion = "taskcompletion" #: Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Remove Compute Nodes when all Tasks have completed. - retained_data = "retaineddata" #: Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Remove Compute Nodes when all Task retention periods have expired. - - -class ComputeNodeRebootOption(str, Enum): - - requeue = "requeue" #: Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Restart the Compute Node as soon as Tasks have been terminated. - terminate = "terminate" #: Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Restart the Compute Node as soon as Tasks have been terminated. - task_completion = "taskcompletion" #: Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Restart the Compute Node when all Tasks have completed. - retained_data = "retaineddata" #: Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Restart the Compute Node when all Task retention periods have expired. - - -class ComputeNodeReimageOption(str, Enum): - - requeue = "requeue" #: Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Reimage the Compute Node as soon as Tasks have been terminated. - terminate = "terminate" #: Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Reimage the Compute Node as soon as Tasks have been terminated. - task_completion = "taskcompletion" #: Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Reimage the Compute Node when all Tasks have completed. - retained_data = "retaineddata" #: Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Reimage the Compute Node when all Task retention periods have expired. - - -class DisableComputeNodeSchedulingOption(str, Enum): - - requeue = "requeue" #: Terminate running Task processes and requeue the Tasks. The Tasks may run again on other Compute Nodes, or when Task scheduling is re-enabled on this Compute Node. Enter offline state as soon as Tasks have been terminated. - terminate = "terminate" #: Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Enter offline state as soon as Tasks have been terminated. - task_completion = "taskcompletion" #: Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Enter offline state when all Tasks have completed. diff --git a/sdk/batch/azure-batch/azure/batch/models/_enums.py b/sdk/batch/azure-batch/azure/batch/models/_enums.py new file mode 100644 index 000000000000..15b60ea9ce76 --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/models/_enums.py @@ -0,0 +1,597 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum +from azure.core import CaseInsensitiveEnumMeta + + +class AccessScope(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """AccessScope enums.""" + + JOB = "job" + """Grants access to perform all operations on the Job containing the Task.""" + + +class AllocationState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """AllocationState enums.""" + + STEADY = "steady" + """The Pool is not resizing. There are no changes to the number of Compute Nodes in the Pool in + progress. A Pool enters this state when it is created and when no operations are being + performed on the Pool to change the number of Compute Nodes.""" + RESIZING = "resizing" + """The Pool is resizing; that is, Compute Nodes are being added to or removed from the Pool.""" + STOPPING = "stopping" + """The Pool was resizing, but the user has requested that the resize be stopped, but the stop + request has not yet been completed.""" + + +class AutoUserScope(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """AutoUserScope enums.""" + + TASK = "task" + """Specifies that the service should create a new user for the Task.""" + POOL = "pool" + """Specifies that the Task runs as the common auto user Account which is created on every Compute + Node in a Pool.""" + + +class BatchJobAction(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchJobAction enums.""" + + NONE = "none" + """Take no action.""" + DISABLE = "disable" + """Disable the Job. This is equivalent to calling the disable Job API, with a disableTasks value + of requeue.""" + TERMINATE = "terminate" + """Terminate the Job. The terminationReason in the Job's executionInfo is set to "TaskFailed".""" + + +class BatchJobPreparationTaskState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchJobPreparationTaskState enums.""" + + RUNNING = "running" + """The Task is currently running (including retrying).""" + COMPLETED = "completed" + """The Task has exited with exit code 0, or the Task has exhausted its retry limit, or the Batch + service was unable to start the Task due to Task preparation errors (such as resource file + download failures).""" + + +class BatchJobReleaseTaskState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchJobReleaseTaskState enums.""" + + RUNNING = "running" + """The Task is currently running (including retrying).""" + COMPLETED = "completed" + """The Task has exited with exit code 0, or the Task has exhausted its retry limit, or the Batch + service was unable to start the Task due to Task preparation errors (such as resource file + download failures).""" + + +class BatchJobScheduleState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchJobScheduleState enums.""" + + ACTIVE = "active" + """The Job Schedule is active and will create Jobs as per its schedule.""" + COMPLETED = "completed" + """The Job Schedule has terminated, either by reaching its end time or by the user terminating it + explicitly.""" + DISABLED = "disabled" + """The user has disabled the Job Schedule. The scheduler will not initiate any new Jobs will on + this schedule, but any existing active Job will continue to run.""" + TERMINATING = "terminating" + """The Job Schedule has no more work to do, or has been explicitly terminated by the user, but the + termination operation is still in progress. 
The scheduler will not initiate any new Jobs for + this Job Schedule, nor is any existing Job active.""" + DELETING = "deleting" + """The user has requested that the Job Schedule be deleted, but the delete operation is still in + progress. The scheduler will not initiate any new Jobs for this Job Schedule, and will delete + any existing Jobs and Tasks under the Job Schedule, including any active Job. The Job Schedule + will be deleted when all Jobs and Tasks under the Job Schedule have been deleted.""" + + +class BatchJobState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchJobState enums.""" + + ACTIVE = "active" + """The Job is available to have Tasks scheduled.""" + DISABLING = "disabling" + """A user has requested that the Job be disabled, but the disable operation is still in progress + (for example, waiting for Tasks to terminate).""" + DISABLED = "disabled" + """A user has disabled the Job. No Tasks are running, and no new Tasks will be scheduled.""" + ENABLING = "enabling" + """A user has requested that the Job be enabled, but the enable operation is still in progress.""" + TERMINATING = "terminating" + """The Job is about to complete, either because a Job Manager Task has completed or because the + user has terminated the Job, but the terminate operation is still in progress (for example, + because Job Release Tasks are running).""" + COMPLETED = "completed" + """All Tasks have terminated, and the system will not accept any more Tasks or any further changes + to the Job.""" + DELETING = "deleting" + """A user has requested that the Job be deleted, but the delete operation is still in progress + (for example, because the system is still terminating running Tasks).""" + + +class BatchNodeCommunicationMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchNodeCommunicationMode enums.""" + + DEFAULT = "default" + """The node communication mode is automatically set by the Batch service.""" + CLASSIC = "classic" + """Nodes using the classic communication mode require inbound TCP communication on ports 29876 and + 29877 from the "BatchNodeManagement.{region}" service tag and outbound TCP communication on + port 443 to the "Storage.region" and "BatchNodeManagement.{region}" service tags.""" + SIMPLIFIED = "simplified" + """Nodes using the simplified communication mode require outbound TCP communication on port 443 to + the "BatchNodeManagement.{region}" service tag. No open inbound ports are required.""" + + +class BatchNodeDeallocationOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchNodeDeallocationOption enums.""" + + REQUEUE = "requeue" + """Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute + Node is available. Remove Compute Nodes as soon as Tasks have been terminated.""" + TERMINATE = "terminate" + """Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were + terminated, and will not run again. Remove Compute Nodes as soon as Tasks have been terminated.""" + TASK_COMPLETION = "taskcompletion" + """Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Remove Compute + Nodes when all Tasks have completed.""" + RETAINED_DATA = "retaineddata" + """Allow currently running Tasks to complete, then wait for all Task data retention periods to + expire. Schedule no new Tasks while waiting. 
Remove Compute Nodes when all Task retention + periods have expired.""" + + +class BatchNodeDisableSchedulingOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchNodeDisableSchedulingOption enums.""" + + REQUEUE = "requeue" + """Terminate running Task processes and requeue the Tasks. The Tasks may run again on other + Compute Nodes, or when Task scheduling is re-enabled on this Compute Node. Enter offline state + as soon as Tasks have been terminated.""" + TERMINATE = "terminate" + """Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were + terminated, and will not run again. Enter offline state as soon as Tasks have been terminated.""" + TASK_COMPLETION = "taskcompletion" + """Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Enter offline + state when all Tasks have completed.""" + + +class BatchNodeFillType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchNodeFillType enums.""" + + SPREAD = "spread" + """Tasks should be assigned evenly across all Compute Nodes in the Pool.""" + PACK = "pack" + """As many Tasks as possible (taskSlotsPerNode) should be assigned to each Compute Node in the + Pool before any Tasks are assigned to the next Compute Node in the Pool.""" + + +class BatchNodePlacementPolicyType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchNodePlacementPolicyType enums.""" + + REGIONAL = "regional" + """All nodes in the pool will be allocated in the same region.""" + ZONAL = "zonal" + """Nodes in the pool will be spread across different availability zones with best effort + balancing.""" + + +class BatchNodeRebootOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchNodeRebootOption enums.""" + + REQUEUE = "requeue" + """Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute + Node is available. Restart the Compute Node as soon as Tasks have been terminated.""" + TERMINATE = "terminate" + """Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were + terminated, and will not run again. Restart the Compute Node as soon as Tasks have been + terminated.""" + TASK_COMPLETION = "taskcompletion" + """Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Restart the + Compute Node when all Tasks have completed.""" + RETAINED_DATA = "retaineddata" + """Allow currently running Tasks to complete, then wait for all Task data retention periods to + expire. Schedule no new Tasks while waiting. 
Restart the Compute Node when all Task retention + periods have expired.""" + + +class BatchNodeState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchNodeState enums.""" + + IDLE = "idle" + """The Compute Node is not currently running a Task.""" + REBOOTING = "rebooting" + """The Compute Node is rebooting.""" + REIMAGING = "reimaging" + """The Compute Node is reimaging.""" + RUNNING = "running" + """The Compute Node is running one or more Tasks (other than a StartTask).""" + UNUSABLE = "unusable" + """The Compute Node cannot be used for Task execution due to errors.""" + CREATING = "creating" + """The Batch service has obtained the underlying virtual machine from Azure Compute, but it has + not yet started to join the Pool.""" + STARTING = "starting" + """The Batch service is starting on the underlying virtual machine.""" + WAITING_FOR_START_TASK = "waitingforstarttask" + """The StartTask has started running on the Compute Node, but waitForSuccess is set and the + StartTask has not yet completed.""" + START_TASK_FAILED = "starttaskfailed" + """The StartTask has failed on the Compute Node (and exhausted all retries), and waitForSuccess is + set. The Compute Node is not usable for running Tasks.""" + UNKNOWN = "unknown" + """The Batch service has lost contact with the Compute Node, and does not know its true state.""" + LEAVING_POOL = "leavingpool" + """The Compute Node is leaving the Pool, either because the user explicitly removed it or because + the Pool is resizing or autoscaling down.""" + OFFLINE = "offline" + """The Compute Node is not currently running a Task, and scheduling of new Tasks to the Compute + Node is disabled.""" + PREEMPTED = "preempted" + """The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute + Node when it was preempted will be rescheduled when another Compute Node becomes available.""" + UPGRADING_O_S = "upgradingos" + """The Compute Node is undergoing an OS upgrade operation.""" + + +class BatchPoolIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchPoolIdentityType enums.""" + + USER_ASSIGNED = "UserAssigned" + """Batch pool has user assigned identities with it.""" + NONE = "None" + """Batch pool has no identity associated with it. Setting ``None`` in update pool will remove + existing identities.""" + + +class BatchPoolLifetimeOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchPoolLifetimeOption enums.""" + + JOB_SCHEDULE = "jobschedule" + """The Pool exists for the lifetime of the Job Schedule. The Batch Service creates the Pool when + it creates the first Job on the schedule. You may apply this option only to Job Schedules, not + to Jobs.""" + JOB = "job" + """The Pool exists for the lifetime of the Job to which it is dedicated. The Batch service creates + the Pool when it creates the Job. 
If the 'job' option is applied to a Job Schedule, the Batch + service creates a new auto Pool for every Job created on the schedule.""" + + +class BatchPoolState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchPoolState enums.""" + + ACTIVE = "active" + """The Pool is available to run Tasks subject to the availability of Compute Nodes.""" + DELETING = "deleting" + """The user has requested that the Pool be deleted, but the delete operation has not yet + completed.""" + + +class BatchStartTaskState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchStartTaskState enums.""" + + RUNNING = "running" + """The StartTask is currently running.""" + COMPLETED = "completed" + """The StartTask has exited with exit code 0, or the StartTask has failed and the retry limit has + reached, or the StartTask process did not run due to Task preparation errors (such as resource + file download failures).""" + + +class BatchSubtaskState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchSubtaskState enums.""" + + PREPARING = "preparing" + """The Task has been assigned to a Compute Node, but is waiting for a required Job Preparation + Task to complete on the Compute Node. If the Job Preparation Task succeeds, the Task will move + to running. If the Job Preparation Task fails, the Task will return to active and will be + eligible to be assigned to a different Compute Node.""" + RUNNING = "running" + """The Task is running on a Compute Node. This includes task-level preparation such as downloading + resource files or deploying Packages specified on the Task - it does not necessarily mean that + the Task command line has started executing.""" + COMPLETED = "completed" + """The Task is no longer eligible to run, usually because the Task has finished successfully, or + the Task has finished unsuccessfully and has exhausted its retry limit. A Task is also marked + as completed if an error occurred launching the Task, or when the Task has been terminated.""" + + +class BatchTaskAddStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchTaskAddStatus enums.""" + + SUCCESS = "success" + """The Task was added successfully.""" + CLIENT_ERROR = "clienterror" + """The Task failed to add due to a client error and should not be retried without modifying the + request as appropriate.""" + SERVER_ERROR = "servererror" + """Task failed to add due to a server error and can be retried without modification.""" + + +class BatchTaskExecutionResult(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchTaskExecutionResult enums.""" + + SUCCESS = "success" + """The Task ran successfully.""" + FAILURE = "failure" + """There was an error during processing of the Task. The failure may have occurred before the Task + process was launched, while the Task process was executing, or after the Task process exited.""" + + +class BatchTaskState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchTaskState enums.""" + + ACTIVE = "active" + """The Task is queued and able to run, but is not currently assigned to a Compute Node. A Task + enters this state when it is created, when it is enabled after being disabled, or when it is + awaiting a retry after a failed run.""" + PREPARING = "preparing" + """The Task has been assigned to a Compute Node, but is waiting for a required Job Preparation + Task to complete on the Compute Node. If the Job Preparation Task succeeds, the Task will move + to running. 
If the Job Preparation Task fails, the Task will return to active and will be + eligible to be assigned to a different Compute Node.""" + RUNNING = "running" + """The Task is running on a Compute Node. This includes task-level preparation such as downloading + resource files or deploying Packages specified on the Task - it does not necessarily mean that + the Task command line has started executing.""" + COMPLETED = "completed" + """The Task is no longer eligible to run, usually because the Task has finished successfully, or + the Task has finished unsuccessfully and has exhausted its retry limit. A Task is also marked + as completed if an error occurred launching the Task, or when the Task has been terminated.""" + + +class CachingType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """CachingType enums.""" + + NONE = "none" + """The caching mode for the disk is not enabled.""" + READ_ONLY = "readonly" + """The caching mode for the disk is read only.""" + READ_WRITE = "readwrite" + """The caching mode for the disk is read and write.""" + + +class ContainerType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ContainerType enums.""" + + DOCKER_COMPATIBLE = "dockerCompatible" + """A Docker compatible container technology will be used to launch the containers.""" + CRI_COMPATIBLE = "criCompatible" + """A CRI based technology will be used to launch the containers.""" + + +class ContainerWorkingDirectory(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ContainerWorkingDirectory enums.""" + + TASK_WORKING_DIRECTORY = "taskWorkingDirectory" + """Use the standard Batch service Task working directory, which will contain the Task Resource + Files populated by Batch.""" + CONTAINER_IMAGE_DEFAULT = "containerImageDefault" + """Use the working directory defined in the container Image. Beware that this directory will not + contain the Resource Files downloaded by Batch.""" + + +class DependencyAction(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """DependencyAction enums.""" + + SATISFY = "satisfy" + """Satisfy tasks waiting on this task; once all dependencies are satisfied, the task will be + scheduled to run.""" + BLOCK = "block" + """Blocks tasks waiting on this task, preventing them from being scheduled.""" + + +class DiffDiskPlacement(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """AccessDiffDiskPlacementScope enums.""" + + CACHE_DISK = "cachedisk" + """The Ephemeral OS Disk is stored on the VM cache.""" + + +class DisableBatchJobOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """DisableBatchJobOption enums.""" + + REQUEUE = "requeue" + """Terminate running Tasks and requeue them. The Tasks will run again when the Job is enabled.""" + TERMINATE = "terminate" + """Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were + terminated, and will not run again.""" + WAIT = "wait" + """Allow currently running Tasks to complete.""" + + +class DiskEncryptionTarget(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """DiskEncryptionTarget enums.""" + + OS_DISK = "osdisk" + """The OS Disk on the compute node is encrypted.""" + TEMPORARY_DISK = "temporarydisk" + """The temporary disk on the compute node is encrypted. 
On Linux this encryption applies to other + partitions (such as those on mounted data disks) when encryption occurs at boot time.""" + + +class DynamicVNetAssignmentScope(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """DynamicVNetAssignmentScope enums.""" + + NONE = "none" + """No dynamic VNet assignment is enabled.""" + JOB = "job" + """Dynamic VNet assignment is done per-job.""" + + +class ElevationLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ElevationLevel enums.""" + + NON_ADMIN = "nonadmin" + """The user is a standard user without elevated access.""" + ADMIN = "admin" + """The user is a user with elevated access and operates with full Administrator permissions.""" + + +class ErrorCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ErrorCategory enums.""" + + USER_ERROR = "usererror" + """The error is due to a user issue, such as misconfiguration.""" + SERVER_ERROR = "servererror" + """The error is due to an internal server issue.""" + + +class ImageVerificationType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ImageVerificationType enums.""" + + VERIFIED = "verified" + """The Image is guaranteed to be compatible with the associated Compute Node agent SKU and all + Batch features have been confirmed to work as expected.""" + UNVERIFIED = "unverified" + """The associated Compute Node agent SKU should have binary compatibility with the Image, but + specific functionality has not been verified.""" + + +class InboundEndpointProtocol(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """InboundEndpointProtocol enums.""" + + TCP = "tcp" + """Use TCP for the endpoint.""" + UDP = "udp" + """Use UDP for the endpoint.""" + + +class IpAddressProvisioningType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """IPAddressProvisioningType enums.""" + + BATCH_MANAGED = "batchmanaged" + """A public IP will be created and managed by Batch. There may be multiple public IPs depending on + the size of the Pool.""" + USER_MANAGED = "usermanaged" + """Public IPs are provided by the user and will be used to provision the Compute Nodes.""" + NO_PUBLIC_IP_ADDRESSES = "nopublicipaddresses" + """No public IP Address will be created.""" + + +class LoginMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """LoginMode enums.""" + + BATCH = "batch" + """The LOGON32_LOGON_BATCH Win32 login mode. The batch login mode is recommended for long running + parallel processes.""" + INTERACTIVE = "interactive" + """The LOGON32_LOGON_INTERACTIVE Win32 login mode. UAC is enabled on Windows + VirtualMachineConfiguration Pools. If this option is used with an elevated user identity in a + Windows VirtualMachineConfiguration Pool, the user session will not be elevated unless the + application executed by the Task command line is configured to always require administrative + privilege or to always require maximum privilege.""" + + +class NetworkSecurityGroupRuleAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """NetworkSecurityGroupRuleAccess enums.""" + + ALLOW = "allow" + """Allow access.""" + DENY = "deny" + """Deny access.""" + + +class OnAllBatchTasksComplete(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The action the Batch service should take when all Tasks in the Job are in the completed state.""" + + NO_ACTION = "noaction" + """Do nothing. The Job remains active unless terminated or disabled by some other means.""" + TERMINATE_JOB = "terminatejob" + """Terminate the Job. 
The Job's terminationReason is set to 'AllTasksComplete'.""" + + +class OnBatchTaskFailure(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """OnTaskFailure enums.""" + + NO_ACTION = "noaction" + """Do nothing. The Job remains active unless terminated or disabled by some other means.""" + PERFORM_EXIT_OPTIONS_JOB_ACTION = "performexitoptionsjobaction" + """Terminate the Job. The Job's terminationReason is set to 'AllTasksComplete'.""" + + +class OSType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """OSType enums.""" + + LINUX = "linux" + """The Linux operating system.""" + WINDOWS = "windows" + """The Windows operating system.""" + + +class OutputFileUploadCondition(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """OutputFileUploadCondition enums.""" + + TASK_SUCCESS = "tasksuccess" + """Upload the file(s) only after the Task process exits with an exit code of 0.""" + TASK_FAILURE = "taskfailure" + """Upload the file(s) only after the Task process exits with a nonzero exit code.""" + TASK_COMPLETION = "taskcompletion" + """Upload the file(s) after the Task process exits, no matter what the exit code was.""" + + +class SchedulingState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """SchedulingState enums.""" + + ENABLED = "enabled" + """Tasks can be scheduled on the Compute Node.""" + DISABLED = "disabled" + """No new Tasks will be scheduled on the Compute Node. Tasks already running on the Compute Node + may still run to completion. All Compute Nodes start with scheduling enabled.""" + + +class SecurityTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies the SecurityType of the virtual machine. It has to be set to any specified value to + enable UefiSettings. + """ + + TRUSTED_LAUNCH = "trustedLaunch" + """Trusted launch protects against advanced and persistent attack techniques.""" + + +class StatusLevelTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Level code.""" + + ERROR = "Error" + """Error""" + INFO = "Info" + """Info""" + WARNING = "Warning" + """Warning""" + + +class StorageAccountType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """StorageAccountType enums.""" + + STANDARD_L_R_S = "standard_lrs" + """The data disk should use standard locally redundant storage.""" + PREMIUM_L_R_S = "premium_lrs" + """The data disk should use premium locally redundant storage.""" + STANDARD_S_S_D_L_R_S = "standardssd_lrs" + """The data disk / OS disk should use standard SSD locally redundant storage.""" + + +class UpgradeMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """UpgradeMode enums.""" + + AUTOMATIC = "automatic" + """TAll virtual machines in the scale set are automatically updated at the same time.""" + MANUAL = "manual" + """You control the application of updates to virtual machines in the scale set. You do this by + using the manualUpgrade action.""" + ROLLING = "rolling" + """The existing instances in a scale set are brought down in batches to be upgraded. Once the + upgraded batch is complete, the instances will begin taking traffic again and the next batch + will begin. 
This continues until all instances brought up-to-date.""" diff --git a/sdk/batch/azure-batch/azure/batch/models/_models.py b/sdk/batch/azure-batch/azure/batch/models/_models.py index 71e18dcd959d..2aa28c73f2e5 100644 --- a/sdk/batch/azure-batch/azure/batch/models/_models.py +++ b/sdk/batch/azure-batch/azure/batch/models/_models.py @@ -1,11507 +1,10027 @@ # coding=utf-8 +# pylint: disable=too-many-lines # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from msrest.serialization import Model -from msrest.exceptions import HttpOperationError - - -class AccountListPoolNodeCountsOptions(Model): - """Additional parameters for list_pool_node_counts operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch. - :type filter: str - :param max_results: The maximum number of items to return in the response. - Default value: 10 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(AccountListPoolNodeCountsOptions, self).__init__(**kwargs) - self.filter = kwargs.get('filter', None) - self.max_results = kwargs.get('max_results', 10) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class AccountListSupportedImagesOptions(Model): - """Additional parameters for list_supported_images operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. 
- :type filter: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 results will be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(AccountListSupportedImagesOptions, self).__init__(**kwargs) - self.filter = kwargs.get('filter', None) - self.max_results = kwargs.get('max_results', 1000) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class AffinityInformation(Model): - """A locality hint that can be used by the Batch service to select a Compute - Node on which to start a Task. - - All required parameters must be populated in order to send to Azure. - - :param affinity_id: Required. You can pass the affinityId of a Node to - indicate that this Task needs to run on that Compute Node. Note that this - is just a soft affinity. If the target Compute Node is busy or unavailable - at the time the Task is scheduled, then the Task will be scheduled - elsewhere. - :type affinity_id: str - """ - - _validation = { - 'affinity_id': {'required': True}, - } - - _attribute_map = { - 'affinity_id': {'key': 'affinityId', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(AffinityInformation, self).__init__(**kwargs) - self.affinity_id = kwargs.get('affinity_id', None) - - -class ApplicationGetOptions(Model): - """Additional parameters for get operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ApplicationGetOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ApplicationListOptions(Model): - """Additional parameters for list operation. - - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 applications can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ApplicationListOptions, self).__init__(**kwargs) - self.max_results = kwargs.get('max_results', 1000) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ApplicationPackageReference(Model): - """A reference to an Package to be deployed to Compute Nodes. +import datetime +from typing import Any, Dict, List, Mapping, Optional, TYPE_CHECKING, Union, overload - All required parameters must be populated in order to send to Azure. +from .. import _model_base +from .._model_base import rest_field - :param application_id: Required. When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - :type application_id: str - :param version: If this is omitted on a Pool, and no default version is - specified for this application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - :type version: str - """ +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from .. 
import models as _models - _validation = { - 'application_id': {'required': True}, - } - _attribute_map = { - 'application_id': {'key': 'applicationId', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - } +class AffinityInfo(_model_base.Model): + """A locality hint that can be used by the Batch service to select a Compute Node + on which to start a Task. - def __init__(self, **kwargs): - super(ApplicationPackageReference, self).__init__(**kwargs) - self.application_id = kwargs.get('application_id', None) - self.version = kwargs.get('version', None) + :ivar affinity_id: An opaque string representing the location of a Compute Node or a Task that + has run previously. You can pass the affinityId of a Node to indicate that this Task needs to + run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is + busy or unavailable at the time the Task is scheduled, then the Task will be scheduled + elsewhere. Required. + :vartype affinity_id: str + """ -class ApplicationSummary(Model): - """Contains information about an application in an Azure Batch Account. + affinity_id: str = rest_field(name="affinityId") + """An opaque string representing the location of a Compute Node or a Task that has run previously. + You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute + Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable + at the time the Task is scheduled, then the Task will be scheduled elsewhere. Required.""" - All required parameters must be populated in order to send to Azure. + @overload + def __init__( + self, + *, + affinity_id: str, + ): ... - :param id: Required. - :type id: str - :param display_name: Required. - :type display_name: str - :param versions: Required. - :type versions: list[str] - """ + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ - _validation = { - 'id': {'required': True}, - 'display_name': {'required': True}, - 'versions': {'required': True}, - } + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'versions': {'key': 'versions', 'type': '[str]'}, - } - def __init__(self, **kwargs): - super(ApplicationSummary, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.display_name = kwargs.get('display_name', None) - self.versions = kwargs.get('versions', None) +class AuthenticationTokenSettings(_model_base.Model): + """The settings for an authentication token that the Task can use to perform Batch + service operations. + :ivar access: The Batch resources to which the token grants access. The authentication token + grants access to a limited set of Batch service operations. Currently the only supported value + for the access property is 'job', which grants access to all operations related to the Job + which contains the Task. + :vartype access: list[str or ~azure.batch.models.AccessScope] + """ -class AuthenticationTokenSettings(Model): - """The settings for an authentication token that the Task can use to perform - Batch service operations. + access: Optional[List[Union[str, "_models.AccessScope"]]] = rest_field() + """The Batch resources to which the token grants access. 
The authentication token grants access to + a limited set of Batch service operations. Currently the only supported value for the access + property is 'job', which grants access to all operations related to the Job which contains the + Task.""" - :param access: The authentication token grants access to a limited set of - Batch service operations. Currently the only supported value for the - access property is 'job', which grants access to all operations related to - the Job which contains the Task. - :type access: list[str or ~azure.batch.models.AccessScope] - """ + @overload + def __init__( + self, + *, + access: Optional[List[Union[str, "_models.AccessScope"]]] = None, + ): ... - _attribute_map = { - 'access': {'key': 'access', 'type': '[AccessScope]'}, - } + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ - def __init__(self, **kwargs): - super(AuthenticationTokenSettings, self).__init__(**kwargs) - self.access = kwargs.get('access', None) + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) -class AutomaticOSUpgradePolicy(Model): +class AutomaticOsUpgradePolicy(_model_base.Model): """The configuration parameters used for performing automatic OS upgrade. - :param disable_automatic_rollback: Whether OS image rollback feature - should be disabled. - :type disable_automatic_rollback: bool - :param enable_automatic_os_upgrade: Indicates whether OS upgrades should - automatically be applied to scale set instances in a rolling fashion when - a newer version of the OS image becomes available.
<br /><br />
If this is - set to true for Windows based pools, - [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/en-us/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) + :ivar disable_automatic_rollback: Whether OS image rollback feature should be disabled. + :vartype disable_automatic_rollback: bool + :ivar enable_automatic_os_upgrade: Indicates whether OS upgrades should automatically be + applied to scale set instances in a rolling fashion when a newer version of the OS image + becomes available. :code:`
<br>`:code:`<br>
` If this is set to true for Windows based pools, + `WindowsConfiguration.enableAutomaticUpdates + `_ cannot be set to true. - :type enable_automatic_os_upgrade: bool - :param use_rolling_upgrade_policy: Indicates whether rolling upgrade - policy should be used during Auto OS Upgrade. Auto OS Upgrade will - fallback to the default policy if no policy is defined on the VMSS. - :type use_rolling_upgrade_policy: bool - :param os_rolling_upgrade_deferral: Defer OS upgrades on the TVMs if they - are running tasks. - :type os_rolling_upgrade_deferral: bool - """ - - _attribute_map = { - 'disable_automatic_rollback': {'key': 'disableAutomaticRollback', 'type': 'bool'}, - 'enable_automatic_os_upgrade': {'key': 'enableAutomaticOSUpgrade', 'type': 'bool'}, - 'use_rolling_upgrade_policy': {'key': 'useRollingUpgradePolicy', 'type': 'bool'}, - 'os_rolling_upgrade_deferral': {'key': 'osRollingUpgradeDeferral', 'type': 'bool'}, - } - - def __init__(self, **kwargs): - super(AutomaticOSUpgradePolicy, self).__init__(**kwargs) - self.disable_automatic_rollback = kwargs.get('disable_automatic_rollback', None) - self.enable_automatic_os_upgrade = kwargs.get('enable_automatic_os_upgrade', None) - self.use_rolling_upgrade_policy = kwargs.get('use_rolling_upgrade_policy', None) - self.os_rolling_upgrade_deferral = kwargs.get('os_rolling_upgrade_deferral', None) - - -class AutoPoolSpecification(Model): - """Specifies characteristics for a temporary 'auto pool'. The Batch service - will create this auto Pool when the Job is submitted. - - All required parameters must be populated in order to send to Azure. - - :param auto_pool_id_prefix: The Batch service assigns each auto Pool a - unique identifier on creation. To distinguish between Pools created for - different purposes, you can specify this element to add a prefix to the ID - that is assigned. The prefix can be up to 20 characters long. - :type auto_pool_id_prefix: str - :param pool_lifetime_option: Required. Possible values include: - 'jobSchedule', 'job' - :type pool_lifetime_option: str or ~azure.batch.models.PoolLifetimeOption - :param keep_alive: Whether to keep an auto Pool alive after its lifetime - expires. If false, the Batch service deletes the Pool once its lifetime - (as determined by the poolLifetimeOption setting) expires; that is, when - the Job or Job Schedule completes. If true, the Batch service does not - delete the Pool automatically. It is up to the user to delete auto Pools - created with this option. - :type keep_alive: bool - :param pool: The Pool specification for the auto Pool. - :type pool: ~azure.batch.models.PoolSpecification - """ - - _validation = { - 'pool_lifetime_option': {'required': True}, - } - - _attribute_map = { - 'auto_pool_id_prefix': {'key': 'autoPoolIdPrefix', 'type': 'str'}, - 'pool_lifetime_option': {'key': 'poolLifetimeOption', 'type': 'PoolLifetimeOption'}, - 'keep_alive': {'key': 'keepAlive', 'type': 'bool'}, - 'pool': {'key': 'pool', 'type': 'PoolSpecification'}, - } - - def __init__(self, **kwargs): - super(AutoPoolSpecification, self).__init__(**kwargs) - self.auto_pool_id_prefix = kwargs.get('auto_pool_id_prefix', None) - self.pool_lifetime_option = kwargs.get('pool_lifetime_option', None) - self.keep_alive = kwargs.get('keep_alive', None) - self.pool = kwargs.get('pool', None) - - -class AutoScaleRun(Model): + :vartype enable_automatic_os_upgrade: bool + :ivar use_rolling_upgrade_policy: Indicates whether rolling upgrade policy should be used + during Auto OS Upgrade. 
Auto OS Upgrade will fallback to the default policy if no policy is + defined on the VMSS. + :vartype use_rolling_upgrade_policy: bool + :ivar os_rolling_upgrade_deferral: Defer OS upgrades on the TVMs if they are running tasks. + :vartype os_rolling_upgrade_deferral: bool + """ + + disable_automatic_rollback: Optional[bool] = rest_field(name="disableAutomaticRollback") + """Whether OS image rollback feature should be disabled.""" + enable_automatic_os_upgrade: Optional[bool] = rest_field(name="enableAutomaticOSUpgrade") + """Indicates whether OS upgrades should automatically be applied to scale set instances in a + rolling fashion when a newer version of the OS image becomes available. :code:`
<br>`:code:`<br>
` If this is set to true for Windows based pools, + `WindowsConfiguration.enableAutomaticUpdates + `_ + cannot be set to true.""" + use_rolling_upgrade_policy: Optional[bool] = rest_field(name="useRollingUpgradePolicy") + """Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Auto OS Upgrade + will fallback to the default policy if no policy is defined on the VMSS.""" + os_rolling_upgrade_deferral: Optional[bool] = rest_field(name="osRollingUpgradeDeferral") + """Defer OS upgrades on the TVMs if they are running tasks.""" + + @overload + def __init__( + self, + *, + disable_automatic_rollback: Optional[bool] = None, + enable_automatic_os_upgrade: Optional[bool] = None, + use_rolling_upgrade_policy: Optional[bool] = None, + os_rolling_upgrade_deferral: Optional[bool] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class AutoScaleRun(_model_base.Model): """The results and errors from an execution of a Pool autoscale formula. - All required parameters must be populated in order to send to Azure. - :param timestamp: Required. - :type timestamp: datetime - :param results: Each variable value is returned in the form - $variable=value, and variables are separated by semicolons. - :type results: str - :param error: Details of the error encountered evaluating the autoscale - formula on the Pool, if the evaluation was unsuccessful. - :type error: ~azure.batch.models.AutoScaleRunError - """ + :ivar timestamp: The time at which the autoscale formula was last evaluated. Required. + :vartype timestamp: ~datetime.datetime + :ivar results: The final values of all variables used in the evaluation of the autoscale + formula. Each variable value is returned in the form $variable=value, and variables are + separated by semicolons. + :vartype results: str + :ivar error: Details of the error encountered evaluating the autoscale formula on the Pool, if + the evaluation was unsuccessful. + :vartype error: ~azure.batch.models.AutoScaleRunError + """ + + timestamp: datetime.datetime = rest_field(format="rfc3339") + """The time at which the autoscale formula was last evaluated. Required.""" + results: Optional[str] = rest_field() + """The final values of all variables used in the evaluation of the autoscale formula. Each + variable value is returned in the form $variable=value, and variables are separated by + semicolons.""" + error: Optional["_models.AutoScaleRunError"] = rest_field() + """Details of the error encountered evaluating the autoscale formula on the Pool, if the + evaluation was unsuccessful.""" + + @overload + def __init__( + self, + *, + timestamp: datetime.datetime, + results: Optional[str] = None, + error: Optional["_models.AutoScaleRunError"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class AutoScaleRunError(_model_base.Model): + """An error that occurred when executing or evaluating a Pool autoscale formula. + + :ivar code: An identifier for the autoscale error. 
Codes are invariant and are intended to be + consumed programmatically. + :vartype code: str + :ivar message: A message describing the autoscale error, intended to be suitable for display in + a user interface. + :vartype message: str + :ivar values_property: A list of additional error details related to the autoscale error. + :vartype values_property: list[~azure.batch.models.NameValuePair] + """ + + code: Optional[str] = rest_field() + """An identifier for the autoscale error. Codes are invariant and are intended to be consumed + programmatically.""" + message: Optional[str] = rest_field() + """A message describing the autoscale error, intended to be suitable for display in a user + interface.""" + values_property: Optional[List["_models.NameValuePair"]] = rest_field(name="values") + """A list of additional error details related to the autoscale error.""" + + @overload + def __init__( + self, + *, + code: Optional[str] = None, + message: Optional[str] = None, + values_property: Optional[List["_models.NameValuePair"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class AutoUserSpecification(_model_base.Model): + """Specifies the options for the auto user that runs an Azure Batch Task. + + :ivar scope: The scope for the auto user. The default value is pool. If the pool is running + Windows, a value of Task should be specified if stricter isolation between tasks is required, + such as if the task mutates the registry in a way which could impact other tasks. Known values + are: "task" and "pool". + :vartype scope: str or ~azure.batch.models.AutoUserScope + :ivar elevation_level: The elevation level of the auto user. The default value is nonAdmin. + Known values are: "nonadmin" and "admin". + :vartype elevation_level: str or ~azure.batch.models.ElevationLevel + """ + + scope: Optional[Union[str, "_models.AutoUserScope"]] = rest_field() + """The scope for the auto user. The default value is pool. If the pool is running Windows, a value + of Task should be specified if stricter isolation between tasks is required, such as if the + task mutates the registry in a way which could impact other tasks. Known values are: \"task\" + and \"pool\".""" + elevation_level: Optional[Union[str, "_models.ElevationLevel"]] = rest_field(name="elevationLevel") + """The elevation level of the auto user. The default value is nonAdmin. Known values are: + \"nonadmin\" and \"admin\".""" + + @overload + def __init__( + self, + *, + scope: Optional[Union[str, "_models.AutoUserScope"]] = None, + elevation_level: Optional[Union[str, "_models.ElevationLevel"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class AzureBlobFileSystemConfiguration(_model_base.Model): + """Information used to connect to an Azure Storage Container using Blobfuse. 
- _validation = { - 'timestamp': {'required': True}, - } - _attribute_map = { - 'timestamp': {'key': 'timestamp', 'type': 'iso-8601'}, - 'results': {'key': 'results', 'type': 'str'}, - 'error': {'key': 'error', 'type': 'AutoScaleRunError'}, - } + :ivar account_name: The Azure Storage Account name. Required. + :vartype account_name: str + :ivar container_name: The Azure Blob Storage Container name. Required. + :vartype container_name: str + :ivar account_key: The Azure Storage Account key. This property is mutually exclusive with both + sasKey and identity; exactly one must be specified. + :vartype account_key: str + :ivar sas_key: The Azure Storage SAS token. This property is mutually exclusive with both + accountKey and identity; exactly one must be specified. + :vartype sas_key: str + :ivar blobfuse_options: Additional command line options to pass to the mount command. These are + 'net use' options in Windows and 'mount' options in Linux. + :vartype blobfuse_options: str + :ivar relative_mount_path: The relative path on the compute node where the file system will be + mounted. All file systems are mounted relative to the Batch mounts directory, accessible via + the AZ_BATCH_NODE_MOUNTS_DIR environment variable. Required. + :vartype relative_mount_path: str + :ivar identity_reference: The reference to the user assigned identity to use to access + containerName. This property is mutually exclusive with both accountKey and sasKey; exactly one + must be specified. + :vartype identity_reference: ~azure.batch.models.BatchNodeIdentityReference + """ + + account_name: str = rest_field(name="accountName") + """The Azure Storage Account name. Required.""" + container_name: str = rest_field(name="containerName") + """The Azure Blob Storage Container name. Required.""" + account_key: Optional[str] = rest_field(name="accountKey") + """The Azure Storage Account key. This property is mutually exclusive with both sasKey and + identity; exactly one must be specified.""" + sas_key: Optional[str] = rest_field(name="sasKey") + """The Azure Storage SAS token. This property is mutually exclusive with both accountKey and + identity; exactly one must be specified.""" + blobfuse_options: Optional[str] = rest_field(name="blobfuseOptions") + """Additional command line options to pass to the mount command. These are 'net use' options in + Windows and 'mount' options in Linux.""" + relative_mount_path: str = rest_field(name="relativeMountPath") + """The relative path on the compute node where the file system will be mounted. All file systems + are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR + environment variable. Required.""" + identity_reference: Optional["_models.BatchNodeIdentityReference"] = rest_field(name="identityReference") + """The reference to the user assigned identity to use to access containerName. This property is + mutually exclusive with both accountKey and sasKey; exactly one must be specified.""" + + @overload + def __init__( + self, + *, + account_name: str, + container_name: str, + relative_mount_path: str, + account_key: Optional[str] = None, + sas_key: Optional[str] = None, + blobfuse_options: Optional[str] = None, + identity_reference: Optional["_models.BatchNodeIdentityReference"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class AzureFileShareConfiguration(_model_base.Model): + """Information used to connect to an Azure Fileshare. - def __init__(self, **kwargs): - super(AutoScaleRun, self).__init__(**kwargs) - self.timestamp = kwargs.get('timestamp', None) - self.results = kwargs.get('results', None) - self.error = kwargs.get('error', None) + :ivar account_name: The Azure Storage account name. Required. + :vartype account_name: str + :ivar azure_file_url: The Azure Files URL. This is of the form + 'https://{account}.file.core.windows.net/'. Required. + :vartype azure_file_url: str + :ivar account_key: The Azure Storage account key. Required. + :vartype account_key: str + :ivar relative_mount_path: The relative path on the compute node where the file system will be + mounted. All file systems are mounted relative to the Batch mounts directory, accessible via + the AZ_BATCH_NODE_MOUNTS_DIR environment variable. Required. + :vartype relative_mount_path: str + :ivar mount_options: Additional command line options to pass to the mount command. These are + 'net use' options in Windows and 'mount' options in Linux. + :vartype mount_options: str + """ + + account_name: str = rest_field(name="accountName") + """The Azure Storage account name. Required.""" + azure_file_url: str = rest_field(name="azureFileUrl") + """The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. Required.""" + account_key: str = rest_field(name="accountKey") + """The Azure Storage account key. Required.""" + relative_mount_path: str = rest_field(name="relativeMountPath") + """The relative path on the compute node where the file system will be mounted. All file systems + are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR + environment variable. Required.""" + mount_options: Optional[str] = rest_field(name="mountOptions") + """Additional command line options to pass to the mount command. These are 'net use' options in + Windows and 'mount' options in Linux.""" + + @overload + def __init__( + self, + *, + account_name: str, + azure_file_url: str, + account_key: str, + relative_mount_path: str, + mount_options: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchApplication(_model_base.Model): + """Contains information about an application in an Azure Batch Account. -class AutoScaleRunError(Model): - """An error that occurred when executing or evaluating a Pool autoscale - formula. - :param code: - :type code: str - :param message: - :type message: str - :param values: - :type values: list[~azure.batch.models.NameValuePair] + :ivar id: A string that uniquely identifies the application within the Account. Required. + :vartype id: str + :ivar display_name: The display name for the application. Required. + :vartype display_name: str + :ivar versions: The list of available versions of the application. Required. 
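The two mount configurations above can be built along these lines (a sketch with placeholder account, container and share names; only parameters documented in the models are used, and the accountKey/sasKey/identity mutual exclusivity is respected by passing a single credential):

    from azure.batch import models

    # Blobfuse mount authenticated with a SAS token (exactly one of accountKey,
    # sasKey or identity may be given).
    blob_mount = models.AzureBlobFileSystemConfiguration(
        account_name="mystorageaccount",    # placeholder storage account
        container_name="input-data",        # placeholder container
        sas_key="<sas-token>",              # placeholder; do not hard-code secrets
        relative_mount_path="inputs",       # resolved under AZ_BATCH_NODE_MOUNTS_DIR
    )

    # Azure Files mount; all four arguments shown are required by the model.
    share_mount = models.AzureFileShareConfiguration(
        account_name="mystorageaccount",
        azure_file_url="https://mystorageaccount.file.core.windows.net/myshare",
        account_key="<storage-account-key>",
        relative_mount_path="share",
    )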
+ :vartype versions: list[str] """ - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'values': {'key': 'values', 'type': '[NameValuePair]'}, - } - - def __init__(self, **kwargs): - super(AutoScaleRunError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) - self.values = kwargs.get('values', None) + id: str = rest_field() + """A string that uniquely identifies the application within the Account. Required.""" + display_name: str = rest_field(name="displayName") + """The display name for the application. Required.""" + versions: List[str] = rest_field() + """The list of available versions of the application. Required.""" + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + display_name: str, + versions: List[str], + ): ... -class AutoUserSpecification(Model): - """Specifies the parameters for the auto user that runs a Task on the Batch - service. - - :param scope: The default value is pool. If the pool is running Windows a - value of Task should be specified if stricter isolation between tasks is - required. For example, if the task mutates the registry in a way which - could impact other tasks, or if certificates have been specified on the - pool which should not be accessible by normal tasks but should be - accessible by StartTasks. Possible values include: 'task', 'pool' - :type scope: str or ~azure.batch.models.AutoUserScope - :param elevation_level: The elevation level of the auto user. The default - value is nonAdmin. Possible values include: 'nonAdmin', 'admin' - :type elevation_level: str or ~azure.batch.models.ElevationLevel - """ + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ - _attribute_map = { - 'scope': {'key': 'scope', 'type': 'AutoUserScope'}, - 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, - } + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) - def __init__(self, **kwargs): - super(AutoUserSpecification, self).__init__(**kwargs) - self.scope = kwargs.get('scope', None) - self.elevation_level = kwargs.get('elevation_level', None) +class BatchApplicationPackageReference(_model_base.Model): + """A reference to an Package to be deployed to Compute Nodes. -class AzureBlobFileSystemConfiguration(Model): - """Information used to connect to an Azure Storage Container using Blobfuse. - - All required parameters must be populated in order to send to Azure. - - :param account_name: Required. - :type account_name: str - :param container_name: Required. - :type container_name: str - :param account_key: This property is mutually exclusive with both sasKey - and identity; exactly one must be specified. - :type account_key: str - :param sas_key: This property is mutually exclusive with both accountKey - and identity; exactly one must be specified. - :type sas_key: str - :param blobfuse_options: These are 'net use' options in Windows and - 'mount' options in Linux. - :type blobfuse_options: str - :param relative_mount_path: Required. All file systems are mounted - relative to the Batch mounts directory, accessible via the - AZ_BATCH_NODE_MOUNTS_DIR environment variable. - :type relative_mount_path: str - :param identity_reference: The reference to the user assigned identity to - use to access containerName. 
This property is mutually exclusive with both - accountKey and sasKey; exactly one must be specified. - :type identity_reference: ~azure.batch.models.ComputeNodeIdentityReference - """ - - _validation = { - 'account_name': {'required': True}, - 'container_name': {'required': True}, - 'relative_mount_path': {'required': True}, - } - - _attribute_map = { - 'account_name': {'key': 'accountName', 'type': 'str'}, - 'container_name': {'key': 'containerName', 'type': 'str'}, - 'account_key': {'key': 'accountKey', 'type': 'str'}, - 'sas_key': {'key': 'sasKey', 'type': 'str'}, - 'blobfuse_options': {'key': 'blobfuseOptions', 'type': 'str'}, - 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, - 'identity_reference': {'key': 'identityReference', 'type': 'ComputeNodeIdentityReference'}, - } - - def __init__(self, **kwargs): - super(AzureBlobFileSystemConfiguration, self).__init__(**kwargs) - self.account_name = kwargs.get('account_name', None) - self.container_name = kwargs.get('container_name', None) - self.account_key = kwargs.get('account_key', None) - self.sas_key = kwargs.get('sas_key', None) - self.blobfuse_options = kwargs.get('blobfuse_options', None) - self.relative_mount_path = kwargs.get('relative_mount_path', None) - self.identity_reference = kwargs.get('identity_reference', None) - - -class AzureFileShareConfiguration(Model): - """Information used to connect to an Azure Fileshare. - All required parameters must be populated in order to send to Azure. - - :param account_name: Required. - :type account_name: str - :param azure_file_url: Required. This is of the form - 'https://{account}.file.core.windows.net/'. - :type azure_file_url: str - :param account_key: Required. - :type account_key: str - :param relative_mount_path: Required. All file systems are mounted - relative to the Batch mounts directory, accessible via the - AZ_BATCH_NODE_MOUNTS_DIR environment variable. - :type relative_mount_path: str - :param mount_options: These are 'net use' options in Windows and 'mount' - options in Linux. - :type mount_options: str - """ - - _validation = { - 'account_name': {'required': True}, - 'azure_file_url': {'required': True}, - 'account_key': {'required': True}, - 'relative_mount_path': {'required': True}, - } - - _attribute_map = { - 'account_name': {'key': 'accountName', 'type': 'str'}, - 'azure_file_url': {'key': 'azureFileUrl', 'type': 'str'}, - 'account_key': {'key': 'accountKey', 'type': 'str'}, - 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, - 'mount_options': {'key': 'mountOptions', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(AzureFileShareConfiguration, self).__init__(**kwargs) - self.account_name = kwargs.get('account_name', None) - self.azure_file_url = kwargs.get('azure_file_url', None) - self.account_key = kwargs.get('account_key', None) - self.relative_mount_path = kwargs.get('relative_mount_path', None) - self.mount_options = kwargs.get('mount_options', None) - - -class BatchError(Model): + :ivar application_id: The ID of the application to deploy. When creating a pool, the package's + application ID must be fully qualified + (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). # pylint: disable=line-too-long + Required. + :vartype application_id: str + :ivar version: The version of the application to deploy. If omitted, the default version is + deployed. 
If this is omitted on a Pool, and no default version is specified for this + application, the request fails with the error code InvalidApplicationPackageReferences and HTTP + status code 409. If this is omitted on a Task, and no default version is specified for this + application, the Task fails with a pre-processing error. + :vartype version: str + """ + + application_id: str = rest_field(name="applicationId") + """The ID of the application to deploy. When creating a pool, the package's application ID must be + fully qualified + (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). # pylint: disable=line-too-long + Required.""" + version: Optional[str] = rest_field() + """The version of the application to deploy. If omitted, the default version is deployed. If this + is omitted on a Pool, and no default version is specified for this application, the request + fails with the error code InvalidApplicationPackageReferences and HTTP status code 409. If this + is omitted on a Task, and no default version is specified for this application, the Task fails + with a pre-processing error.""" + + @overload + def __init__( + self, + *, + application_id: str, + version: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchAutoPoolSpecification(_model_base.Model): + """Specifies characteristics for a temporary 'auto pool'. The Batch service will + create this auto Pool when the Job is submitted. + + + :ivar auto_pool_id_prefix: A prefix to be added to the unique identifier when a Pool is + automatically created. The Batch service assigns each auto Pool a unique identifier on + creation. To distinguish between Pools created for different purposes, you can specify this + element to add a prefix to the ID that is assigned. The prefix can be up to 20 characters long. + :vartype auto_pool_id_prefix: str + :ivar pool_lifetime_option: The minimum lifetime of created auto Pools, and how multiple Jobs + on a schedule are assigned to Pools. Required. Known values are: "jobschedule" and "job". + :vartype pool_lifetime_option: str or ~azure.batch.models.BatchPoolLifetimeOption + :ivar keep_alive: Whether to keep an auto Pool alive after its lifetime expires. If false, the + Batch service deletes the Pool once its lifetime (as determined by the poolLifetimeOption + setting) expires; that is, when the Job or Job Schedule completes. If true, the Batch service + does not delete the Pool automatically. It is up to the user to delete auto Pools created with + this option. + :vartype keep_alive: bool + :ivar pool: The Pool specification for the auto Pool. + :vartype pool: ~azure.batch.models.BatchPoolSpecification + """ + + auto_pool_id_prefix: Optional[str] = rest_field(name="autoPoolIdPrefix") + """A prefix to be added to the unique identifier when a Pool is automatically created. The Batch + service assigns each auto Pool a unique identifier on creation. To distinguish between Pools + created for different purposes, you can specify this element to add a prefix to the ID that is + assigned. 
The prefix can be up to 20 characters long.""" + pool_lifetime_option: Union[str, "_models.BatchPoolLifetimeOption"] = rest_field(name="poolLifetimeOption") + """The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to + Pools. Required. Known values are: \"jobschedule\" and \"job\".""" + keep_alive: Optional[bool] = rest_field(name="keepAlive") + """Whether to keep an auto Pool alive after its lifetime expires. If false, the Batch service + deletes the Pool once its lifetime (as determined by the poolLifetimeOption setting) expires; + that is, when the Job or Job Schedule completes. If true, the Batch service does not delete the + Pool automatically. It is up to the user to delete auto Pools created with this option.""" + pool: Optional["_models.BatchPoolSpecification"] = rest_field() + """The Pool specification for the auto Pool.""" + + @overload + def __init__( + self, + *, + pool_lifetime_option: Union[str, "_models.BatchPoolLifetimeOption"], + auto_pool_id_prefix: Optional[str] = None, + keep_alive: Optional[bool] = None, + pool: Optional["_models.BatchPoolSpecification"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchError(_model_base.Model): """An error response received from the Azure Batch service. - :param code: - :type code: str - :param message: A message describing the error, intended to be suitable - for display in a user interface. - :type message: ~azure.batch.models.ErrorMessage - :param values: - :type values: list[~azure.batch.models.BatchErrorDetail] - """ - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'ErrorMessage'}, - 'values': {'key': 'values', 'type': '[BatchErrorDetail]'}, - } - def __init__(self, **kwargs): - super(BatchError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) - self.values = kwargs.get('values', None) - - -class BatchErrorException(HttpOperationError): - """Server responsed with exception of type: 'BatchError'. + :ivar code: An identifier for the error. Codes are invariant and are intended to be consumed + programmatically. Required. + :vartype code: str + :ivar message: A message describing the error, intended to be suitable for display in a user + interface. + :vartype message: ~azure.batch.models.BatchErrorMessage + :ivar values_property: A collection of key-value pairs containing additional details about the + error. + :vartype values_property: list[~azure.batch.models.BatchErrorDetail] + """ + + code: str = rest_field() + """An identifier for the error. Codes are invariant and are intended to be consumed + programmatically. Required.""" + message: Optional["_models.BatchErrorMessage"] = rest_field() + """A message describing the error, intended to be suitable for display in a user interface.""" + values_property: Optional[List["_models.BatchErrorDetail"]] = rest_field(name="values") + """A collection of key-value pairs containing additional details about the error.""" + + @overload + def __init__( + self, + *, + code: str, + message: Optional["_models.BatchErrorMessage"] = None, + values_property: Optional[List["_models.BatchErrorDetail"]] = None, + ): ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchErrorDetail(_model_base.Model): + """An item of additional information included in an Azure Batch error response. + + :ivar key: An identifier specifying the meaning of the Value property. + :vartype key: str + :ivar value: The additional information included with the error response. + :vartype value: str + """ + + key: Optional[str] = rest_field() + """An identifier specifying the meaning of the Value property.""" + value: Optional[str] = rest_field() + """The additional information included with the error response.""" + + @overload + def __init__( + self, + *, + key: Optional[str] = None, + value: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchErrorMessage(_model_base.Model): + """An error message received in an Azure Batch error response. - :param deserialize: A deserializer - :param response: Server response to be deserialized. + :ivar lang: The language code of the error message. + :vartype lang: str + :ivar value: The text of the message. + :vartype value: str """ - def __init__(self, deserialize, response, *args): + lang: Optional[str] = rest_field() + """The language code of the error message.""" + value: Optional[str] = rest_field() + """The text of the message.""" - super(BatchErrorException, self).__init__(deserialize, response, 'BatchError', *args) + @overload + def __init__( + self, + *, + lang: Optional[str] = None, + value: Optional[str] = None, + ): ... + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ -class BatchErrorDetail(Model): - """An item of additional information included in an Azure Batch error - response. + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) - :param key: - :type key: str - :param value: - :type value: str - """ - _attribute_map = { - 'key': {'key': 'key', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(BatchErrorDetail, self).__init__(**kwargs) - self.key = kwargs.get('key', None) - self.value = kwargs.get('value', None) +class BatchJob(_model_base.Model): # pylint: disable=too-many-instance-attributes + """An Azure Batch Job. + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: A string that uniquely identifies the Job within the Account. The ID is + case-preserving and case-insensitive (that is, you may not have two IDs within an Account that + differ only by case). + :vartype id: str + :ivar display_name: The display name for the Job. + :vartype display_name: str + :ivar uses_task_dependencies: Whether Tasks in the Job can define dependencies on each other. + The default is false. + :vartype uses_task_dependencies: bool + :ivar url: The URL of the Job. + :vartype url: str + :ivar e_tag: The ETag of the Job. 
This is an opaque string. You can use it to detect whether + the Job has changed between requests. In particular, you can be pass the ETag when updating a + Job to specify that your changes should take effect only if nobody else has modified the Job in + the meantime. + :vartype e_tag: str + :ivar last_modified: The last modified time of the Job. This is the last time at which the Job + level data, such as the Job state or priority, changed. It does not factor in task-level + changes such as adding new Tasks or Tasks changing state. + :vartype last_modified: ~datetime.datetime + :ivar creation_time: The creation time of the Job. + :vartype creation_time: ~datetime.datetime + :ivar state: The current state of the Job. Known values are: "active", "disabling", "disabled", + "enabling", "terminating", "completed", and "deleting". + :vartype state: str or ~azure.batch.models.BatchJobState + :ivar state_transition_time: The time at which the Job entered its current state. + :vartype state_transition_time: ~datetime.datetime + :ivar previous_state: The previous state of the Job. This property is not set if the Job is in + its initial Active state. Known values are: "active", "disabling", "disabled", "enabling", + "terminating", "completed", and "deleting". + :vartype previous_state: str or ~azure.batch.models.BatchJobState + :ivar previous_state_transition_time: The time at which the Job entered its previous state. + This property is not set if the Job is in its initial Active state. + :vartype previous_state_transition_time: ~datetime.datetime + :ivar priority: The priority of the Job. Priority values can range from -1000 to 1000, with + -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. + :vartype priority: int + :ivar allow_task_preemption: Whether Tasks in this job can be preempted by other high priority + jobs. If the value is set to True, other high priority jobs submitted to the system will take + precedence and will be able requeue tasks from this job. You can update a job's + allowTaskPreemption after it has been created using the update job API. + :vartype allow_task_preemption: bool + :ivar max_parallel_tasks: The maximum number of tasks that can be executed in parallel for the + job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, + the default value is -1, which means there's no limit to the number of tasks that can be run at + once. You can update a job's maxParallelTasks after it has been created using the update job + API. + :vartype max_parallel_tasks: int + :ivar constraints: The execution constraints for the Job. + :vartype constraints: ~azure.batch.models.BatchJobConstraints + :ivar job_manager_task: Details of a Job Manager Task to be launched when the Job is started. + :vartype job_manager_task: ~azure.batch.models.BatchJobManagerTask + :ivar job_preparation_task: The Job Preparation Task. The Job Preparation Task is a special + Task run on each Compute Node before any other Task of the Job. + :vartype job_preparation_task: ~azure.batch.models.BatchJobPreparationTask + :ivar job_release_task: The Job Release Task. The Job Release Task is a special Task run at the + end of the Job on each Compute Node that has run any other Task of the Job. + :vartype job_release_task: ~azure.batch.models.BatchJobReleaseTask + :ivar common_environment_settings: The list of common environment variable settings. 
These + environment variables are set for all Tasks in the Job (including the Job Manager, Job + Preparation and Job Release Tasks). Individual Tasks can override an environment setting + specified here by specifying the same setting name with a different value. + :vartype common_environment_settings: list[~azure.batch.models.EnvironmentSetting] + :ivar pool_info: The Pool settings associated with the Job. Required. + :vartype pool_info: ~azure.batch.models.BatchPoolInfo + :ivar on_all_tasks_complete: The action the Batch service should take when all Tasks in the Job + are in the completed state. The default is noaction. Known values are: "noaction" and + "terminatejob". + :vartype on_all_tasks_complete: str or ~azure.batch.models.OnAllBatchTasksComplete + :ivar on_task_failure: The action the Batch service should take when any Task in the Job fails. + A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task + completes with a non-zero exit code after exhausting its retry count, or if there was an error + starting the Task, for example due to a resource file download error. The default is noaction. + Known values are: "noaction" and "performexitoptionsjobaction". + :vartype on_task_failure: str or ~azure.batch.models.OnBatchTaskFailure + :ivar network_configuration: The network configuration for the Job. + :vartype network_configuration: ~azure.batch.models.BatchJobNetworkConfiguration + :ivar metadata: A list of name-value pairs associated with the Job as metadata. The Batch + service does not assign any meaning to metadata; it is solely for the use of user code. + :vartype metadata: list[~azure.batch.models.MetadataItem] + :ivar execution_info: The execution information for the Job. + :vartype execution_info: ~azure.batch.models.BatchJobExecutionInfo + :ivar stats: Resource usage statistics for the entire lifetime of the Job. This property is + populated only if the CloudJob was retrieved with an expand clause including the 'stats' + attribute; otherwise it is null. The statistics may not be immediately available. The Batch + service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + :vartype stats: ~azure.batch.models.BatchJobStatistics + """ + + id: Optional[str] = rest_field(visibility=["read"]) + """A string that uniquely identifies the Job within the Account. The ID is case-preserving and + case-insensitive (that is, you may not have two IDs within an Account that differ only by + case).""" + display_name: Optional[str] = rest_field(name="displayName", visibility=["read"]) + """The display name for the Job.""" + uses_task_dependencies: Optional[bool] = rest_field(name="usesTaskDependencies", visibility=["read"]) + """Whether Tasks in the Job can define dependencies on each other. The default is false.""" + url: Optional[str] = rest_field(visibility=["read"]) + """The URL of the Job.""" + e_tag: Optional[str] = rest_field(name="eTag", visibility=["read"]) + """The ETag of the Job. This is an opaque string. You can use it to detect whether the Job has + changed between requests. In particular, you can be pass the ETag when updating a Job to + specify that your changes should take effect only if nobody else has modified the Job in the + meantime.""" + last_modified: Optional[datetime.datetime] = rest_field(name="lastModified", visibility=["read"], format="rfc3339") + """The last modified time of the Job. This is the last time at which the Job level data, such as + the Job state or priority, changed. 
It does not factor in task-level changes such as adding new + Tasks or Tasks changing state.""" + creation_time: Optional[datetime.datetime] = rest_field(name="creationTime", visibility=["read"], format="rfc3339") + """The creation time of the Job.""" + state: Optional[Union[str, "_models.BatchJobState"]] = rest_field(visibility=["read"]) + """The current state of the Job. Known values are: \"active\", \"disabling\", \"disabled\", + \"enabling\", \"terminating\", \"completed\", and \"deleting\".""" + state_transition_time: Optional[datetime.datetime] = rest_field( + name="stateTransitionTime", visibility=["read"], format="rfc3339" + ) + """The time at which the Job entered its current state.""" + previous_state: Optional[Union[str, "_models.BatchJobState"]] = rest_field( + name="previousState", visibility=["read"] + ) + """The previous state of the Job. This property is not set if the Job is in its initial Active + state. Known values are: \"active\", \"disabling\", \"disabled\", \"enabling\", + \"terminating\", \"completed\", and \"deleting\".""" + previous_state_transition_time: Optional[datetime.datetime] = rest_field( + name="previousStateTransitionTime", visibility=["read"], format="rfc3339" + ) + """The time at which the Job entered its previous state. This property is not set if the Job is in + its initial Active state.""" + priority: Optional[int] = rest_field() + """The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the + lowest priority and 1000 being the highest priority. The default value is 0.""" + allow_task_preemption: Optional[bool] = rest_field(name="allowTaskPreemption") + """Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to + True, other high priority jobs submitted to the system will take precedence and will be able + requeue tasks from this job. You can update a job's allowTaskPreemption after it has been + created using the update job API.""" + max_parallel_tasks: Optional[int] = rest_field(name="maxParallelTasks") + """The maximum number of tasks that can be executed in parallel for the job. The value of + maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value + is -1, which means there's no limit to the number of tasks that can be run at once. You can + update a job's maxParallelTasks after it has been created using the update job API.""" + constraints: Optional["_models.BatchJobConstraints"] = rest_field() + """The execution constraints for the Job.""" + job_manager_task: Optional["_models.BatchJobManagerTask"] = rest_field(name="jobManagerTask", visibility=["read"]) + """Details of a Job Manager Task to be launched when the Job is started.""" + job_preparation_task: Optional["_models.BatchJobPreparationTask"] = rest_field( + name="jobPreparationTask", visibility=["read"] + ) + """The Job Preparation Task. The Job Preparation Task is a special Task run on each Compute Node + before any other Task of the Job.""" + job_release_task: Optional["_models.BatchJobReleaseTask"] = rest_field(name="jobReleaseTask", visibility=["read"]) + """The Job Release Task. The Job Release Task is a special Task run at the end of the Job on each + Compute Node that has run any other Task of the Job.""" + common_environment_settings: Optional[List["_models.EnvironmentSetting"]] = rest_field( + name="commonEnvironmentSettings", visibility=["read"] + ) + """The list of common environment variable settings. 
These environment variables are set for all + Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual + Tasks can override an environment setting specified here by specifying the same setting name + with a different value.""" + pool_info: "_models.BatchPoolInfo" = rest_field(name="poolInfo") + """The Pool settings associated with the Job. Required.""" + on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = rest_field( + name="onAllTasksComplete" + ) + """The action the Batch service should take when all Tasks in the Job are in the completed state. + The default is noaction. Known values are: \"noaction\" and \"terminatejob\".""" + on_task_failure: Optional[Union[str, "_models.OnBatchTaskFailure"]] = rest_field( + name="onTaskFailure", visibility=["read"] + ) + """The action the Batch service should take when any Task in the Job fails. A Task is considered + to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero + exit code after exhausting its retry count, or if there was an error starting the Task, for + example due to a resource file download error. The default is noaction. Known values are: + \"noaction\" and \"performexitoptionsjobaction\".""" + network_configuration: Optional["_models.BatchJobNetworkConfiguration"] = rest_field( + name="networkConfiguration", visibility=["read"] + ) + """The network configuration for the Job.""" + metadata: Optional[List["_models.MetadataItem"]] = rest_field() + """A list of name-value pairs associated with the Job as metadata. The Batch service does not + assign any meaning to metadata; it is solely for the use of user code.""" + execution_info: Optional["_models.BatchJobExecutionInfo"] = rest_field(name="executionInfo", visibility=["read"]) + """The execution information for the Job.""" + stats: Optional["_models.BatchJobStatistics"] = rest_field(visibility=["read"]) + """Resource usage statistics for the entire lifetime of the Job. This property is populated only + if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise + it is null. The statistics may not be immediately available. The Batch service performs + periodic roll-up of statistics. The typical delay is about 30 minutes.""" + + @overload + def __init__( + self, + *, + pool_info: "_models.BatchPoolInfo", + priority: Optional[int] = None, + allow_task_preemption: Optional[bool] = None, + max_parallel_tasks: Optional[int] = None, + constraints: Optional["_models.BatchJobConstraints"] = None, + on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = None, + metadata: Optional[List["_models.MetadataItem"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobConstraints(_model_base.Model): + """The execution constraints for a Job. -class BatchPoolIdentity(Model): - """The identity of the Batch pool, if configured. + :ivar max_wall_clock_time: The maximum elapsed time that the Job may run, measured from the + time the Job is created. If the Job does not complete within the time limit, the Batch service + terminates it and any Tasks that are still running. In this case, the termination reason will + be MaxWallClockTimeExpiry. 
If this property is not specified, there is no time limit on how + long the Job may run. + :vartype max_wall_clock_time: ~datetime.timedelta + :ivar max_task_retry_count: The maximum number of times each Task may be retried. The Batch + service retries a Task if its exit code is nonzero. Note that this value specifically controls + the number of retries. The Batch service will try each Task once, and may then retry up to this + limit. For example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one + initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry + Tasks. If the maximum retry count is -1, the Batch service retries Tasks without limit. The + default value is 0 (no retries). + :vartype max_task_retry_count: int + """ + + max_wall_clock_time: Optional[datetime.timedelta] = rest_field(name="maxWallClockTime") + """The maximum elapsed time that the Job may run, measured from the time the Job is created. If + the Job does not complete within the time limit, the Batch service terminates it and any Tasks + that are still running. In this case, the termination reason will be MaxWallClockTimeExpiry. If + this property is not specified, there is no time limit on how long the Job may run.""" + max_task_retry_count: Optional[int] = rest_field(name="maxTaskRetryCount") + """The maximum number of times each Task may be retried. The Batch service retries a Task if its + exit code is nonzero. Note that this value specifically controls the number of retries. The + Batch service will try each Task once, and may then retry up to this limit. For example, if the + maximum retry count is 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If + the maximum retry count is 0, the Batch service does not retry Tasks. If the maximum retry + count is -1, the Batch service retries Tasks without limit. The default value is 0 (no + retries).""" + + @overload + def __init__( + self, + *, + max_wall_clock_time: Optional[datetime.timedelta] = None, + max_task_retry_count: Optional[int] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobCreateContent(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Parameters for creating an Azure Batch Job. + + All required parameters must be populated in order to send to server. + + :ivar id: A string that uniquely identifies the Job within the Account. The ID can contain any + combination of alphanumeric characters including hyphens and underscores, and cannot contain + more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not + have two IDs within an Account that differ only by case). Required. + :vartype id: str + :ivar display_name: The display name for the Job. The display name need not be unique and can + contain any Unicode characters up to a maximum length of 1024. + :vartype display_name: str + :ivar uses_task_dependencies: Whether Tasks in the Job can define dependencies on each other. + The default is false. + :vartype uses_task_dependencies: bool + :ivar priority: The priority of the Job. Priority values can range from -1000 to 1000, with + -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. 
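A small sketch of the BatchJobConstraints model above, using the documented timedelta and int types (the values are arbitrary examples):

    import datetime

    from azure.batch import models

    # Cap the Job at five hours of wall-clock time and retry each Task up to
    # three times (so at most four attempts per Task).
    constraints = models.BatchJobConstraints(
        max_wall_clock_time=datetime.timedelta(hours=5),
        max_task_retry_count=3,
    )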
+ :vartype priority: int + :ivar allow_task_preemption: Whether Tasks in this job can be preempted by other high priority + jobs. If the value is set to True, other high priority jobs submitted to the system will take + precedence and will be able requeue tasks from this job. You can update a job's + allowTaskPreemption after it has been created using the update job API. + :vartype allow_task_preemption: bool + :ivar max_parallel_tasks: The maximum number of tasks that can be executed in parallel for the + job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, + the default value is -1, which means there's no limit to the number of tasks that can be run at + once. You can update a job's maxParallelTasks after it has been created using the update job + API. + :vartype max_parallel_tasks: int + :ivar constraints: The execution constraints for the Job. + :vartype constraints: ~azure.batch.models.BatchJobConstraints + :ivar job_manager_task: Details of a Job Manager Task to be launched when the Job is started. + If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job. + If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when + the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks + in the Job. The Job Manager Task's typical purpose is to control and/or monitor Job execution, + for example by deciding what additional Tasks to run, determining when the work is complete, + etc. (However, a Job Manager Task is not restricted to these activities - it is a fully-fledged + Task in the system and perform whatever actions are required for the Job.) For example, a Job + Manager Task might download a file specified as a parameter, analyze the contents of that file + and submit additional Tasks based on those contents. + :vartype job_manager_task: ~azure.batch.models.BatchJobManagerTask + :ivar job_preparation_task: The Job Preparation Task. If a Job has a Job Preparation Task, the + Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job + on that Compute Node. + :vartype job_preparation_task: ~azure.batch.models.BatchJobPreparationTask + :ivar job_release_task: The Job Release Task. A Job Release Task cannot be specified without + also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task + on the Nodes that have run the Job Preparation Task. The primary purpose of the Job Release + Task is to undo changes to Compute Nodes made by the Job Preparation Task. Example activities + include deleting local files, or shutting down services that were started as part of Job + preparation. + :vartype job_release_task: ~azure.batch.models.BatchJobReleaseTask + :ivar common_environment_settings: The list of common environment variable settings. These + environment variables are set for all Tasks in the Job (including the Job Manager, Job + Preparation and Job Release Tasks). Individual Tasks can override an environment setting + specified here by specifying the same setting name with a different value. + :vartype common_environment_settings: list[~azure.batch.models.EnvironmentSetting] + :ivar pool_info: The Pool on which the Batch service runs the Job's Tasks. Required. + :vartype pool_info: ~azure.batch.models.BatchPoolInfo + :ivar on_all_tasks_complete: The action the Batch service should take when all Tasks in the Job + are in the completed state. 
Note that if a Job contains no Tasks, then all Tasks are considered + complete. This option is therefore most commonly used with a Job Manager task; if you want to + use automatic Job termination without a Job Manager, you should initially set + onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to + terminatejob once you have finished adding Tasks. The default is noaction. Known values are: + "noaction" and "terminatejob". + :vartype on_all_tasks_complete: str or ~azure.batch.models.OnAllBatchTasksComplete + :ivar on_task_failure: The action the Batch service should take when any Task in the Job fails. + A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task + completes with a non-zero exit code after exhausting its retry count, or if there was an error + starting the Task, for example due to a resource file download error. The default is noaction. + Known values are: "noaction" and "performexitoptionsjobaction". + :vartype on_task_failure: str or ~azure.batch.models.OnBatchTaskFailure + :ivar network_configuration: The network configuration for the Job. + :vartype network_configuration: ~azure.batch.models.BatchJobNetworkConfiguration + :ivar metadata: A list of name-value pairs associated with the Job as metadata. The Batch + service does not assign any meaning to metadata; it is solely for the use of user code. + :vartype metadata: list[~azure.batch.models.MetadataItem] + """ + + id: str = rest_field() + """A string that uniquely identifies the Job within the Account. The ID can contain any + combination of alphanumeric characters including hyphens and underscores, and cannot contain + more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not + have two IDs within an Account that differ only by case). Required.""" + display_name: Optional[str] = rest_field(name="displayName") + """The display name for the Job. The display name need not be unique and can contain any Unicode + characters up to a maximum length of 1024.""" + uses_task_dependencies: Optional[bool] = rest_field(name="usesTaskDependencies") + """Whether Tasks in the Job can define dependencies on each other. The default is false.""" + priority: Optional[int] = rest_field() + """The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the + lowest priority and 1000 being the highest priority. The default value is 0.""" + allow_task_preemption: Optional[bool] = rest_field(name="allowTaskPreemption") + """Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to + True, other high priority jobs submitted to the system will take precedence and will be able + requeue tasks from this job. You can update a job's allowTaskPreemption after it has been + created using the update job API.""" + max_parallel_tasks: Optional[int] = rest_field(name="maxParallelTasks") + """The maximum number of tasks that can be executed in parallel for the job. The value of + maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value + is -1, which means there's no limit to the number of tasks that can be run at once. 
You can + update a job's maxParallelTasks after it has been created using the update job API.""" + constraints: Optional["_models.BatchJobConstraints"] = rest_field() + """The execution constraints for the Job.""" + job_manager_task: Optional["_models.BatchJobManagerTask"] = rest_field(name="jobManagerTask") + """Details of a Job Manager Task to be launched when the Job is started. If the Job does not + specify a Job Manager Task, the user must explicitly add Tasks to the Job. If the Job does + specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is + created, and will try to schedule the Job Manager Task before scheduling other Tasks in the + Job. The Job Manager Task's typical purpose is to control and/or monitor Job execution, for + example by deciding what additional Tasks to run, determining when the work is complete, etc. + (However, a Job Manager Task is not restricted to these activities - it is a fully-fledged Task + in the system and perform whatever actions are required for the Job.) For example, a Job + Manager Task might download a file specified as a parameter, analyze the contents of that file + and submit additional Tasks based on those contents.""" + job_preparation_task: Optional["_models.BatchJobPreparationTask"] = rest_field(name="jobPreparationTask") + """The Job Preparation Task. If a Job has a Job Preparation Task, the Batch service will run the + Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node.""" + job_release_task: Optional["_models.BatchJobReleaseTask"] = rest_field(name="jobReleaseTask") + """The Job Release Task. A Job Release Task cannot be specified without also specifying a Job + Preparation Task for the Job. The Batch service runs the Job Release Task on the Nodes that + have run the Job Preparation Task. The primary purpose of the Job Release Task is to undo + changes to Compute Nodes made by the Job Preparation Task. Example activities include deleting + local files, or shutting down services that were started as part of Job preparation.""" + common_environment_settings: Optional[List["_models.EnvironmentSetting"]] = rest_field( + name="commonEnvironmentSettings" + ) + """The list of common environment variable settings. These environment variables are set for all + Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual + Tasks can override an environment setting specified here by specifying the same setting name + with a different value.""" + pool_info: "_models.BatchPoolInfo" = rest_field(name="poolInfo") + """The Pool on which the Batch service runs the Job's Tasks. Required.""" + on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = rest_field( + name="onAllTasksComplete" + ) + """The action the Batch service should take when all Tasks in the Job are in the completed state. + Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is + therefore most commonly used with a Job Manager task; if you want to use automatic Job + termination without a Job Manager, you should initially set onAllTasksComplete to noaction and + update the Job properties to set onAllTasksComplete to terminatejob once you have finished + adding Tasks. The default is noaction. 
Known values are: \"noaction\" and \"terminatejob\".""" + on_task_failure: Optional[Union[str, "_models.OnBatchTaskFailure"]] = rest_field(name="onTaskFailure") + """The action the Batch service should take when any Task in the Job fails. A Task is considered + to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero + exit code after exhausting its retry count, or if there was an error starting the Task, for + example due to a resource file download error. The default is noaction. Known values are: + \"noaction\" and \"performexitoptionsjobaction\".""" + network_configuration: Optional["_models.BatchJobNetworkConfiguration"] = rest_field(name="networkConfiguration") + """The network configuration for the Job.""" + metadata: Optional[List["_models.MetadataItem"]] = rest_field() + """A list of name-value pairs associated with the Job as metadata. The Batch service does not + assign any meaning to metadata; it is solely for the use of user code.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + pool_info: "_models.BatchPoolInfo", + display_name: Optional[str] = None, + uses_task_dependencies: Optional[bool] = None, + priority: Optional[int] = None, + allow_task_preemption: Optional[bool] = None, + max_parallel_tasks: Optional[int] = None, + constraints: Optional["_models.BatchJobConstraints"] = None, + job_manager_task: Optional["_models.BatchJobManagerTask"] = None, + job_preparation_task: Optional["_models.BatchJobPreparationTask"] = None, + job_release_task: Optional["_models.BatchJobReleaseTask"] = None, + common_environment_settings: Optional[List["_models.EnvironmentSetting"]] = None, + on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = None, + on_task_failure: Optional[Union[str, "_models.OnBatchTaskFailure"]] = None, + network_configuration: Optional["_models.BatchJobNetworkConfiguration"] = None, + metadata: Optional[List["_models.MetadataItem"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobDisableContent(_model_base.Model): + """Parameters for disabling an Azure Batch Job. + + All required parameters must be populated in order to send to server. + + :ivar disable_tasks: What to do with active Tasks associated with the Job. Required. Known + values are: "requeue", "terminate", and "wait". + :vartype disable_tasks: str or ~azure.batch.models.DisableBatchJobOption + """ + + disable_tasks: Union[str, "_models.DisableBatchJobOption"] = rest_field(name="disableTasks") + """What to do with active Tasks associated with the Job. Required. Known values are: \"requeue\", + \"terminate\", and \"wait\".""" + + @overload + def __init__( + self, + *, + disable_tasks: Union[str, "_models.DisableBatchJobOption"], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobExecutionInfo(_model_base.Model): + """Contains information about the execution of a Job in the Azure Batch service. + + + :ivar start_time: The start time of the Job. 
This is the time at which the Job was created. + Required. + :vartype start_time: ~datetime.datetime + :ivar end_time: The completion time of the Job. This property is set only if the Job is in the + completed state. + :vartype end_time: ~datetime.datetime + :ivar pool_id: The ID of the Pool to which this Job is assigned. This element contains the + actual Pool where the Job is assigned. When you get Job details from the service, they also + contain a poolInfo element, which contains the Pool configuration data from when the Job was + added or updated. That poolInfo element may also contain a poolId element. If it does, the two + IDs are the same. If it does not, it means the Job ran on an auto Pool, and this property + contains the ID of that auto Pool. + :vartype pool_id: str + :ivar scheduling_error: Details of any error encountered by the service in starting the Job. + This property is not set if there was no error starting the Job. + :vartype scheduling_error: ~azure.batch.models.BatchJobSchedulingError + :ivar termination_reason: A string describing the reason the Job ended. This property is set + only if the Job is in the completed state. If the Batch service terminates the Job, it sets the + reason as follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion was set + to true. MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. + TerminateJobSchedule - the Job ran as part of a schedule, and the schedule terminated. + AllTasksComplete - the Job's onAllTasksComplete attribute is set to terminatejob, and all Tasks + in the Job are complete. TaskFailed - the Job's onTaskFailure attribute is set to + performExitOptionsJobAction, and a Task in the Job failed with an exit condition that specified + a jobAction of terminatejob. Any other string is a user-defined reason specified in a call to + the 'Terminate a Job' operation. + :vartype termination_reason: str + """ + + start_time: datetime.datetime = rest_field(name="startTime", format="rfc3339") + """The start time of the Job. This is the time at which the Job was created. Required.""" + end_time: Optional[datetime.datetime] = rest_field(name="endTime", format="rfc3339") + """The completion time of the Job. This property is set only if the Job is in the completed state.""" + pool_id: Optional[str] = rest_field(name="poolId") + """The ID of the Pool to which this Job is assigned. This element contains the actual Pool where + the Job is assigned. When you get Job details from the service, they also contain a poolInfo + element, which contains the Pool configuration data from when the Job was added or updated. + That poolInfo element may also contain a poolId element. If it does, the two IDs are the same. + If it does not, it means the Job ran on an auto Pool, and this property contains the ID of that + auto Pool.""" + scheduling_error: Optional["_models.BatchJobSchedulingError"] = rest_field(name="schedulingError") + """Details of any error encountered by the service in starting the Job. This property is not set + if there was no error starting the Job.""" + termination_reason: Optional[str] = rest_field(name="terminateReason") + """A string describing the reason the Job ended. This property is set only if the Job is in the + completed state. If the Batch service terminates the Job, it sets the reason as follows: + JMComplete - the Job Manager Task completed, and killJobOnCompletion was set to true. + MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. 
TerminateJobSchedule + - the Job ran as part of a schedule, and the schedule terminated. AllTasksComplete - the Job's + onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job are complete. + TaskFailed - the Job's onTaskFailure attribute is set to performExitOptionsJobAction, and a + Task in the Job failed with an exit condition that specified a jobAction of terminatejob. Any + other string is a user-defined reason specified in a call to the 'Terminate a Job' operation.""" + + @overload + def __init__( + self, + *, + start_time: datetime.datetime, + end_time: Optional[datetime.datetime] = None, + pool_id: Optional[str] = None, + scheduling_error: Optional["_models.BatchJobSchedulingError"] = None, + termination_reason: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobManagerTask(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Specifies details of a Job Manager Task. + The Job Manager Task is automatically started when the Job is created. The + Batch service tries to schedule the Job Manager Task before any other Tasks in + the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where + Job Manager Tasks are running for as long as possible (that is, Compute Nodes + running 'normal' Tasks are removed before Compute Nodes running Job Manager + Tasks). When a Job Manager Task fails and needs to be restarted, the system + tries to schedule it at the highest priority. If there are no idle Compute + Nodes available, the system may terminate one of the running Tasks in the Pool + and return it to the queue in order to make room for the Job Manager Task to + restart. Note that a Job Manager Task in one Job does not have priority over + Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For + example, if a Job Manager in a priority 0 Job needs to be restarted, it will + not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery + operation is triggered on a Node. Examples of recovery operations include (but + are not limited to) when an unhealthy Node is rebooted or a Compute Node + disappeared due to host failure. Retries due to recovery operations are + independent of and are not counted against the maxTaskRetryCount. Even if the + maxTaskRetryCount is 0, an internal retry due to a recovery operation may + occur. Because of this, all Tasks should be idempotent. This means Tasks need + to tolerate being interrupted and restarted without causing any corruption or + duplicate data. The best practice for long running Tasks is to use some form of + checkpointing. - The identity of the Batch pool, if configured. - All required parameters must be populated in order to send to Azure. + :ivar id: A string that uniquely identifies the Job Manager Task within the Job. The ID can + contain any combination of alphanumeric characters including hyphens and underscores and cannot + contain more than 64 characters. Required. + :vartype id: str + :ivar display_name: The display name of the Job Manager Task. It need not be unique and can + contain any Unicode characters up to a maximum length of 1024. + :vartype display_name: str + :ivar command_line: The command line of the Job Manager Task. 
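BatchJobExecutionInfo is filled in by the service, so in practice it is read rather than written. The sketch below builds one from a raw JSON mapping only to show how the wire names (startTime, poolId, terminateReason) surface as the snake_case attributes documented above; the import path is assumed from the ~azure.batch.models references:

    from azure.batch import models as batch_models

    exec_info = batch_models.BatchJobExecutionInfo(
        {
            "startTime": "2024-05-01T12:00:00Z",
            "poolId": "my-pool",
            "terminateReason": "AllTasksComplete",
        }
    )
    # Attribute access goes through the rest_field name mappings defined above.
    print(exec_info.pool_id, exec_info.termination_reason)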
The command line does not run + under a shell, and therefore cannot take advantage of shell features such as environment + variable expansion. If you want to take advantage of such features, you should invoke the shell + in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" + in Linux. If the command line refers to file paths, it should use a relative path (relative to + the Task working directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + Required. + :vartype command_line: str + :ivar container_settings: The settings for the container under which the Job Manager Task runs. + If the Pool that will run this Task has containerConfiguration set, this must be set as well. + If the Pool that will run this Task doesn't have containerConfiguration set, this must not be + set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the + root of Azure Batch directories on the node) are mapped into the container, all Task + environment variables are mapped into the container, and the Task command line is executed in + the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be + reflected to the host disk, meaning that Batch file APIs will not be able to access those + files. + :vartype container_settings: ~azure.batch.models.BatchTaskContainerSettings + :ivar resource_files: A list of files that the Batch service will download to the Compute Node + before running the command line. Files listed under this element are located in the Task's + working directory. There is a maximum size for the list of resource files. When the max size + is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. + If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved + using .zip files, Application Packages, or Docker Containers. + :vartype resource_files: list[~azure.batch.models.ResourceFile] + :ivar output_files: A list of files that the Batch service will upload from the Compute Node + after running the command line. For multi-instance Tasks, the files will only be uploaded from + the Compute Node on which the primary Task is executed. + :vartype output_files: list[~azure.batch.models.OutputFile] + :ivar environment_settings: A list of environment variable settings for the Job Manager Task. + :vartype environment_settings: list[~azure.batch.models.EnvironmentSetting] + :ivar constraints: Constraints that apply to the Job Manager Task. + :vartype constraints: ~azure.batch.models.BatchTaskConstraints + :ivar required_slots: The number of scheduling slots that the Task requires to run. The default + is 1. A Task can only be scheduled to run on a compute node if the node has enough free + scheduling slots available. For multi-instance Tasks, this property is not supported and must + not be specified. + :vartype required_slots: int + :ivar kill_job_on_completion: Whether completion of the Job Manager Task signifies completion + of the entire Job. If true, when the Job Manager Task completes, the Batch service marks the + Job as complete. If any Tasks are still running at this time (other than Job Release), those + Tasks are terminated. If false, the completion of the Job Manager Task does not affect the Job + status. 
In this case, you should either use the onAllTasksComplete attribute to terminate the + Job, or have a client or user terminate the Job explicitly. An example of this is if the Job + Manager creates a set of Tasks but then takes no further role in their execution. The default + value is true. If you are using the onAllTasksComplete and onTaskFailure attributes to control + Job lifetime, and using the Job Manager Task only to create the Tasks for the Job (not to + monitor progress), then it is important to set killJobOnCompletion to false. + :vartype kill_job_on_completion: bool + :ivar user_identity: The user identity under which the Job Manager Task runs. If omitted, the + Task runs as a non-administrative user unique to the Task. + :vartype user_identity: ~azure.batch.models.UserIdentity + :ivar run_exclusive: Whether the Job Manager Task requires exclusive use of the Compute Node + where it runs. If true, no other Tasks will run on the same Node for as long as the Job Manager + is running. If false, other Tasks can run simultaneously with the Job Manager on a Compute + Node. The Job Manager Task counts normally against the Compute Node's concurrent Task limit, so + this is only relevant if the Compute Node allows multiple concurrent Tasks. The default value + is true. + :vartype run_exclusive: bool + :ivar application_package_references: A list of Application Packages that the Batch service + will deploy to the + Compute Node before running the command line.Application Packages are + downloaded and deployed to a shared directory, not the Task working + directory. Therefore, if a referenced Application Package is already + on the Compute Node, and is up to date, then it is not re-downloaded; + the existing copy on the Compute Node is used. If a referenced Application + Package cannot be installed, for example because the package has been deleted + or because download failed, the Task fails. + :vartype application_package_references: + list[~azure.batch.models.BatchApplicationPackageReference] + :ivar authentication_token_settings: The settings for an authentication token that the Task can + use to perform Batch service operations. If this property is set, the Batch service provides + the Task with an authentication token which can be used to authenticate Batch service + operations without requiring an Account access key. The token is provided via the + AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out + using the token depend on the settings. For example, a Task can request Job permissions in + order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the + Job. + :vartype authentication_token_settings: ~azure.batch.models.AuthenticationTokenSettings + :ivar allow_low_priority_node: Whether the Job Manager Task may run on a Spot/Low-priority + Compute Node. The default value is true. + :vartype allow_low_priority_node: bool + """ + + id: str = rest_field() + """A string that uniquely identifies the Job Manager Task within the Job. The ID can contain any + combination of alphanumeric characters including hyphens and underscores and cannot contain + more than 64 characters. Required.""" + display_name: Optional[str] = rest_field(name="displayName") + """The display name of the Job Manager Task. It need not be unique and can contain any Unicode + characters up to a maximum length of 1024.""" + command_line: str = rest_field(name="commandLine") + """The command line of the Job Manager Task. 
The command line does not run under a shell, and + therefore cannot take advantage of shell features such as environment variable expansion. If + you want to take advantage of such features, you should invoke the shell in the command line, + for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the + command line refers to file paths, it should use a relative path (relative to the Task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + Required.""" + container_settings: Optional["_models.BatchTaskContainerSettings"] = rest_field(name="containerSettings") + """The settings for the container under which the Job Manager Task runs. If the Pool that will run + this Task has containerConfiguration set, this must be set as well. If the Pool that will run + this Task doesn't have containerConfiguration set, this must not be set. When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure + Batch directories on the node) are mapped into the container, all Task environment variables + are mapped into the container, and the Task command line is executed in the container. Files + produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host + disk, meaning that Batch file APIs will not be able to access those files.""" + resource_files: Optional[List["_models.ResourceFile"]] = rest_field(name="resourceFiles") + """A list of files that the Batch service will download to the Compute Node before running the + command line. Files listed under this element are located in the Task's working directory. + There is a maximum size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be RequestEntityTooLarge. If this occurs, + the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers.""" + output_files: Optional[List["_models.OutputFile"]] = rest_field(name="outputFiles") + """A list of files that the Batch service will upload from the Compute Node after running the + command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node + on which the primary Task is executed.""" + environment_settings: Optional[List["_models.EnvironmentSetting"]] = rest_field(name="environmentSettings") + """A list of environment variable settings for the Job Manager Task.""" + constraints: Optional["_models.BatchTaskConstraints"] = rest_field() + """Constraints that apply to the Job Manager Task.""" + required_slots: Optional[int] = rest_field(name="requiredSlots") + """The number of scheduling slots that the Task requires to run. The default is 1. A Task can only + be scheduled to run on a compute node if the node has enough free scheduling slots available. + For multi-instance Tasks, this property is not supported and must not be specified.""" + kill_job_on_completion: Optional[bool] = rest_field(name="killJobOnCompletion") + """Whether completion of the Job Manager Task signifies completion of the entire Job. If true, + when the Job Manager Task completes, the Batch service marks the Job as complete. If any Tasks + are still running at this time (other than Job Release), those Tasks are terminated. If false, + the completion of the Job Manager Task does not affect the Job status. 
In this case, you should + either use the onAllTasksComplete attribute to terminate the Job, or have a client or user + terminate the Job explicitly. An example of this is if the Job Manager creates a set of Tasks + but then takes no further role in their execution. The default value is true. If you are using + the onAllTasksComplete and onTaskFailure attributes to control Job lifetime, and using the Job + Manager Task only to create the Tasks for the Job (not to monitor progress), then it is + important to set killJobOnCompletion to false.""" + user_identity: Optional["_models.UserIdentity"] = rest_field(name="userIdentity") + """The user identity under which the Job Manager Task runs. If omitted, the Task runs as a + non-administrative user unique to the Task.""" + run_exclusive: Optional[bool] = rest_field(name="runExclusive") + """Whether the Job Manager Task requires exclusive use of the Compute Node where it runs. If true, + no other Tasks will run on the same Node for as long as the Job Manager is running. If false, + other Tasks can run simultaneously with the Job Manager on a Compute Node. The Job Manager Task + counts normally against the Compute Node's concurrent Task limit, so this is only relevant if + the Compute Node allows multiple concurrent Tasks. The default value is true.""" + application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = rest_field( + name="applicationPackageReferences" + ) + """A list of Application Packages that the Batch service will deploy to the + Compute Node before running the command line.Application Packages are + downloaded and deployed to a shared directory, not the Task working + directory. Therefore, if a referenced Application Package is already + on the Compute Node, and is up to date, then it is not re-downloaded; + the existing copy on the Compute Node is used. If a referenced Application + Package cannot be installed, for example because the package has been deleted + or because download failed, the Task fails.""" + authentication_token_settings: Optional["_models.AuthenticationTokenSettings"] = rest_field( + name="authenticationTokenSettings" + ) + """The settings for an authentication token that the Task can use to perform Batch service + operations. If this property is set, the Batch service provides the Task with an authentication + token which can be used to authenticate Batch service operations without requiring an Account + access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. + The operations that the Task can carry out using the token depend on the settings. For example, + a Task can request Job permissions in order to add other Tasks to the Job, or check the status + of the Job or of other Tasks under the Job.""" + allow_low_priority_node: Optional[bool] = rest_field(name="allowLowPriorityNode") + """Whether the Job Manager Task may run on a Spot/Low-priority Compute Node. 
The default value is + true.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + command_line: str, + display_name: Optional[str] = None, + container_settings: Optional["_models.BatchTaskContainerSettings"] = None, + resource_files: Optional[List["_models.ResourceFile"]] = None, + output_files: Optional[List["_models.OutputFile"]] = None, + environment_settings: Optional[List["_models.EnvironmentSetting"]] = None, + constraints: Optional["_models.BatchTaskConstraints"] = None, + required_slots: Optional[int] = None, + kill_job_on_completion: Optional[bool] = None, + user_identity: Optional["_models.UserIdentity"] = None, + run_exclusive: Optional[bool] = None, + application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = None, + authentication_token_settings: Optional["_models.AuthenticationTokenSettings"] = None, + allow_low_priority_node: Optional[bool] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobNetworkConfiguration(_model_base.Model): + """The network configuration for the Job. - :param type: Required. The list of user identities associated with the - Batch pool. The user identity dictionary key references will be ARM - resource ids in the form: - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - Possible values include: 'UserAssigned', 'None' - :type type: str or ~azure.batch.models.PoolIdentityType - :param user_assigned_identities: The user identity dictionary key - references will be ARM resource ids in the form: - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - :type user_assigned_identities: - list[~azure.batch.models.UserAssignedIdentity] - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'type', 'type': 'PoolIdentityType'}, - 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '[UserAssignedIdentity]'}, - } - - def __init__(self, **kwargs): - super(BatchPoolIdentity, self).__init__(**kwargs) - self.type = kwargs.get('type', None) - self.user_assigned_identities = kwargs.get('user_assigned_identities', None) - - -class Certificate(Model): - """A Certificate that can be installed on Compute Nodes and can be used to - authenticate operations on the machine. - - :param thumbprint: - :type thumbprint: str - :param thumbprint_algorithm: - :type thumbprint_algorithm: str - :param url: - :type url: str - :param state: The current state of the Certificate. Possible values - include: 'active', 'deleting', 'deleteFailed' - :type state: str or ~azure.batch.models.CertificateState - :param state_transition_time: - :type state_transition_time: datetime - :param previous_state: The previous state of the Certificate. This - property is not set if the Certificate is in its initial active state. - Possible values include: 'active', 'deleting', 'deleteFailed' - :type previous_state: str or ~azure.batch.models.CertificateState - :param previous_state_transition_time: This property is not set if the - Certificate is in its initial Active state. 
- :type previous_state_transition_time: datetime - :param public_data: - :type public_data: str - :param delete_certificate_error: The error that occurred on the last - attempt to delete this Certificate. This property is set only if the - Certificate is in the DeleteFailed state. - :type delete_certificate_error: ~azure.batch.models.DeleteCertificateError - """ - - _attribute_map = { - 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, - 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, - 'url': {'key': 'url', 'type': 'str'}, - 'state': {'key': 'state', 'type': 'CertificateState'}, - 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, - 'previous_state': {'key': 'previousState', 'type': 'CertificateState'}, - 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, - 'public_data': {'key': 'publicData', 'type': 'str'}, - 'delete_certificate_error': {'key': 'deleteCertificateError', 'type': 'DeleteCertificateError'}, - } - - def __init__(self, **kwargs): - super(Certificate, self).__init__(**kwargs) - self.thumbprint = kwargs.get('thumbprint', None) - self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) - self.url = kwargs.get('url', None) - self.state = kwargs.get('state', None) - self.state_transition_time = kwargs.get('state_transition_time', None) - self.previous_state = kwargs.get('previous_state', None) - self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) - self.public_data = kwargs.get('public_data', None) - self.delete_certificate_error = kwargs.get('delete_certificate_error', None) - - -class CertificateAddOptions(Model): - """Additional parameters for add operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(CertificateAddOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class CertificateAddParameter(Model): - """A Certificate that can be installed on Compute Nodes and can be used to - authenticate operations on the machine. - - All required parameters must be populated in order to send to Azure. - - :param thumbprint: Required. - :type thumbprint: str - :param thumbprint_algorithm: Required. - :type thumbprint_algorithm: str - :param data: Required. 
- :type data: str - :param certificate_format: Possible values include: 'pfx', 'cer' - :type certificate_format: str or ~azure.batch.models.CertificateFormat - :param password: This must be omitted if the Certificate format is cer. - :type password: str - """ - - _validation = { - 'thumbprint': {'required': True}, - 'thumbprint_algorithm': {'required': True}, - 'data': {'required': True}, - } - - _attribute_map = { - 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, - 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, - 'data': {'key': 'data', 'type': 'str'}, - 'certificate_format': {'key': 'certificateFormat', 'type': 'CertificateFormat'}, - 'password': {'key': 'password', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(CertificateAddParameter, self).__init__(**kwargs) - self.thumbprint = kwargs.get('thumbprint', None) - self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) - self.data = kwargs.get('data', None) - self.certificate_format = kwargs.get('certificate_format', None) - self.password = kwargs.get('password', None) - - -class CertificateCancelDeletionOptions(Model): - """Additional parameters for cancel_deletion operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(CertificateCancelDeletionOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class CertificateDeleteOptions(Model): - """Additional parameters for delete operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(CertificateDeleteOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class CertificateGetOptions(Model): - """Additional parameters for get operation. - - :param select: An OData $select clause. - :type select: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(CertificateGetOptions, self).__init__(**kwargs) - self.select = kwargs.get('select', None) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class CertificateListOptions(Model): - """Additional parameters for list operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Certificates can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(CertificateListOptions, self).__init__(**kwargs) - self.filter = kwargs.get('filter', None) - self.select = kwargs.get('select', None) - self.max_results = kwargs.get('max_results', 1000) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class CertificateReference(Model): - """A reference to a Certificate to be installed on Compute Nodes in a Pool. - - Warning: This object is deprecated and will be removed after February, - 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - - All required parameters must be populated in order to send to Azure. - - :param thumbprint: Required. - :type thumbprint: str - :param thumbprint_algorithm: Required. - :type thumbprint_algorithm: str - :param store_location: The default value is currentuser. This property is - applicable only for Pools configured with Windows Compute Nodes (that is, - created with cloudServiceConfiguration, or with - virtualMachineConfiguration using a Windows Image reference). For Linux - Compute Nodes, the Certificates are stored in a directory inside the Task - working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is - supplied to the Task to query for this location. For Certificates with - visibility of 'remoteUser', a 'certs' directory is created in the user's - home directory (e.g., /home/{user-name}/certs) and Certificates are placed - in that directory. Possible values include: 'currentUser', 'localMachine' - :type store_location: str or ~azure.batch.models.CertificateStoreLocation - :param store_name: This property is applicable only for Pools configured - with Windows Compute Nodes (that is, created with - cloudServiceConfiguration, or with virtualMachineConfiguration using a - Windows Image reference). Common store names include: My, Root, CA, Trust, - Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but - any custom store name can also be used. The default value is My. - :type store_name: str - :param visibility: You can specify more than one visibility in this - collection. The default is all Accounts. 
- :type visibility: list[str or ~azure.batch.models.CertificateVisibility] - """ - - _validation = { - 'thumbprint': {'required': True}, - 'thumbprint_algorithm': {'required': True}, - } - - _attribute_map = { - 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, - 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, - 'store_location': {'key': 'storeLocation', 'type': 'CertificateStoreLocation'}, - 'store_name': {'key': 'storeName', 'type': 'str'}, - 'visibility': {'key': 'visibility', 'type': '[CertificateVisibility]'}, - } - - def __init__(self, **kwargs): - super(CertificateReference, self).__init__(**kwargs) - self.thumbprint = kwargs.get('thumbprint', None) - self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) - self.store_location = kwargs.get('store_location', None) - self.store_name = kwargs.get('store_name', None) - self.visibility = kwargs.get('visibility', None) - - -class CIFSMountConfiguration(Model): - """Information used to connect to a CIFS file system. - All required parameters must be populated in order to send to Azure. + :ivar subnet_id: The ARM resource identifier of the virtual network subnet which Compute Nodes + running Tasks from the Job will join for the duration of the Task. This will only work with a + VirtualMachineConfiguration Pool. The virtual network must be in the same region and + subscription as the Azure Batch Account. The specified subnet should have enough free IP + addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This + can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service + principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) + role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This + can be verified by checking if the specified VNet has any associated Network Security Groups + (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the + Batch service will set the state of the Compute Nodes to unusable. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. # pylint: disable=line-too-long + If the specified VNet has any associated Network Security Groups (NSG), then a few reserved + system ports must be enabled for inbound communication from the Azure Batch service. For Pools + created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 + for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound + connections for communications to Azure Storage. For more details see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + Required. + :vartype subnet_id: str + """ + + subnet_id: str = rest_field(name="subnetId") + """The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks + from the Job will join for the duration of the Task. This will only work with a + VirtualMachineConfiguration Pool. The virtual network must be in the same region and + subscription as the Azure Batch Account. The specified subnet should have enough free IP + addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This + can be up to the number of Compute Nodes in the Pool. 
The 'MicrosoftAzureBatch' service + principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) + role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This + can be verified by checking if the specified VNet has any associated Network Security Groups + (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the + Batch service will set the state of the Compute Nodes to unusable. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. # pylint: disable=line-too-long + If the specified VNet has any associated Network Security Groups (NSG), then a few reserved + system ports must be enabled for inbound communication from the Azure Batch service. For Pools + created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 + for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound + connections for communications to Azure Storage. For more details see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + Required.""" - :param username: Required. - :type username: str - :param source: Required. - :type source: str - :param relative_mount_path: Required. All file systems are mounted - relative to the Batch mounts directory, accessible via the - AZ_BATCH_NODE_MOUNTS_DIR environment variable. - :type relative_mount_path: str - :param mount_options: These are 'net use' options in Windows and 'mount' - options in Linux. - :type mount_options: str - :param password: Required. - :type password: str - """ + @overload + def __init__( + self, + *, + subnet_id: str, + ): ... - _validation = { - 'username': {'required': True}, - 'source': {'required': True}, - 'relative_mount_path': {'required': True}, - 'password': {'required': True}, - } + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ - _attribute_map = { - 'username': {'key': 'username', 'type': 'str'}, - 'source': {'key': 'source', 'type': 'str'}, - 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, - 'mount_options': {'key': 'mountOptions', 'type': 'str'}, - 'password': {'key': 'password', 'type': 'str'}, - } + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) - def __init__(self, **kwargs): - super(CIFSMountConfiguration, self).__init__(**kwargs) - self.username = kwargs.get('username', None) - self.source = kwargs.get('source', None) - self.relative_mount_path = kwargs.get('relative_mount_path', None) - self.mount_options = kwargs.get('mount_options', None) - self.password = kwargs.get('password', None) +class BatchJobPreparationAndReleaseTaskStatus(_model_base.Model): + """The status of the Job Preparation and Job Release Tasks on a Compute Node. -class CloudError(Model): - """CloudError. - """ + :ivar pool_id: The ID of the Pool containing the Compute Node to which this entry refers. + :vartype pool_id: str + :ivar node_id: The ID of the Compute Node to which this entry refers. + :vartype node_id: str + :ivar node_url: The URL of the Compute Node to which this entry refers. + :vartype node_url: str + :ivar job_preparation_task_execution_info: Information about the execution status of the Job + Preparation Task on this Compute Node. 
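BatchJobNetworkConfiguration carries a single required field; a sketch with a placeholder subnet ARM resource ID in the documented form:

    from azure.batch import models as batch_models

    job_network = batch_models.BatchJobNetworkConfiguration(
        subnet_id=(
            "/subscriptions/<subscription>/resourceGroups/<group>/providers/"
            "Microsoft.Network/virtualNetworks/<network>/subnets/<subnet>"
        )
    )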
+ :vartype job_preparation_task_execution_info: + ~azure.batch.models.BatchJobPreparationTaskExecutionInfo + :ivar job_release_task_execution_info: Information about the execution status of the Job + Release Task on this Compute Node. This property is set only if the Job Release Task has run on + the Compute Node. + :vartype job_release_task_execution_info: ~azure.batch.models.BatchJobReleaseTaskExecutionInfo + """ + + pool_id: Optional[str] = rest_field(name="poolId") + """The ID of the Pool containing the Compute Node to which this entry refers.""" + node_id: Optional[str] = rest_field(name="nodeId") + """The ID of the Compute Node to which this entry refers.""" + node_url: Optional[str] = rest_field(name="nodeUrl") + """The URL of the Compute Node to which this entry refers.""" + job_preparation_task_execution_info: Optional["_models.BatchJobPreparationTaskExecutionInfo"] = rest_field( + name="jobPreparationTaskExecutionInfo" + ) + """Information about the execution status of the Job Preparation Task on this Compute Node.""" + job_release_task_execution_info: Optional["_models.BatchJobReleaseTaskExecutionInfo"] = rest_field( + name="jobReleaseTaskExecutionInfo" + ) + """Information about the execution status of the Job Release Task on this Compute Node. This + property is set only if the Job Release Task has run on the Compute Node.""" + + @overload + def __init__( + self, + *, + pool_id: Optional[str] = None, + node_id: Optional[str] = None, + node_url: Optional[str] = None, + job_preparation_task_execution_info: Optional["_models.BatchJobPreparationTaskExecutionInfo"] = None, + job_release_task_execution_info: Optional["_models.BatchJobReleaseTaskExecutionInfo"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobPreparationTask(_model_base.Model): + """A Job Preparation Task to run before any Tasks of the Job on any given Compute Node. + You can use Job Preparation to prepare a Node to run Tasks for the Job. + Activities commonly performed in Job Preparation include: Downloading common + resource files used by all the Tasks in the Job. The Job Preparation Task can + download these common resource files to the shared location on the Node. + (AZ_BATCH_NODE_ROOT_DIR\\shared), or starting a local service on the Node so + that all Tasks of that Job can communicate with it. If the Job Preparation Task + fails (that is, exhausts its retry count before exiting with exit code 0), + Batch will not run Tasks of this Job on the Node. The Compute Node remains + ineligible to run Tasks of this Job until it is reimaged. The Compute Node + remains active and can be used for other Jobs. The Job Preparation Task can run + multiple times on the same Node. Therefore, you should write the Job + Preparation Task to handle re-execution. If the Node is rebooted, the Job + Preparation Task is run again on the Compute Node before scheduling any other + Task of the Job, if rerunOnNodeRebootAfterSuccess is true or if the Job + Preparation Task did not previously complete. If the Node is reimaged, the Job + Preparation Task is run again before scheduling any Task of the Job. Batch will + retry Tasks when a recovery operation is triggered on a Node. 
Examples of + recovery operations include (but are not limited to) when an unhealthy Node is + rebooted or a Compute Node disappeared due to host failure. Retries due to + recovery operations are independent of and are not counted against the + maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due to + a recovery operation may occur. Because of this, all Tasks should be + idempotent. This means Tasks need to tolerate being interrupted and restarted + without causing any corruption or duplicate data. The best practice for long + running Tasks is to use some form of checkpointing. + + + :ivar id: A string that uniquely identifies the Job Preparation Task within the Job. The ID can + contain any combination of alphanumeric characters including hyphens and underscores and cannot + contain more than 64 characters. If you do not specify this property, the Batch service assigns + a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job + Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the + request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API + directly, the HTTP status code is 409 (Conflict). + :vartype id: str + :ivar command_line: The command line of the Job Preparation Task. The command line does not run + under a shell, and therefore cannot take advantage of shell features such as environment + variable expansion. If you want to take advantage of such features, you should invoke the shell + in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" + in Linux. If the command line refers to file paths, it should use a relative path (relative to + the Task working directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + Required. + :vartype command_line: str + :ivar container_settings: The settings for the container under which the Job Preparation Task + runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the + root of Azure Batch directories on the node) are mapped into the container, all Task + environment variables are mapped into the container, and the Task command line is executed in + the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be + reflected to the host disk, meaning that Batch file APIs will not be able to access those + files. + :vartype container_settings: ~azure.batch.models.BatchTaskContainerSettings + :ivar resource_files: A list of files that the Batch service will download to the Compute Node + before running the command line. Files listed under this element are located in the Task's + working directory. There is a maximum size for the list of resource files. When the max size + is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. + If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved + using .zip files, Application Packages, or Docker Containers. + :vartype resource_files: list[~azure.batch.models.ResourceFile] + :ivar environment_settings: A list of environment variable settings for the Job Preparation + Task. + :vartype environment_settings: list[~azure.batch.models.EnvironmentSetting] + :ivar constraints: Constraints that apply to the Job Preparation Task. 
+ :vartype constraints: ~azure.batch.models.BatchTaskConstraints + :ivar wait_for_success: Whether the Batch service should wait for the Job Preparation Task to + complete successfully before scheduling any other Tasks of the Job on the Compute Node. A Job + Preparation Task has completed successfully if it exits with exit code 0. If true and the Job + Preparation Task fails on a Node, the Batch service retries the Job Preparation Task up to its + maximum retry count (as specified in the constraints element). If the Task has still not + completed successfully after all retries, then the Batch service will not schedule Tasks of the + Job to the Node. The Node remains active and eligible to run Tasks of other Jobs. If false, the + Batch service will not wait for the Job Preparation Task to complete. In this case, other Tasks + of the Job can start executing on the Compute Node while the Job Preparation Task is still + running; and even if the Job Preparation Task fails, new Tasks will continue to be scheduled on + the Compute Node. The default value is true. + :vartype wait_for_success: bool + :ivar user_identity: The user identity under which the Job Preparation Task runs. If omitted, + the Task runs as a non-administrative user unique to the Task on Windows Compute Nodes, or a + non-administrative user unique to the Pool on Linux Compute Nodes. + :vartype user_identity: ~azure.batch.models.UserIdentity + :ivar rerun_on_node_reboot_after_success: Whether the Batch service should rerun the Job + Preparation Task after a Compute Node reboots. The Job Preparation Task is always rerun if a + Compute Node is reimaged, or if the Job Preparation Task did not complete (e.g. because the + reboot occurred while the Task was running). Therefore, you should always write a Job + Preparation Task to be idempotent and to behave correctly if run multiple times. The default + value is true. + :vartype rerun_on_node_reboot_after_success: bool + """ + + id: Optional[str] = rest_field() + """A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain + any combination of alphanumeric characters including hyphens and underscores and cannot contain + more than 64 characters. If you do not specify this property, the Batch service assigns a + default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job + Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the + request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API + directly, the HTTP status code is 409 (Conflict).""" + command_line: str = rest_field(name="commandLine") + """The command line of the Job Preparation Task. The command line does not run under a shell, and + therefore cannot take advantage of shell features such as environment variable expansion. If + you want to take advantage of such features, you should invoke the shell in the command line, + for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the + command line refers to file paths, it should use a relative path (relative to the Task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + Required.""" + container_settings: Optional["_models.BatchTaskContainerSettings"] = rest_field(name="containerSettings") + """The settings for the container under which the Job Preparation Task runs. 
When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure + Batch directories on the node) are mapped into the container, all Task environment variables + are mapped into the container, and the Task command line is executed in the container. Files + produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host + disk, meaning that Batch file APIs will not be able to access those files.""" + resource_files: Optional[List["_models.ResourceFile"]] = rest_field(name="resourceFiles") + """A list of files that the Batch service will download to the Compute Node before running the + command line. Files listed under this element are located in the Task's working directory. + There is a maximum size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be RequestEntityTooLarge. If this occurs, + the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers.""" + environment_settings: Optional[List["_models.EnvironmentSetting"]] = rest_field(name="environmentSettings") + """A list of environment variable settings for the Job Preparation Task.""" + constraints: Optional["_models.BatchTaskConstraints"] = rest_field() + """Constraints that apply to the Job Preparation Task.""" + wait_for_success: Optional[bool] = rest_field(name="waitForSuccess") + """Whether the Batch service should wait for the Job Preparation Task to complete successfully + before scheduling any other Tasks of the Job on the Compute Node. A Job Preparation Task has + completed successfully if it exits with exit code 0. If true and the Job Preparation Task fails + on a Node, the Batch service retries the Job Preparation Task up to its maximum retry count (as + specified in the constraints element). If the Task has still not completed successfully after + all retries, then the Batch service will not schedule Tasks of the Job to the Node. The Node + remains active and eligible to run Tasks of other Jobs. If false, the Batch service will not + wait for the Job Preparation Task to complete. In this case, other Tasks of the Job can start + executing on the Compute Node while the Job Preparation Task is still running; and even if the + Job Preparation Task fails, new Tasks will continue to be scheduled on the Compute Node. The + default value is true.""" + user_identity: Optional["_models.UserIdentity"] = rest_field(name="userIdentity") + """The user identity under which the Job Preparation Task runs. If omitted, the Task runs as a + non-administrative user unique to the Task on Windows Compute Nodes, or a non-administrative + user unique to the Pool on Linux Compute Nodes.""" + rerun_on_node_reboot_after_success: Optional[bool] = rest_field(name="rerunOnNodeRebootAfterSuccess") + """Whether the Batch service should rerun the Job Preparation Task after a Compute Node reboots. + The Job Preparation Task is always rerun if a Compute Node is reimaged, or if the Job + Preparation Task did not complete (e.g. because the reboot occurred while the Task was + running). Therefore, you should always write a Job Preparation Task to be idempotent and to + behave correctly if run multiple times. 
The default value is true.""" + + @overload + def __init__( + self, + *, + command_line: str, + id: Optional[str] = None, # pylint: disable=redefined-builtin + container_settings: Optional["_models.BatchTaskContainerSettings"] = None, + resource_files: Optional[List["_models.ResourceFile"]] = None, + environment_settings: Optional[List["_models.EnvironmentSetting"]] = None, + constraints: Optional["_models.BatchTaskConstraints"] = None, + wait_for_success: Optional[bool] = None, + user_identity: Optional["_models.UserIdentity"] = None, + rerun_on_node_reboot_after_success: Optional[bool] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobPreparationTaskExecutionInfo(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Contains information about the execution of a Job Preparation Task on a Compute + Node. - _attribute_map = { - } + :ivar start_time: The time at which the Task started running. If the Task has been restarted or + retried, this is the most recent time at which the Task started running. Required. + :vartype start_time: ~datetime.datetime + :ivar end_time: The time at which the Job Preparation Task completed. This property is set only + if the Task is in the Completed state. + :vartype end_time: ~datetime.datetime + :ivar state: The current state of the Job Preparation Task on the Compute Node. Required. Known + values are: "running" and "completed". + :vartype state: str or ~azure.batch.models.BatchJobPreparationTaskState + :ivar task_root_directory: The root directory of the Job Preparation Task on the Compute Node. + You can use this path to retrieve files created by the Task, such as log files. + :vartype task_root_directory: str + :ivar task_root_directory_url: The URL to the root directory of the Job Preparation Task on the + Compute Node. + :vartype task_root_directory_url: str + :ivar exit_code: The exit code of the program specified on the Task command line. This + parameter is returned only if the Task is in the completed state. The exit code for a process + reflects the specific convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that you know the exit code + convention used by the application process. Note that the exit code may also be generated by + the Compute Node operating system, such as when a process is forcibly terminated. + :vartype exit_code: int + :ivar container_info: Information about the container under which the Task is executing. This + property is set only if the Task runs in a container context. + :vartype container_info: ~azure.batch.models.BatchTaskContainerExecutionInfo + :ivar failure_info: Information describing the Task failure, if any. This property is set only + if the Task is in the completed state and encountered a failure. + :vartype failure_info: ~azure.batch.models.BatchTaskFailureInfo + :ivar retry_count: The number of times the Task has been retried by the Batch service. Task + application failures (non-zero exit code) are retried, pre-processing errors (the Task could + not be run) and file upload errors are not retried. The Batch service will retry the Task up to + the limit specified by the constraints. 
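A Job Preparation Task needs only command_line; the id defaults to 'jobpreparation', and the docstring above stresses that the command should be idempotent because it may run more than once on a Node. A minimal sketch (import path assumed, URL is a placeholder):

    from azure.batch import models as batch_models

    prep_task = batch_models.BatchJobPreparationTask(
        command_line="/bin/sh -c 'wget -N https://example.invalid/shared-input.tar.gz'",
        wait_for_success=True,                    # documented default, shown explicitly
        rerun_on_node_reboot_after_success=True,  # documented default, shown explicitly
    )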
Required. + :vartype retry_count: int + :ivar last_retry_time: The most recent time at which a retry of the Job Preparation Task + started running. This property is set only if the Task was retried (i.e. retryCount is + nonzero). If present, this is typically the same as startTime, but may be different if the Task + has been restarted for reasons other than retry; for example, if the Compute Node was rebooted + during a retry, then the startTime is updated but the lastRetryTime is not. + :vartype last_retry_time: ~datetime.datetime + :ivar result: The result of the Task execution. If the value is 'failed', then the details of + the failure can be found in the failureInfo property. Known values are: "success" and + "failure". + :vartype result: str or ~azure.batch.models.BatchTaskExecutionResult + """ + + start_time: datetime.datetime = rest_field(name="startTime", format="rfc3339") + """The time at which the Task started running. If the Task has been restarted or retried, this is + the most recent time at which the Task started running. Required.""" + end_time: Optional[datetime.datetime] = rest_field(name="endTime", format="rfc3339") + """The time at which the Job Preparation Task completed. This property is set only if the Task is + in the Completed state.""" + state: Union[str, "_models.BatchJobPreparationTaskState"] = rest_field() + """The current state of the Job Preparation Task on the Compute Node. Required. Known values are: + \"running\" and \"completed\".""" + task_root_directory: Optional[str] = rest_field(name="taskRootDirectory") + """The root directory of the Job Preparation Task on the Compute Node. You can use this path to + retrieve files created by the Task, such as log files.""" + task_root_directory_url: Optional[str] = rest_field(name="taskRootDirectoryUrl") + """The URL to the root directory of the Job Preparation Task on the Compute Node.""" + exit_code: Optional[int] = rest_field(name="exitCode") + """The exit code of the program specified on the Task command line. This parameter is returned + only if the Task is in the completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If you use the exit code + value to make decisions in your code, be sure that you know the exit code convention used by + the application process. Note that the exit code may also be generated by the Compute Node + operating system, such as when a process is forcibly terminated.""" + container_info: Optional["_models.BatchTaskContainerExecutionInfo"] = rest_field(name="containerInfo") + """Information about the container under which the Task is executing. This property is set only if + the Task runs in a container context.""" + failure_info: Optional["_models.BatchTaskFailureInfo"] = rest_field(name="failureInfo") + """Information describing the Task failure, if any. This property is set only if the Task is in + the completed state and encountered a failure.""" + retry_count: int = rest_field(name="retryCount") + """The number of times the Task has been retried by the Batch service. Task application failures + (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file + upload errors are not retried.
The Batch service will retry the Task up to the limit specified + by the constraints. Required.""" + last_retry_time: Optional[datetime.datetime] = rest_field(name="lastRetryTime", format="rfc3339") + """The most recent time at which a retry of the Job Preparation Task started running. This + property is set only if the Task was retried (i.e. retryCount is nonzero). If present, this is + typically the same as startTime, but may be different if the Task has been restarted for + reasons other than retry; for example, if the Compute Node was rebooted during a retry, then + the startTime is updated but the lastRetryTime is not.""" + result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = rest_field() + """The result of the Task execution. If the value is 'failed', then the details of the failure can + be found in the failureInfo property. Known values are: \"success\" and \"failure\".""" + + @overload + def __init__( + self, + *, + start_time: datetime.datetime, + state: Union[str, "_models.BatchJobPreparationTaskState"], + retry_count: int, + end_time: Optional[datetime.datetime] = None, + task_root_directory: Optional[str] = None, + task_root_directory_url: Optional[str] = None, + exit_code: Optional[int] = None, + container_info: Optional["_models.BatchTaskContainerExecutionInfo"] = None, + failure_info: Optional["_models.BatchTaskFailureInfo"] = None, + last_retry_time: Optional[datetime.datetime] = None, + result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobReleaseTask(_model_base.Model): + """A Job Release Task to run on Job completion on any Compute Node where the Job has run. + The Job Release Task runs when the Job ends, because of one of the following: + The user calls the Terminate Job API, or the Delete Job API while the Job is + still active, the Job's maximum wall clock time constraint is reached, and the + Job is still active, or the Job's Job Manager Task completed, and the Job is + configured to terminate when the Job Manager completes. The Job Release Task + runs on each Node where Tasks of the Job have run and the Job Preparation Task + ran and completed. If you reimage a Node after it has run the Job Preparation + Task, and the Job ends without any further Tasks of the Job running on that + Node (and hence the Job Preparation Task does not re-run), then the Job Release + Task does not run on that Compute Node. If a Node reboots while the Job Release + Task is still running, the Job Release Task runs again when the Compute Node + starts up. The Job is not marked as complete until all Job Release Tasks have + completed. The Job Release Task runs in the background. It does not occupy a + scheduling slot; that is, it does not count towards the taskSlotsPerNode limit + specified on the Pool. + + + :ivar id: A string that uniquely identifies the Job Release Task within the Job.
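The execution-info models in this section are read-only results reported by the service rather than inputs; a typical consumer branches on state, result, and exit_code. A small sketch, assuming prep_info is a BatchJobPreparationTaskExecutionInfo obtained from a status query that is not part of this diff:

def summarize_prep_task(prep_info) -> str:
    # state is "running" or "completed"; exit_code, failure_info and result
    # are only populated once the Task reaches the completed state.
    if prep_info.state == "completed":
        if prep_info.result == "failure" and prep_info.failure_info is not None:
            return f"failed after {prep_info.retry_count} retries: {prep_info.failure_info}"
        return f"completed with exit code {prep_info.exit_code}"
    return f"running since {prep_info.start_time}"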
The ID can + contain any combination of alphanumeric characters including hyphens and underscores and cannot + contain more than 64 characters. If you do not specify this property, the Batch service assigns + a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job + Release Task. If you try to submit a Task with the same id, the Batch service rejects the + request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, + the HTTP status code is 409 (Conflict). + :vartype id: str + :ivar command_line: The command line of the Job Release Task. The command line does not run + under a shell, and therefore cannot take advantage of shell features such as environment + variable expansion. If you want to take advantage of such features, you should invoke the shell + in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" + in Linux. If the command line refers to file paths, it should use a relative path (relative to + the Task working directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + Required. + :vartype command_line: str + :ivar container_settings: The settings for the container under which the Job Release Task runs. + When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root + of Azure Batch directories on the node) are mapped into the container, all Task environment + variables are mapped into the container, and the Task command line is executed in the + container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be + reflected to the host disk, meaning that Batch file APIs will not be able to access those + files. + :vartype container_settings: ~azure.batch.models.BatchTaskContainerSettings + :ivar resource_files: A list of files that the Batch service will download to the Compute Node + before running the command line. There is a maximum size for the list of resource files. When + the max size is exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. + This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed + under this element are located in the Task's working directory. + :vartype resource_files: list[~azure.batch.models.ResourceFile] + :ivar environment_settings: A list of environment variable settings for the Job Release Task. + :vartype environment_settings: list[~azure.batch.models.EnvironmentSetting] + :ivar max_wall_clock_time: The maximum elapsed time that the Job Release Task may run on a + given Compute Node, measured from the time the Task starts. If the Task does not complete + within the time limit, the Batch service terminates it. The default value is 15 minutes. You + may not specify a timeout longer than 15 minutes. If you do, the Batch service rejects it with + an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + :vartype max_wall_clock_time: ~datetime.timedelta + :ivar retention_time: The minimum time to retain the Task directory for the Job Release Task on + the Compute Node. After this time, the Batch service may delete the Task directory and all its + contents. The default is 7 days, i.e. the Task directory will be retained for 7 days unless the + Compute Node is removed or the Job is deleted. 
+ :vartype retention_time: ~datetime.timedelta + :ivar user_identity: The user identity under which the Job Release Task runs. If omitted, the + Task runs as a non-administrative user unique to the Task. + :vartype user_identity: ~azure.batch.models.UserIdentity + """ + + id: Optional[str] = rest_field() + """A string that uniquely identifies the Job Release Task within the Job. The ID can contain any + combination of alphanumeric characters including hyphens and underscores and cannot contain + more than 64 characters. If you do not specify this property, the Batch service assigns a + default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release + Task. If you try to submit a Task with the same id, the Batch service rejects the request with + error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP + status code is 409 (Conflict).""" + command_line: str = rest_field(name="commandLine") + """The command line of the Job Release Task. The command line does not run under a shell, and + therefore cannot take advantage of shell features such as environment variable expansion. If + you want to take advantage of such features, you should invoke the shell in the command line, + for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the + command line refers to file paths, it should use a relative path (relative to the Task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + Required.""" + container_settings: Optional["_models.BatchTaskContainerSettings"] = rest_field(name="containerSettings") + """The settings for the container under which the Job Release Task runs. When this is specified, + all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch + directories on the node) are mapped into the container, all Task environment variables are + mapped into the container, and the Task command line is executed in the container. Files + produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host + disk, meaning that Batch file APIs will not be able to access those files.""" + resource_files: Optional[List["_models.ResourceFile"]] = rest_field(name="resourceFiles") + """A list of files that the Batch service will download to the Compute Node before running the + command line. There is a maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If + this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved + using .zip files, Application Packages, or Docker Containers. Files listed under this element + are located in the Task's working directory.""" + environment_settings: Optional[List["_models.EnvironmentSetting"]] = rest_field(name="environmentSettings") + """A list of environment variable settings for the Job Release Task.""" + max_wall_clock_time: Optional[datetime.timedelta] = rest_field(name="maxWallClockTime") + """The maximum elapsed time that the Job Release Task may run on a given Compute Node, measured + from the time the Task starts. If the Task does not complete within the time limit, the Batch + service terminates it. The default value is 15 minutes. You may not specify a timeout longer + than 15 minutes. 
If you do, the Batch service rejects it with an error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request).""" + retention_time: Optional[datetime.timedelta] = rest_field(name="retentionTime") + """The minimum time to retain the Task directory for the Job Release Task on the Compute Node. + After this time, the Batch service may delete the Task directory and all its contents. The + default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node + is removed or the Job is deleted.""" + user_identity: Optional["_models.UserIdentity"] = rest_field(name="userIdentity") + """The user identity under which the Job Release Task runs. If omitted, the Task runs as a + non-administrative user unique to the Task.""" + + @overload + def __init__( + self, + *, + command_line: str, + id: Optional[str] = None, # pylint: disable=redefined-builtin + container_settings: Optional["_models.BatchTaskContainerSettings"] = None, + resource_files: Optional[List["_models.ResourceFile"]] = None, + environment_settings: Optional[List["_models.EnvironmentSetting"]] = None, + max_wall_clock_time: Optional[datetime.timedelta] = None, + retention_time: Optional[datetime.timedelta] = None, + user_identity: Optional["_models.UserIdentity"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobReleaseTaskExecutionInfo(_model_base.Model): + """Contains information about the execution of a Job Release Task on a Compute + Node. -class CloudJob(Model): - """An Azure Batch Job. - :param id: The ID is case-preserving and case-insensitive (that is, you - may not have two IDs within an Account that differ only by case). - :type id: str - :param display_name: - :type display_name: str - :param uses_task_dependencies: Whether Tasks in the Job can define - dependencies on each other. The default is false. - :type uses_task_dependencies: bool - :param url: - :type url: str - :param e_tag: This is an opaque string. You can use it to detect whether - the Job has changed between requests. In particular, you can be pass the - ETag when updating a Job to specify that your changes should take effect - only if nobody else has modified the Job in the meantime. - :type e_tag: str - :param last_modified: This is the last time at which the Job level data, - such as the Job state or priority, changed. It does not factor in - task-level changes such as adding new Tasks or Tasks changing state. - :type last_modified: datetime - :param creation_time: - :type creation_time: datetime - :param state: The current state of the Job. Possible values include: - 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', - 'deleting' - :type state: str or ~azure.batch.models.JobState - :param state_transition_time: - :type state_transition_time: datetime - :param previous_state: The previous state of the Job. This property is not - set if the Job is in its initial Active state. Possible values include: - 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', - 'deleting' - :type previous_state: str or ~azure.batch.models.JobState - :param previous_state_transition_time: This property is not set if the Job - is in its initial Active state. 
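For the duration-typed fields of BatchJobReleaseTask (maxWallClockTime and retentionTime on the wire), the keyword overload takes datetime.timedelta values. A minimal sketch, again assuming the azure.batch.models import path and illustrative values:

import datetime

from azure.batch import models

release_task = models.BatchJobReleaseTask(
    command_line="/bin/sh -c 'echo releasing node'",
    # Terminated by the service if it runs longer than this; values above 15 minutes are rejected.
    max_wall_clock_time=datetime.timedelta(minutes=10),
    # Keep the Task directory for one day instead of the default 7 days.
    retention_time=datetime.timedelta(days=1),
)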
- :type previous_state_transition_time: datetime - :param priority: The priority of the Job. Priority values can range from - -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. The default value is 0. - :type priority: int - :param allow_task_preemption: Whether Tasks in this job can be preempted - by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be - able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - :type allow_task_preemption: bool - :param max_parallel_tasks: The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 - or greater than 0 if specified. If not specified, the default value is -1, - which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created - using the update job API. Default value: -1 . - :type max_parallel_tasks: int - :param constraints: The execution constraints for the Job. - :type constraints: ~azure.batch.models.JobConstraints - :param job_manager_task: Details of a Job Manager Task to be launched when - the Job is started. - :type job_manager_task: ~azure.batch.models.JobManagerTask - :param job_preparation_task: The Job Preparation Task. The Job Preparation - Task is a special Task run on each Compute Node before any other Task of - the Job. - :type job_preparation_task: ~azure.batch.models.JobPreparationTask - :param job_release_task: The Job Release Task. The Job Release Task is a - special Task run at the end of the Job on each Compute Node that has run - any other Task of the Job. - :type job_release_task: ~azure.batch.models.JobReleaseTask - :param common_environment_settings: Individual Tasks can override an - environment setting specified here by specifying the same setting name - with a different value. - :type common_environment_settings: - list[~azure.batch.models.EnvironmentSetting] - :param pool_info: The Pool settings associated with the Job. - :type pool_info: ~azure.batch.models.PoolInformation - :param on_all_tasks_complete: The action the Batch service should take - when all Tasks in the Job are in the completed state. The default is - noaction. Possible values include: 'noAction', 'terminateJob' - :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete - :param on_task_failure: The action the Batch service should take when any - Task in the Job fails. A Task is considered to have failed if has a - failureInfo. A failureInfo is set if the Task completes with a non-zero - exit code after exhausting its retry count, or if there was an error - starting the Task, for example due to a resource file download error. The - default is noaction. Possible values include: 'noAction', - 'performExitOptionsJobAction' - :type on_task_failure: str or ~azure.batch.models.OnTaskFailure - :param network_configuration: The network configuration for the Job. - :type network_configuration: ~azure.batch.models.JobNetworkConfiguration - :param metadata: The Batch service does not assign any meaning to - metadata; it is solely for the use of user code. - :type metadata: list[~azure.batch.models.MetadataItem] - :param execution_info: The execution information for the Job. 
- :type execution_info: ~azure.batch.models.JobExecutionInformation - :param stats: Resource usage statistics for the entire lifetime of the - Job. This property is populated only if the CloudJob was retrieved with an - expand clause including the 'stats' attribute; otherwise it is null. The - statistics may not be immediately available. The Batch service performs - periodic roll-up of statistics. The typical delay is about 30 minutes. - :type stats: ~azure.batch.models.JobStatistics - """ - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, - 'url': {'key': 'url', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, - 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, - 'state': {'key': 'state', 'type': 'JobState'}, - 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, - 'previous_state': {'key': 'previousState', 'type': 'JobState'}, - 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, - 'priority': {'key': 'priority', 'type': 'int'}, - 'allow_task_preemption': {'key': 'allowTaskPreemption', 'type': 'bool'}, - 'max_parallel_tasks': {'key': 'maxParallelTasks', 'type': 'int'}, - 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, - 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, - 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, - 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, - 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, - 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, - 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, - 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, - 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'}, - 'stats': {'key': 'stats', 'type': 'JobStatistics'}, - } - - def __init__(self, **kwargs): - super(CloudJob, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.display_name = kwargs.get('display_name', None) - self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None) - self.url = kwargs.get('url', None) - self.e_tag = kwargs.get('e_tag', None) - self.last_modified = kwargs.get('last_modified', None) - self.creation_time = kwargs.get('creation_time', None) - self.state = kwargs.get('state', None) - self.state_transition_time = kwargs.get('state_transition_time', None) - self.previous_state = kwargs.get('previous_state', None) - self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) - self.priority = kwargs.get('priority', None) - self.allow_task_preemption = kwargs.get('allow_task_preemption', None) - self.max_parallel_tasks = kwargs.get('max_parallel_tasks', -1) - self.constraints = kwargs.get('constraints', None) - self.job_manager_task = kwargs.get('job_manager_task', None) - self.job_preparation_task = kwargs.get('job_preparation_task', None) - self.job_release_task = kwargs.get('job_release_task', None) - self.common_environment_settings = kwargs.get('common_environment_settings', None) - self.pool_info = 
kwargs.get('pool_info', None) - self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) - self.on_task_failure = kwargs.get('on_task_failure', None) - self.network_configuration = kwargs.get('network_configuration', None) - self.metadata = kwargs.get('metadata', None) - self.execution_info = kwargs.get('execution_info', None) - self.stats = kwargs.get('stats', None) - - -class CloudJobSchedule(Model): - """A Job Schedule that allows recurring Jobs by specifying when to run Jobs - and a specification used to create each Job. - - :param id: - :type id: str - :param display_name: - :type display_name: str - :param url: - :type url: str - :param e_tag: This is an opaque string. You can use it to detect whether - the Job Schedule has changed between requests. In particular, you can be - pass the ETag with an Update Job Schedule request to specify that your - changes should take effect only if nobody else has modified the schedule - in the meantime. - :type e_tag: str - :param last_modified: This is the last time at which the schedule level - data, such as the Job specification or recurrence information, changed. It - does not factor in job-level changes such as new Jobs being created or - Jobs changing state. - :type last_modified: datetime - :param creation_time: - :type creation_time: datetime - :param state: The current state of the Job Schedule. Possible values - include: 'active', 'completed', 'disabled', 'terminating', 'deleting' - :type state: str or ~azure.batch.models.JobScheduleState - :param state_transition_time: - :type state_transition_time: datetime - :param previous_state: The previous state of the Job Schedule. This - property is not present if the Job Schedule is in its initial active - state. Possible values include: 'active', 'completed', 'disabled', - 'terminating', 'deleting' - :type previous_state: str or ~azure.batch.models.JobScheduleState - :param previous_state_transition_time: This property is not present if the - Job Schedule is in its initial active state. - :type previous_state_transition_time: datetime - :param schedule: The schedule according to which Jobs will be created. All - times are fixed respective to UTC and are not impacted by daylight saving - time. - :type schedule: ~azure.batch.models.Schedule - :param job_specification: The details of the Jobs to be created on this + :ivar start_time: The time at which the Task started running. If the Task has been restarted or + retried, this is the most recent time at which the Task started running. Required. + :vartype start_time: ~datetime.datetime + :ivar end_time: The time at which the Job Release Task completed. This property is set only if + the Task is in the Completed state. + :vartype end_time: ~datetime.datetime + :ivar state: The current state of the Job Release Task on the Compute Node. Required. Known + values are: "running" and "completed". + :vartype state: str or ~azure.batch.models.BatchJobReleaseTaskState + :ivar task_root_directory: The root directory of the Job Release Task on the Compute Node. You + can use this path to retrieve files created by the Task, such as log files. + :vartype task_root_directory: str + :ivar task_root_directory_url: The URL to the root directory of the Job Release Task on the + Compute Node. + :vartype task_root_directory_url: str + :ivar exit_code: The exit code of the program specified on the Task command line. This + parameter is returned only if the Task is in the completed state. 
The exit code for a process + reflects the specific convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that you know the exit code + convention used by the application process. Note that the exit code may also be generated by + the Compute Node operating system, such as when a process is forcibly terminated. + :vartype exit_code: int + :ivar container_info: Information about the container under which the Task is executing. This + property is set only if the Task runs in a container context. + :vartype container_info: ~azure.batch.models.BatchTaskContainerExecutionInfo + :ivar failure_info: Information describing the Task failure, if any. This property is set only + if the Task is in the completed state and encountered a failure. + :vartype failure_info: ~azure.batch.models.BatchTaskFailureInfo + :ivar result: The result of the Task execution. If the value is 'failed', then the details of + the failure can be found in the failureInfo property. Known values are: "success" and + "failure". + :vartype result: str or ~azure.batch.models.BatchTaskExecutionResult + """ + + start_time: datetime.datetime = rest_field(name="startTime", format="rfc3339") + """The time at which the Task started running. If the Task has been restarted or retried, this is + the most recent time at which the Task started running. Required.""" + end_time: Optional[datetime.datetime] = rest_field(name="endTime", format="rfc3339") + """The time at which the Job Release Task completed. This property is set only if the Task is in + the Completed state.""" + state: Union[str, "_models.BatchJobReleaseTaskState"] = rest_field() + """The current state of the Job Release Task on the Compute Node. Required. Known values are: + \"running\" and \"completed\".""" + task_root_directory: Optional[str] = rest_field(name="taskRootDirectory") + """The root directory of the Job Release Task on the Compute Node. You can use this path to + retrieve files created by the Task, such as log files.""" + task_root_directory_url: Optional[str] = rest_field(name="taskRootDirectoryUrl") + """The URL to the root directory of the Job Release Task on the Compute Node.""" + exit_code: Optional[int] = rest_field(name="exitCode") + """The exit code of the program specified on the Task command line. This parameter is returned + only if the Task is in the completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If you use the exit code + value to make decisions in your code, be sure that you know the exit code convention used by + the application process. Note that the exit code may also be generated by the Compute Node + operating system, such as when a process is forcibly terminated.""" + container_info: Optional["_models.BatchTaskContainerExecutionInfo"] = rest_field(name="containerInfo") + """Information about the container under which the Task is executing. This property is set only if + the Task runs in a container context.""" + failure_info: Optional["_models.BatchTaskFailureInfo"] = rest_field(name="failureInfo") + """Information describing the Task failure, if any. This property is set only if the Task is in + the completed state and encountered a failure.""" + result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = rest_field() + """The result of the Task execution. If the value is 'failed', then the details of the failure can + be found in the failureInfo property. 
Known values are: \"success\" and \"failure\".""" + + @overload + def __init__( + self, + *, + start_time: datetime.datetime, + state: Union[str, "_models.BatchJobReleaseTaskState"], + end_time: Optional[datetime.datetime] = None, + task_root_directory: Optional[str] = None, + task_root_directory_url: Optional[str] = None, + exit_code: Optional[int] = None, + container_info: Optional["_models.BatchTaskContainerExecutionInfo"] = None, + failure_info: Optional["_models.BatchTaskFailureInfo"] = None, + result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobSchedule(_model_base.Model): # pylint: disable=too-many-instance-attributes + """A Job Schedule that allows recurring Jobs by specifying when to run Jobs and a + specification used to create each Job. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: A string that uniquely identifies the schedule within the Account. + :vartype id: str + :ivar display_name: The display name for the schedule. + :vartype display_name: str + :ivar url: The URL of the Job Schedule. + :vartype url: str + :ivar e_tag: The ETag of the Job Schedule. This is an opaque string. You can use it to detect + whether the Job Schedule has changed between requests. In particular, you can pass the ETag + with an Update Job Schedule request to specify that your changes should take effect only if + nobody else has modified the schedule in the meantime. + :vartype e_tag: str + :ivar last_modified: The last modified time of the Job Schedule. This is the last time at which + the schedule level data, such as the Job specification or recurrence information, changed. It + does not factor in job-level changes such as new Jobs being created or Jobs changing state. + :vartype last_modified: ~datetime.datetime + :ivar creation_time: The creation time of the Job Schedule. + :vartype creation_time: ~datetime.datetime + :ivar state: The current state of the Job Schedule. Known values are: "active", "completed", + "disabled", "terminating", and "deleting". + :vartype state: str or ~azure.batch.models.BatchJobScheduleState + :ivar state_transition_time: The time at which the Job Schedule entered the current state. + :vartype state_transition_time: ~datetime.datetime + :ivar previous_state: The previous state of the Job Schedule. This property is not present if + the Job Schedule is in its initial active state. Known values are: "active", "completed", + "disabled", "terminating", and "deleting". + :vartype previous_state: str or ~azure.batch.models.BatchJobScheduleState + :ivar previous_state_transition_time: The time at which the Job Schedule entered its previous + state. This property is not present if the Job Schedule is in its initial active state. + :vartype previous_state_transition_time: ~datetime.datetime + :ivar schedule: The schedule according to which Jobs will be created. All times are fixed + respective to UTC and are not impacted by daylight saving time. + :vartype schedule: ~azure.batch.models.BatchJobScheduleConfiguration + :ivar job_specification: The details of the Jobs to be created on this schedule. Required.
+ :vartype job_specification: ~azure.batch.models.BatchJobSpecification + :ivar execution_info: Information about Jobs that have been and will be run under this schedule. - :type job_specification: ~azure.batch.models.JobSpecification - :param execution_info: Information about Jobs that have been and will be - run under this schedule. - :type execution_info: ~azure.batch.models.JobScheduleExecutionInformation - :param metadata: The Batch service does not assign any meaning to - metadata; it is solely for the use of user code. - :type metadata: list[~azure.batch.models.MetadataItem] - :param stats: The lifetime resource usage statistics for the Job Schedule. - The statistics may not be immediately available. The Batch service - performs periodic roll-up of statistics. The typical delay is about 30 - minutes. - :type stats: ~azure.batch.models.JobScheduleStatistics - """ - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'url': {'key': 'url', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, - 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, - 'state': {'key': 'state', 'type': 'JobScheduleState'}, - 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, - 'previous_state': {'key': 'previousState', 'type': 'JobScheduleState'}, - 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, - 'schedule': {'key': 'schedule', 'type': 'Schedule'}, - 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, - 'execution_info': {'key': 'executionInfo', 'type': 'JobScheduleExecutionInformation'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'stats': {'key': 'stats', 'type': 'JobScheduleStatistics'}, - } - - def __init__(self, **kwargs): - super(CloudJobSchedule, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.display_name = kwargs.get('display_name', None) - self.url = kwargs.get('url', None) - self.e_tag = kwargs.get('e_tag', None) - self.last_modified = kwargs.get('last_modified', None) - self.creation_time = kwargs.get('creation_time', None) - self.state = kwargs.get('state', None) - self.state_transition_time = kwargs.get('state_transition_time', None) - self.previous_state = kwargs.get('previous_state', None) - self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) - self.schedule = kwargs.get('schedule', None) - self.job_specification = kwargs.get('job_specification', None) - self.execution_info = kwargs.get('execution_info', None) - self.metadata = kwargs.get('metadata', None) - self.stats = kwargs.get('stats', None) - - -class CloudPool(Model): - """A Pool in the Azure Batch service. + :vartype execution_info: ~azure.batch.models.BatchJobScheduleExecutionInfo + :ivar metadata: A list of name-value pairs associated with the schedule as metadata. The Batch + service does not assign any meaning to metadata; it is solely for the use of user code. + :vartype metadata: list[~azure.batch.models.MetadataItem] + :ivar stats: The lifetime resource usage statistics for the Job Schedule. The statistics may + not be immediately available. The Batch service performs periodic roll-up of statistics. The + typical delay is about 30 minutes. 
+ :vartype stats: ~azure.batch.models.BatchJobScheduleStatistics + """ + + id: Optional[str] = rest_field(visibility=["read"]) + """A string that uniquely identifies the schedule within the Account.""" + display_name: Optional[str] = rest_field(name="displayName", visibility=["read"]) + """The display name for the schedule.""" + url: Optional[str] = rest_field(visibility=["read"]) + """The URL of the Job Schedule.""" + e_tag: Optional[str] = rest_field(name="eTag", visibility=["read"]) + """The ETag of the Job Schedule. This is an opaque string. You can use it to detect whether the + Job Schedule has changed between requests. In particular, you can pass the ETag with an + Update Job Schedule request to specify that your changes should take effect only if nobody else + has modified the schedule in the meantime.""" + last_modified: Optional[datetime.datetime] = rest_field(name="lastModified", visibility=["read"], format="rfc3339") + """The last modified time of the Job Schedule. This is the last time at which the schedule level + data, such as the Job specification or recurrence information, changed. It does not factor in + job-level changes such as new Jobs being created or Jobs changing state.""" + creation_time: Optional[datetime.datetime] = rest_field(name="creationTime", visibility=["read"], format="rfc3339") + """The creation time of the Job Schedule.""" + state: Optional[Union[str, "_models.BatchJobScheduleState"]] = rest_field(visibility=["read"]) + """The current state of the Job Schedule. Known values are: \"active\", \"completed\", + \"disabled\", \"terminating\", and \"deleting\".""" + state_transition_time: Optional[datetime.datetime] = rest_field( + name="stateTransitionTime", visibility=["read"], format="rfc3339" + ) + """The time at which the Job Schedule entered the current state.""" + previous_state: Optional[Union[str, "_models.BatchJobScheduleState"]] = rest_field( + name="previousState", visibility=["read"] + ) + """The previous state of the Job Schedule. This property is not present if the Job Schedule is in + its initial active state. Known values are: \"active\", \"completed\", \"disabled\", + \"terminating\", and \"deleting\".""" + previous_state_transition_time: Optional[datetime.datetime] = rest_field( + name="previousStateTransitionTime", visibility=["read"], format="rfc3339" + ) + """The time at which the Job Schedule entered its previous state. This property is not present if + the Job Schedule is in its initial active state.""" + schedule: Optional["_models.BatchJobScheduleConfiguration"] = rest_field() + """The schedule according to which Jobs will be created. All times are fixed respective to UTC and + are not impacted by daylight saving time.""" + job_specification: "_models.BatchJobSpecification" = rest_field(name="jobSpecification") + """The details of the Jobs to be created on this schedule. Required.""" + execution_info: Optional["_models.BatchJobScheduleExecutionInfo"] = rest_field( + name="executionInfo", visibility=["read"] + ) + """Information about Jobs that have been and will be run under this schedule.""" + metadata: Optional[List["_models.MetadataItem"]] = rest_field() + """A list of name-value pairs associated with the schedule as metadata. The Batch service does not + assign any meaning to metadata; it is solely for the use of user code.""" + stats: Optional["_models.BatchJobScheduleStatistics"] = rest_field(visibility=["read"]) + """The lifetime resource usage statistics for the Job Schedule.
The statistics may not be + immediately available. The Batch service performs periodic roll-up of statistics. The typical + delay is about 30 minutes.""" + + @overload + def __init__( + self, + *, + job_specification: "_models.BatchJobSpecification", + schedule: Optional["_models.BatchJobScheduleConfiguration"] = None, + metadata: Optional[List["_models.MetadataItem"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobScheduleConfiguration(_model_base.Model): + """The schedule according to which Jobs will be created. All times are fixed + respective to UTC and are not impacted by daylight saving time. - Variables are only populated by the server, and will be ignored when - sending a request. - - :param id: The ID can contain any combination of alphanumeric characters - including hyphens and underscores, and cannot contain more than 64 - characters. The ID is case-preserving and case-insensitive (that is, you - may not have two IDs within an Account that differ only by case). - :type id: str - :param display_name: The display name need not be unique and can contain - any Unicode characters up to a maximum length of 1024. - :type display_name: str - :param url: - :type url: str - :param e_tag: This is an opaque string. You can use it to detect whether - the Pool has changed between requests. In particular, you can be pass the - ETag when updating a Pool to specify that your changes should take effect - only if nobody else has modified the Pool in the meantime. - :type e_tag: str - :param last_modified: This is the last time at which the Pool level data, - such as the targetDedicatedNodes or enableAutoscale settings, changed. It - does not factor in node-level changes such as a Compute Node changing - state. - :type last_modified: datetime - :param creation_time: - :type creation_time: datetime - :param state: Possible values include: 'active', 'deleting' - :type state: str or ~azure.batch.models.PoolState - :param state_transition_time: - :type state_transition_time: datetime - :param allocation_state: Possible values include: 'steady', 'resizing', - 'stopping' - :type allocation_state: str or ~azure.batch.models.AllocationState - :param allocation_state_transition_time: - :type allocation_state_transition_time: datetime - :param vm_size: For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - :type vm_size: str - :param cloud_service_configuration: The cloud service configuration for - the Pool. This property and virtualMachineConfiguration are mutually - exclusive and one of the properties must be specified. This property - cannot be specified if the Batch Account was created with its - poolAllocationMode property set to 'UserSubscription'. - :type cloud_service_configuration: - ~azure.batch.models.CloudServiceConfiguration - :param virtual_machine_configuration: The virtual machine configuration - for the Pool. This property and cloudServiceConfiguration are mutually - exclusive and one of the properties must be specified. 
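Because job_specification is the only required field of BatchJobSchedule and the remaining fields are optional or read-only, a new schedule can be sketched compactly with the raw-JSON mapping overload, using the wire names these models declare via rest_field (the schedule-level keys belong to BatchJobScheduleConfiguration, defined below; durations are ISO 8601 strings in the REST payload). The poolInfo/poolId keys under jobSpecification are an assumption for illustration, since BatchJobSpecification is not part of this section:

from azure.batch import models

job_schedule = models.BatchJobSchedule(
    {
        "schedule": {
            "doNotRunUntil": "2025-01-01T00:00:00Z",
            "recurrenceInterval": "PT1H",  # one Job per hour
            "startWindow": "PT10M",
        },
        # Assumed shape; BatchJobSpecification is defined elsewhere in the generated models.
        "jobSpecification": {"poolInfo": {"poolId": "my-pool"}},
    }
)

The keyword equivalent would build a BatchJobScheduleConfiguration with datetime.timedelta values for start_window and recurrence_interval (assuming its overload follows the same pattern as the classes above) and pass it as schedule= alongside a BatchJobSpecification.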
- :type virtual_machine_configuration: - ~azure.batch.models.VirtualMachineConfiguration - :param resize_timeout: This is the timeout for the most recent resize - operation. (The initial sizing when the Pool is created counts as a - resize.) The default value is 15 minutes. - :type resize_timeout: timedelta - :param resize_errors: This property is set only if one or more errors - occurred during the last Pool resize, and only when the Pool - allocationState is Steady. - :type resize_errors: list[~azure.batch.models.ResizeError] - :param current_dedicated_nodes: The number of dedicated Compute Nodes - currently in the Pool. - :type current_dedicated_nodes: int - :param current_low_priority_nodes: The number of Spot/Low-priority Compute - Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have - been preempted are included in this count. - :type current_low_priority_nodes: int - :param target_dedicated_nodes: The desired number of dedicated Compute - Nodes in the Pool. - :type target_dedicated_nodes: int - :param target_low_priority_nodes: The desired number of Spot/Low-priority - Compute Nodes in the Pool. - :type target_low_priority_nodes: int - :param enable_auto_scale: Whether the Pool size should automatically - adjust over time. If false, at least one of targetDedicatedNodes and - targetLowPriorityNodes must be specified. If true, the autoScaleFormula - property is required and the Pool automatically resizes according to the - formula. The default value is false. - :type enable_auto_scale: bool - :param auto_scale_formula: This property is set only if the Pool - automatically scales, i.e. enableAutoScale is true. - :type auto_scale_formula: str - :param auto_scale_evaluation_interval: This property is set only if the - Pool automatically scales, i.e. enableAutoScale is true. - :type auto_scale_evaluation_interval: timedelta - :param auto_scale_run: The results and errors from the last execution of - the autoscale formula. This property is set only if the Pool automatically - scales, i.e. enableAutoScale is true. - :type auto_scale_run: ~azure.batch.models.AutoScaleRun - :param enable_inter_node_communication: Whether the Pool permits direct - communication between Compute Nodes. This imposes restrictions on which - Compute Nodes can be assigned to the Pool. Specifying this value can - reduce the chance of the requested number of Compute Nodes to be allocated - in the Pool. - :type enable_inter_node_communication: bool - :param network_configuration: The network configuration for the Pool. - :type network_configuration: ~azure.batch.models.NetworkConfiguration - :param start_task: A Task specified to run on each Compute Node as it - joins the Pool. - :type start_task: ~azure.batch.models.StartTask - :param certificate_references: For Windows Nodes, the Batch service - installs the Certificates to the specified Certificate store and location. - For Linux Compute Nodes, the Certificates are stored in a directory inside - the Task working directory and an environment variable - AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - location. For Certificates with visibility of 'remoteUser', a 'certs' - directory is created in the user's home directory (e.g., - /home/{user-name}/certs) and Certificates are placed in that directory. - Warning: This property is deprecated and will be removed after February, - 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. 
- :type certificate_references: - list[~azure.batch.models.CertificateReference] - :param application_package_references: Changes to Package references - affect all new Nodes joining the Pool, but do not affect Compute Nodes - that are already in the Pool until they are rebooted or reimaged. There is - a maximum of 10 Package references on any given Pool. - :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param application_licenses: The list of application licenses must be a - subset of available Batch service application licenses. If a license is - requested which is not supported, Pool creation will fail. - :type application_licenses: list[str] - :param task_slots_per_node: The number of task slots that can be used to - run concurrent tasks on a single compute node in the pool. The default - value is 1. The maximum value is the smaller of 4 times the number of - cores of the vmSize of the pool or 256. - :type task_slots_per_node: int - :param task_scheduling_policy: How Tasks are distributed across Compute - Nodes in a Pool. If not specified, the default is spread. - :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy - :param user_accounts: - :type user_accounts: list[~azure.batch.models.UserAccount] - :param metadata: - :type metadata: list[~azure.batch.models.MetadataItem] - :param stats: Utilization and resource usage statistics for the entire - lifetime of the Pool. This property is populated only if the CloudPool was - retrieved with an expand clause including the 'stats' attribute; otherwise - it is null. The statistics may not be immediately available. The Batch - service performs periodic roll-up of statistics. The typical delay is - about 30 minutes. - :type stats: ~azure.batch.models.PoolStatistics - :param mount_configuration: This supports Azure Files, NFS, CIFS/SMB, and - Blobfuse. - :type mount_configuration: list[~azure.batch.models.MountConfiguration] - :param identity: The identity of the Batch pool, if configured. The list - of user identities associated with the Batch pool. The user identity - dictionary key references will be ARM resource ids in the form: - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - :type identity: ~azure.batch.models.BatchPoolIdentity - :param target_node_communication_mode: The desired node communication mode - for the pool. If omitted, the default value is Default. Possible values - include: 'default', 'classic', 'simplified' - :type target_node_communication_mode: str or - ~azure.batch.models.NodeCommunicationMode - :ivar current_node_communication_mode: The current state of the pool - communication mode. Possible values include: 'default', 'classic', - 'simplified' - :vartype current_node_communication_mode: str or - ~azure.batch.models.NodeCommunicationMode - :param upgrade_policy: The upgrade policy for the Pool. Describes an - upgrade policy - automatic, manual, or rolling. - :type upgrade_policy: ~azure.batch.models.UpgradePolicy - :param resource_tags: The user-defined tags to be associated with the - Azure Batch Pool. When specified, these tags are propagated to the backing - Azure resources associated with the pool. This property can only be - specified when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. 
- :type resource_tags: dict[str, str] - """ - - _validation = { - 'current_node_communication_mode': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'url': {'key': 'url', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, - 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, - 'state': {'key': 'state', 'type': 'PoolState'}, - 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, - 'allocation_state': {'key': 'allocationState', 'type': 'AllocationState'}, - 'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'}, - 'vm_size': {'key': 'vmSize', 'type': 'str'}, - 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, - 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, - 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, - 'resize_errors': {'key': 'resizeErrors', 'type': '[ResizeError]'}, - 'current_dedicated_nodes': {'key': 'currentDedicatedNodes', 'type': 'int'}, - 'current_low_priority_nodes': {'key': 'currentLowPriorityNodes', 'type': 'int'}, - 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, - 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, - 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, - 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, - 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, - 'auto_scale_run': {'key': 'autoScaleRun', 'type': 'AutoScaleRun'}, - 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, - 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, - 'start_task': {'key': 'startTask', 'type': 'StartTask'}, - 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, - 'task_slots_per_node': {'key': 'taskSlotsPerNode', 'type': 'int'}, - 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, - 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'stats': {'key': 'stats', 'type': 'PoolStatistics'}, - 'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'}, - 'identity': {'key': 'identity', 'type': 'BatchPoolIdentity'}, - 'target_node_communication_mode': {'key': 'targetNodeCommunicationMode', 'type': 'NodeCommunicationMode'}, - 'current_node_communication_mode': {'key': 'currentNodeCommunicationMode', 'type': 'NodeCommunicationMode'}, - 'upgrade_policy': {'key': 'upgradePolicy', 'type': 'UpgradePolicy'}, - 'resource_tags': {'key': 'resourceTags', 'type': '{str}'}, - } - - def __init__(self, **kwargs): - super(CloudPool, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.display_name = kwargs.get('display_name', None) - self.url = kwargs.get('url', None) - self.e_tag = kwargs.get('e_tag', None) - self.last_modified = kwargs.get('last_modified', None) - self.creation_time = kwargs.get('creation_time', None) - self.state = 
kwargs.get('state', None) - self.state_transition_time = kwargs.get('state_transition_time', None) - self.allocation_state = kwargs.get('allocation_state', None) - self.allocation_state_transition_time = kwargs.get('allocation_state_transition_time', None) - self.vm_size = kwargs.get('vm_size', None) - self.cloud_service_configuration = kwargs.get('cloud_service_configuration', None) - self.virtual_machine_configuration = kwargs.get('virtual_machine_configuration', None) - self.resize_timeout = kwargs.get('resize_timeout', None) - self.resize_errors = kwargs.get('resize_errors', None) - self.current_dedicated_nodes = kwargs.get('current_dedicated_nodes', None) - self.current_low_priority_nodes = kwargs.get('current_low_priority_nodes', None) - self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) - self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) - self.enable_auto_scale = kwargs.get('enable_auto_scale', None) - self.auto_scale_formula = kwargs.get('auto_scale_formula', None) - self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) - self.auto_scale_run = kwargs.get('auto_scale_run', None) - self.enable_inter_node_communication = kwargs.get('enable_inter_node_communication', None) - self.network_configuration = kwargs.get('network_configuration', None) - self.start_task = kwargs.get('start_task', None) - self.certificate_references = kwargs.get('certificate_references', None) - self.application_package_references = kwargs.get('application_package_references', None) - self.application_licenses = kwargs.get('application_licenses', None) - self.task_slots_per_node = kwargs.get('task_slots_per_node', None) - self.task_scheduling_policy = kwargs.get('task_scheduling_policy', None) - self.user_accounts = kwargs.get('user_accounts', None) - self.metadata = kwargs.get('metadata', None) - self.stats = kwargs.get('stats', None) - self.mount_configuration = kwargs.get('mount_configuration', None) - self.identity = kwargs.get('identity', None) - self.target_node_communication_mode = kwargs.get('target_node_communication_mode', None) - self.current_node_communication_mode = None - self.upgrade_policy = kwargs.get('upgrade_policy', None) - self.resource_tags = kwargs.get('resource_tags', None) - - -class CloudServiceConfiguration(Model): - """The configuration for Compute Nodes in a Pool based on the Azure Cloud - Services platform. - - All required parameters must be populated in order to send to Azure. - - :param os_family: Required. Possible values are: - 2 - OS Family 2, equivalent to Windows Server 2008 R2 SP1. - 3 - OS Family 3, equivalent to Windows Server 2012. - 4 - OS Family 4, equivalent to Windows Server 2012 R2. - 5 - OS Family 5, equivalent to Windows Server 2016. - 6 - OS Family 6, equivalent to Windows Server 2019. For more information, - see Azure Guest OS Releases - (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). - :type os_family: str - :param os_version: The default value is * which specifies the latest - operating system version for the specified OS family. 
- :type os_version: str - """ - - _validation = { - 'os_family': {'required': True}, - } - - _attribute_map = { - 'os_family': {'key': 'osFamily', 'type': 'str'}, - 'os_version': {'key': 'osVersion', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(CloudServiceConfiguration, self).__init__(**kwargs) - self.os_family = kwargs.get('os_family', None) - self.os_version = kwargs.get('os_version', None) - - -class CloudTask(Model): - """An Azure Batch Task. - - Batch will retry Tasks when a recovery operation is triggered on a Node. - Examples of recovery operations include (but are not limited to) when an - unhealthy Node is rebooted or a Compute Node disappeared due to host - failure. Retries due to recovery operations are independent of and are not - counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, - an internal retry due to a recovery operation may occur. Because of this, - all Tasks should be idempotent. This means Tasks need to tolerate being - interrupted and restarted without causing any corruption or duplicate data. - The best practice for long running Tasks is to use some form of - checkpointing. + :ivar do_not_run_until: The earliest time at which any Job may be created under this Job + Schedule. If you do not specify a doNotRunUntil time, the schedule becomes ready to create Jobs + immediately. + :vartype do_not_run_until: ~datetime.datetime + :ivar do_not_run_after: A time after which no Job will be created under this Job Schedule. The + schedule will move to the completed state as soon as this deadline is past and there is no + active Job under this Job Schedule. If you do not specify a doNotRunAfter time, and you are + creating a recurring Job Schedule, the Job Schedule will remain active until you explicitly + terminate it. + :vartype do_not_run_after: ~datetime.datetime + :ivar start_window: The time interval, starting from the time at which the schedule indicates a + Job should be created, within which a Job must be created. If a Job is not created within the + startWindow interval, then the 'opportunity' is lost; no Job will be created until the next + recurrence of the schedule. If the schedule is recurring, and the startWindow is longer than + the recurrence interval, then this is equivalent to an infinite startWindow, because the Job + that is 'due' in one recurrenceInterval is not carried forward into the next recurrence + interval. The default is infinite. The minimum value is 1 minute. If you specify a lower value, + the Batch service rejects the schedule with an error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). + :vartype start_window: ~datetime.timedelta + :ivar recurrence_interval: The time interval between the start times of two successive Jobs + under the Job Schedule. A Job Schedule can have at most one active Job under it at any given + time. Because a Job Schedule can have at most one active Job under it at any given time, if it + is time to create a new Job under a Job Schedule, but the previous Job is still running, the + Batch service will not create the new Job until the previous Job finishes. If the previous Job + does not finish within the startWindow period of the new recurrenceInterval, then no new Job + will be scheduled for that interval. For recurring Jobs, you should normally specify a + jobManagerTask in the jobSpecification. 
If you do not use jobManagerTask, you will need an + external process to monitor when Jobs are created, add Tasks to the Jobs and terminate the Jobs + ready for the next recurrence. The default is that the schedule does not recur: one Job is + created, within the startWindow after the doNotRunUntil time, and the schedule is complete as + soon as that Job finishes. The minimum value is 1 minute. If you specify a lower value, the + Batch service rejects the schedule with an error; if you are calling the REST API directly, the + HTTP status code is 400 (Bad Request). + :vartype recurrence_interval: ~datetime.timedelta + """ + + do_not_run_until: Optional[datetime.datetime] = rest_field(name="doNotRunUntil", format="rfc3339") + """The earliest time at which any Job may be created under this Job Schedule. If you do not + specify a doNotRunUntil time, the schedule becomes ready to create Jobs immediately.""" + do_not_run_after: Optional[datetime.datetime] = rest_field(name="doNotRunAfter", format="rfc3339") + """A time after which no Job will be created under this Job Schedule. The schedule will move to + the completed state as soon as this deadline is past and there is no active Job under this Job + Schedule. If you do not specify a doNotRunAfter time, and you are creating a recurring Job + Schedule, the Job Schedule will remain active until you explicitly terminate it.""" + start_window: Optional[datetime.timedelta] = rest_field(name="startWindow") + """The time interval, starting from the time at which the schedule indicates a Job should be + created, within which a Job must be created. If a Job is not created within the startWindow + interval, then the 'opportunity' is lost; no Job will be created until the next recurrence of + the schedule. If the schedule is recurring, and the startWindow is longer than the recurrence + interval, then this is equivalent to an infinite startWindow, because the Job that is 'due' in + one recurrenceInterval is not carried forward into the next recurrence interval. The default is + infinite. The minimum value is 1 minute. If you specify a lower value, the Batch service + rejects the schedule with an error; if you are calling the REST API directly, the HTTP status + code is 400 (Bad Request).""" + recurrence_interval: Optional[datetime.timedelta] = rest_field(name="recurrenceInterval") + """The time interval between the start times of two successive Jobs under the Job Schedule. A Job + Schedule can have at most one active Job under it at any given time. Because a Job Schedule can + have at most one active Job under it at any given time, if it is time to create a new Job under + a Job Schedule, but the previous Job is still running, the Batch service will not create the + new Job until the previous Job finishes. If the previous Job does not finish within the + startWindow period of the new recurrenceInterval, then no new Job will be scheduled for that + interval. For recurring Jobs, you should normally specify a jobManagerTask in the + jobSpecification. If you do not use jobManagerTask, you will need an external process to + monitor when Jobs are created, add Tasks to the Jobs and terminate the Jobs ready for the next + recurrence. The default is that the schedule does not recur: one Job is created, within the + startWindow after the doNotRunUntil time, and the schedule is complete as soon as that Job + finishes. The minimum value is 1 minute. 
If you specify a lower value, the Batch service + rejects the schedule with an error; if you are calling the REST API directly, the HTTP status + code is 400 (Bad Request).""" + + @overload + def __init__( + self, + *, + do_not_run_until: Optional[datetime.datetime] = None, + do_not_run_after: Optional[datetime.datetime] = None, + start_window: Optional[datetime.timedelta] = None, + recurrence_interval: Optional[datetime.timedelta] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobScheduleCreateContent(_model_base.Model): + """Parameters for creating an Azure Batch Job Schedule. + + All required parameters must be populated in order to send to server. + + :ivar id: A string that uniquely identifies the schedule within the Account. The ID can contain + any combination of alphanumeric characters including hyphens and underscores, and cannot + contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an Account that differ only by case). Required. + :vartype id: str + :ivar display_name: The display name for the schedule. The display name need not be unique and + can contain any Unicode characters up to a maximum length of 1024. + :vartype display_name: str + :ivar schedule: The schedule according to which Jobs will be created. All times are fixed + respective to UTC and are not impacted by daylight saving time. Required. + :vartype schedule: ~azure.batch.models.BatchJobScheduleConfiguration + :ivar job_specification: The details of the Jobs to be created on this schedule. Required. + :vartype job_specification: ~azure.batch.models.BatchJobSpecification + :ivar metadata: A list of name-value pairs associated with the schedule as metadata. The Batch + service does not assign any meaning to metadata; it is solely for the use of user code. + :vartype metadata: list[~azure.batch.models.MetadataItem] + """ + + id: str = rest_field() + """A string that uniquely identifies the schedule within the Account. The ID can contain any + combination of alphanumeric characters including hyphens and underscores, and cannot contain + more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not + have two IDs within an Account that differ only by case). Required.""" + display_name: Optional[str] = rest_field(name="displayName") + """The display name for the schedule. The display name need not be unique and can contain any + Unicode characters up to a maximum length of 1024.""" + schedule: "_models.BatchJobScheduleConfiguration" = rest_field() + """The schedule according to which Jobs will be created. All times are fixed respective to UTC and + are not impacted by daylight saving time. Required.""" + job_specification: "_models.BatchJobSpecification" = rest_field(name="jobSpecification") + """The details of the Jobs to be created on this schedule. Required.""" + metadata: Optional[List["_models.MetadataItem"]] = rest_field() + """A list of name-value pairs associated with the schedule as metadata. 
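To make the scheduling fields above concrete, here is a minimal sketch, not part of the generated code, that builds a BatchJobScheduleConfiguration through the keyword-only constructor shown in this diff and checks the startWindow/recurrenceInterval interaction described in its docstrings with plain timedelta arithmetic.

# Minimal sketch: composing BatchJobScheduleConfiguration from the keyword-only
# overload above. Datetimes are timezone-aware; intervals are stdlib timedeltas.
import datetime

from azure.batch.models import BatchJobScheduleConfiguration

schedule = BatchJobScheduleConfiguration(
    do_not_run_until=datetime.datetime(2024, 7, 1, 8, 0, tzinfo=datetime.timezone.utc),
    do_not_run_after=datetime.datetime(2024, 12, 31, tzinfo=datetime.timezone.utc),
    recurrence_interval=datetime.timedelta(hours=6),  # one Job roughly every 6 hours
    start_window=datetime.timedelta(minutes=30),      # each Job must start within 30 minutes of its slot
)

# Per the docstring above, a startWindow longer than the recurrence interval is
# effectively infinite, because a missed occurrence is never carried forward.
effectively_infinite = schedule.start_window > schedule.recurrence_interval
print(effectively_infinite)  # False for the values chosen here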
The Batch service does not + assign any meaning to metadata; it is solely for the use of user code.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + schedule: "_models.BatchJobScheduleConfiguration", + job_specification: "_models.BatchJobSpecification", + display_name: Optional[str] = None, + metadata: Optional[List["_models.MetadataItem"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobScheduleExecutionInfo(_model_base.Model): + """Contains information about Jobs that have been and will be run under a Job + Schedule. - :param id: The ID can contain any combination of alphanumeric characters - including hyphens and underscores, and cannot contain more than 64 - characters. - :type id: str - :param display_name: The display name need not be unique and can contain - any Unicode characters up to a maximum length of 1024. - :type display_name: str - :param url: - :type url: str - :param e_tag: This is an opaque string. You can use it to detect whether - the Task has changed between requests. In particular, you can be pass the - ETag when updating a Task to specify that your changes should take effect - only if nobody else has modified the Task in the meantime. - :type e_tag: str - :param last_modified: - :type last_modified: datetime - :param creation_time: - :type creation_time: datetime - :param exit_conditions: How the Batch service should respond when the Task - completes. - :type exit_conditions: ~azure.batch.models.ExitConditions - :param state: The current state of the Task. Possible values include: - 'active', 'preparing', 'running', 'completed' - :type state: str or ~azure.batch.models.TaskState - :param state_transition_time: - :type state_transition_time: datetime - :param previous_state: The previous state of the Task. This property is - not set if the Task is in its initial Active state. Possible values - include: 'active', 'preparing', 'running', 'completed' - :type previous_state: str or ~azure.batch.models.TaskState - :param previous_state_transition_time: This property is not set if the - Task is in its initial Active state. - :type previous_state_transition_time: datetime - :param command_line: For multi-instance Tasks, the command line is - executed as the primary Task, after the primary Task and all subtasks have - finished executing the coordination command line. The command line does - not run under a shell, and therefore cannot take advantage of shell - features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command - line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should - use a relative path (relative to the Task working directory), or use the - Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - :type command_line: str - :param container_settings: The settings for the container under which the - Task runs. If the Pool that will run this Task has containerConfiguration - set, this must be set as well. If the Pool that will run this Task doesn't - have containerConfiguration set, this must not be set. 
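A hedged sketch of the create payload built from BatchJobScheduleCreateContent follows. BatchJobSpecification is defined later in this file; the pool_id keyword on BatchPoolInfo and the name/value keywords on MetadataItem are assumptions, since those models are defined outside this excerpt.

# Sketch only: a Job Schedule create payload and its raw-mapping equivalent.
import datetime

from azure.batch.models import (
    BatchJobScheduleConfiguration,
    BatchJobScheduleCreateContent,
    BatchJobSpecification,
    BatchPoolInfo,
    MetadataItem,
)

create_content = BatchJobScheduleCreateContent(
    id="nightly-reports",  # at most 64 characters; case-insensitive uniqueness per the docstring
    display_name="Nightly report generation",
    schedule=BatchJobScheduleConfiguration(recurrence_interval=datetime.timedelta(days=1)),
    job_specification=BatchJobSpecification(
        pool_info=BatchPoolInfo(pool_id="reporting-pool"),  # assumed keyword
    ),
    metadata=[MetadataItem(name="team", value="analytics")],  # assumed keywords
)

# The mapping overload shown above accepts the raw wire shape
# (camelCase property names, ISO 8601 durations such as "P1D"):
same_content = BatchJobScheduleCreateContent(
    {
        "id": "nightly-reports",
        "schedule": {"recurrenceInterval": "P1D"},
        "jobSpecification": {"poolInfo": {"poolId": "reporting-pool"}},
    }
)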
When this is - specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR - (the root of Azure Batch directories on the node) are mapped into the - container, all Task environment variables are mapped into the container, - and the Task command line is executed in the container. Files produced in - the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to - the host disk, meaning that Batch file APIs will not be able to access - those files. - :type container_settings: ~azure.batch.models.TaskContainerSettings - :param resource_files: For multi-instance Tasks, the resource files will - only be downloaded to the Compute Node on which the primary Task is - executed. There is a maximum size for the list of resource files. When - the max size is exceeded, the request will fail and the response error - code will be RequestEntityTooLarge. If this occurs, the collection of - ResourceFiles must be reduced in size. This can be achieved using .zip - files, Application Packages, or Docker Containers. - :type resource_files: list[~azure.batch.models.ResourceFile] - :param output_files: For multi-instance Tasks, the files will only be - uploaded from the Compute Node on which the primary Task is executed. - :type output_files: list[~azure.batch.models.OutputFile] - :param environment_settings: - :type environment_settings: list[~azure.batch.models.EnvironmentSetting] - :param affinity_info: A locality hint that can be used by the Batch - service to select a Compute Node on which to start the new Task. - :type affinity_info: ~azure.batch.models.AffinityInformation - :param constraints: The execution constraints that apply to this Task. - :type constraints: ~azure.batch.models.TaskConstraints - :param required_slots: The number of scheduling slots that the Task - requires to run. The default is 1. A Task can only be scheduled to run on - a compute node if the node has enough free scheduling slots available. For - multi-instance Tasks, this must be 1. - :type required_slots: int - :param user_identity: The user identity under which the Task runs. If - omitted, the Task runs as a non-administrative user unique to the Task. - :type user_identity: ~azure.batch.models.UserIdentity - :param execution_info: Information about the execution of the Task. - :type execution_info: ~azure.batch.models.TaskExecutionInformation - :param node_info: Information about the Compute Node on which the Task - ran. - :type node_info: ~azure.batch.models.ComputeNodeInformation - :param multi_instance_settings: An object that indicates that the Task is - a multi-instance Task, and contains information about how to run the - multi-instance Task. - :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings - :param stats: Resource usage statistics for the Task. - :type stats: ~azure.batch.models.TaskStatistics - :param depends_on: The Tasks that this Task depends on. This Task will not - be scheduled until all Tasks that it depends on have completed - successfully. If any of those Tasks fail and exhaust their retry counts, - this Task will never be scheduled. - :type depends_on: ~azure.batch.models.TaskDependencies - :param application_package_references: Application packages are downloaded - and deployed to a shared directory, not the Task working directory. - Therefore, if a referenced package is already on the Node, and is up to - date, then it is not re-downloaded; the existing copy on the Compute Node - is used. 
If a referenced Package cannot be installed, for example because - the package has been deleted or because download failed, the Task fails. - :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param authentication_token_settings: The settings for an authentication - token that the Task can use to perform Batch service operations. If this - property is set, the Batch service provides the Task with an - authentication token which can be used to authenticate Batch service - operations without requiring an Account access key. The token is provided - via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations - that the Task can carry out using the token depend on the settings. For - example, a Task can request Job permissions in order to add other Tasks to - the Job, or check the status of the Job or of other Tasks under the Job. - :type authentication_token_settings: - ~azure.batch.models.AuthenticationTokenSettings - """ - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'url': {'key': 'url', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, - 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, - 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, - 'state': {'key': 'state', 'type': 'TaskState'}, - 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, - 'previous_state': {'key': 'previousState', 'type': 'TaskState'}, - 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, - 'command_line': {'key': 'commandLine', 'type': 'str'}, - 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, - 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, - 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, - 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, - 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, - 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, - 'required_slots': {'key': 'requiredSlots', 'type': 'int'}, - 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, - 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, - 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, - 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, - 'stats': {'key': 'stats', 'type': 'TaskStatistics'}, - 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, - } - - def __init__(self, **kwargs): - super(CloudTask, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.display_name = kwargs.get('display_name', None) - self.url = kwargs.get('url', None) - self.e_tag = kwargs.get('e_tag', None) - self.last_modified = kwargs.get('last_modified', None) - self.creation_time = kwargs.get('creation_time', None) - self.exit_conditions = kwargs.get('exit_conditions', None) - self.state = kwargs.get('state', None) - self.state_transition_time = kwargs.get('state_transition_time', None) - self.previous_state = kwargs.get('previous_state', None) - 
self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None)
- self.command_line = kwargs.get('command_line', None)
- self.container_settings = kwargs.get('container_settings', None)
- self.resource_files = kwargs.get('resource_files', None)
- self.output_files = kwargs.get('output_files', None)
- self.environment_settings = kwargs.get('environment_settings', None)
- self.affinity_info = kwargs.get('affinity_info', None)
- self.constraints = kwargs.get('constraints', None)
- self.required_slots = kwargs.get('required_slots', None)
- self.user_identity = kwargs.get('user_identity', None)
- self.execution_info = kwargs.get('execution_info', None)
- self.node_info = kwargs.get('node_info', None)
- self.multi_instance_settings = kwargs.get('multi_instance_settings', None)
- self.stats = kwargs.get('stats', None)
- self.depends_on = kwargs.get('depends_on', None)
- self.application_package_references = kwargs.get('application_package_references', None)
- self.authentication_token_settings = kwargs.get('authentication_token_settings', None)
-
-
-class CloudTaskListSubtasksResult(Model):
- """The result of listing the subtasks of a Task.
-
- :param value:
- :type value: list[~azure.batch.models.SubtaskInformation]
- """
-
- _attribute_map = {
- 'value': {'key': 'value', 'type': '[SubtaskInformation]'},
- }
-
- def __init__(self, **kwargs):
- super(CloudTaskListSubtasksResult, self).__init__(**kwargs)
- self.value = kwargs.get('value', None)
-
-
-class ComputeNode(Model):
- """A Compute Node in the Batch service.
+ :ivar next_run_time: The next time at which a Job will be created under this schedule. This
+ property is meaningful only if the schedule is in the active state when the time comes around.
+ For example, if the schedule is disabled, no Job will be created at nextRunTime unless the Job
+ is enabled before then.
+ :vartype next_run_time: ~datetime.datetime
+ :ivar recent_job: Information about the most recent Job under the Job Schedule. This property
+ is present only if at least one Job has run under the schedule.
+ :vartype recent_job: ~azure.batch.models.RecentBatchJob
+ :ivar end_time: The time at which the schedule ended. This property is set only if the Job
+ Schedule is in the completed state.
+ :vartype end_time: ~datetime.datetime
+ """
+
+ next_run_time: Optional[datetime.datetime] = rest_field(name="nextRunTime", format="rfc3339")
+ """The next time at which a Job will be created under this schedule. This property is meaningful
+ only if the schedule is in the active state when the time comes around. For example, if the
+ schedule is disabled, no Job will be created at nextRunTime unless the Job is enabled before
+ then."""
+ recent_job: Optional["_models.RecentBatchJob"] = rest_field(name="recentJob")
+ """Information about the most recent Job under the Job Schedule. This property is present only if
+ at least one Job has run under the schedule."""
+ end_time: Optional[datetime.datetime] = rest_field(name="endTime", format="rfc3339")
+ """The time at which the schedule ended. This property is set only if the Job Schedule is in the
+ completed state."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ next_run_time: Optional[datetime.datetime] = None,
+ recent_job: Optional["_models.RecentBatchJob"] = None,
+ end_time: Optional[datetime.datetime] = None,
+ ): ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]):
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobScheduleStatistics(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Resource usage statistics for a Job Schedule. - :param id: Every Compute Node that is added to a Pool is assigned a unique - ID. Whenever a Compute Node is removed from a Pool, all of its local files - are deleted, and the ID is reclaimed and could be reused for new Compute - Nodes. - :type id: str - :param url: - :type url: str - :param state: The Spot/Low-priority Compute Node has been preempted. Tasks - which were running on the Compute Node when it was preempted will be - rescheduled when another Compute Node becomes available. Possible values - include: 'idle', 'rebooting', 'reimaging', 'running', 'unusable', - 'creating', 'starting', 'waitingForStartTask', 'startTaskFailed', - 'unknown', 'leavingPool', 'offline', 'preempted', 'upgradingOS' - :type state: str or ~azure.batch.models.ComputeNodeState - :param scheduling_state: Possible values include: 'enabled', 'disabled' - :type scheduling_state: str or ~azure.batch.models.SchedulingState - :param state_transition_time: - :type state_transition_time: datetime - :param last_boot_time: This property may not be present if the Compute - Node state is unusable. - :type last_boot_time: datetime - :param allocation_time: This is the time when the Compute Node was - initially allocated and doesn't change once set. It is not updated when - the Compute Node is service healed or preempted. - :type allocation_time: datetime - :param ip_address: Every Compute Node that is added to a Pool is assigned - a unique IP address. Whenever a Compute Node is removed from a Pool, all - of its local files are deleted, and the IP address is reclaimed and could - be reused for new Compute Nodes. - :type ip_address: str - :param affinity_id: Note that this is just a soft affinity. If the target - Compute Node is busy or unavailable at the time the Task is scheduled, - then the Task will be scheduled elsewhere. - :type affinity_id: str - :param vm_size: For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - :type vm_size: str - :param total_tasks_run: The total number of Job Tasks completed on the - Compute Node. This includes Job Manager Tasks and normal Tasks, but not - Job Preparation, Job Release or Start Tasks. - :type total_tasks_run: int - :param running_tasks_count: The total number of currently running Job - Tasks on the Compute Node. This includes Job Manager Tasks and normal - Tasks, but not Job Preparation, Job Release or Start Tasks. - :type running_tasks_count: int - :param running_task_slots_count: The total number of scheduling slots used - by currently running Job Tasks on the Compute Node. This includes Job - Manager Tasks and normal Tasks, but not Job Preparation, Job Release or - Start Tasks. - :type running_task_slots_count: int - :param total_tasks_succeeded: The total number of Job Tasks which - completed successfully (with exitCode 0) on the Compute Node. This - includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job - Release or Start Tasks. 
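For the read-only execution information above, a small sketch: the instance is built here from a raw JSON mapping (the second __init__ overload) purely to show how the camelCase wire names map onto the snake_case attributes; in practice it is returned by the service, and the abbreviated recentJob payload is illustrative.

# Sketch only: inspecting BatchJobScheduleExecutionInfo.
from azure.batch.models import BatchJobScheduleExecutionInfo

info = BatchJobScheduleExecutionInfo(
    {"nextRunTime": "2024-07-01T08:00:00Z", "recentJob": {"id": "nightly-reports:job-42"}}
)

if info.end_time is None:
    print("Schedule still active; next run at:", info.next_run_time)
if info.recent_job is not None:
    print("Most recent job:", info.recent_job)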
- :type total_tasks_succeeded: int - :param recent_tasks: This property is present only if at least one Task - has run on this Compute Node since it was assigned to the Pool. - :type recent_tasks: list[~azure.batch.models.TaskInformation] - :param start_task: The Task specified to run on the Compute Node as it - joins the Pool. - :type start_task: ~azure.batch.models.StartTask - :param start_task_info: Runtime information about the execution of the - StartTask on the Compute Node. - :type start_task_info: ~azure.batch.models.StartTaskInformation - :param certificate_references: For Windows Nodes, the Batch service - installs the Certificates to the specified Certificate store and location. - For Linux Compute Nodes, the Certificates are stored in a directory inside - the Task working directory and an environment variable - AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - location. For Certificates with visibility of 'remoteUser', a 'certs' - directory is created in the user's home directory (e.g., - /home/{user-name}/certs) and Certificates are placed in that directory. - Warning: This property is deprecated and will be removed after February, - 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - :type certificate_references: - list[~azure.batch.models.CertificateReference] - :param errors: - :type errors: list[~azure.batch.models.ComputeNodeError] - :param is_dedicated: Whether this Compute Node is a dedicated Compute - Node. If false, the Compute Node is a Spot/Low-priority Compute Node. - :type is_dedicated: bool - :param endpoint_configuration: The endpoint configuration for the Compute - Node. - :type endpoint_configuration: - ~azure.batch.models.ComputeNodeEndpointConfiguration - :param node_agent_info: Information about the Compute Node agent version - and the time the Compute Node upgraded to a new version. - :type node_agent_info: ~azure.batch.models.NodeAgentInformation - :param virtual_machine_info: Info about the current state of the virtual - machine. 
- :type virtual_machine_info: ~azure.batch.models.VirtualMachineInfo - """ - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'url': {'key': 'url', 'type': 'str'}, - 'state': {'key': 'state', 'type': 'ComputeNodeState'}, - 'scheduling_state': {'key': 'schedulingState', 'type': 'SchedulingState'}, - 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, - 'last_boot_time': {'key': 'lastBootTime', 'type': 'iso-8601'}, - 'allocation_time': {'key': 'allocationTime', 'type': 'iso-8601'}, - 'ip_address': {'key': 'ipAddress', 'type': 'str'}, - 'affinity_id': {'key': 'affinityId', 'type': 'str'}, - 'vm_size': {'key': 'vmSize', 'type': 'str'}, - 'total_tasks_run': {'key': 'totalTasksRun', 'type': 'int'}, - 'running_tasks_count': {'key': 'runningTasksCount', 'type': 'int'}, - 'running_task_slots_count': {'key': 'runningTaskSlotsCount', 'type': 'int'}, - 'total_tasks_succeeded': {'key': 'totalTasksSucceeded', 'type': 'int'}, - 'recent_tasks': {'key': 'recentTasks', 'type': '[TaskInformation]'}, - 'start_task': {'key': 'startTask', 'type': 'StartTask'}, - 'start_task_info': {'key': 'startTaskInfo', 'type': 'StartTaskInformation'}, - 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, - 'errors': {'key': 'errors', 'type': '[ComputeNodeError]'}, - 'is_dedicated': {'key': 'isDedicated', 'type': 'bool'}, - 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'ComputeNodeEndpointConfiguration'}, - 'node_agent_info': {'key': 'nodeAgentInfo', 'type': 'NodeAgentInformation'}, - 'virtual_machine_info': {'key': 'virtualMachineInfo', 'type': 'VirtualMachineInfo'}, - } - - def __init__(self, **kwargs): - super(ComputeNode, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.url = kwargs.get('url', None) - self.state = kwargs.get('state', None) - self.scheduling_state = kwargs.get('scheduling_state', None) - self.state_transition_time = kwargs.get('state_transition_time', None) - self.last_boot_time = kwargs.get('last_boot_time', None) - self.allocation_time = kwargs.get('allocation_time', None) - self.ip_address = kwargs.get('ip_address', None) - self.affinity_id = kwargs.get('affinity_id', None) - self.vm_size = kwargs.get('vm_size', None) - self.total_tasks_run = kwargs.get('total_tasks_run', None) - self.running_tasks_count = kwargs.get('running_tasks_count', None) - self.running_task_slots_count = kwargs.get('running_task_slots_count', None) - self.total_tasks_succeeded = kwargs.get('total_tasks_succeeded', None) - self.recent_tasks = kwargs.get('recent_tasks', None) - self.start_task = kwargs.get('start_task', None) - self.start_task_info = kwargs.get('start_task_info', None) - self.certificate_references = kwargs.get('certificate_references', None) - self.errors = kwargs.get('errors', None) - self.is_dedicated = kwargs.get('is_dedicated', None) - self.endpoint_configuration = kwargs.get('endpoint_configuration', None) - self.node_agent_info = kwargs.get('node_agent_info', None) - self.virtual_machine_info = kwargs.get('virtual_machine_info', None) - - -class ComputeNodeAddUserOptions(Model): - """Additional parameters for add_user operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . 
- :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeAddUserOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ComputeNodeDeleteUserOptions(Model): - """Additional parameters for delete_user operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeDeleteUserOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ComputeNodeDisableSchedulingOptions(Model): - """Additional parameters for disable_scheduling operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. 
Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeDisableSchedulingOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ComputeNodeEnableSchedulingOptions(Model): - """Additional parameters for enable_scheduling operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeEnableSchedulingOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ComputeNodeEndpointConfiguration(Model): - """The endpoint configuration for the Compute Node. - All required parameters must be populated in order to send to Azure. + :ivar url: The URL of the statistics. Required. + :vartype url: str + :ivar start_time: The start time of the time range covered by the statistics. Required. + :vartype start_time: ~datetime.datetime + :ivar last_update_time: The time at which the statistics were last updated. All statistics are + limited to the range between startTime and lastUpdateTime. Required. + :vartype last_update_time: ~datetime.datetime + :ivar user_cpu_time: The total user mode CPU time (summed across all cores and all Compute + Nodes) consumed by all Tasks in all Jobs created under the schedule. Required. + :vartype user_cpu_time: ~datetime.timedelta + :ivar kernel_cpu_time: The total kernel mode CPU time (summed across all cores and all Compute + Nodes) consumed by all Tasks in all Jobs created under the schedule. Required. + :vartype kernel_cpu_time: ~datetime.timedelta + :ivar wall_clock_time: The total wall clock time of all the Tasks in all the Jobs created under + the schedule. 
The wall clock time is the elapsed time from when the Task started running on a + Compute Node to when it finished (or to the last time the statistics were updated, if the Task + had not finished by then). If a Task was retried, this includes the wall clock time of all the + Task retries. Required. + :vartype wall_clock_time: ~datetime.timedelta + :ivar read_i_ops: The total number of disk read operations made by all Tasks in all Jobs + created under the schedule. Required. + :vartype read_i_ops: int + :ivar write_i_ops: The total number of disk write operations made by all Tasks in all Jobs + created under the schedule. Required. + :vartype write_i_ops: int + :ivar read_i_o_gi_b: The total gibibytes read from disk by all Tasks in all Jobs created under + the schedule. Required. + :vartype read_i_o_gi_b: float + :ivar write_i_o_gi_b: The total gibibytes written to disk by all Tasks in all Jobs created + under the schedule. Required. + :vartype write_i_o_gi_b: float + :ivar num_succeeded_tasks: The total number of Tasks successfully completed during the given + time range in Jobs created under the schedule. A Task completes successfully if it returns exit + code 0. Required. + :vartype num_succeeded_tasks: int + :ivar num_failed_tasks: The total number of Tasks that failed during the given time range in + Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without + returning exit code 0. Required. + :vartype num_failed_tasks: int + :ivar num_task_retries: The total number of retries during the given time range on all Tasks in + all Jobs created under the schedule. Required. + :vartype num_task_retries: int + :ivar wait_time: The total wait time of all Tasks in all Jobs created under the schedule. The + wait time for a Task is defined as the elapsed time between the creation of the Task and the + start of Task execution. (If the Task is retried due to failures, the wait time is the time to + the most recent Task execution.). This value is only reported in the Account lifetime + statistics; it is not included in the Job statistics. Required. + :vartype wait_time: ~datetime.timedelta + """ + + url: str = rest_field() + """The URL of the statistics. Required.""" + start_time: datetime.datetime = rest_field(name="startTime", format="rfc3339") + """The start time of the time range covered by the statistics. Required.""" + last_update_time: datetime.datetime = rest_field(name="lastUpdateTime", format="rfc3339") + """The time at which the statistics were last updated. All statistics are limited to the range + between startTime and lastUpdateTime. Required.""" + user_cpu_time: datetime.timedelta = rest_field(name="userCPUTime") + """The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all + Tasks in all Jobs created under the schedule. Required.""" + kernel_cpu_time: datetime.timedelta = rest_field(name="kernelCPUTime") + """The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all + Tasks in all Jobs created under the schedule. Required.""" + wall_clock_time: datetime.timedelta = rest_field(name="wallClockTime") + """The total wall clock time of all the Tasks in all the Jobs created under the schedule. The wall + clock time is the elapsed time from when the Task started running on a Compute Node to when it + finished (or to the last time the statistics were updated, if the Task had not finished by + then). If a Task was retried, this includes the wall clock time of all the Task retries. 
+ Required.""" + read_i_ops: int = rest_field(name="readIOps") + """The total number of disk read operations made by all Tasks in all Jobs created under the + schedule. Required.""" + write_i_ops: int = rest_field(name="writeIOps") + """The total number of disk write operations made by all Tasks in all Jobs created under the + schedule. Required.""" + read_i_o_gi_b: float = rest_field(name="readIOGiB") + """The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. + Required.""" + write_i_o_gi_b: float = rest_field(name="writeIOGiB") + """The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. + Required.""" + num_succeeded_tasks: int = rest_field(name="numSucceededTasks") + """The total number of Tasks successfully completed during the given time range in Jobs created + under the schedule. A Task completes successfully if it returns exit code 0. Required.""" + num_failed_tasks: int = rest_field(name="numFailedTasks") + """The total number of Tasks that failed during the given time range in Jobs created under the + schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. + Required.""" + num_task_retries: int = rest_field(name="numTaskRetries") + """The total number of retries during the given time range on all Tasks in all Jobs created under + the schedule. Required.""" + wait_time: datetime.timedelta = rest_field(name="waitTime") + """The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a + Task is defined as the elapsed time between the creation of the Task and the start of Task + execution. (If the Task is retried due to failures, the wait time is the time to the most + recent Task execution.). This value is only reported in the Account lifetime statistics; it is + not included in the Job statistics. Required.""" + + @overload + def __init__( + self, + *, + url: str, + start_time: datetime.datetime, + last_update_time: datetime.datetime, + user_cpu_time: datetime.timedelta, + kernel_cpu_time: datetime.timedelta, + wall_clock_time: datetime.timedelta, + read_i_ops: int, + write_i_ops: int, + read_i_o_gi_b: float, + write_i_o_gi_b: float, + num_succeeded_tasks: int, + num_failed_tasks: int, + num_task_retries: int, + wait_time: datetime.timedelta, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobScheduleUpdateContent(_model_base.Model): + """Parameters for updating an Azure Batch Job Schedule. + + :ivar schedule: The schedule according to which Jobs will be created. All times are fixed + respective to UTC and are not impacted by daylight saving time. If you do not specify this + element, the existing schedule is left unchanged. + :vartype schedule: ~azure.batch.models.BatchJobScheduleConfiguration + :ivar job_specification: The details of the Jobs to be created on this schedule. Updates affect + only Jobs that are started after the update has taken place. Any currently active Job continues + with the older specification. + :vartype job_specification: ~azure.batch.models.BatchJobSpecification + :ivar metadata: A list of name-value pairs associated with the Job Schedule as metadata. If you + do not specify this element, existing metadata is left unchanged. 
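Every field on the statistics model above is required, so a populated instance normally comes back from the service rather than being hand-built; the sketch below only derives two metrics that the docstrings imply (success rate and mean wall clock time per completed Task).

# Sketch only: derived metrics over a BatchJobScheduleStatistics returned by the service.
import datetime

from azure.batch.models import BatchJobScheduleStatistics

def task_success_rate(stats: BatchJobScheduleStatistics) -> float:
    """Fraction of completed Tasks that exited with code 0."""
    completed = stats.num_succeeded_tasks + stats.num_failed_tasks
    return stats.num_succeeded_tasks / completed if completed else 0.0

def mean_wall_clock(stats: BatchJobScheduleStatistics) -> datetime.timedelta:
    """Average wall clock time per completed Task (retries included, per the docstring)."""
    completed = stats.num_succeeded_tasks + stats.num_failed_tasks
    return stats.wall_clock_time / completed if completed else datetime.timedelta(0)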
+ :vartype metadata: list[~azure.batch.models.MetadataItem] + """ + + schedule: Optional["_models.BatchJobScheduleConfiguration"] = rest_field() + """The schedule according to which Jobs will be created. All times are fixed respective to UTC and + are not impacted by daylight saving time. If you do not specify this element, the existing + schedule is left unchanged.""" + job_specification: Optional["_models.BatchJobSpecification"] = rest_field(name="jobSpecification") + """The details of the Jobs to be created on this schedule. Updates affect only Jobs that are + started after the update has taken place. Any currently active Job continues with the older + specification.""" + metadata: Optional[List["_models.MetadataItem"]] = rest_field() + """A list of name-value pairs associated with the Job Schedule as metadata. If you do not specify + this element, existing metadata is left unchanged.""" + + @overload + def __init__( + self, + *, + schedule: Optional["_models.BatchJobScheduleConfiguration"] = None, + job_specification: Optional["_models.BatchJobSpecification"] = None, + metadata: Optional[List["_models.MetadataItem"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobSchedulingError(_model_base.Model): + """An error encountered by the Batch service when scheduling a Job. - :param inbound_endpoints: Required. - :type inbound_endpoints: list[~azure.batch.models.InboundEndpoint] - """ - _validation = { - 'inbound_endpoints': {'required': True}, - } + :ivar category: The category of the Job scheduling error. Required. Known values are: + "usererror" and "servererror". + :vartype category: str or ~azure.batch.models.ErrorCategory + :ivar code: An identifier for the Job scheduling error. Codes are invariant and are intended to + be consumed programmatically. + :vartype code: str + :ivar message: A message describing the Job scheduling error, intended to be suitable for + display in a user interface. + :vartype message: str + :ivar details: A list of additional error details related to the scheduling error. + :vartype details: list[~azure.batch.models.NameValuePair] + """ + + category: Union[str, "_models.ErrorCategory"] = rest_field() + """The category of the Job scheduling error. Required. Known values are: \"usererror\" and + \"servererror\".""" + code: Optional[str] = rest_field() + """An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed + programmatically.""" + message: Optional[str] = rest_field() + """A message describing the Job scheduling error, intended to be suitable for display in a user + interface.""" + details: Optional[List["_models.NameValuePair"]] = rest_field() + """A list of additional error details related to the scheduling error.""" + + @overload + def __init__( + self, + *, + category: Union[str, "_models.ErrorCategory"], + code: Optional[str] = None, + message: Optional[str] = None, + details: Optional[List["_models.NameValuePair"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
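For the update model above, a sketch of a patch-style payload: only the element that is set (the schedule) is replaced, while the omitted job specification and metadata are left unchanged, exactly as its docstrings state.

# Sketch only: change just the recurrence of an existing Job Schedule.
import datetime

from azure.batch.models import BatchJobScheduleConfiguration, BatchJobScheduleUpdateContent

update = BatchJobScheduleUpdateContent(
    schedule=BatchJobScheduleConfiguration(recurrence_interval=datetime.timedelta(hours=12)),
    # job_specification and metadata are omitted, so the existing values are kept.
)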
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
+ super().__init__(*args, **kwargs)
+
+
+class BatchJobSpecification(_model_base.Model): # pylint: disable=too-many-instance-attributes
+ """Specifies details of the Jobs to be created on a schedule.
-
- _attribute_map = {
- 'inbound_endpoints': {'key': 'inboundEndpoints', 'type': '[InboundEndpoint]'},
- }
-
- def __init__(self, **kwargs):
- super(ComputeNodeEndpointConfiguration, self).__init__(**kwargs)
- self.inbound_endpoints = kwargs.get('inbound_endpoints', None)
+
+ :ivar priority: The priority of Jobs created under this schedule. Priority values can range
+ from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority.
+ The default value is 0. This priority is used as the default for all Jobs under the Job
+ Schedule. You can update a Job's priority after it has been created by using the update
+ Job API.
+ :vartype priority: int
+ :ivar allow_task_preemption: Whether Tasks in this job can be preempted by other high priority
+ jobs. If the value is set to True, other high priority jobs submitted to the system will take
+ precedence and will be able to requeue tasks from this job. You can update a job's
+ allowTaskPreemption after it has been created using the update job API.
+ :vartype allow_task_preemption: bool
+ :ivar max_parallel_tasks: The maximum number of tasks that can be executed in parallel for the
+ job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified,
+ the default value is -1, which means there's no limit to the number of tasks that can be run at
+ once. You can update a job's maxParallelTasks after it has been created using the update job
+ API.
+ :vartype max_parallel_tasks: int
+ :ivar display_name: The display name for Jobs created under this schedule. The name need not be
+ unique and can contain any Unicode characters up to a maximum length of 1024.
+ :vartype display_name: str
+ :ivar uses_task_dependencies: Whether Tasks in the Job can define dependencies on each other.
+ The default is false.
+ :vartype uses_task_dependencies: bool
+ :ivar on_all_tasks_complete: The action the Batch service should take when all Tasks in a Job
+ created under this schedule are in the completed state. Note that if a Job contains no Tasks,
+ then all Tasks are considered complete. This option is therefore most commonly used with a Job
+ Manager task; if you want to use automatic Job termination without a Job Manager, you should
+ initially set onAllTasksComplete to noaction and update the Job properties to set
+ onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is
+ noaction. Known values are: "noaction" and "terminatejob".
+ :vartype on_all_tasks_complete: str or ~azure.batch.models.OnAllBatchTasksComplete
+ :ivar on_task_failure: The action the Batch service should take when any Task fails in a Job
+ created under this schedule. A Task is considered to have failed if it has a
+ failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after
+ exhausting its retry count, or if there was an error starting the Task, for example due to a
+ resource file download error. The default is noaction. Known values are: "noaction" and
+ "performexitoptionsjobaction".
+ :vartype on_task_failure: str or ~azure.batch.models.OnBatchTaskFailure
+ :ivar network_configuration: The network configuration for the Job.
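A small helper sketch for the scheduling error model above; the name and value attributes on NameValuePair are assumed from its name, since that model is defined outside this excerpt.

# Sketch only: flatten a BatchJobSchedulingError into a log-friendly string.
from azure.batch.models import BatchJobSchedulingError

def describe_scheduling_error(err: BatchJobSchedulingError) -> str:
    lines = [f"[{err.category}] {err.code or '<no code>'}: {err.message or '<no message>'}"]
    for detail in err.details or []:
        lines.append(f"  {detail.name} = {detail.value}")  # assumed NameValuePair attributes
    return "\n".join(lines)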
+ :vartype network_configuration: ~azure.batch.models.BatchJobNetworkConfiguration
+ :ivar constraints: The execution constraints for Jobs created under this schedule.
+ :vartype constraints: ~azure.batch.models.BatchJobConstraints
+ :ivar job_manager_task: The details of a Job Manager Task to be launched when a Job is started
+ under this schedule. If the Job does not specify a Job Manager Task, the user must explicitly
+ add Tasks to the Job using the Task API. If the Job does specify a Job Manager Task, the Batch
+ service creates the Job Manager Task when the Job is created, and will try to schedule the Job
+ Manager Task before scheduling other Tasks in the Job.
+ :vartype job_manager_task: ~azure.batch.models.BatchJobManagerTask
+ :ivar job_preparation_task: The Job Preparation Task for Jobs created under this schedule. If a
+ Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node
+ before starting any Tasks of that Job on that Compute Node.
+ :vartype job_preparation_task: ~azure.batch.models.BatchJobPreparationTask
+ :ivar job_release_task: The Job Release Task for Jobs created under this schedule. The primary
+ purpose of the Job Release Task is to undo changes to Nodes made by the Job Preparation Task.
+ Example activities include deleting local files, or shutting down services that were started as
+ part of Job preparation. A Job Release Task cannot be specified without also specifying a Job
+ Preparation Task for the Job. The Batch service runs the Job Release Task on the Compute Nodes
+ that have run the Job Preparation Task.
+ :vartype job_release_task: ~azure.batch.models.BatchJobReleaseTask
+ :ivar common_environment_settings: A list of common environment variable settings. These
+ environment variables are set for all Tasks in Jobs created under this schedule (including the
+ Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an
+ environment setting specified here by specifying the same setting name with a different value.
+ :vartype common_environment_settings: list[~azure.batch.models.EnvironmentSetting]
+ :ivar pool_info: The Pool on which the Batch service runs the Tasks of Jobs created under this
+ schedule. Required.
+ :vartype pool_info: ~azure.batch.models.BatchPoolInfo
+ :ivar metadata: A list of name-value pairs associated with each Job created under this schedule
+ as metadata. The Batch service does not assign any meaning to metadata; it is solely for the
+ use of user code.
+ :vartype metadata: list[~azure.batch.models.MetadataItem]
+ """
+
+ priority: Optional[int] = rest_field()
+ """The priority of Jobs created under this schedule. Priority values can range from -1000 to 1000,
+ with -1000 being the lowest priority and 1000 being the highest priority. The default value is
+ 0. This priority is used as the default for all Jobs under the Job Schedule. You can update a
+ Job's priority after it has been created by using the update Job API."""
+ allow_task_preemption: Optional[bool] = rest_field(name="allowTaskPreemption")
+ """Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to
+ True, other high priority jobs submitted to the system will take precedence and will be able to
+ requeue tasks from this job.
You can update a job's allowTaskPreemption after it has been + created using the update job API.""" + max_parallel_tasks: Optional[int] = rest_field(name="maxParallelTasks") + """The maximum number of tasks that can be executed in parallel for the job. The value of + maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value + is -1, which means there's no limit to the number of tasks that can be run at once. You can + update a job's maxParallelTasks after it has been created using the update job API.""" + display_name: Optional[str] = rest_field(name="displayName") + """The display name for Jobs created under this schedule. The name need not be unique and can + contain any Unicode characters up to a maximum length of 1024.""" + uses_task_dependencies: Optional[bool] = rest_field(name="usesTaskDependencies") + """Whether Tasks in the Job can define dependencies on each other. The default is false.""" + on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = rest_field( + name="onAllTasksComplete" + ) + """The action the Batch service should take when all Tasks in a Job created under this schedule + are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered + complete. This option is therefore most commonly used with a Job Manager task; if you want to + use automatic Job termination without a Job Manager, you should initially set + onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to + terminatejob once you have finished adding Tasks. The default is noaction. Known values are: + \"noaction\" and \"terminatejob\".""" + on_task_failure: Optional[Union[str, "_models.OnBatchTaskFailure"]] = rest_field(name="onTaskFailure") + """The action the Batch service should take when any Task fails in a Job created under this + schedule. A Task is considered to have failed if it have failed if has a failureInfo. A + failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry + count, or if there was an error starting the Task, for example due to a resource file download + error. The default is noaction. Known values are: \"noaction\" and + \"performexitoptionsjobaction\".""" + network_configuration: Optional["_models.BatchJobNetworkConfiguration"] = rest_field(name="networkConfiguration") + """The network configuration for the Job.""" + constraints: Optional["_models.BatchJobConstraints"] = rest_field() + """The execution constraints for Jobs created under this schedule.""" + job_manager_task: Optional["_models.BatchJobManagerTask"] = rest_field(name="jobManagerTask") + """The details of a Job Manager Task to be launched when a Job is started under this schedule. If + the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job + using the Task API. If the Job does specify a Job Manager Task, the Batch service creates the + Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before + scheduling other Tasks in the Job.""" + job_preparation_task: Optional["_models.BatchJobPreparationTask"] = rest_field(name="jobPreparationTask") + """The Job Preparation Task for Jobs created under this schedule. 
If a Job has a Job Preparation + Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks + of that Job on that Compute Node.""" + job_release_task: Optional["_models.BatchJobReleaseTask"] = rest_field(name="jobReleaseTask") + """The Job Release Task for Jobs created under this schedule. The primary purpose of the Job + Release Task is to undo changes to Nodes made by the Job Preparation Task. Example activities + include deleting local files, or shutting down services that were started as part of Job + preparation. A Job Release Task cannot be specified without also specifying a Job Preparation + Task for the Job. The Batch service runs the Job Release Task on the Compute Nodes that have + run the Job Preparation Task.""" + common_environment_settings: Optional[List["_models.EnvironmentSetting"]] = rest_field( + name="commonEnvironmentSettings" + ) + """A list of common environment variable settings. These environment variables are set for all + Tasks in Jobs created under this schedule (including the Job Manager, Job Preparation and Job + Release Tasks). Individual Tasks can override an environment setting specified here by + specifying the same setting name with a different value.""" + pool_info: "_models.BatchPoolInfo" = rest_field(name="poolInfo") + """The Pool on which the Batch service runs the Tasks of Jobs created under this schedule. + Required.""" + metadata: Optional[List["_models.MetadataItem"]] = rest_field() + """A list of name-value pairs associated with each Job created under this schedule as metadata. + The Batch service does not assign any meaning to metadata; it is solely for the use of user + code.""" + + @overload + def __init__( + self, + *, + pool_info: "_models.BatchPoolInfo", + priority: Optional[int] = None, + allow_task_preemption: Optional[bool] = None, + max_parallel_tasks: Optional[int] = None, + display_name: Optional[str] = None, + uses_task_dependencies: Optional[bool] = None, + on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = None, + on_task_failure: Optional[Union[str, "_models.OnBatchTaskFailure"]] = None, + network_configuration: Optional["_models.BatchJobNetworkConfiguration"] = None, + constraints: Optional["_models.BatchJobConstraints"] = None, + job_manager_task: Optional["_models.BatchJobManagerTask"] = None, + job_preparation_task: Optional["_models.BatchJobPreparationTask"] = None, + job_release_task: Optional["_models.BatchJobReleaseTask"] = None, + common_environment_settings: Optional[List["_models.EnvironmentSetting"]] = None, + metadata: Optional[List["_models.MetadataItem"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobStatistics(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Resource usage statistics for a Job. -class ComputeNodeError(Model): - """An error encountered by a Compute Node. + :ivar url: The URL of the statistics. Required. + :vartype url: str + :ivar start_time: The start time of the time range covered by the statistics. Required. + :vartype start_time: ~datetime.datetime + :ivar last_update_time: The time at which the statistics were last updated. All statistics are + limited to the range between startTime and lastUpdateTime. Required. 
+ :vartype last_update_time: ~datetime.datetime + :ivar user_cpu_time: The total user mode CPU time (summed across all cores and all Compute + Nodes) consumed by all Tasks in the Job. Required. + :vartype user_cpu_time: ~datetime.timedelta + :ivar kernel_cpu_time: The total kernel mode CPU time (summed across all cores and all Compute + Nodes) consumed by all Tasks in the Job. Required. + :vartype kernel_cpu_time: ~datetime.timedelta + :ivar wall_clock_time: The total wall clock time of all Tasks in the Job. The wall clock time + is the elapsed time from when the Task started running on a Compute Node to when it finished + (or to the last time the statistics were updated, if the Task had not finished by then). If a + Task was retried, this includes the wall clock time of all the Task retries. Required. + :vartype wall_clock_time: ~datetime.timedelta + :ivar read_i_ops: The total number of disk read operations made by all Tasks in the Job. + Required. + :vartype read_i_ops: int + :ivar write_i_ops: The total number of disk write operations made by all Tasks in the Job. + Required. + :vartype write_i_ops: int + :ivar read_i_o_gi_b: The total amount of data in GiB read from disk by all Tasks in the Job. + Required. + :vartype read_i_o_gi_b: float + :ivar write_i_o_gi_b: The total amount of data in GiB written to disk by all Tasks in the Job. + Required. + :vartype write_i_o_gi_b: float + :ivar num_succeeded_tasks: The total number of Tasks successfully completed in the Job during + the given time range. A Task completes successfully if it returns exit code 0. Required. + :vartype num_succeeded_tasks: int + :ivar num_failed_tasks: The total number of Tasks in the Job that failed during the given time + range. A Task fails if it exhausts its maximum retry count without returning exit code 0. + Required. + :vartype num_failed_tasks: int + :ivar num_task_retries: The total number of retries on all the Tasks in the Job during the + given time range. Required. + :vartype num_task_retries: int + :ivar wait_time: The total wait time of all Tasks in the Job. The wait time for a Task is + defined as the elapsed time between the creation of the Task and the start of Task execution. + (If the Task is retried due to failures, the wait time is the time to the most recent Task + execution.) This value is only reported in the Account lifetime statistics; it is not included + in the Job statistics. Required. + :vartype wait_time: ~datetime.timedelta + """ + + url: str = rest_field() + """The URL of the statistics. Required.""" + start_time: datetime.datetime = rest_field(name="startTime", format="rfc3339") + """The start time of the time range covered by the statistics. Required.""" + last_update_time: datetime.datetime = rest_field(name="lastUpdateTime", format="rfc3339") + """The time at which the statistics were last updated. All statistics are limited to the range + between startTime and lastUpdateTime. Required.""" + user_cpu_time: datetime.timedelta = rest_field(name="userCPUTime") + """The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all + Tasks in the Job. Required.""" + kernel_cpu_time: datetime.timedelta = rest_field(name="kernelCPUTime") + """The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all + Tasks in the Job. Required.""" + wall_clock_time: datetime.timedelta = rest_field(name="wallClockTime") + """The total wall clock time of all Tasks in the Job. 
The wall clock time is the elapsed time + from when the Task started running on a Compute Node to when it finished (or to the last time + the statistics were updated, if the Task had not finished by then). If a Task was retried, this + includes the wall clock time of all the Task retries. Required.""" + read_i_ops: int = rest_field(name="readIOps") + """The total number of disk read operations made by all Tasks in the Job. Required.""" + write_i_ops: int = rest_field(name="writeIOps") + """The total number of disk write operations made by all Tasks in the Job. Required.""" + read_i_o_gi_b: float = rest_field(name="readIOGiB") + """The total amount of data in GiB read from disk by all Tasks in the Job. Required.""" + write_i_o_gi_b: float = rest_field(name="writeIOGiB") + """The total amount of data in GiB written to disk by all Tasks in the Job. Required.""" + num_succeeded_tasks: int = rest_field(name="numSucceededTasks") + """The total number of Tasks successfully completed in the Job during the given time range. A Task + completes successfully if it returns exit code 0. Required.""" + num_failed_tasks: int = rest_field(name="numFailedTasks") + """The total number of Tasks in the Job that failed during the given time range. A Task fails if + it exhausts its maximum retry count without returning exit code 0. Required.""" + num_task_retries: int = rest_field(name="numTaskRetries") + """The total number of retries on all the Tasks in the Job during the given time range. Required.""" + wait_time: datetime.timedelta = rest_field(name="waitTime") + """The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed + time between the creation of the Task and the start of Task execution. (If the Task is retried + due to failures, the wait time is the time to the most recent Task execution.) This value is + only reported in the Account lifetime statistics; it is not included in the Job statistics. + Required.""" + + @overload + def __init__( + self, + *, + url: str, + start_time: datetime.datetime, + last_update_time: datetime.datetime, + user_cpu_time: datetime.timedelta, + kernel_cpu_time: datetime.timedelta, + wall_clock_time: datetime.timedelta, + read_i_ops: int, + write_i_ops: int, + read_i_o_gi_b: float, + write_i_o_gi_b: float, + num_succeeded_tasks: int, + num_failed_tasks: int, + num_task_retries: int, + wait_time: datetime.timedelta, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchJobTerminateContent(_model_base.Model): + """Parameters for terminating an Azure Batch Job. + + :ivar termination_reason: The text you want to appear as the Job's TerminationReason. The + default is 'UserTerminate'. + :vartype termination_reason: str + """ + + termination_reason: Optional[str] = rest_field(name="terminateReason") + """The text you want to appear as the Job's TerminationReason. The default is 'UserTerminate'.""" + + @overload + def __init__( + self, + *, + termination_reason: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
+ super().__init__(*args, **kwargs)
+
+
+class BatchJobUpdateContent(_model_base.Model):
+ """Parameters for updating an Azure Batch Job.
+
+ :ivar priority: The priority of the Job. Priority values can range from -1000 to 1000, with
+ -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority
+ of the Job is left unchanged.
+ :vartype priority: int
+ :ivar allow_task_preemption: Whether Tasks in this job can be preempted by other high priority
+ jobs. If the value is set to True, other high priority jobs submitted to the system will take
+ precedence and will be able to requeue tasks from this job. You can update a job's
+ allowTaskPreemption after it has been created using the update job API.
+ :vartype allow_task_preemption: bool
+ :ivar max_parallel_tasks: The maximum number of tasks that can be executed in parallel for the
+ job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified,
+ the default value is -1, which means there's no limit to the number of tasks that can be run at
+ once. You can update a job's maxParallelTasks after it has been created using the update job
+ API.
+ :vartype max_parallel_tasks: int
+ :ivar constraints: The execution constraints for the Job. If omitted, the existing execution
+ constraints are left unchanged.
+ :vartype constraints: ~azure.batch.models.BatchJobConstraints
+ :ivar pool_info: The Pool on which the Batch service runs the Job's Tasks. You may change the
+ Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the
+ poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the
+ poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then
+ only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be
+ updated as normal). If omitted, the Job continues to run on its current Pool.
+ :vartype pool_info: ~azure.batch.models.BatchPoolInfo
+ :ivar on_all_tasks_complete: The action the Batch service should take when all Tasks in the Job
+ are in the completed state. If omitted, the completion behavior is left unchanged. You may not
+ change the value from terminatejob to noaction - that is, once you have engaged automatic Job
+ termination, you cannot turn it off again. If you try to do this, the request fails with an
+ 'invalid property value' error response; if you are calling the REST API directly, the HTTP
+ status code is 400 (Bad Request). Known values are: "noaction" and "terminatejob".
+ :vartype on_all_tasks_complete: str or ~azure.batch.models.OnAllBatchTasksComplete
+ :ivar metadata: A list of name-value pairs associated with the Job as metadata. If omitted, the
+ existing Job metadata is left unchanged.
+ :vartype metadata: list[~azure.batch.models.MetadataItem]
+ """
+
+ priority: Optional[int] = rest_field()
+ """The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the
+ lowest priority and 1000 being the highest priority. If omitted, the priority of the Job is
+ left unchanged."""
+ allow_task_preemption: Optional[bool] = rest_field(name="allowTaskPreemption")
+ """Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to
+ True, other high priority jobs submitted to the system will take precedence and will be able to
+ requeue tasks from this job. You can update a job's allowTaskPreemption after it has been
+ created using the update job API."""
+ max_parallel_tasks: Optional[int] = rest_field(name="maxParallelTasks")
+ """The maximum number of tasks that can be executed in parallel for the job. The value of
+ maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value
+ is -1, which means there's no limit to the number of tasks that can be run at once. You can
+ update a job's maxParallelTasks after it has been created using the update job API."""
+ constraints: Optional["_models.BatchJobConstraints"] = rest_field()
+ """The execution constraints for the Job. If omitted, the existing execution constraints are left
+ unchanged."""
+ pool_info: Optional["_models.BatchPoolInfo"] = rest_field(name="poolInfo")
+ """The Pool on which the Batch service runs the Job's Tasks. You may change the Pool for a Job
+ only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element
+ and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the
+ keepAlive property of the autoPoolSpecification can be updated, and then only if the
+ autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as
+ normal). If omitted, the Job continues to run on its current Pool."""
+ on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = rest_field(
+ name="onAllTasksComplete"
+ )
+ """The action the Batch service should take when all Tasks in the Job are in the completed state.
+ If omitted, the completion behavior is left unchanged. You may not change the value from
+ terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot
+ turn it off again. If you try to do this, the request fails with an 'invalid property value'
+ error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad
+ Request). Known values are: \"noaction\" and \"terminatejob\"."""
+ metadata: Optional[List["_models.MetadataItem"]] = rest_field()
+ """A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job
+ metadata is left unchanged."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ priority: Optional[int] = None,
+ allow_task_preemption: Optional[bool] = None,
+ max_parallel_tasks: Optional[int] = None,
+ constraints: Optional["_models.BatchJobConstraints"] = None,
+ pool_info: Optional["_models.BatchPoolInfo"] = None,
+ on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = None,
+ metadata: Optional[List["_models.MetadataItem"]] = None,
+ ): ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]):
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
+ super().__init__(*args, **kwargs)
+
+
+class BatchNode(_model_base.Model): # pylint: disable=too-many-instance-attributes
+ """A Compute Node in the Batch service.
- :param code: - :type code: str - :param message: - :type message: str - :param error_details: - :type error_details: list[~azure.batch.models.NameValuePair] - """ - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'error_details': {'key': 'errorDetails', 'type': '[NameValuePair]'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) - self.error_details = kwargs.get('error_details', None) - - -class ComputeNodeExtensionGetOptions(Model): - """Additional parameters for get operation. - - :param select: An OData $select clause. - :type select: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeExtensionGetOptions, self).__init__(**kwargs) - self.select = kwargs.get('select', None) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ComputeNodeExtensionListOptions(Model): - """Additional parameters for list operation. - - :param select: An OData $select clause. - :type select: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Compute Nodes can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeExtensionListOptions, self).__init__(**kwargs) - self.select = kwargs.get('select', None) - self.max_results = kwargs.get('max_results', 1000) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ComputeNodeGetOptions(Model): - """Additional parameters for get operation. - - :param select: An OData $select clause. - :type select: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeGetOptions, self).__init__(**kwargs) - self.select = kwargs.get('select', None) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ComputeNodeGetRemoteDesktopOptions(Model): - """Additional parameters for get_remote_desktop operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeGetRemoteDesktopOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ComputeNodeGetRemoteLoginSettingsOptions(Model): - """Additional parameters for get_remote_login_settings operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeGetRemoteLoginSettingsOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ComputeNodeGetRemoteLoginSettingsResult(Model): - """The remote login settings for a Compute Node. + :ivar id: The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a + unique ID. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, + and the ID is reclaimed and could be reused for new Compute Nodes. + :vartype id: str + :ivar url: The URL of the Compute Node. + :vartype url: str + :ivar state: The current state of the Compute Node. The Spot/Low-priority Compute Node has been + preempted. Tasks which were running on the Compute Node when it was preempted will be + rescheduled when another Compute Node becomes available. Known values are: "idle", "rebooting", + "reimaging", "running", "unusable", "creating", "starting", "waitingforstarttask", + "starttaskfailed", "unknown", "leavingpool", "offline", "preempted", and "upgradingos". + :vartype state: str or ~azure.batch.models.BatchNodeState + :ivar scheduling_state: Whether the Compute Node is available for Task scheduling. Known values + are: "enabled" and "disabled". + :vartype scheduling_state: str or ~azure.batch.models.SchedulingState + :ivar state_transition_time: The time at which the Compute Node entered its current state. + :vartype state_transition_time: ~datetime.datetime + :ivar last_boot_time: The last time at which the Compute Node was started. 
This property may + not be present if the Compute Node state is unusable. + :vartype last_boot_time: ~datetime.datetime + :ivar allocation_time: The time at which this Compute Node was allocated to the Pool. This is + the time when the Compute Node was initially allocated and doesn't change once set. It is not + updated when the Compute Node is service healed or preempted. + :vartype allocation_time: ~datetime.datetime + :ivar ip_address: The IP address that other Nodes can use to communicate with this Compute + Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a + Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is + reclaimed and could be reused for new Compute Nodes. + :vartype ip_address: str + :ivar affinity_id: An identifier which can be passed when adding a Task to request that the + Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target + Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be + scheduled elsewhere. + :vartype affinity_id: str + :ivar vm_size: The size of the virtual machine hosting the Compute Node. For information about + available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an + Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :vartype vm_size: str + :ivar total_tasks_run: The total number of Job Tasks completed on the Compute Node. This + includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start + Tasks. + :vartype total_tasks_run: int + :ivar running_tasks_count: The total number of currently running Job Tasks on the Compute Node. + This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start + Tasks. + :vartype running_tasks_count: int + :ivar running_task_slots_count: The total number of scheduling slots used by currently running + Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job + Preparation, Job Release or Start Tasks. + :vartype running_task_slots_count: int + :ivar total_tasks_succeeded: The total number of Job Tasks which completed successfully (with + exitCode 0) on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job + Preparation, Job Release or Start Tasks. + :vartype total_tasks_succeeded: int + :ivar recent_tasks: A list of Tasks whose state has recently changed. This property is present + only if at least one Task has run on this Compute Node since it was assigned to the Pool. + :vartype recent_tasks: list[~azure.batch.models.BatchTaskInfo] + :ivar start_task: The Task specified to run on the Compute Node as it joins the Pool. + :vartype start_task: ~azure.batch.models.BatchStartTask + :ivar start_task_info: Runtime information about the execution of the StartTask on the Compute + Node. + :vartype start_task_info: ~azure.batch.models.BatchStartTaskInfo + :ivar errors: The list of errors that are currently being encountered by the Compute Node. + :vartype errors: list[~azure.batch.models.BatchNodeError] + :ivar is_dedicated: Whether this Compute Node is a dedicated Compute Node. If false, the + Compute Node is a Spot/Low-priority Compute Node. + :vartype is_dedicated: bool + :ivar endpoint_configuration: The endpoint configuration for the Compute Node. 
+ :vartype endpoint_configuration: ~azure.batch.models.BatchNodeEndpointConfiguration + :ivar node_agent_info: Information about the Compute Node agent version and the time the + Compute Node upgraded to a new version. + :vartype node_agent_info: ~azure.batch.models.BatchNodeAgentInfo + :ivar virtual_machine_info: Info about the current state of the virtual machine. + :vartype virtual_machine_info: ~azure.batch.models.VirtualMachineInfo + """ + + id: Optional[str] = rest_field() + """The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a unique ID. + Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the ID + is reclaimed and could be reused for new Compute Nodes.""" + url: Optional[str] = rest_field() + """The URL of the Compute Node.""" + state: Optional[Union[str, "_models.BatchNodeState"]] = rest_field() + """The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. + Tasks which were running on the Compute Node when it was preempted will be rescheduled when + another Compute Node becomes available. Known values are: \"idle\", \"rebooting\", + \"reimaging\", \"running\", \"unusable\", \"creating\", \"starting\", \"waitingforstarttask\", + \"starttaskfailed\", \"unknown\", \"leavingpool\", \"offline\", \"preempted\", and + \"upgradingos\".""" + scheduling_state: Optional[Union[str, "_models.SchedulingState"]] = rest_field(name="schedulingState") + """Whether the Compute Node is available for Task scheduling. Known values are: \"enabled\" and + \"disabled\".""" + state_transition_time: Optional[datetime.datetime] = rest_field(name="stateTransitionTime", format="rfc3339") + """The time at which the Compute Node entered its current state.""" + last_boot_time: Optional[datetime.datetime] = rest_field(name="lastBootTime", format="rfc3339") + """The last time at which the Compute Node was started. This property may not be present if the + Compute Node state is unusable.""" + allocation_time: Optional[datetime.datetime] = rest_field(name="allocationTime", format="rfc3339") + """The time at which this Compute Node was allocated to the Pool. This is the time when the + Compute Node was initially allocated and doesn't change once set. It is not updated when the + Compute Node is service healed or preempted.""" + ip_address: Optional[str] = rest_field(name="ipAddress") + """The IP address that other Nodes can use to communicate with this Compute Node. Every Compute + Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is + removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and + could be reused for new Compute Nodes.""" + affinity_id: Optional[str] = rest_field(name="affinityId") + """An identifier which can be passed when adding a Task to request that the Task be scheduled on + this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy + or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere.""" + vm_size: Optional[str] = rest_field(name="vmSize") + """The size of the virtual machine hosting the Compute Node. For information about available sizes + of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).""" + total_tasks_run: Optional[int] = rest_field(name="totalTasksRun") + """The total number of Job Tasks completed on the Compute Node. 
This includes Job Manager Tasks + and normal Tasks, but not Job Preparation, Job Release or Start Tasks.""" + running_tasks_count: Optional[int] = rest_field(name="runningTasksCount") + """The total number of currently running Job Tasks on the Compute Node. This includes Job Manager + Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks.""" + running_task_slots_count: Optional[int] = rest_field(name="runningTaskSlotsCount") + """The total number of scheduling slots used by currently running Job Tasks on the Compute Node. + This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start + Tasks.""" + total_tasks_succeeded: Optional[int] = rest_field(name="totalTasksSucceeded") + """The total number of Job Tasks which completed successfully (with exitCode 0) on the Compute + Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or + Start Tasks.""" + recent_tasks: Optional[List["_models.BatchTaskInfo"]] = rest_field(name="recentTasks") + """A list of Tasks whose state has recently changed. This property is present only if at least one + Task has run on this Compute Node since it was assigned to the Pool.""" + start_task: Optional["_models.BatchStartTask"] = rest_field(name="startTask") + """The Task specified to run on the Compute Node as it joins the Pool.""" + start_task_info: Optional["_models.BatchStartTaskInfo"] = rest_field(name="startTaskInfo") + """Runtime information about the execution of the StartTask on the Compute Node.""" + errors: Optional[List["_models.BatchNodeError"]] = rest_field() + """The list of errors that are currently being encountered by the Compute Node.""" + is_dedicated: Optional[bool] = rest_field(name="isDedicated") + """Whether this Compute Node is a dedicated Compute Node. 
If false, the Compute Node is a + Spot/Low-priority Compute Node.""" + endpoint_configuration: Optional["_models.BatchNodeEndpointConfiguration"] = rest_field( + name="endpointConfiguration" + ) + """The endpoint configuration for the Compute Node.""" + node_agent_info: Optional["_models.BatchNodeAgentInfo"] = rest_field(name="nodeAgentInfo") + """Information about the Compute Node agent version and the time the Compute Node upgraded to a + new version.""" + virtual_machine_info: Optional["_models.VirtualMachineInfo"] = rest_field(name="virtualMachineInfo") + """Info about the current state of the virtual machine.""" + + @overload + def __init__( + self, + *, + id: Optional[str] = None, # pylint: disable=redefined-builtin + url: Optional[str] = None, + state: Optional[Union[str, "_models.BatchNodeState"]] = None, + scheduling_state: Optional[Union[str, "_models.SchedulingState"]] = None, + state_transition_time: Optional[datetime.datetime] = None, + last_boot_time: Optional[datetime.datetime] = None, + allocation_time: Optional[datetime.datetime] = None, + ip_address: Optional[str] = None, + affinity_id: Optional[str] = None, + vm_size: Optional[str] = None, + total_tasks_run: Optional[int] = None, + running_tasks_count: Optional[int] = None, + running_task_slots_count: Optional[int] = None, + total_tasks_succeeded: Optional[int] = None, + recent_tasks: Optional[List["_models.BatchTaskInfo"]] = None, + start_task: Optional["_models.BatchStartTask"] = None, + start_task_info: Optional["_models.BatchStartTaskInfo"] = None, + errors: Optional[List["_models.BatchNodeError"]] = None, + is_dedicated: Optional[bool] = None, + endpoint_configuration: Optional["_models.BatchNodeEndpointConfiguration"] = None, + node_agent_info: Optional["_models.BatchNodeAgentInfo"] = None, + virtual_machine_info: Optional["_models.VirtualMachineInfo"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchNodeAgentInfo(_model_base.Model): + """The Batch Compute Node agent is a program that runs on each Compute Node in the + Pool and provides Batch capability on the Compute Node. + + + :ivar version: The version of the Batch Compute Node agent running on the Compute Node. This + version number can be checked against the Compute Node agent release notes located at + https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. Required. + :vartype version: str + :ivar last_update_time: The time when the Compute Node agent was updated on the Compute Node. + This is the most recent time that the Compute Node agent was updated to a new version. + Required. + :vartype last_update_time: ~datetime.datetime + """ + + version: str = rest_field() + """The version of the Batch Compute Node agent running on the Compute Node. This version number + can be checked against the Compute Node agent release notes located at + https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. Required.""" + last_update_time: datetime.datetime = rest_field(name="lastUpdateTime", format="rfc3339") + """The time when the Compute Node agent was updated on the Compute Node. This is the most recent + time that the Compute Node agent was updated to a new version. 
Required.""" + + @overload + def __init__( + self, + *, + version: str, + last_update_time: datetime.datetime, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchNodeCounts(_model_base.Model): # pylint: disable=too-many-instance-attributes + """The number of Compute Nodes in each Compute Node state. - All required parameters must be populated in order to send to Azure. - :param remote_login_ip_address: Required. - :type remote_login_ip_address: str - :param remote_login_port: Required. The port used for remote login to the - Compute Node. - :type remote_login_port: int - """ + :ivar creating: The number of Compute Nodes in the creating state. Required. + :vartype creating: int + :ivar idle: The number of Compute Nodes in the idle state. Required. + :vartype idle: int + :ivar offline: The number of Compute Nodes in the offline state. Required. + :vartype offline: int + :ivar preempted: The number of Compute Nodes in the preempted state. Required. + :vartype preempted: int + :ivar rebooting: The count of Compute Nodes in the rebooting state. Required. + :vartype rebooting: int + :ivar reimaging: The number of Compute Nodes in the reimaging state. Required. + :vartype reimaging: int + :ivar running: The number of Compute Nodes in the running state. Required. + :vartype running: int + :ivar starting: The number of Compute Nodes in the starting state. Required. + :vartype starting: int + :ivar start_task_failed: The number of Compute Nodes in the startTaskFailed state. Required. + :vartype start_task_failed: int + :ivar leaving_pool: The number of Compute Nodes in the leavingPool state. Required. + :vartype leaving_pool: int + :ivar unknown: The number of Compute Nodes in the unknown state. Required. + :vartype unknown: int + :ivar unusable: The number of Compute Nodes in the unusable state. Required. + :vartype unusable: int + :ivar waiting_for_start_task: The number of Compute Nodes in the waitingForStartTask state. + Required. + :vartype waiting_for_start_task: int + :ivar total: The total number of Compute Nodes. Required. + :vartype total: int + :ivar upgrading_os: The number of Compute Nodes in the upgradingOS state. Required. + :vartype upgrading_os: int + """ + + creating: int = rest_field() + """The number of Compute Nodes in the creating state. Required.""" + idle: int = rest_field() + """The number of Compute Nodes in the idle state. Required.""" + offline: int = rest_field() + """The number of Compute Nodes in the offline state. Required.""" + preempted: int = rest_field() + """The number of Compute Nodes in the preempted state. Required.""" + rebooting: int = rest_field() + """The count of Compute Nodes in the rebooting state. Required.""" + reimaging: int = rest_field() + """The number of Compute Nodes in the reimaging state. Required.""" + running: int = rest_field() + """The number of Compute Nodes in the running state. Required.""" + starting: int = rest_field() + """The number of Compute Nodes in the starting state. Required.""" + start_task_failed: int = rest_field(name="startTaskFailed") + """The number of Compute Nodes in the startTaskFailed state. Required.""" + leaving_pool: int = rest_field(name="leavingPool") + """The number of Compute Nodes in the leavingPool state. 
Required.""" + unknown: int = rest_field() + """The number of Compute Nodes in the unknown state. Required.""" + unusable: int = rest_field() + """The number of Compute Nodes in the unusable state. Required.""" + waiting_for_start_task: int = rest_field(name="waitingForStartTask") + """The number of Compute Nodes in the waitingForStartTask state. Required.""" + total: int = rest_field() + """The total number of Compute Nodes. Required.""" + upgrading_os: int = rest_field(name="upgradingOS") + """The number of Compute Nodes in the upgradingOS state. Required.""" + + @overload + def __init__( + self, + *, + creating: int, + idle: int, + offline: int, + preempted: int, + rebooting: int, + reimaging: int, + running: int, + starting: int, + start_task_failed: int, + leaving_pool: int, + unknown: int, + unusable: int, + waiting_for_start_task: int, + total: int, + upgrading_os: int, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchNodeDisableSchedulingContent(_model_base.Model): + """Parameters for disabling scheduling on an Azure Batch Compute Node. + + :ivar node_disable_scheduling_option: What to do with currently running Tasks when disabling + Task scheduling on the Compute Node. The default value is requeue. Known values are: "requeue", + "terminate", and "taskcompletion". + :vartype node_disable_scheduling_option: str or + ~azure.batch.models.BatchNodeDisableSchedulingOption + """ + + node_disable_scheduling_option: Optional[Union[str, "_models.BatchNodeDisableSchedulingOption"]] = rest_field( + name="nodeDisableSchedulingOption" + ) + """What to do with currently running Tasks when disabling Task scheduling on the Compute Node. The + default value is requeue. Known values are: \"requeue\", \"terminate\", and \"taskcompletion\".""" + + @overload + def __init__( + self, + *, + node_disable_scheduling_option: Optional[Union[str, "_models.BatchNodeDisableSchedulingOption"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchNodeEndpointConfiguration(_model_base.Model): + """The endpoint configuration for the Compute Node. - _validation = { - 'remote_login_ip_address': {'required': True}, - 'remote_login_port': {'required': True}, - } - _attribute_map = { - 'remote_login_ip_address': {'key': 'remoteLoginIPAddress', 'type': 'str'}, - 'remote_login_port': {'key': 'remoteLoginPort', 'type': 'int'}, - } + :ivar inbound_endpoints: The list of inbound endpoints that are accessible on the Compute Node. + Required. + :vartype inbound_endpoints: list[~azure.batch.models.InboundEndpoint] + """ - def __init__(self, **kwargs): - super(ComputeNodeGetRemoteLoginSettingsResult, self).__init__(**kwargs) - self.remote_login_ip_address = kwargs.get('remote_login_ip_address', None) - self.remote_login_port = kwargs.get('remote_login_port', None) + inbound_endpoints: List["_models.InboundEndpoint"] = rest_field(name="inboundEndpoints") + """The list of inbound endpoints that are accessible on the Compute Node. 
Required.""" + @overload + def __init__( + self, + *, + inbound_endpoints: List["_models.InboundEndpoint"], + ): ... -class ComputeNodeIdentityReference(Model): - """The reference to a user assigned identity associated with the Batch pool - which a compute node will use. + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ - :param resource_id: The ARM resource id of the user assigned identity. - :type resource_id: str - """ + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) - _attribute_map = { - 'resource_id': {'key': 'resourceId', 'type': 'str'}, - } - def __init__(self, **kwargs): - super(ComputeNodeIdentityReference, self).__init__(**kwargs) - self.resource_id = kwargs.get('resource_id', None) +class BatchNodeError(_model_base.Model): + """An error encountered by a Compute Node. + :ivar code: An identifier for the Compute Node error. Codes are invariant and are intended to + be consumed programmatically. + :vartype code: str + :ivar message: A message describing the Compute Node error, intended to be suitable for display + in a user interface. + :vartype message: str + :ivar error_details: The list of additional error details related to the Compute Node error. + :vartype error_details: list[~azure.batch.models.NameValuePair] + """ + + code: Optional[str] = rest_field() + """An identifier for the Compute Node error. Codes are invariant and are intended to be consumed + programmatically.""" + message: Optional[str] = rest_field() + """A message describing the Compute Node error, intended to be suitable for display in a user + interface.""" + error_details: Optional[List["_models.NameValuePair"]] = rest_field(name="errorDetails") + """The list of additional error details related to the Compute Node error.""" + + @overload + def __init__( + self, + *, + code: Optional[str] = None, + message: Optional[str] = None, + error_details: Optional[List["_models.NameValuePair"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchNodeFile(_model_base.Model): + """Information about a file or directory on a Compute Node. -class ComputeNodeInformation(Model): + :ivar name: The file path. + :vartype name: str + :ivar url: The URL of the file. + :vartype url: str + :ivar is_directory: Whether the object represents a directory. + :vartype is_directory: bool + :ivar properties: The file properties. + :vartype properties: ~azure.batch.models.FileProperties + """ + + name: Optional[str] = rest_field() + """The file path.""" + url: Optional[str] = rest_field() + """The URL of the file.""" + is_directory: Optional[bool] = rest_field(name="isDirectory") + """Whether the object represents a directory.""" + properties: Optional["_models.FileProperties"] = rest_field() + """The file properties.""" + + @overload + def __init__( + self, + *, + name: Optional[str] = None, + url: Optional[str] = None, + is_directory: Optional[bool] = None, + properties: Optional["_models.FileProperties"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchNodeIdentityReference(_model_base.Model): + """The reference to a user assigned identity associated with the Batch pool which + a compute node will use. + + :ivar resource_id: The ARM resource id of the user assigned identity. + :vartype resource_id: str + """ + + resource_id: Optional[str] = rest_field(name="resourceId") + """The ARM resource id of the user assigned identity.""" + + @overload + def __init__( + self, + *, + resource_id: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchNodeInfo(_model_base.Model): """Information about the Compute Node on which a Task ran. - :param affinity_id: - :type affinity_id: str - :param node_url: - :type node_url: str - :param pool_id: - :type pool_id: str - :param node_id: - :type node_id: str - :param task_root_directory: - :type task_root_directory: str - :param task_root_directory_url: - :type task_root_directory_url: str - """ - - _attribute_map = { - 'affinity_id': {'key': 'affinityId', 'type': 'str'}, - 'node_url': {'key': 'nodeUrl', 'type': 'str'}, - 'pool_id': {'key': 'poolId', 'type': 'str'}, - 'node_id': {'key': 'nodeId', 'type': 'str'}, - 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, - 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeInformation, self).__init__(**kwargs) - self.affinity_id = kwargs.get('affinity_id', None) - self.node_url = kwargs.get('node_url', None) - self.pool_id = kwargs.get('pool_id', None) - self.node_id = kwargs.get('node_id', None) - self.task_root_directory = kwargs.get('task_root_directory', None) - self.task_root_directory_url = kwargs.get('task_root_directory_url', None) - - -class ComputeNodeListOptions(Model): - """Additional parameters for list operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Compute Nodes can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeListOptions, self).__init__(**kwargs) - self.filter = kwargs.get('filter', None) - self.select = kwargs.get('select', None) - self.max_results = kwargs.get('max_results', 1000) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ComputeNodeRebootOptions(Model): - """Additional parameters for reboot operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeRebootOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ComputeNodeReimageOptions(Model): - """Additional parameters for reimage operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeReimageOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ComputeNodeUpdateUserOptions(Model): - """Additional parameters for update_user operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeUpdateUserOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ComputeNodeUploadBatchServiceLogsOptions(Model): - """Additional parameters for upload_batch_service_logs operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(ComputeNodeUploadBatchServiceLogsOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class ComputeNodeUser(Model): - """A user Account for RDP or SSH access on a Compute Node. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param is_admin: Whether the Account should be an administrator on the - Compute Node. The default value is false. - :type is_admin: bool - :param expiry_time: If omitted, the default is 1 day from the current - time. For Linux Compute Nodes, the expiryTime has a precision up to a day. - :type expiry_time: datetime - :param password: The password is required for Windows Compute Nodes (those - created with 'cloudServiceConfiguration', or created with - 'virtualMachineConfiguration' using a Windows Image reference). For Linux - Compute Nodes, the password can optionally be specified along with the - sshPublicKey property. - :type password: str - :param ssh_public_key: The public key should be compatible with OpenSSH - encoding and should be base 64 encoded. This property can be specified - only for Linux Compute Nodes. If this is specified for a Windows Compute - Node, then the Batch service rejects the request; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). - :type ssh_public_key: str - """ - - _validation = { - 'name': {'required': True}, - } + :ivar affinity_id: An identifier for the Node on which the Task ran, which can be passed when + adding a Task to request that the Task be scheduled on this Compute Node. + :vartype affinity_id: str + :ivar node_url: The URL of the Compute Node on which the Task ran. + :vartype node_url: str + :ivar pool_id: The ID of the Pool on which the Task ran. + :vartype pool_id: str + :ivar node_id: The ID of the Compute Node on which the Task ran. + :vartype node_id: str + :ivar task_root_directory: The root directory of the Task on the Compute Node. + :vartype task_root_directory: str + :ivar task_root_directory_url: The URL to the root directory of the Task on the Compute Node. 
+ :vartype task_root_directory_url: str + """ + + affinity_id: Optional[str] = rest_field(name="affinityId") + """An identifier for the Node on which the Task ran, which can be passed when adding a Task to + request that the Task be scheduled on this Compute Node.""" + node_url: Optional[str] = rest_field(name="nodeUrl") + """The URL of the Compute Node on which the Task ran.""" + pool_id: Optional[str] = rest_field(name="poolId") + """The ID of the Pool on which the Task ran.""" + node_id: Optional[str] = rest_field(name="nodeId") + """The ID of the Compute Node on which the Task ran.""" + task_root_directory: Optional[str] = rest_field(name="taskRootDirectory") + """The root directory of the Task on the Compute Node.""" + task_root_directory_url: Optional[str] = rest_field(name="taskRootDirectoryUrl") + """The URL to the root directory of the Task on the Compute Node.""" + + @overload + def __init__( + self, + *, + affinity_id: Optional[str] = None, + node_url: Optional[str] = None, + pool_id: Optional[str] = None, + node_id: Optional[str] = None, + task_root_directory: Optional[str] = None, + task_root_directory_url: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchNodePlacementConfiguration(_model_base.Model): + """For regional placement, nodes in the pool will be allocated in the same region. + For zonal placement, nodes in the pool will be spread across different zones + with best effort balancing. + + :ivar policy: Node placement Policy type on Batch Pools. Allocation policy used by Batch + Service to provision the nodes. If not specified, Batch will use the regional policy. Known + values are: "regional" and "zonal". + :vartype policy: str or ~azure.batch.models.BatchNodePlacementPolicyType + """ + + policy: Optional[Union[str, "_models.BatchNodePlacementPolicyType"]] = rest_field() + """Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision + the nodes. If not specified, Batch will use the regional policy. Known values are: \"regional\" + and \"zonal\".""" + + @overload + def __init__( + self, + *, + policy: Optional[Union[str, "_models.BatchNodePlacementPolicyType"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchNodeRebootContent(_model_base.Model): + """Parameters for rebooting an Azure Batch Compute Node. + + :ivar node_reboot_option: When to reboot the Compute Node and what to do with currently running + Tasks. The default value is requeue. Known values are: "requeue", "terminate", + "taskcompletion", and "retaineddata". + :vartype node_reboot_option: str or ~azure.batch.models.BatchNodeRebootOption + """ + + node_reboot_option: Optional[Union[str, "_models.BatchNodeRebootOption"]] = rest_field(name="nodeRebootOption") + """When to reboot the Compute Node and what to do with currently running Tasks. The default value + is requeue. 
Known values are: \"requeue\", \"terminate\", \"taskcompletion\", and + \"retaineddata\".""" + + @overload + def __init__( + self, + *, + node_reboot_option: Optional[Union[str, "_models.BatchNodeRebootOption"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchNodeRemoteLoginSettings(_model_base.Model): + """The remote login settings for a Compute Node. - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'is_admin': {'key': 'isAdmin', 'type': 'bool'}, - 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, - 'password': {'key': 'password', 'type': 'str'}, - 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, - } - def __init__(self, **kwargs): - super(ComputeNodeUser, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.is_admin = kwargs.get('is_admin', None) - self.expiry_time = kwargs.get('expiry_time', None) - self.password = kwargs.get('password', None) - self.ssh_public_key = kwargs.get('ssh_public_key', None) + :ivar remote_login_ip_address: The IP address used for remote login to the Compute Node. + Required. + :vartype remote_login_ip_address: str + :ivar remote_login_port: The port used for remote login to the Compute Node. Required. + :vartype remote_login_port: int + """ + + remote_login_ip_address: str = rest_field(name="remoteLoginIPAddress") + """The IP address used for remote login to the Compute Node. Required.""" + remote_login_port: int = rest_field(name="remoteLoginPort") + """The port used for remote login to the Compute Node. Required.""" + + @overload + def __init__( + self, + *, + remote_login_ip_address: str, + remote_login_port: int, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchNodeRemoveContent(_model_base.Model): + """Parameters for removing nodes from an Azure Batch Pool. + + All required parameters must be populated in order to send to server. + + :ivar node_list: A list containing the IDs of the Compute Nodes to be removed from the + specified Pool. A maximum of 100 nodes may be removed per request. Required. + :vartype node_list: list[str] + :ivar resize_timeout: The timeout for removal of Compute Nodes to the Pool. The default value + is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the + Batch service returns an error; if you are calling the REST API directly, the HTTP status code + is 400 (Bad Request). + :vartype resize_timeout: ~datetime.timedelta + :ivar node_deallocation_option: Determines what to do with a Compute Node and its running + task(s) after it has been selected for deallocation. The default value is requeue. Known values + are: "requeue", "terminate", "taskcompletion", and "retaineddata". + :vartype node_deallocation_option: str or ~azure.batch.models.BatchNodeDeallocationOption + """ + + node_list: List[str] = rest_field(name="nodeList") + """A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum + of 100 nodes may be removed per request. 
Required.""" + resize_timeout: Optional[datetime.timedelta] = rest_field(name="resizeTimeout") + """The timeout for removal of Compute Nodes to the Pool. The default value is 15 minutes. The + minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service + returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad + Request).""" + node_deallocation_option: Optional[Union[str, "_models.BatchNodeDeallocationOption"]] = rest_field( + name="nodeDeallocationOption" + ) + """Determines what to do with a Compute Node and its running task(s) after it has been selected + for deallocation. The default value is requeue. Known values are: \"requeue\", \"terminate\", + \"taskcompletion\", and \"retaineddata\".""" + + @overload + def __init__( + self, + *, + node_list: List[str], + resize_timeout: Optional[datetime.timedelta] = None, + node_deallocation_option: Optional[Union[str, "_models.BatchNodeDeallocationOption"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchNodeUserCreateContent(_model_base.Model): + """Parameters for creating a user account for RDP or SSH access on an Azure Batch Compute Node. + + All required parameters must be populated in order to send to server. + + :ivar name: The user name of the Account. Required. + :vartype name: str + :ivar is_admin: Whether the Account should be an administrator on the Compute Node. The default + value is false. + :vartype is_admin: bool + :ivar expiry_time: The time at which the Account should expire. If omitted, the default is 1 + day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. + :vartype expiry_time: ~datetime.datetime + :ivar password: The password of the Account. The password is required for Windows Compute Nodes + (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux + Compute Nodes, the password can optionally be specified along with the sshPublicKey property. + :vartype password: str + :ivar ssh_public_key: The SSH public key that can be used for remote login to the Compute Node. + The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This + property can be specified only for Linux Compute Nodes. If this is specified for a Windows + Compute Node, then the Batch service rejects the request; if you are calling the REST API + directly, the HTTP status code is 400 (Bad Request). + :vartype ssh_public_key: str + """ + + name: str = rest_field() + """The user name of the Account. Required.""" + is_admin: Optional[bool] = rest_field(name="isAdmin") + """Whether the Account should be an administrator on the Compute Node. The default value is false.""" + expiry_time: Optional[datetime.datetime] = rest_field(name="expiryTime", format="rfc3339") + """The time at which the Account should expire. If omitted, the default is 1 day from the current + time. For Linux Compute Nodes, the expiryTime has a precision up to a day.""" + password: Optional[str] = rest_field() + """The password of the Account. The password is required for Windows Compute Nodes (those created + with 'virtualMachineConfiguration' using a Windows Image reference). 
For Linux Compute Nodes, + the password can optionally be specified along with the sshPublicKey property.""" + ssh_public_key: Optional[str] = rest_field(name="sshPublicKey") + """The SSH public key that can be used for remote login to the Compute Node. The public key should + be compatible with OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then + the Batch service rejects the request; if you are calling the REST API directly, the HTTP + status code is 400 (Bad Request).""" + + @overload + def __init__( + self, + *, + name: str, + is_admin: Optional[bool] = None, + expiry_time: Optional[datetime.datetime] = None, + password: Optional[str] = None, + ssh_public_key: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchNodeUserUpdateContent(_model_base.Model): + """Parameters for updating a user account for RDP or SSH access on an Azure Batch Compute Node. + + :ivar password: The password of the Account. The password is required for Windows Compute Nodes + (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux + Compute Nodes, the password can optionally be specified along with the sshPublicKey property. + If omitted, any existing password is removed. + :vartype password: str + :ivar expiry_time: The time at which the Account should expire. If omitted, the default is 1 + day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. + :vartype expiry_time: ~datetime.datetime + :ivar ssh_public_key: The SSH public key that can be used for remote login to the Compute Node. + The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This + property can be specified only for Linux Compute Nodes. If this is specified for a Windows + Compute Node, then the Batch service rejects the request; if you are calling the REST API + directly, the HTTP status code is 400 (Bad Request). If omitted, any existing SSH public key is + removed. + :vartype ssh_public_key: str + """ + + password: Optional[str] = rest_field() + """The password of the Account. The password is required for Windows Compute Nodes (those created + with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, + the password can optionally be specified along with the sshPublicKey property. If omitted, any + existing password is removed.""" + expiry_time: Optional[datetime.datetime] = rest_field(name="expiryTime", format="rfc3339") + """The time at which the Account should expire. If omitted, the default is 1 day from the current + time. For Linux Compute Nodes, the expiryTime has a precision up to a day.""" + ssh_public_key: Optional[str] = rest_field(name="sshPublicKey") + """The SSH public key that can be used for remote login to the Compute Node. The public key should + be compatible with OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then + the Batch service rejects the request; if you are calling the REST API directly, the HTTP + status code is 400 (Bad Request). 
If omitted, any existing SSH public key is removed.""" + + @overload + def __init__( + self, + *, + password: Optional[str] = None, + expiry_time: Optional[datetime.datetime] = None, + ssh_public_key: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchNodeVMExtension(_model_base.Model): + """The configuration for virtual machine extension instance view. + :ivar provisioning_state: The provisioning state of the virtual machine extension. + :vartype provisioning_state: str + :ivar vm_extension: The virtual machine extension. + :vartype vm_extension: ~azure.batch.models.VMExtension + :ivar instance_view: The vm extension instance view. + :vartype instance_view: ~azure.batch.models.VMExtensionInstanceView + """ + + provisioning_state: Optional[str] = rest_field(name="provisioningState") + """The provisioning state of the virtual machine extension.""" + vm_extension: Optional["_models.VMExtension"] = rest_field(name="vmExtension") + """The virtual machine extension.""" + instance_view: Optional["_models.VMExtensionInstanceView"] = rest_field(name="instanceView") + """The vm extension instance view.""" + + @overload + def __init__( + self, + *, + provisioning_state: Optional[str] = None, + vm_extension: Optional["_models.VMExtension"] = None, + instance_view: Optional["_models.VMExtensionInstanceView"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchPool(_model_base.Model): # pylint: disable=too-many-instance-attributes + """A Pool in the Azure Batch service. -class ContainerConfiguration(Model): - """The configuration for container-enabled Pools. + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: A string that uniquely identifies the Pool within the Account. The ID can contain any + combination of alphanumeric characters including hyphens and underscores, and cannot contain + more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not + have two IDs within an Account that differ only by case). + :vartype id: str + :ivar display_name: The display name for the Pool. The display name need not be unique and can + contain any Unicode characters up to a maximum length of 1024. + :vartype display_name: str + :ivar url: The URL of the Pool. + :vartype url: str + :ivar e_tag: The ETag of the Pool. This is an opaque string. You can use it to detect whether + the Pool has changed between requests. In particular, you can pass the ETag when updating a + Pool to specify that your changes should take effect only if nobody else has modified the Pool + in the meantime. + :vartype e_tag: str + :ivar last_modified: The last modified time of the Pool. This is the last time at which the + Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does + not factor in node-level changes such as a Compute Node changing state.
+ :vartype last_modified: ~datetime.datetime + :ivar creation_time: The creation time of the Pool. + :vartype creation_time: ~datetime.datetime + :ivar state: The current state of the Pool. Known values are: "active" and "deleting". + :vartype state: str or ~azure.batch.models.BatchPoolState + :ivar state_transition_time: The time at which the Pool entered its current state. + :vartype state_transition_time: ~datetime.datetime + :ivar allocation_state: Whether the Pool is resizing. Known values are: "steady", "resizing", + and "stopping". + :vartype allocation_state: str or ~azure.batch.models.AllocationState + :ivar allocation_state_transition_time: The time at which the Pool entered its current + allocation state. + :vartype allocation_state_transition_time: ~datetime.datetime + :ivar vm_size: The size of virtual machines in the Pool. All virtual machines in a Pool are the + same size. For information about available sizes of virtual machines in Pools, see Choose a VM + size for Compute Nodes in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :vartype vm_size: str + :ivar virtual_machine_configuration: The virtual machine configuration for the Pool. This + property must be specified. + :vartype virtual_machine_configuration: ~azure.batch.models.VirtualMachineConfiguration + :ivar resize_timeout: The timeout for allocation of Compute Nodes to the Pool. This is the + timeout for the most recent resize operation. (The initial sizing when the Pool is created + counts as a resize.) The default value is 15 minutes. + :vartype resize_timeout: ~datetime.timedelta + :ivar resize_errors: A list of errors encountered while performing the last resize on the Pool. + This property is set only if one or more errors occurred during the last Pool resize, and only + when the Pool allocationState is Steady. + :vartype resize_errors: list[~azure.batch.models.ResizeError] + :ivar resource_tags: The user-specified tags associated with the pool. The user-defined tags to + be associated with the Azure Batch Pool. When specified, these tags are propagated to the + backing Azure resources associated with the pool. This property can only be specified when the + Batch account was created with the poolAllocationMode property set to 'UserSubscription'. + :vartype resource_tags: dict[str, str] + :ivar current_dedicated_nodes: The number of dedicated Compute Nodes currently in the Pool. + :vartype current_dedicated_nodes: int + :ivar current_low_priority_nodes: The number of Spot/Low-priority Compute Nodes currently in + the Pool. Spot/Low-priority Compute Nodes which have been preempted are included in this count. + :vartype current_low_priority_nodes: int + :ivar target_dedicated_nodes: The desired number of dedicated Compute Nodes in the Pool. + :vartype target_dedicated_nodes: int + :ivar target_low_priority_nodes: The desired number of Spot/Low-priority Compute Nodes in the + Pool. + :vartype target_low_priority_nodes: int + :ivar enable_auto_scale: Whether the Pool size should automatically adjust over time. If false, + at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the + autoScaleFormula property is required and the Pool automatically resizes according to the + formula. The default value is false. + :vartype enable_auto_scale: bool + :ivar auto_scale_formula: A formula for the desired number of Compute Nodes in the Pool. This + property is set only if the Pool automatically scales, i.e. enableAutoScale is true. 
+ :vartype auto_scale_formula: str + :ivar auto_scale_evaluation_interval: The time interval at which to automatically adjust the + Pool size according to the autoscale formula. This property is set only if the Pool + automatically scales, i.e. enableAutoScale is true. + :vartype auto_scale_evaluation_interval: ~datetime.timedelta + :ivar auto_scale_run: The results and errors from the last execution of the autoscale formula. + This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. + :vartype auto_scale_run: ~azure.batch.models.AutoScaleRun + :ivar enable_inter_node_communication: Whether the Pool permits direct communication between + Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. + Specifying this value can reduce the chance of the requested number of Compute Nodes to be + allocated in the Pool. + :vartype enable_inter_node_communication: bool + :ivar network_configuration: The network configuration for the Pool. + :vartype network_configuration: ~azure.batch.models.NetworkConfiguration + :ivar start_task: A Task specified to run on each Compute Node as it joins the Pool. + :vartype start_task: ~azure.batch.models.BatchStartTask + :ivar application_package_references: The list of Packages to be installed on each Compute Node + in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not + affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is + a maximum of 10 Package references on any given Pool. + :vartype application_package_references: + list[~azure.batch.models.BatchApplicationPackageReference] + :ivar task_slots_per_node: The number of task slots that can be used to run concurrent tasks on + a single compute node in the pool. The default value is 1. The maximum value is the smaller of + 4 times the number of cores of the vmSize of the pool or 256. + :vartype task_slots_per_node: int + :ivar task_scheduling_policy: How Tasks are distributed across Compute Nodes in a Pool. If not + specified, the default is spread. + :vartype task_scheduling_policy: ~azure.batch.models.BatchTaskSchedulingPolicy + :ivar user_accounts: The list of user Accounts to be created on each Compute Node in the Pool. + :vartype user_accounts: list[~azure.batch.models.UserAccount] + :ivar metadata: A list of name-value pairs associated with the Pool as metadata. + :vartype metadata: list[~azure.batch.models.MetadataItem] + :ivar stats: Utilization and resource usage statistics for the entire lifetime of the Pool. + This property is populated only if the CloudPool was retrieved with an expand clause including + the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. + The Batch service performs periodic roll-up of statistics. The typical delay is about 30 + minutes. + :vartype stats: ~azure.batch.models.BatchPoolStatistics + :ivar mount_configuration: A list of file systems to mount on each node in the pool. This + supports Azure Files, NFS, CIFS/SMB, and Blobfuse. + :vartype mount_configuration: list[~azure.batch.models.MountConfiguration] + :ivar identity: The identity of the Batch pool, if configured. The list of user identities + associated with the Batch pool. The user identity dictionary key references will be ARM + resource ids in the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. 
# pylint: disable=line-too-long + :vartype identity: ~azure.batch.models.BatchPoolIdentity + :ivar target_node_communication_mode: The desired node communication mode for the pool. If + omitted, the default value is Default. Known values are: "default", "classic", and + "simplified". + :vartype target_node_communication_mode: str or ~azure.batch.models.BatchNodeCommunicationMode + :ivar current_node_communication_mode: The current state of the pool communication mode. Known + values are: "default", "classic", and "simplified". + :vartype current_node_communication_mode: str or ~azure.batch.models.BatchNodeCommunicationMode + :ivar upgrade_policy: The upgrade policy for the Pool. Describes an upgrade policy - automatic, + manual, or rolling. + :vartype upgrade_policy: ~azure.batch.models.UpgradePolicy + """ + + id: Optional[str] = rest_field(visibility=["read"]) + """A string that uniquely identifies the Pool within the Account. The ID can contain any + combination of alphanumeric characters including hyphens and underscores, and cannot contain + more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not + have two IDs within an Account that differ only by case).""" + display_name: Optional[str] = rest_field(name="displayName", visibility=["read"]) + """The display name for the Pool. The display name need not be unique and can contain any Unicode + characters up to a maximum length of 1024.""" + url: Optional[str] = rest_field(visibility=["read"]) + """The URL of the Pool.""" + e_tag: Optional[str] = rest_field(name="eTag", visibility=["read"]) + """The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has + changed between requests. In particular, you can pass the ETag when updating a Pool to + specify that your changes should take effect only if nobody else has modified the Pool in the + meantime.""" + last_modified: Optional[datetime.datetime] = rest_field(name="lastModified", visibility=["read"], format="rfc3339") + """The last modified time of the Pool. This is the last time at which the Pool level data, such as + the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level + changes such as a Compute Node changing state.""" + creation_time: Optional[datetime.datetime] = rest_field(name="creationTime", visibility=["read"], format="rfc3339") + """The creation time of the Pool.""" + state: Optional[Union[str, "_models.BatchPoolState"]] = rest_field(visibility=["read"]) + """The current state of the Pool. Known values are: \"active\" and \"deleting\".""" + state_transition_time: Optional[datetime.datetime] = rest_field( + name="stateTransitionTime", visibility=["read"], format="rfc3339" + ) + """The time at which the Pool entered its current state.""" + allocation_state: Optional[Union[str, "_models.AllocationState"]] = rest_field( + name="allocationState", visibility=["read"] + ) + """Whether the Pool is resizing. Known values are: \"steady\", \"resizing\", and \"stopping\".""" + allocation_state_transition_time: Optional[datetime.datetime] = rest_field( + name="allocationStateTransitionTime", visibility=["read"], format="rfc3339" + ) + """The time at which the Pool entered its current allocation state.""" + vm_size: Optional[str] = rest_field(name="vmSize", visibility=["read"]) + """The size of virtual machines in the Pool. All virtual machines in a Pool are the same size.
For + information about available sizes of virtual machines in Pools, see Choose a VM size for + Compute Nodes in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).""" + virtual_machine_configuration: Optional["_models.VirtualMachineConfiguration"] = rest_field( + name="virtualMachineConfiguration", visibility=["read"] + ) + """The virtual machine configuration for the Pool. This property must be specified.""" + resize_timeout: Optional[datetime.timedelta] = rest_field(name="resizeTimeout", visibility=["read"]) + """The timeout for allocation of Compute Nodes to the Pool. This is the timeout for the most + recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The + default value is 15 minutes.""" + resize_errors: Optional[List["_models.ResizeError"]] = rest_field(name="resizeErrors", visibility=["read"]) + """A list of errors encountered while performing the last resize on the Pool. This property is set + only if one or more errors occurred during the last Pool resize, and only when the Pool + allocationState is Steady.""" + resource_tags: Optional[Dict[str, str]] = rest_field(name="resourceTags", visibility=["read"]) + """The user-specified tags associated with the pool. The user-defined tags to be associated with + the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources + associated with the pool. This property can only be specified when the Batch account was + created with the poolAllocationMode property set to 'UserSubscription'.""" + current_dedicated_nodes: Optional[int] = rest_field(name="currentDedicatedNodes", visibility=["read"]) + """The number of dedicated Compute Nodes currently in the Pool.""" + current_low_priority_nodes: Optional[int] = rest_field(name="currentLowPriorityNodes", visibility=["read"]) + """The number of Spot/Low-priority Compute Nodes currently in the Pool. Spot/Low-priority Compute + Nodes which have been preempted are included in this count.""" + target_dedicated_nodes: Optional[int] = rest_field(name="targetDedicatedNodes", visibility=["read"]) + """The desired number of dedicated Compute Nodes in the Pool.""" + target_low_priority_nodes: Optional[int] = rest_field(name="targetLowPriorityNodes", visibility=["read"]) + """The desired number of Spot/Low-priority Compute Nodes in the Pool.""" + enable_auto_scale: Optional[bool] = rest_field(name="enableAutoScale", visibility=["read"]) + """Whether the Pool size should automatically adjust over time. If false, at least one of + targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the + autoScaleFormula property is required and the Pool automatically resizes according to the + formula. The default value is false.""" + auto_scale_formula: Optional[str] = rest_field(name="autoScaleFormula", visibility=["read"]) + """A formula for the desired number of Compute Nodes in the Pool. This property is set only if the + Pool automatically scales, i.e. enableAutoScale is true.""" + auto_scale_evaluation_interval: Optional[datetime.timedelta] = rest_field( + name="autoScaleEvaluationInterval", visibility=["read"] + ) + """The time interval at which to automatically adjust the Pool size according to the autoscale + formula. This property is set only if the Pool automatically scales, i.e. 
enableAutoScale is + true.""" + auto_scale_run: Optional["_models.AutoScaleRun"] = rest_field(name="autoScaleRun", visibility=["read"]) + """The results and errors from the last execution of the autoscale formula. This property is set + only if the Pool automatically scales, i.e. enableAutoScale is true.""" + enable_inter_node_communication: Optional[bool] = rest_field( + name="enableInterNodeCommunication", visibility=["read"] + ) + """Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions + on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance + of the requested number of Compute Nodes to be allocated in the Pool.""" + network_configuration: Optional["_models.NetworkConfiguration"] = rest_field( + name="networkConfiguration", visibility=["read"] + ) + """The network configuration for the Pool.""" + start_task: Optional["_models.BatchStartTask"] = rest_field(name="startTask") + """A Task specified to run on each Compute Node as it joins the Pool.""" + application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = rest_field( + name="applicationPackageReferences", visibility=["read"] + ) + """The list of Packages to be installed on each Compute Node in the Pool. Changes to Package + references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are + already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package + references on any given Pool.""" + task_slots_per_node: Optional[int] = rest_field(name="taskSlotsPerNode", visibility=["read"]) + """The number of task slots that can be used to run concurrent tasks on a single compute node in + the pool. The default value is 1. The maximum value is the smaller of 4 times the number of + cores of the vmSize of the pool or 256.""" + task_scheduling_policy: Optional["_models.BatchTaskSchedulingPolicy"] = rest_field( + name="taskSchedulingPolicy", visibility=["read"] + ) + """How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is + spread.""" + user_accounts: Optional[List["_models.UserAccount"]] = rest_field(name="userAccounts", visibility=["read"]) + """The list of user Accounts to be created on each Compute Node in the Pool.""" + metadata: Optional[List["_models.MetadataItem"]] = rest_field(visibility=["read"]) + """A list of name-value pairs associated with the Pool as metadata.""" + stats: Optional["_models.BatchPoolStatistics"] = rest_field(visibility=["read"]) + """Utilization and resource usage statistics for the entire lifetime of the Pool. This property is + populated only if the CloudPool was retrieved with an expand clause including the 'stats' + attribute; otherwise it is null. The statistics may not be immediately available. The Batch + service performs periodic roll-up of statistics. The typical delay is about 30 minutes.""" + mount_configuration: Optional[List["_models.MountConfiguration"]] = rest_field( + name="mountConfiguration", visibility=["read"] + ) + """A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, + CIFS/SMB, and Blobfuse.""" + identity: Optional["_models.BatchPoolIdentity"] = rest_field(visibility=["read"]) + """The identity of the Batch pool, if configured. The list of user identities associated with the + Batch pool. 
The user identity dictionary key references will be ARM resource ids in the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. # pylint: disable=line-too-long""" + target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = rest_field( + name="targetNodeCommunicationMode" + ) + """The desired node communication mode for the pool. If omitted, the default value is Default. + Known values are: \"default\", \"classic\", and \"simplified\".""" + current_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = rest_field( + name="currentNodeCommunicationMode", visibility=["read"] + ) + """The current state of the pool communication mode. Known values are: \"default\", \"classic\", + and \"simplified\".""" + upgrade_policy: Optional["_models.UpgradePolicy"] = rest_field(name="upgradePolicy") + """The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling.""" + + @overload + def __init__( + self, + *, + start_task: Optional["_models.BatchStartTask"] = None, + target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = None, + upgrade_policy: Optional["_models.UpgradePolicy"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchPoolCreateContent(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Parameters for creating an Azure Batch Pool. + + All required parameters must be populated in order to send to server. + + :ivar id: A string that uniquely identifies the Pool within the Account. The ID can contain any + combination of alphanumeric characters including hyphens and underscores, and cannot contain + more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not + have two Pool IDs within an Account that differ only by case). Required. + :vartype id: str + :ivar display_name: The display name for the Pool. The display name need not be unique and can + contain any Unicode characters up to a maximum length of 1024. + :vartype display_name: str + :ivar vm_size: The size of virtual machines in the Pool. All virtual machines in a Pool are the + same size. For information about available VM sizes for Pools using Images from the Virtual + Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual + Machines (Linux) + (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for + Virtual Machines (Windows) + (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch + supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, + STANDARD_DS, and STANDARD_DSV2 series). Required. + :vartype vm_size: str + :ivar virtual_machine_configuration: The virtual machine configuration for the Pool. This + property must be specified. + :vartype virtual_machine_configuration: ~azure.batch.models.VirtualMachineConfiguration + :ivar resize_timeout: The timeout for allocation of Compute Nodes to the Pool. This timeout + applies only to manual scaling; it has no effect when enableAutoScale is set to true. 
The + default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 + minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP + status code is 400 (Bad Request). + :vartype resize_timeout: ~datetime.timedelta + :ivar resource_tags: The user-specified tags associated with the pool. The user-defined tags to + be associated with the Azure Batch Pool. When specified, these tags are propagated to the + backing Azure resources associated with the pool. This property can only be specified when the + Batch account was created with the poolAllocationMode property set to 'UserSubscription'. + :vartype resource_tags: dict[str, str] + :ivar target_dedicated_nodes: The desired number of dedicated Compute Nodes in the Pool. This + property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to + false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :vartype target_dedicated_nodes: int + :ivar target_low_priority_nodes: The desired number of Spot/Low-priority Compute Nodes in the + Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale + is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or + both. + :vartype target_low_priority_nodes: int + :ivar enable_auto_scale: Whether the Pool size should automatically adjust over time. If false, + at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the + autoScaleFormula property is required and the Pool automatically resizes according to the + formula. The default value is false. + :vartype enable_auto_scale: bool + :ivar auto_scale_formula: A formula for the desired number of Compute Nodes in the Pool. This + property must not be specified if enableAutoScale is set to false. It is required if + enableAutoScale is set to true. The formula is checked for validity before the Pool is created. + If the formula is not valid, the Batch service rejects the request with detailed error + information. For more information about specifying this formula, see 'Automatically scale + Compute Nodes in an Azure Batch Pool' + (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). + :vartype auto_scale_formula: str + :ivar auto_scale_evaluation_interval: The time interval at which to automatically adjust the + Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and + maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 + minutes or greater than 168 hours, the Batch service returns an error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). + :vartype auto_scale_evaluation_interval: ~datetime.timedelta + :ivar enable_inter_node_communication: Whether the Pool permits direct communication between + Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to + deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not + reaching its desired size. The default value is false. + :vartype enable_inter_node_communication: bool + :ivar network_configuration: The network configuration for the Pool. + :vartype network_configuration: ~azure.batch.models.NetworkConfiguration + :ivar start_task: A Task specified to run on each Compute Node as it joins the Pool. 
The Task + runs when the Compute Node is added to the Pool or when the Compute Node is restarted. + :vartype start_task: ~azure.batch.models.BatchStartTask + :ivar application_package_references: The list of Packages to be installed on each Compute Node + in the Pool. When creating a pool, the package's application ID must be fully qualified + (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). # pylint: disable=line-too-long + Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute + Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of + 10 Package references on any given Pool. + :vartype application_package_references: + list[~azure.batch.models.BatchApplicationPackageReference] + :ivar task_slots_per_node: The number of task slots that can be used to run concurrent tasks on + a single compute node in the pool. The default value is 1. The maximum value is the smaller of + 4 times the number of cores of the vmSize of the pool or 256. + :vartype task_slots_per_node: int + :ivar task_scheduling_policy: How Tasks are distributed across Compute Nodes in a Pool. If not + specified, the default is spread. + :vartype task_scheduling_policy: ~azure.batch.models.BatchTaskSchedulingPolicy + :ivar user_accounts: The list of user Accounts to be created on each Compute Node in the Pool. + :vartype user_accounts: list[~azure.batch.models.UserAccount] + :ivar metadata: A list of name-value pairs associated with the Pool as metadata. The Batch + service does not assign any meaning to metadata; it is solely for the use of user code. + :vartype metadata: list[~azure.batch.models.MetadataItem] + :ivar mount_configuration: Mount storage using specified file system for the entire lifetime of + the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. + :vartype mount_configuration: list[~azure.batch.models.MountConfiguration] + :ivar target_node_communication_mode: The desired node communication mode for the pool. If + omitted, the default value is Default. Known values are: "default", "classic", and + "simplified". + :vartype target_node_communication_mode: str or ~azure.batch.models.BatchNodeCommunicationMode + :ivar upgrade_policy: The upgrade policy for the Pool. Describes an upgrade policy - automatic, + manual, or rolling. + :vartype upgrade_policy: ~azure.batch.models.UpgradePolicy + """ + + id: str = rest_field() + """A string that uniquely identifies the Pool within the Account. The ID can contain any + combination of alphanumeric characters including hyphens and underscores, and cannot contain + more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not + have two Pool IDs within an Account that differ only by case). Required.""" + display_name: Optional[str] = rest_field(name="displayName") + """The display name for the Pool. The display name need not be unique and can contain any Unicode + characters up to a maximum length of 1024.""" + vm_size: str = rest_field(name="vmSize") + """The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. 
For + information about available VM sizes for Pools using Images from the Virtual Machines + Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines + (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or + Sizes for Virtual Machines (Windows) + (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch + supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, + STANDARD_DS, and STANDARD_DSV2 series). Required.""" + virtual_machine_configuration: Optional["_models.VirtualMachineConfiguration"] = rest_field( + name="virtualMachineConfiguration" + ) + """The virtual machine configuration for the Pool. This property must be specified.""" + resize_timeout: Optional[datetime.timedelta] = rest_field(name="resizeTimeout") + """The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual + scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. + The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service + returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad + Request).""" + resource_tags: Optional[Dict[str, str]] = rest_field(name="resourceTags") + """The user-specified tags associated with the pool. The user-defined tags to be associated with + the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources + associated with the pool. This property can only be specified when the Batch account was + created with the poolAllocationMode property set to 'UserSubscription'.""" + target_dedicated_nodes: Optional[int] = rest_field(name="targetDedicatedNodes") + """The desired number of dedicated Compute Nodes in the Pool. This property must not be specified + if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either + targetDedicatedNodes, targetLowPriorityNodes, or both.""" + target_low_priority_nodes: Optional[int] = rest_field(name="targetLowPriorityNodes") + """The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be + specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must + set either targetDedicatedNodes, targetLowPriorityNodes, or both.""" + enable_auto_scale: Optional[bool] = rest_field(name="enableAutoScale") + """Whether the Pool size should automatically adjust over time. If false, at least one of + targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the + autoScaleFormula property is required and the Pool automatically resizes according to the + formula. The default value is false.""" + auto_scale_formula: Optional[str] = rest_field(name="autoScaleFormula") + """A formula for the desired number of Compute Nodes in the Pool. This property must not be + specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. + The formula is checked for validity before the Pool is created. If the formula is not valid, + the Batch service rejects the request with detailed error information. 
For more information + about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' + (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/).""" + auto_scale_evaluation_interval: Optional[datetime.timedelta] = rest_field(name="autoScaleEvaluationInterval") + """The time interval at which to automatically adjust the Pool size according to the autoscale + formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 + hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the + Batch service returns an error; if you are calling the REST API directly, the HTTP status code + is 400 (Bad Request).""" + enable_inter_node_communication: Optional[bool] = rest_field(name="enableInterNodeCommunication") + """Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node + communication limits the maximum size of the Pool due to deployment restrictions on the Compute + Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value + is false.""" + network_configuration: Optional["_models.NetworkConfiguration"] = rest_field(name="networkConfiguration") + """The network configuration for the Pool.""" + start_task: Optional["_models.BatchStartTask"] = rest_field(name="startTask") + """A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the + Compute Node is added to the Pool or when the Compute Node is restarted.""" + application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = rest_field( + name="applicationPackageReferences" + ) + """The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, + the package's application ID must be fully qualified + (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). # pylint: disable=line-too-long + Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute + Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of + 10 Package references on any given Pool.""" + task_slots_per_node: Optional[int] = rest_field(name="taskSlotsPerNode") + """The number of task slots that can be used to run concurrent tasks on a single compute node in + the pool. The default value is 1. The maximum value is the smaller of 4 times the number of + cores of the vmSize of the pool or 256.""" + task_scheduling_policy: Optional["_models.BatchTaskSchedulingPolicy"] = rest_field(name="taskSchedulingPolicy") + """How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is + spread.""" + user_accounts: Optional[List["_models.UserAccount"]] = rest_field(name="userAccounts") + """The list of user Accounts to be created on each Compute Node in the Pool.""" + metadata: Optional[List["_models.MetadataItem"]] = rest_field() + """A list of name-value pairs associated with the Pool as metadata. The Batch service does not + assign any meaning to metadata; it is solely for the use of user code.""" + mount_configuration: Optional[List["_models.MountConfiguration"]] = rest_field(name="mountConfiguration") + """Mount storage using specified file system for the entire lifetime of the pool. 
Mount the + storage using Azure fileshare, NFS, CIFS or Blobfuse based file system.""" + target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = rest_field( + name="targetNodeCommunicationMode" + ) + """The desired node communication mode for the pool. If omitted, the default value is Default. + Known values are: \"default\", \"classic\", and \"simplified\".""" + upgrade_policy: Optional["_models.UpgradePolicy"] = rest_field(name="upgradePolicy") + """The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + vm_size: str, + display_name: Optional[str] = None, + virtual_machine_configuration: Optional["_models.VirtualMachineConfiguration"] = None, + resize_timeout: Optional[datetime.timedelta] = None, + resource_tags: Optional[Dict[str, str]] = None, + target_dedicated_nodes: Optional[int] = None, + target_low_priority_nodes: Optional[int] = None, + enable_auto_scale: Optional[bool] = None, + auto_scale_formula: Optional[str] = None, + auto_scale_evaluation_interval: Optional[datetime.timedelta] = None, + enable_inter_node_communication: Optional[bool] = None, + network_configuration: Optional["_models.NetworkConfiguration"] = None, + start_task: Optional["_models.BatchStartTask"] = None, + application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = None, + task_slots_per_node: Optional[int] = None, + task_scheduling_policy: Optional["_models.BatchTaskSchedulingPolicy"] = None, + user_accounts: Optional[List["_models.UserAccount"]] = None, + metadata: Optional[List["_models.MetadataItem"]] = None, + mount_configuration: Optional[List["_models.MountConfiguration"]] = None, + target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = None, + upgrade_policy: Optional["_models.UpgradePolicy"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchPoolEnableAutoScaleContent(_model_base.Model): + """Parameters for enabling automatic scaling on an Azure Batch Pool. + + :ivar auto_scale_formula: The formula for the desired number of Compute Nodes in the Pool. The + formula is checked for validity before it is applied to the Pool. If the formula is not valid, + the Batch service rejects the request with detailed error information. For more information + about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :vartype auto_scale_formula: str + :ivar auto_scale_evaluation_interval: The time interval at which to automatically adjust the + Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and + maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 + minutes or greater than 168 hours, the Batch service rejects the request with an invalid + property value error; if you are calling the REST API directly, the HTTP status code is 400 + (Bad Request). 
If you specify a new interval, then the existing autoscale evaluation schedule + will be stopped and a new autoscale evaluation schedule will be started, with its starting time + being the time when this request was issued. + :vartype auto_scale_evaluation_interval: ~datetime.timedelta + """ + + auto_scale_formula: Optional[str] = rest_field(name="autoScaleFormula") + """The formula for the desired number of Compute Nodes in the Pool. The formula is checked for + validity before it is applied to the Pool. If the formula is not valid, the Batch service + rejects the request with detailed error information. For more information about specifying this + formula, see Automatically scale Compute Nodes in an Azure Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling).""" + auto_scale_evaluation_interval: Optional[datetime.timedelta] = rest_field(name="autoScaleEvaluationInterval") + """The time interval at which to automatically adjust the Pool size according to the autoscale + formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 + hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the + Batch service rejects the request with an invalid property value error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, + then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation + schedule will be started, with its starting time being the time when this request was issued.""" + + @overload + def __init__( + self, + *, + auto_scale_formula: Optional[str] = None, + auto_scale_evaluation_interval: Optional[datetime.timedelta] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchPoolEndpointConfiguration(_model_base.Model): + """The endpoint configuration for a Pool. - All required parameters must be populated in order to send to Azure. - :param type: Required. Possible values include: 'dockerCompatible', - 'criCompatible' - :type type: str or ~azure.batch.models.ContainerType - :param container_image_names: This is the full Image reference, as would - be specified to "docker pull". An Image will be sourced from the default - Docker registry unless the Image is fully qualified with an alternative - registry. - :type container_image_names: list[str] - :param container_registries: If any Images must be downloaded from a - private registry which requires credentials, then those credentials must - be provided here. - :type container_registries: list[~azure.batch.models.ContainerRegistry] + :ivar inbound_nat_pools: A list of inbound NAT Pools that can be used to address specific ports + on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch + Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP + status code 400. This cannot be specified if the IPAddressProvisioningType is + NoPublicIPAddresses. Required. 
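# --- Illustrative sketch (editor-added, not part of the generated diff) ---
# A minimal example of building the enable-auto-scale payload described by
# BatchPoolEnableAutoScaleContent above. It only uses the keyword arguments
# shown in that model's __init__ overload and assumes the model is exported
# from azure.batch.models as the docstring cross-references suggest; how the
# object is then passed to a BatchClient call is not shown here.
import datetime
from azure.batch.models import BatchPoolEnableAutoScaleContent

enable_content = BatchPoolEnableAutoScaleContent(
    # The formula is validated by the service before it is applied to the Pool.
    auto_scale_formula="$TargetDedicatedNodes = 2;",
    # Must be between 5 minutes and 168 hours; out-of-range values return 400.
    auto_scale_evaluation_interval=datetime.timedelta(minutes=15),
)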
+ :vartype inbound_nat_pools: list[~azure.batch.models.InboundNatPool] """ - _validation = { - 'type': {'required': True}, - } + inbound_nat_pools: List["_models.InboundNatPool"] = rest_field(name="inboundNATPools") + """A list of inbound NAT Pools that can be used to address specific ports on an individual Compute + Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum + number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This + cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. Required.""" - _attribute_map = { - 'type': {'key': 'type', 'type': 'str'}, - 'container_image_names': {'key': 'containerImageNames', 'type': '[str]'}, - 'container_registries': {'key': 'containerRegistries', 'type': '[ContainerRegistry]'}, - } + @overload + def __init__( + self, + *, + inbound_nat_pools: List["_models.InboundNatPool"], + ): ... - def __init__(self, **kwargs): - super(ContainerConfiguration, self).__init__(**kwargs) - self.type = kwargs.get('type', None) - self.container_image_names = kwargs.get('container_image_names', None) - self.container_registries = kwargs.get('container_registries', None) + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) -class ContainerRegistry(Model): - """A private container registry. - :param user_name: - :type user_name: str - :param password: - :type password: str - :param registry_server: If omitted, the default is "docker.io". - :type registry_server: str - :param identity_reference: The reference to the user assigned identity to - use to access an Azure Container Registry instead of username and - password. - :type identity_reference: ~azure.batch.models.ComputeNodeIdentityReference - """ - - _attribute_map = { - 'user_name': {'key': 'username', 'type': 'str'}, - 'password': {'key': 'password', 'type': 'str'}, - 'registry_server': {'key': 'registryServer', 'type': 'str'}, - 'identity_reference': {'key': 'identityReference', 'type': 'ComputeNodeIdentityReference'}, - } - - def __init__(self, **kwargs): - super(ContainerRegistry, self).__init__(**kwargs) - self.user_name = kwargs.get('user_name', None) - self.password = kwargs.get('password', None) - self.registry_server = kwargs.get('registry_server', None) - self.identity_reference = kwargs.get('identity_reference', None) - - -class DataDisk(Model): - """Settings which will be used by the data disks associated to Compute Nodes - in the Pool. When using attached data disks, you need to mount and format - the disks from within a VM to use them. - - All required parameters must be populated in order to send to Azure. - - :param lun: Required. The logical unit number. The lun is used to uniquely - identify each data disk. If attaching multiple disks, each should have a - distinct lun. The value must be between 0 and 63, inclusive. - :type lun: int - :param caching: The type of caching to be enabled for the data disks. The - default value for caching is readwrite. For information about the caching - options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Possible values include: 'none', 'readOnly', 'readWrite' - :type caching: str or ~azure.batch.models.CachingType - :param disk_size_gb: Required. 
The initial disk size in gigabytes. - :type disk_size_gb: int - :param storage_account_type: The storage Account type to be used for the - data disk. If omitted, the default is "standard_lrs". Possible values - include: 'StandardLRS', 'PremiumLRS', 'StandardSSDLRS' - :type storage_account_type: str or ~azure.batch.models.StorageAccountType - """ - - _validation = { - 'lun': {'required': True}, - 'disk_size_gb': {'required': True}, - } - - _attribute_map = { - 'lun': {'key': 'lun', 'type': 'int'}, - 'caching': {'key': 'caching', 'type': 'CachingType'}, - 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, - 'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'}, - } - - def __init__(self, **kwargs): - super(DataDisk, self).__init__(**kwargs) - self.lun = kwargs.get('lun', None) - self.caching = kwargs.get('caching', None) - self.disk_size_gb = kwargs.get('disk_size_gb', None) - self.storage_account_type = kwargs.get('storage_account_type', None) - - -class DeleteCertificateError(Model): - """An error encountered by the Batch service when deleting a Certificate. - - :param code: - :type code: str - :param message: - :type message: str - :param values: This list includes details such as the active Pools and - Compute Nodes referencing this Certificate. However, if a large number of - resources reference the Certificate, the list contains only about the - first hundred. - :type values: list[~azure.batch.models.NameValuePair] - """ - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'values': {'key': 'values', 'type': '[NameValuePair]'}, - } - - def __init__(self, **kwargs): - super(DeleteCertificateError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) - self.values = kwargs.get('values', None) - - -class DiffDiskSettings(Model): - """Specifies the ephemeral Disk Settings for the operating system disk used by - the compute node (VM). - - :param placement: Specifies the ephemeral disk placement for operating - system disk for all VMs in the pool. This property can be used by user in - the request to choose the location e.g., cache disk space for Ephemeral OS - disk provisioning. For more information on Ephemeral OS disk size - requirements, please refer to Ephemeral OS disk size requirements for - Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - Possible values include: 'CacheDisk' - :type placement: str or ~azure.batch.models.DiffDiskPlacement - """ - - _attribute_map = { - 'placement': {'key': 'placement', 'type': 'DiffDiskPlacement'}, - } - - def __init__(self, **kwargs): - super(DiffDiskSettings, self).__init__(**kwargs) - self.placement = kwargs.get('placement', None) +class BatchPoolEvaluateAutoScaleContent(_model_base.Model): + """Parameters for evaluating an automatic scaling formula on an Azure Batch Pool. + All required parameters must be populated in order to send to server. -class DiskEncryptionConfiguration(Model): - """The disk encryption configuration applied on compute nodes in the pool. - Disk encryption configuration is not supported on Linux pool created with - Azure Compute Gallery Image. - - :param targets: If omitted, no disks on the compute nodes in the pool will - be encrypted. 
On Linux pool, only "TemporaryDisk" is supported; on Windows - pool, "OsDisk" and "TemporaryDisk" must be specified. - :type targets: list[str or ~azure.batch.models.DiskEncryptionTarget] + :ivar auto_scale_formula: The formula for the desired number of Compute Nodes in the Pool. The + formula is validated and its results calculated, but it is not applied to the Pool. To apply + the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about + specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). Required. + :vartype auto_scale_formula: str """ - _attribute_map = { - 'targets': {'key': 'targets', 'type': '[DiskEncryptionTarget]'}, - } - - def __init__(self, **kwargs): - super(DiskEncryptionConfiguration, self).__init__(**kwargs) - self.targets = kwargs.get('targets', None) - - -class EnvironmentSetting(Model): - """An environment variable to be set on a Task process. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param value: - :type value: str - """ + auto_scale_formula: str = rest_field(name="autoScaleFormula") + """The formula for the desired number of Compute Nodes in the Pool. The formula is validated and + its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, + 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see + Automatically scale Compute Nodes in an Azure Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). Required.""" - _validation = { - 'name': {'required': True}, - } + @overload + def __init__( + self, + *, + auto_scale_formula: str, + ): ... - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ - def __init__(self, **kwargs): - super(EnvironmentSetting, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.value = kwargs.get('value', None) + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) -class ErrorMessage(Model): - """An error message received in an Azure Batch error response. +class BatchPoolIdentity(_model_base.Model): + """The identity of the Batch pool, if configured. - :param lang: - :type lang: str - :param value: - :type value: str - """ - _attribute_map = { - 'lang': {'key': 'lang', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } + :ivar type: The identity of the Batch pool, if configured. The list of user identities + associated with the Batch pool. The user identity dictionary key references will be ARM + resource ids in the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. # pylint: disable=line-too-long + Required. Known values are: "UserAssigned" and "None". + :vartype type: str or ~azure.batch.models.BatchPoolIdentityType + :ivar user_assigned_identities: The list of user identities associated with the Batch account. 
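# --- Illustrative sketch (editor-added, not part of the generated diff) ---
# Evaluating an autoscale formula without applying it, per the
# BatchPoolEvaluateAutoScaleContent model above; auto_scale_formula is the
# only (required) keyword argument. Import path is assumed from the docstring
# cross-references.
from azure.batch.models import BatchPoolEvaluateAutoScaleContent

evaluate_content = BatchPoolEvaluateAutoScaleContent(
    auto_scale_formula="$TargetDedicatedNodes = 2;"
)
# The same model can also be built from raw JSON via the mapping overload:
evaluate_content_from_json = BatchPoolEvaluateAutoScaleContent(
    {"autoScaleFormula": "$TargetDedicatedNodes = 2;"}
)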
+ The user identity dictionary key references will be ARM resource ids in the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. # pylint: disable=line-too-long + :vartype user_assigned_identities: list[~azure.batch.models.UserAssignedIdentity] + """ + + type: Union[str, "_models.BatchPoolIdentityType"] = rest_field() + """The identity of the Batch pool, if configured. The list of user identities associated with the + Batch pool. The user identity dictionary key references will be ARM resource ids in the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. # pylint: disable=line-too-long + Required. Known values are: \"UserAssigned\" and \"None\".""" + user_assigned_identities: Optional[List["_models.UserAssignedIdentity"]] = rest_field(name="userAssignedIdentities") + """The list of user identities associated with the Batch account. The user identity dictionary key + references will be ARM resource ids in the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. # pylint: disable=line-too-long""" - def __init__(self, **kwargs): - super(ErrorMessage, self).__init__(**kwargs) - self.lang = kwargs.get('lang', None) - self.value = kwargs.get('value', None) + @overload + def __init__( + self, + *, + type: Union[str, "_models.BatchPoolIdentityType"], + user_assigned_identities: Optional[List["_models.UserAssignedIdentity"]] = None, + ): ... + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ -class ExitCodeMapping(Model): - """How the Batch service should respond if a Task exits with a particular exit - code. + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) - All required parameters must be populated in order to send to Azure. - :param code: Required. A process exit code. - :type code: int - :param exit_options: Required. How the Batch service should respond if the - Task exits with this exit code. - :type exit_options: ~azure.batch.models.ExitOptions - """ +class BatchPoolInfo(_model_base.Model): + """Specifies how a Job should be assigned to a Pool. - _validation = { - 'code': {'required': True}, - 'exit_options': {'required': True}, - } + :ivar pool_id: The ID of an existing Pool. All the Tasks of the Job will run on the specified + Pool. You must ensure that the Pool referenced by this property exists. If the Pool does not + exist at the time the Batch service tries to schedule a Job, no Tasks for the Job will run + until you create a Pool with that id. Note that the Batch service will not reject the Job + request; it will simply not run Tasks until the Pool exists. You must specify either the Pool + ID or the auto Pool specification, but not both. + :vartype pool_id: str + :ivar auto_pool_specification: Characteristics for a temporary 'auto pool'. The Batch service + will create this auto Pool when the Job is submitted. If auto Pool creation fails, the Batch + service moves the Job to a completed state, and the Pool creation error is set in the Job's + scheduling error property. The Batch service manages the lifetime (both creation and, unless + keepAlive is specified, deletion) of the auto Pool. 
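# --- Illustrative sketch (editor-added, not part of the generated diff) ---
# BatchPoolIdentity above is usually read back from the service, but it can
# also be constructed directly; 'type' is required and its known values are
# "UserAssigned" and "None".
from azure.batch.models import BatchPoolIdentity

pool_identity = BatchPoolIdentity(type="None")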
Any user actions that affect the lifetime + of the auto Pool while the Job is active will result in unexpected behavior. You must specify + either the Pool ID or the auto Pool specification, but not both. + :vartype auto_pool_specification: ~azure.batch.models.BatchAutoPoolSpecification + """ + + pool_id: Optional[str] = rest_field(name="poolId") + """The ID of an existing Pool. All the Tasks of the Job will run on the specified Pool. You must + ensure that the Pool referenced by this property exists. If the Pool does not exist at the time + the Batch service tries to schedule a Job, no Tasks for the Job will run until you create a + Pool with that id. Note that the Batch service will not reject the Job request; it will simply + not run Tasks until the Pool exists. You must specify either the Pool ID or the auto Pool + specification, but not both.""" + auto_pool_specification: Optional["_models.BatchAutoPoolSpecification"] = rest_field(name="autoPoolSpecification") + """Characteristics for a temporary 'auto pool'. The Batch service will create this auto Pool when + the Job is submitted. If auto Pool creation fails, the Batch service moves the Job to a + completed state, and the Pool creation error is set in the Job's scheduling error property. The + Batch service manages the lifetime (both creation and, unless keepAlive is specified, deletion) + of the auto Pool. Any user actions that affect the lifetime of the auto Pool while the Job is + active will result in unexpected behavior. You must specify either the Pool ID or the auto Pool + specification, but not both.""" + + @overload + def __init__( + self, + *, + pool_id: Optional[str] = None, + auto_pool_specification: Optional["_models.BatchAutoPoolSpecification"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchPoolNodeCounts(_model_base.Model): + """The number of Compute Nodes in each state for a Pool. - _attribute_map = { - 'code': {'key': 'code', 'type': 'int'}, - 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, - } - def __init__(self, **kwargs): - super(ExitCodeMapping, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.exit_options = kwargs.get('exit_options', None) + :ivar pool_id: The ID of the Pool. Required. + :vartype pool_id: str + :ivar dedicated: The number of dedicated Compute Nodes in each state. + :vartype dedicated: ~azure.batch.models.BatchNodeCounts + :ivar low_priority: The number of Spot/Low-priority Compute Nodes in each state. + :vartype low_priority: ~azure.batch.models.BatchNodeCounts + """ + + pool_id: str = rest_field(name="poolId") + """The ID of the Pool. Required.""" + dedicated: Optional["_models.BatchNodeCounts"] = rest_field() + """The number of dedicated Compute Nodes in each state.""" + low_priority: Optional["_models.BatchNodeCounts"] = rest_field(name="lowPriority") + """The number of Spot/Low-priority Compute Nodes in each state.""" + + @overload + def __init__( + self, + *, + pool_id: str, + dedicated: Optional["_models.BatchNodeCounts"] = None, + low_priority: Optional["_models.BatchNodeCounts"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
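# --- Illustrative sketch (editor-added, not part of the generated diff) ---
# BatchPoolInfo above: specify either pool_id or auto_pool_specification, but
# never both. The simplest form points a Job at an existing Pool by ID; the
# pool ID used here is a placeholder.
from azure.batch.models import BatchPoolInfo

pool_info = BatchPoolInfo(pool_id="my-existing-pool")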
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchPoolReplaceContent(_model_base.Model): + """Parameters for replacing properties on an Azure Batch Pool. + + All required parameters must be populated in order to send to server. + + :ivar start_task: A Task to run on each Compute Node as it joins the Pool. The Task runs when + the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is + present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed + from the Pool. + :vartype start_task: ~azure.batch.models.BatchStartTask + :ivar application_package_references: The list of Application Packages to be installed on each + Compute Node in the Pool. The list replaces any existing Application Package references on the + Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, + but do not affect Compute Nodes that are already in the Pool until they are rebooted or + reimaged. There is a maximum of 10 Application Package references on any given Pool. If + omitted, or if you specify an empty collection, any existing Application Packages references + are removed from the Pool. A maximum of 10 references may be specified on a given Pool. + Required. + :vartype application_package_references: + list[~azure.batch.models.BatchApplicationPackageReference] + :ivar metadata: A list of name-value pairs associated with the Pool as metadata. This list + replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty + collection, any existing metadata is removed from the Pool. Required. + :vartype metadata: list[~azure.batch.models.MetadataItem] + :ivar target_node_communication_mode: The desired node communication mode for the pool. This + setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the + existing setting is default. Known values are: "default", "classic", and "simplified". + :vartype target_node_communication_mode: str or ~azure.batch.models.BatchNodeCommunicationMode + """ + + start_task: Optional["_models.BatchStartTask"] = rest_field(name="startTask") + """A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is + added to the Pool or when the Compute Node is restarted. If this element is present, it + overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool.""" + application_package_references: List["_models.BatchApplicationPackageReference"] = rest_field( + name="applicationPackageReferences" + ) + """The list of Application Packages to be installed on each Compute Node in the Pool. The list + replaces any existing Application Package references on the Pool. Changes to Application + Package references affect all new Compute Nodes joining the Pool, but do not affect Compute + Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of + 10 Application Package references on any given Pool. If omitted, or if you specify an empty + collection, any existing Application Packages references are removed from the Pool. A maximum + of 10 references may be specified on a given Pool. Required.""" + metadata: List["_models.MetadataItem"] = rest_field() + """A list of name-value pairs associated with the Pool as metadata. This list replaces any + existing metadata configured on the Pool. 
If omitted, or if you specify an empty collection, + any existing metadata is removed from the Pool. Required.""" + target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = rest_field( + name="targetNodeCommunicationMode" + ) + """The desired node communication mode for the pool. This setting replaces any existing + targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. Known + values are: \"default\", \"classic\", and \"simplified\".""" + + @overload + def __init__( + self, + *, + application_package_references: List["_models.BatchApplicationPackageReference"], + metadata: List["_models.MetadataItem"], + start_task: Optional["_models.BatchStartTask"] = None, + target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchPoolResizeContent(_model_base.Model): + """Parameters for changing the size of an Azure Batch Pool. + + :ivar target_dedicated_nodes: The desired number of dedicated Compute Nodes in the Pool. + :vartype target_dedicated_nodes: int + :ivar target_low_priority_nodes: The desired number of Spot/Low-priority Compute Nodes in the + Pool. + :vartype target_low_priority_nodes: int + :ivar resize_timeout: The timeout for allocation of Nodes to the Pool or removal of Compute + Nodes from the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you + specify a value less than 5 minutes, the Batch service returns an error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). + :vartype resize_timeout: ~datetime.timedelta + :ivar node_deallocation_option: Determines what to do with a Compute Node and its running + task(s) if the Pool size is decreasing. The default value is requeue. Known values are: + "requeue", "terminate", "taskcompletion", and "retaineddata". + :vartype node_deallocation_option: str or ~azure.batch.models.BatchNodeDeallocationOption + """ + + target_dedicated_nodes: Optional[int] = rest_field(name="targetDedicatedNodes") + """The desired number of dedicated Compute Nodes in the Pool.""" + target_low_priority_nodes: Optional[int] = rest_field(name="targetLowPriorityNodes") + """The desired number of Spot/Low-priority Compute Nodes in the Pool.""" + resize_timeout: Optional[datetime.timedelta] = rest_field(name="resizeTimeout") + """The timeout for allocation of Nodes to the Pool or removal of Compute Nodes from the Pool. The + default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 + minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP + status code is 400 (Bad Request).""" + node_deallocation_option: Optional[Union[str, "_models.BatchNodeDeallocationOption"]] = rest_field( + name="nodeDeallocationOption" + ) + """Determines what to do with a Compute Node and its running task(s) if the Pool size is + decreasing. The default value is requeue. 
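# --- Illustrative sketch (editor-added, not part of the generated diff) ---
# BatchPoolReplaceContent above requires application_package_references and
# metadata; per the docstrings, passing empty lists removes any existing
# values of those properties from the Pool. Import path is assumed from the
# docstring cross-references.
from azure.batch.models import BatchPoolReplaceContent

replace_content = BatchPoolReplaceContent(
    application_package_references=[],  # clears existing Package references
    metadata=[],                        # clears existing Pool metadata
    target_node_communication_mode="simplified",
)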
Known values are: \"requeue\", \"terminate\", + \"taskcompletion\", and \"retaineddata\".""" + + @overload + def __init__( + self, + *, + target_dedicated_nodes: Optional[int] = None, + target_low_priority_nodes: Optional[int] = None, + resize_timeout: Optional[datetime.timedelta] = None, + node_deallocation_option: Optional[Union[str, "_models.BatchNodeDeallocationOption"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchPoolResourceStatistics(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Statistics related to resource consumption by Compute Nodes in a Pool. -class ExitCodeRangeMapping(Model): - """A range of exit codes and how the Batch service should respond to exit - codes within that range. + :ivar start_time: The start time of the time range covered by the statistics. Required. + :vartype start_time: ~datetime.datetime + :ivar last_update_time: The time at which the statistics were last updated. All statistics are + limited to the range between startTime and lastUpdateTime. Required. + :vartype last_update_time: ~datetime.datetime + :ivar avg_cpu_percentage: The average CPU usage across all Compute Nodes in the Pool + (percentage per node). Required. + :vartype avg_cpu_percentage: float + :ivar avg_memory_gi_b: The average memory usage in GiB across all Compute Nodes in the Pool. + Required. + :vartype avg_memory_gi_b: float + :ivar peak_memory_gi_b: The peak memory usage in GiB across all Compute Nodes in the Pool. + Required. + :vartype peak_memory_gi_b: float + :ivar avg_disk_gi_b: The average used disk space in GiB across all Compute Nodes in the Pool. + Required. + :vartype avg_disk_gi_b: float + :ivar peak_disk_gi_b: The peak used disk space in GiB across all Compute Nodes in the Pool. + Required. + :vartype peak_disk_gi_b: float + :ivar disk_read_i_ops: The total number of disk read operations across all Compute Nodes in the + Pool. Required. + :vartype disk_read_i_ops: int + :ivar disk_write_i_ops: The total number of disk write operations across all Compute Nodes in + the Pool. Required. + :vartype disk_write_i_ops: int + :ivar disk_read_gi_b: The total amount of data in GiB of disk reads across all Compute Nodes in + the Pool. Required. + :vartype disk_read_gi_b: float + :ivar disk_write_gi_b: The total amount of data in GiB of disk writes across all Compute Nodes + in the Pool. Required. + :vartype disk_write_gi_b: float + :ivar network_read_gi_b: The total amount of data in GiB of network reads across all Compute + Nodes in the Pool. Required. + :vartype network_read_gi_b: float + :ivar network_write_gi_b: The total amount of data in GiB of network writes across all Compute + Nodes in the Pool. Required. + :vartype network_write_gi_b: float + """ + + start_time: datetime.datetime = rest_field(name="startTime", format="rfc3339") + """The start time of the time range covered by the statistics. Required.""" + last_update_time: datetime.datetime = rest_field(name="lastUpdateTime", format="rfc3339") + """The time at which the statistics were last updated. All statistics are limited to the range + between startTime and lastUpdateTime. 
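# --- Illustrative sketch (editor-added, not part of the generated diff) ---
# A resize payload per the BatchPoolResizeContent model above;
# node_deallocation_option controls what happens to a node's running tasks
# when the Pool shrinks.
import datetime
from azure.batch.models import BatchPoolResizeContent

resize_content = BatchPoolResizeContent(
    target_dedicated_nodes=5,
    target_low_priority_nodes=0,
    resize_timeout=datetime.timedelta(minutes=15),  # minimum allowed is 5 minutes
    node_deallocation_option="requeue",
)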
Required.""" + avg_cpu_percentage: float = rest_field(name="avgCPUPercentage") + """The average CPU usage across all Compute Nodes in the Pool (percentage per node). Required.""" + avg_memory_gi_b: float = rest_field(name="avgMemoryGiB") + """The average memory usage in GiB across all Compute Nodes in the Pool. Required.""" + peak_memory_gi_b: float = rest_field(name="peakMemoryGiB") + """The peak memory usage in GiB across all Compute Nodes in the Pool. Required.""" + avg_disk_gi_b: float = rest_field(name="avgDiskGiB") + """The average used disk space in GiB across all Compute Nodes in the Pool. Required.""" + peak_disk_gi_b: float = rest_field(name="peakDiskGiB") + """The peak used disk space in GiB across all Compute Nodes in the Pool. Required.""" + disk_read_i_ops: int = rest_field(name="diskReadIOps") + """The total number of disk read operations across all Compute Nodes in the Pool. Required.""" + disk_write_i_ops: int = rest_field(name="diskWriteIOps") + """The total number of disk write operations across all Compute Nodes in the Pool. Required.""" + disk_read_gi_b: float = rest_field(name="diskReadGiB") + """The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. Required.""" + disk_write_gi_b: float = rest_field(name="diskWriteGiB") + """The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. Required.""" + network_read_gi_b: float = rest_field(name="networkReadGiB") + """The total amount of data in GiB of network reads across all Compute Nodes in the Pool. + Required.""" + network_write_gi_b: float = rest_field(name="networkWriteGiB") + """The total amount of data in GiB of network writes across all Compute Nodes in the Pool. + Required.""" + + @overload + def __init__( + self, + *, + start_time: datetime.datetime, + last_update_time: datetime.datetime, + avg_cpu_percentage: float, + avg_memory_gi_b: float, + peak_memory_gi_b: float, + avg_disk_gi_b: float, + peak_disk_gi_b: float, + disk_read_i_ops: int, + disk_write_i_ops: int, + disk_read_gi_b: float, + disk_write_gi_b: float, + network_read_gi_b: float, + network_write_gi_b: float, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchPoolSpecification(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Specification for creating a new Pool. - All required parameters must be populated in order to send to Azure. - :param start: Required. The first exit code in the range. - :type start: int - :param end: Required. The last exit code in the range. - :type end: int - :param exit_options: Required. How the Batch service should respond if the - Task exits with an exit code in the range start to end (inclusive). - :type exit_options: ~azure.batch.models.ExitOptions - """ + :ivar display_name: The display name for the Pool. The display name need not be unique and can + contain any Unicode characters up to a maximum length of 1024. + :vartype display_name: str + :ivar vm_size: The size of the virtual machines in the Pool. All virtual machines in a Pool are + the same size. For information about available sizes of virtual machines in Pools, see Choose a + VM size for Compute Nodes in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). Required. 
+ :vartype vm_size: str + :ivar virtual_machine_configuration: The virtual machine configuration for the Pool. This + property must be specified if the Pool needs to be created with Azure IaaS VMs. If it is not + specified then the Batch service returns an error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). + :vartype virtual_machine_configuration: ~azure.batch.models.VirtualMachineConfiguration + :ivar task_slots_per_node: The number of task slots that can be used to run concurrent tasks on + a single compute node in the pool. The default value is 1. The maximum value is the smaller of + 4 times the number of cores of the vmSize of the pool or 256. + :vartype task_slots_per_node: int + :ivar task_scheduling_policy: How Tasks are distributed across Compute Nodes in a Pool. If not + specified, the default is spread. + :vartype task_scheduling_policy: ~azure.batch.models.BatchTaskSchedulingPolicy + :ivar resize_timeout: The timeout for allocation of Compute Nodes to the Pool. This timeout + applies only to manual scaling; it has no effect when enableAutoScale is set to true. The + default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 + minutes, the Batch service rejects the request with an error; if you are calling the REST API + directly, the HTTP status code is 400 (Bad Request). + :vartype resize_timeout: ~datetime.timedelta + :ivar resource_tags: The user-specified tags associated with the pool.The user-defined tags to + be associated with the Azure Batch Pool. When specified, these tags are propagated to the + backing Azure resources associated with the pool. This property can only be specified when the + Batch account was created with the poolAllocationMode property set to 'UserSubscription'. + :vartype resource_tags: str + :ivar target_dedicated_nodes: The desired number of dedicated Compute Nodes in the Pool. This + property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to + false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :vartype target_dedicated_nodes: int + :ivar target_low_priority_nodes: The desired number of Spot/Low-priority Compute Nodes in the + Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale + is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or + both. + :vartype target_low_priority_nodes: int + :ivar enable_auto_scale: Whether the Pool size should automatically adjust over time. If false, + at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the + autoScaleFormula element is required. The Pool automatically resizes according to the formula. + The default value is false. + :vartype enable_auto_scale: bool + :ivar auto_scale_formula: The formula for the desired number of Compute Nodes in the Pool. This + property must not be specified if enableAutoScale is set to false. It is required if + enableAutoScale is set to true. The formula is checked for validity before the Pool is created. + If the formula is not valid, the Batch service rejects the request with detailed error + information. + :vartype auto_scale_formula: str + :ivar auto_scale_evaluation_interval: The time interval at which to automatically adjust the + Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and + maximum value are 5 minutes and 168 hours respectively. 
If you specify a value less than 5 + minutes or greater than 168 hours, the Batch service rejects the request with an invalid + property value error; if you are calling the REST API directly, the HTTP status code is 400 + (Bad Request). + :vartype auto_scale_evaluation_interval: ~datetime.timedelta + :ivar enable_inter_node_communication: Whether the Pool permits direct communication between + Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to + deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not + reaching its desired size. The default value is false. + :vartype enable_inter_node_communication: bool + :ivar network_configuration: The network configuration for the Pool. + :vartype network_configuration: ~azure.batch.models.NetworkConfiguration + :ivar start_task: A Task to run on each Compute Node as it joins the Pool. The Task runs when + the Compute Node is added to the Pool or when the Compute Node is restarted. + :vartype start_task: ~azure.batch.models.BatchStartTask + :ivar application_package_references: The list of Packages to be installed on each Compute Node + in the Pool. When creating a pool, the package's application ID must be fully qualified + (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). # pylint: disable=line-too-long + Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute + Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of + 10 Package references on any given Pool. + :vartype application_package_references: + list[~azure.batch.models.BatchApplicationPackageReference] + :ivar user_accounts: The list of user Accounts to be created on each Compute Node in the Pool. + :vartype user_accounts: list[~azure.batch.models.UserAccount] + :ivar metadata: A list of name-value pairs associated with the Pool as metadata. The Batch + service does not assign any meaning to metadata; it is solely for the use of user code. + :vartype metadata: list[~azure.batch.models.MetadataItem] + :ivar mount_configuration: A list of file systems to mount on each node in the pool. This + supports Azure Files, NFS, CIFS/SMB, and Blobfuse. + :vartype mount_configuration: list[~azure.batch.models.MountConfiguration] + :ivar target_node_communication_mode: The desired node communication mode for the pool. If + omitted, the default value is Default. Known values are: "default", "classic", and + "simplified". + :vartype target_node_communication_mode: str or ~azure.batch.models.BatchNodeCommunicationMode + :ivar upgrade_policy: The upgrade policy for the Pool. Describes an upgrade policy - automatic, + manual, or rolling. + :vartype upgrade_policy: ~azure.batch.models.UpgradePolicy + """ + + display_name: Optional[str] = rest_field(name="displayName") + """The display name for the Pool. The display name need not be unique and can contain any Unicode + characters up to a maximum length of 1024.""" + vm_size: str = rest_field(name="vmSize") + """The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. + For information about available sizes of virtual machines in Pools, see Choose a VM size for + Compute Nodes in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). 
Required.""" + virtual_machine_configuration: Optional["_models.VirtualMachineConfiguration"] = rest_field( + name="virtualMachineConfiguration" + ) + """The virtual machine configuration for the Pool. This property must be specified if the Pool + needs to be created with Azure IaaS VMs. If it is not specified then the Batch service returns + an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).""" + task_slots_per_node: Optional[int] = rest_field(name="taskSlotsPerNode") + """The number of task slots that can be used to run concurrent tasks on a single compute node in + the pool. The default value is 1. The maximum value is the smaller of 4 times the number of + cores of the vmSize of the pool or 256.""" + task_scheduling_policy: Optional["_models.BatchTaskSchedulingPolicy"] = rest_field(name="taskSchedulingPolicy") + """How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is + spread.""" + resize_timeout: Optional[datetime.timedelta] = rest_field(name="resizeTimeout") + """The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual + scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. + The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service + rejects the request with an error; if you are calling the REST API directly, the HTTP status + code is 400 (Bad Request).""" + resource_tags: Optional[str] = rest_field(name="resourceTags") + """The user-specified tags associated with the pool.The user-defined tags to be associated with + the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources + associated with the pool. This property can only be specified when the Batch account was + created with the poolAllocationMode property set to 'UserSubscription'.""" + target_dedicated_nodes: Optional[int] = rest_field(name="targetDedicatedNodes") + """The desired number of dedicated Compute Nodes in the Pool. This property must not be specified + if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either + targetDedicatedNodes, targetLowPriorityNodes, or both.""" + target_low_priority_nodes: Optional[int] = rest_field(name="targetLowPriorityNodes") + """The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be + specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must + set either targetDedicatedNodes, targetLowPriorityNodes, or both.""" + enable_auto_scale: Optional[bool] = rest_field(name="enableAutoScale") + """Whether the Pool size should automatically adjust over time. If false, at least one of + targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the + autoScaleFormula element is required. The Pool automatically resizes according to the formula. + The default value is false.""" + auto_scale_formula: Optional[str] = rest_field(name="autoScaleFormula") + """The formula for the desired number of Compute Nodes in the Pool. This property must not be + specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. + The formula is checked for validity before the Pool is created. 
If the formula is not valid, + the Batch service rejects the request with detailed error information.""" + auto_scale_evaluation_interval: Optional[datetime.timedelta] = rest_field(name="autoScaleEvaluationInterval") + """The time interval at which to automatically adjust the Pool size according to the autoscale + formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 + hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the + Batch service rejects the request with an invalid property value error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request).""" + enable_inter_node_communication: Optional[bool] = rest_field(name="enableInterNodeCommunication") + """Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node + communication limits the maximum size of the Pool due to deployment restrictions on the Compute + Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value + is false.""" + network_configuration: Optional["_models.NetworkConfiguration"] = rest_field(name="networkConfiguration") + """The network configuration for the Pool.""" + start_task: Optional["_models.BatchStartTask"] = rest_field(name="startTask") + """A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is + added to the Pool or when the Compute Node is restarted.""" + application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = rest_field( + name="applicationPackageReferences" + ) + """The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, + the package's application ID must be fully qualified + (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). # pylint: disable=line-too-long + Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute + Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of + 10 Package references on any given Pool.""" + user_accounts: Optional[List["_models.UserAccount"]] = rest_field(name="userAccounts") + """The list of user Accounts to be created on each Compute Node in the Pool.""" + metadata: Optional[List["_models.MetadataItem"]] = rest_field() + """A list of name-value pairs associated with the Pool as metadata. The Batch service does not + assign any meaning to metadata; it is solely for the use of user code.""" + mount_configuration: Optional[List["_models.MountConfiguration"]] = rest_field(name="mountConfiguration") + """A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, + CIFS/SMB, and Blobfuse.""" + target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = rest_field( + name="targetNodeCommunicationMode" + ) + """The desired node communication mode for the pool. If omitted, the default value is Default. + Known values are: \"default\", \"classic\", and \"simplified\".""" + upgrade_policy: Optional["_models.UpgradePolicy"] = rest_field(name="upgradePolicy") + """The upgrade policy for the Pool. 
Describes an upgrade policy - automatic, manual, or rolling.""" + + @overload + def __init__( + self, + *, + vm_size: str, + display_name: Optional[str] = None, + virtual_machine_configuration: Optional["_models.VirtualMachineConfiguration"] = None, + task_slots_per_node: Optional[int] = None, + task_scheduling_policy: Optional["_models.BatchTaskSchedulingPolicy"] = None, + resize_timeout: Optional[datetime.timedelta] = None, + resource_tags: Optional[str] = None, + target_dedicated_nodes: Optional[int] = None, + target_low_priority_nodes: Optional[int] = None, + enable_auto_scale: Optional[bool] = None, + auto_scale_formula: Optional[str] = None, + auto_scale_evaluation_interval: Optional[datetime.timedelta] = None, + enable_inter_node_communication: Optional[bool] = None, + network_configuration: Optional["_models.NetworkConfiguration"] = None, + start_task: Optional["_models.BatchStartTask"] = None, + application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = None, + user_accounts: Optional[List["_models.UserAccount"]] = None, + metadata: Optional[List["_models.MetadataItem"]] = None, + mount_configuration: Optional[List["_models.MountConfiguration"]] = None, + target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = None, + upgrade_policy: Optional["_models.UpgradePolicy"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchPoolStatistics(_model_base.Model): + """Contains utilization and resource usage statistics for the lifetime of a Pool. + + + :ivar url: The URL for the statistics. Required. + :vartype url: str + :ivar start_time: The start time of the time range covered by the statistics. Required. + :vartype start_time: ~datetime.datetime + :ivar last_update_time: The time at which the statistics were last updated. All statistics are + limited to the range between startTime and lastUpdateTime. Required. + :vartype last_update_time: ~datetime.datetime + :ivar usage_stats: Statistics related to Pool usage, such as the amount of core-time used. + :vartype usage_stats: ~azure.batch.models.BatchPoolUsageStatistics + :ivar resource_stats: Statistics related to resource consumption by Compute Nodes in the Pool. + :vartype resource_stats: ~azure.batch.models.BatchPoolResourceStatistics + """ + + url: str = rest_field() + """The URL for the statistics. Required.""" + start_time: datetime.datetime = rest_field(name="startTime", format="rfc3339") + """The start time of the time range covered by the statistics. Required.""" + last_update_time: datetime.datetime = rest_field(name="lastUpdateTime", format="rfc3339") + """The time at which the statistics were last updated. All statistics are limited to the range + between startTime and lastUpdateTime. 
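# --- Illustrative sketch (editor-added, not part of the generated diff) ---
# A fixed-size BatchPoolSpecification per the model above; only vm_size is
# required by the model itself, although the service also expects
# virtual_machine_configuration for IaaS pools (omitted here for brevity).
# The VM size string is a placeholder chosen for illustration.
from azure.batch.models import BatchPoolSpecification

pool_spec = BatchPoolSpecification(
    vm_size="standard_d2s_v3",
    target_dedicated_nodes=2,
    task_slots_per_node=1,
    enable_auto_scale=False,
)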
Required.""" + usage_stats: Optional["_models.BatchPoolUsageStatistics"] = rest_field(name="usageStats") + """Statistics related to Pool usage, such as the amount of core-time used.""" + resource_stats: Optional["_models.BatchPoolResourceStatistics"] = rest_field(name="resourceStats") + """Statistics related to resource consumption by Compute Nodes in the Pool.""" + + @overload + def __init__( + self, + *, + url: str, + start_time: datetime.datetime, + last_update_time: datetime.datetime, + usage_stats: Optional["_models.BatchPoolUsageStatistics"] = None, + resource_stats: Optional["_models.BatchPoolResourceStatistics"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchPoolUpdateContent(_model_base.Model): + """Parameters for updating an Azure Batch Pool. + + :ivar start_task: A Task to run on each Compute Node as it joins the Pool. The Task runs when + the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is + present, it overwrites any existing StartTask. If omitted, any existing StartTask is left + unchanged. + :vartype start_task: ~azure.batch.models.BatchStartTask + :ivar application_package_references: A list of Packages to be installed on each Compute Node + in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not + affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this + element is present, it replaces any existing Package references. If you specify an empty + collection, then all Package references are removed from the Pool. If omitted, any existing + Package references are left unchanged. + :vartype application_package_references: + list[~azure.batch.models.BatchApplicationPackageReference] + :ivar metadata: A list of name-value pairs associated with the Pool as metadata. If this + element is present, it replaces any existing metadata configured on the Pool. If you specify an + empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is + left unchanged. + :vartype metadata: list[~azure.batch.models.MetadataItem] + :ivar target_node_communication_mode: The desired node communication mode for the pool. If this + element is present, it replaces the existing targetNodeCommunicationMode configured on the + Pool. If omitted, any existing metadata is left unchanged. Known values are: "default", + "classic", and "simplified". + :vartype target_node_communication_mode: str or ~azure.batch.models.BatchNodeCommunicationMode + """ + + start_task: Optional["_models.BatchStartTask"] = rest_field(name="startTask") + """A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is + added to the Pool or when the Compute Node is restarted. If this element is present, it + overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged.""" + application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = rest_field( + name="applicationPackageReferences" + ) + """A list of Packages to be installed on each Compute Node in the Pool. 
Changes to Package + references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are + already in the Pool until they are rebooted or reimaged. If this element is present, it + replaces any existing Package references. If you specify an empty collection, then all Package + references are removed from the Pool. If omitted, any existing Package references are left + unchanged.""" + metadata: Optional[List["_models.MetadataItem"]] = rest_field() + """A list of name-value pairs associated with the Pool as metadata. If this element is present, it + replaces any existing metadata configured on the Pool. If you specify an empty collection, any + metadata is removed from the Pool. If omitted, any existing metadata is left unchanged.""" + target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = rest_field( + name="targetNodeCommunicationMode" + ) + """The desired node communication mode for the pool. If this element is present, it replaces the + existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata + is left unchanged. Known values are: \"default\", \"classic\", and \"simplified\".""" + + @overload + def __init__( + self, + *, + start_task: Optional["_models.BatchStartTask"] = None, + application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = None, + metadata: Optional[List["_models.MetadataItem"]] = None, + target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchPoolUsageMetrics(_model_base.Model): + """Usage metrics for a Pool across an aggregation interval. - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - 'exit_options': {'required': True}, - } - _attribute_map = { - 'start': {'key': 'start', 'type': 'int'}, - 'end': {'key': 'end', 'type': 'int'}, - 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, - } + :ivar pool_id: The ID of the Pool whose metrics are aggregated in this entry. Required. + :vartype pool_id: str + :ivar start_time: The start time of the aggregation interval covered by this entry. Required. + :vartype start_time: ~datetime.datetime + :ivar end_time: The end time of the aggregation interval covered by this entry. Required. + :vartype end_time: ~datetime.datetime + :ivar vm_size: The size of virtual machines in the Pool. All VMs in a Pool are the same size. + For information about available sizes of virtual machines in Pools, see Choose a VM size for + Compute Nodes in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). Required. + :vartype vm_size: str + :ivar total_core_hours: The total core hours used in the Pool during this aggregation interval. + Required. + :vartype total_core_hours: float + """ + + pool_id: str = rest_field(name="poolId") + """The ID of the Pool whose metrics are aggregated in this entry. Required.""" + start_time: datetime.datetime = rest_field(name="startTime", format="rfc3339") + """The start time of the aggregation interval covered by this entry. 
Required.""" + end_time: datetime.datetime = rest_field(name="endTime", format="rfc3339") + """The end time of the aggregation interval covered by this entry. Required.""" + vm_size: str = rest_field(name="vmSize") + """The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information + about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in + an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). Required.""" + total_core_hours: float = rest_field(name="totalCoreHours") + """The total core hours used in the Pool during this aggregation interval. Required.""" + + @overload + def __init__( + self, + *, + pool_id: str, + start_time: datetime.datetime, + end_time: datetime.datetime, + vm_size: str, + total_core_hours: float, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchPoolUsageStatistics(_model_base.Model): + """Statistics related to Pool usage information. - def __init__(self, **kwargs): - super(ExitCodeRangeMapping, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) - self.exit_options = kwargs.get('exit_options', None) + :ivar start_time: The start time of the time range covered by the statistics. Required. + :vartype start_time: ~datetime.datetime + :ivar last_update_time: The time at which the statistics were last updated. All statistics are + limited to the range between startTime and lastUpdateTime. Required. + :vartype last_update_time: ~datetime.datetime + :ivar dedicated_core_time: The aggregated wall-clock time of the dedicated Compute Node cores + being part of the Pool. Required. + :vartype dedicated_core_time: ~datetime.timedelta + """ + + start_time: datetime.datetime = rest_field(name="startTime", format="rfc3339") + """The start time of the time range covered by the statistics. Required.""" + last_update_time: datetime.datetime = rest_field(name="lastUpdateTime", format="rfc3339") + """The time at which the statistics were last updated. All statistics are limited to the range + between startTime and lastUpdateTime. Required.""" + dedicated_core_time: datetime.timedelta = rest_field(name="dedicatedCoreTime") + """The aggregated wall-clock time of the dedicated Compute Node cores being part of the Pool. + Required.""" + + @overload + def __init__( + self, + *, + start_time: datetime.datetime, + last_update_time: datetime.datetime, + dedicated_core_time: datetime.timedelta, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchStartTask(_model_base.Model): + """Batch will retry Tasks when a recovery operation is triggered on a Node. + Examples of recovery operations include (but are not limited to) when an + unhealthy Node is rebooted or a Compute Node disappeared due to host failure. + Retries due to recovery operations are independent of and are not counted + against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal + retry due to a recovery operation may occur. 
Because of this, all Tasks should + be idempotent. This means Tasks need to tolerate being interrupted and + restarted without causing any corruption or duplicate data. The best practice + for long running Tasks is to use some form of checkpointing. In some cases the + StartTask may be re-run even though the Compute Node was not rebooted. Special + care should be taken to avoid StartTasks which create breakaway process or + install/launch services from the StartTask working directory, as this will + block Batch from being able to re-run the StartTask. + + + :ivar command_line: The command line of the StartTask. The command line does not run under a + shell, and therefore cannot take advantage of shell features such as environment variable + expansion. If you want to take advantage of such features, you should invoke the shell in the + command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in + Linux. If the command line refers to file paths, it should use a relative path (relative to the + Task working directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + Required. + :vartype command_line: str + :ivar container_settings: The settings for the container under which the StartTask runs. When + this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of + Azure Batch directories on the node) are mapped into the container, all Task environment + variables are mapped into the container, and the Task command line is executed in the + container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be + reflected to the host disk, meaning that Batch file APIs will not be able to access those + files. + :vartype container_settings: ~azure.batch.models.BatchTaskContainerSettings + :ivar resource_files: A list of files that the Batch service will download to the Compute Node + before running the command line. There is a maximum size for the list of resource files. When + the max size is exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. + This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed + under this element are located in the Task's working directory. + :vartype resource_files: list[~azure.batch.models.ResourceFile] + :ivar environment_settings: A list of environment variable settings for the StartTask. + :vartype environment_settings: list[~azure.batch.models.EnvironmentSetting] + :ivar user_identity: The user identity under which the StartTask runs. If omitted, the Task + runs as a non-administrative user unique to the Task. + :vartype user_identity: ~azure.batch.models.UserIdentity + :ivar max_task_retry_count: The maximum number of times the Task may be retried. The Batch + service retries a Task if its exit code is nonzero. Note that this value specifically controls + the number of retries. The Batch service will try the Task once, and may then retry up to this + limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one + initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry + the Task. If the maximum retry count is -1, the Batch service retries the Task without limit, + however this is not recommended for a start task or any task. The default value is 0 (no + retries). 
+ :vartype max_task_retry_count: int + :ivar wait_for_success: Whether the Batch service should wait for the StartTask to complete + successfully (that is, to exit with exit code 0) before scheduling any Tasks on the Compute + Node. If true and the StartTask fails on a Node, the Batch service retries the StartTask up to + its maximum retry count (maxTaskRetryCount). If the Task has still not completed successfully + after all retries, then the Batch service marks the Node unusable, and will not schedule Tasks + to it. This condition can be detected via the Compute Node state and failure info details. If + false, the Batch service will not wait for the StartTask to complete. In this case, other Tasks + can start executing on the Compute Node while the StartTask is still running; and even if the + StartTask fails, new Tasks will continue to be scheduled on the Compute Node. The default is + true. + :vartype wait_for_success: bool + """ + + command_line: str = rest_field(name="commandLine") + """The command line of the StartTask. The command line does not run under a shell, and therefore + cannot take advantage of shell features such as environment variable expansion. If you want to + take advantage of such features, you should invoke the shell in the command line, for example + using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line + refers to file paths, it should use a relative path (relative to the Task working directory), + or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + Required.""" + container_settings: Optional["_models.BatchTaskContainerSettings"] = rest_field(name="containerSettings") + """The settings for the container under which the StartTask runs. When this is specified, all + directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories + on the node) are mapped into the container, all Task environment variables are mapped into the + container, and the Task command line is executed in the container. Files produced in the + container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files.""" + resource_files: Optional[List["_models.ResourceFile"]] = rest_field(name="resourceFiles") + """A list of files that the Batch service will download to the Compute Node before running the + command line. There is a maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If + this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved + using .zip files, Application Packages, or Docker Containers. Files listed under this element + are located in the Task's working directory.""" + environment_settings: Optional[List["_models.EnvironmentSetting"]] = rest_field(name="environmentSettings") + """A list of environment variable settings for the StartTask.""" + user_identity: Optional["_models.UserIdentity"] = rest_field(name="userIdentity") + """The user identity under which the StartTask runs. If omitted, the Task runs as a + non-administrative user unique to the Task.""" + max_task_retry_count: Optional[int] = rest_field(name="maxTaskRetryCount") + """The maximum number of times the Task may be retried. The Batch service retries a Task if its + exit code is nonzero. 
Note that this value specifically controls the number of retries. The + Batch service will try the Task once, and may then retry up to this limit. For example, if the + maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). + If the maximum retry count is 0, the Batch service does not retry the Task. If the maximum + retry count is -1, the Batch service retries the Task without limit, however this is not + recommended for a start task or any task. The default value is 0 (no retries).""" + wait_for_success: Optional[bool] = rest_field(name="waitForSuccess") + """Whether the Batch service should wait for the StartTask to complete successfully (that is, to + exit with exit code 0) before scheduling any Tasks on the Compute Node. If true and the + StartTask fails on a Node, the Batch service retries the StartTask up to its maximum retry + count (maxTaskRetryCount). If the Task has still not completed successfully after all retries, + then the Batch service marks the Node unusable, and will not schedule Tasks to it. This + condition can be detected via the Compute Node state and failure info details. If false, the + Batch service will not wait for the StartTask to complete. In this case, other Tasks can start + executing on the Compute Node while the StartTask is still running; and even if the StartTask + fails, new Tasks will continue to be scheduled on the Compute Node. The default is true.""" + + @overload + def __init__( + self, + *, + command_line: str, + container_settings: Optional["_models.BatchTaskContainerSettings"] = None, + resource_files: Optional[List["_models.ResourceFile"]] = None, + environment_settings: Optional[List["_models.EnvironmentSetting"]] = None, + user_identity: Optional["_models.UserIdentity"] = None, + max_task_retry_count: Optional[int] = None, + wait_for_success: Optional[bool] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchStartTaskInfo(_model_base.Model): + """Information about a StartTask running on a Compute Node. -class ExitConditions(Model): - """Specifies how the Batch service should respond when the Task completes. - :param exit_codes: - :type exit_codes: list[~azure.batch.models.ExitCodeMapping] - :param exit_code_ranges: - :type exit_code_ranges: list[~azure.batch.models.ExitCodeRangeMapping] - :param pre_processing_error: How the Batch service should respond if the - Task fails to start due to an error. - :type pre_processing_error: ~azure.batch.models.ExitOptions - :param file_upload_error: How the Batch service should respond if a file - upload error occurs. If the Task exited with an exit code that was - specified via exitCodes or exitCodeRanges, and then encountered a file - upload error, then the action specified by the exit code takes precedence. - :type file_upload_error: ~azure.batch.models.ExitOptions - :param default: How the Batch service should respond if the Task fails - with an exit condition not covered by any of the other properties. This - value is used if the Task exits with any nonzero exit code not listed in - the exitCodes or exitCodeRanges collection, with a pre-processing error if - the preProcessingError property is not present, or with a file upload - error if the fileUploadError property is not present. 
If you want - non-default behavior on exit code 0, you must list it explicitly using the - exitCodes or exitCodeRanges collection. - :type default: ~azure.batch.models.ExitOptions - """ - - _attribute_map = { - 'exit_codes': {'key': 'exitCodes', 'type': '[ExitCodeMapping]'}, - 'exit_code_ranges': {'key': 'exitCodeRanges', 'type': '[ExitCodeRangeMapping]'}, - 'pre_processing_error': {'key': 'preProcessingError', 'type': 'ExitOptions'}, - 'file_upload_error': {'key': 'fileUploadError', 'type': 'ExitOptions'}, - 'default': {'key': 'default', 'type': 'ExitOptions'}, - } - - def __init__(self, **kwargs): - super(ExitConditions, self).__init__(**kwargs) - self.exit_codes = kwargs.get('exit_codes', None) - self.exit_code_ranges = kwargs.get('exit_code_ranges', None) - self.pre_processing_error = kwargs.get('pre_processing_error', None) - self.file_upload_error = kwargs.get('file_upload_error', None) - self.default = kwargs.get('default', None) - - -class ExitOptions(Model): - """Specifies how the Batch service responds to a particular exit condition. + :ivar state: The state of the StartTask on the Compute Node. Required. Known values are: + "running" and "completed". + :vartype state: str or ~azure.batch.models.BatchStartTaskState + :ivar start_time: The time at which the StartTask started running. This value is reset every + time the Task is restarted or retried (that is, this is the most recent time at which the + StartTask started running). Required. + :vartype start_time: ~datetime.datetime + :ivar end_time: The time at which the StartTask stopped running. This is the end time of the + most recent run of the StartTask, if that run has completed (even if that run failed and a + retry is pending). This element is not present if the StartTask is currently running. + :vartype end_time: ~datetime.datetime + :ivar exit_code: The exit code of the program specified on the StartTask command line. This + property is set only if the StartTask is in the completed state. In general, the exit code for + a process reflects the specific convention implemented by the application developer for that + process. If you use the exit code value to make decisions in your code, be sure that you know + the exit code convention used by the application process. However, if the Batch service + terminates the StartTask (due to timeout, or user termination via the API) you may see an + operating system-defined exit code. + :vartype exit_code: int + :ivar container_info: Information about the container under which the Task is executing. This + property is set only if the Task runs in a container context. + :vartype container_info: ~azure.batch.models.BatchTaskContainerExecutionInfo + :ivar failure_info: Information describing the Task failure, if any. This property is set only + if the Task is in the completed state and encountered a failure. + :vartype failure_info: ~azure.batch.models.BatchTaskFailureInfo + :ivar retry_count: The number of times the Task has been retried by the Batch service. Task + application failures (non-zero exit code) are retried, pre-processing errors (the Task could + not be run) and file upload errors are not retried. The Batch service will retry the Task up to + the limit specified by the constraints. Required. + :vartype retry_count: int + :ivar last_retry_time: The most recent time at which a retry of the Task started running. This + element is present only if the Task was retried (i.e. retryCount is nonzero). 
If present, this + is typically the same as startTime, but may be different if the Task has been restarted for + reasons other than retry; for example, if the Compute Node was rebooted during a retry, then + the startTime is updated but the lastRetryTime is not. + :vartype last_retry_time: ~datetime.datetime + :ivar result: The result of the Task execution. If the value is 'failed', then the details of + the failure can be found in the failureInfo property. Known values are: "success" and + "failure". + :vartype result: str or ~azure.batch.models.BatchTaskExecutionResult + """ + + state: Union[str, "_models.BatchStartTaskState"] = rest_field() + """The state of the StartTask on the Compute Node. Required. Known values are: \"running\" and + \"completed\".""" + start_time: datetime.datetime = rest_field(name="startTime", format="rfc3339") + """The time at which the StartTask started running. This value is reset every time the Task is + restarted or retried (that is, this is the most recent time at which the StartTask started + running). Required.""" + end_time: Optional[datetime.datetime] = rest_field(name="endTime", format="rfc3339") + """The time at which the StartTask stopped running. This is the end time of the most recent run of + the StartTask, if that run has completed (even if that run failed and a retry is pending). This + element is not present if the StartTask is currently running.""" + exit_code: Optional[int] = rest_field(name="exitCode") + """The exit code of the program specified on the StartTask command line. This property is set only + if the StartTask is in the completed state. In general, the exit code for a process reflects + the specific convention implemented by the application developer for that process. If you use + the exit code value to make decisions in your code, be sure that you know the exit code + convention used by the application process. However, if the Batch service terminates the + StartTask (due to timeout, or user termination via the API) you may see an operating + system-defined exit code.""" + container_info: Optional["_models.BatchTaskContainerExecutionInfo"] = rest_field(name="containerInfo") + """Information about the container under which the Task is executing. This property is set only if + the Task runs in a container context.""" + failure_info: Optional["_models.BatchTaskFailureInfo"] = rest_field(name="failureInfo") + """Information describing the Task failure, if any. This property is set only if the Task is in + the completed state and encountered a failure.""" + retry_count: int = rest_field(name="retryCount") + """The number of times the Task has been retried by the Batch service. Task application failures + (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file + upload errors are not retried. The Batch service will retry the Task up to the limit specified + by the constraints. Required.""" + last_retry_time: Optional[datetime.datetime] = rest_field(name="lastRetryTime", format="rfc3339") + """The most recent time at which a retry of the Task started running. This element is present only + if the Task was retried (i.e. retryCount is nonzero). 
If present, this is typically the same as + startTime, but may be different if the Task has been restarted for reasons other than retry; + for example, if the Compute Node was rebooted during a retry, then the startTime is updated but + the lastRetryTime is not.""" + result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = rest_field() + """The result of the Task execution. If the value is 'failed', then the details of the failure can + be found in the failureInfo property. Known values are: \"success\" and \"failure\".""" + + @overload + def __init__( + self, + *, + state: Union[str, "_models.BatchStartTaskState"], + start_time: datetime.datetime, + retry_count: int, + end_time: Optional[datetime.datetime] = None, + exit_code: Optional[int] = None, + container_info: Optional["_models.BatchTaskContainerExecutionInfo"] = None, + failure_info: Optional["_models.BatchTaskFailureInfo"] = None, + last_retry_time: Optional[datetime.datetime] = None, + result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchSubtask(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Information about an Azure Batch subtask. - :param job_action: The default is none for exit code 0 and terminate for - all other exit conditions. If the Job's onTaskFailed property is noaction, - then specifying this property returns an error and the add Task request - fails with an invalid property value error; if you are calling the REST - API directly, the HTTP status code is 400 (Bad Request). Possible values - include: 'none', 'disable', 'terminate' - :type job_action: str or ~azure.batch.models.JobAction - :param dependency_action: Possible values are 'satisfy' (allowing - dependent tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent tasks. - Possible values include: 'satisfy', 'block' - :type dependency_action: str or ~azure.batch.models.DependencyAction - """ - - _attribute_map = { - 'job_action': {'key': 'jobAction', 'type': 'JobAction'}, - 'dependency_action': {'key': 'dependencyAction', 'type': 'DependencyAction'}, - } - - def __init__(self, **kwargs): - super(ExitOptions, self).__init__(**kwargs) - self.job_action = kwargs.get('job_action', None) - self.dependency_action = kwargs.get('dependency_action', None) - - -class FileDeleteFromComputeNodeOptions(Model): - """Additional parameters for delete_from_compute_node operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. 
Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(FileDeleteFromComputeNodeOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class FileDeleteFromTaskOptions(Model): - """Additional parameters for delete_from_task operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(FileDeleteFromTaskOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class FileGetFromComputeNodeOptions(Model): - """Additional parameters for get_from_compute_node operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param ocp_range: The byte range to be retrieved. The default is to - retrieve the entire file. The format is bytes=startRange-endRange. - :type ocp_range: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. 
- :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'ocp_range': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(FileGetFromComputeNodeOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.ocp_range = kwargs.get('ocp_range', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class FileGetFromTaskOptions(Model): - """Additional parameters for get_from_task operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param ocp_range: The byte range to be retrieved. The default is to - retrieve the entire file. The format is bytes=startRange-endRange. - :type ocp_range: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'ocp_range': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(FileGetFromTaskOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.ocp_range = kwargs.get('ocp_range', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class FileGetPropertiesFromComputeNodeOptions(Model): - """Additional parameters for get_properties_from_compute_node operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(FileGetPropertiesFromComputeNodeOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class FileGetPropertiesFromTaskOptions(Model): - """Additional parameters for get_properties_from_task operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. 
If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(FileGetPropertiesFromTaskOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class FileListFromComputeNodeOptions(Model): - """Additional parameters for list_from_compute_node operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. - :type filter: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 files can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(FileListFromComputeNodeOptions, self).__init__(**kwargs) - self.filter = kwargs.get('filter', None) - self.max_results = kwargs.get('max_results', 1000) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class FileListFromTaskOptions(Model): - """Additional parameters for list_from_task operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. - :type filter: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 files can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(FileListFromTaskOptions, self).__init__(**kwargs) - self.filter = kwargs.get('filter', None) - self.max_results = kwargs.get('max_results', 1000) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class FileProperties(Model): - """The properties of a file on a Compute Node. + :ivar id: The ID of the subtask. + :vartype id: int + :ivar node_info: Information about the Compute Node on which the subtask ran. + :vartype node_info: ~azure.batch.models.BatchNodeInfo + :ivar start_time: The time at which the subtask started running. If the subtask has been + restarted or retried, this is the most recent time at which the subtask started running. + :vartype start_time: ~datetime.datetime + :ivar end_time: The time at which the subtask completed. This property is set only if the + subtask is in the Completed state. 
+ :vartype end_time: ~datetime.datetime + :ivar exit_code: The exit code of the program specified on the subtask command line. This + property is set only if the subtask is in the completed state. In general, the exit code for a + process reflects the specific convention implemented by the application developer for that + process. If you use the exit code value to make decisions in your code, be sure that you know + the exit code convention used by the application process. However, if the Batch service + terminates the subtask (due to timeout, or user termination via the API) you may see an + operating system-defined exit code. + :vartype exit_code: int + :ivar container_info: Information about the container under which the Task is executing. This + property is set only if the Task runs in a container context. + :vartype container_info: ~azure.batch.models.BatchTaskContainerExecutionInfo + :ivar failure_info: Information describing the Task failure, if any. This property is set only + if the Task is in the completed state and encountered a failure. + :vartype failure_info: ~azure.batch.models.BatchTaskFailureInfo + :ivar state: The current state of the subtask. Known values are: "preparing", "running", and + "completed". + :vartype state: str or ~azure.batch.models.BatchSubtaskState + :ivar state_transition_time: The time at which the subtask entered its current state. + :vartype state_transition_time: ~datetime.datetime + :ivar previous_state: The previous state of the subtask. This property is not set if the + subtask is in its initial running state. Known values are: "preparing", "running", and + "completed". + :vartype previous_state: str or ~azure.batch.models.BatchSubtaskState + :ivar previous_state_transition_time: The time at which the subtask entered its previous state. + This property is not set if the subtask is in its initial running state. + :vartype previous_state_transition_time: ~datetime.datetime + :ivar result: The result of the Task execution. If the value is 'failed', then the details of + the failure can be found in the failureInfo property. Known values are: "success" and + "failure". + :vartype result: str or ~azure.batch.models.BatchTaskExecutionResult + """ + + id: Optional[int] = rest_field() + """The ID of the subtask.""" + node_info: Optional["_models.BatchNodeInfo"] = rest_field(name="nodeInfo") + """Information about the Compute Node on which the subtask ran.""" + start_time: Optional[datetime.datetime] = rest_field(name="startTime", format="rfc3339") + """The time at which the subtask started running. If the subtask has been restarted or retried, + this is the most recent time at which the subtask started running.""" + end_time: Optional[datetime.datetime] = rest_field(name="endTime", format="rfc3339") + """The time at which the subtask completed. This property is set only if the subtask is in the + Completed state.""" + exit_code: Optional[int] = rest_field(name="exitCode") + """The exit code of the program specified on the subtask command line. This property is set only + if the subtask is in the completed state. In general, the exit code for a process reflects the + specific convention implemented by the application developer for that process. If you use the + exit code value to make decisions in your code, be sure that you know the exit code convention + used by the application process. 
However, if the Batch service terminates the subtask (due to + timeout, or user termination via the API) you may see an operating system-defined exit code.""" + container_info: Optional["_models.BatchTaskContainerExecutionInfo"] = rest_field(name="containerInfo") + """Information about the container under which the Task is executing. This property is set only if + the Task runs in a container context.""" + failure_info: Optional["_models.BatchTaskFailureInfo"] = rest_field(name="failureInfo") + """Information describing the Task failure, if any. This property is set only if the Task is in + the completed state and encountered a failure.""" + state: Optional[Union[str, "_models.BatchSubtaskState"]] = rest_field() + """The current state of the subtask. Known values are: \"preparing\", \"running\", and + \"completed\".""" + state_transition_time: Optional[datetime.datetime] = rest_field(name="stateTransitionTime", format="rfc3339") + """The time at which the subtask entered its current state.""" + previous_state: Optional[Union[str, "_models.BatchSubtaskState"]] = rest_field(name="previousState") + """The previous state of the subtask. This property is not set if the subtask is in its initial + running state. Known values are: \"preparing\", \"running\", and \"completed\".""" + previous_state_transition_time: Optional[datetime.datetime] = rest_field( + name="previousStateTransitionTime", format="rfc3339" + ) + """The time at which the subtask entered its previous state. This property is not set if the + subtask is in its initial running state.""" + result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = rest_field() + """The result of the Task execution. If the value is 'failed', then the details of the failure can + be found in the failureInfo property. Known values are: \"success\" and \"failure\".""" + + @overload + def __init__( + self, + *, + id: Optional[int] = None, # pylint: disable=redefined-builtin + node_info: Optional["_models.BatchNodeInfo"] = None, + start_time: Optional[datetime.datetime] = None, + end_time: Optional[datetime.datetime] = None, + exit_code: Optional[int] = None, + container_info: Optional["_models.BatchTaskContainerExecutionInfo"] = None, + failure_info: Optional["_models.BatchTaskFailureInfo"] = None, + state: Optional[Union[str, "_models.BatchSubtaskState"]] = None, + state_transition_time: Optional[datetime.datetime] = None, + previous_state: Optional[Union[str, "_models.BatchSubtaskState"]] = None, + previous_state_transition_time: Optional[datetime.datetime] = None, + result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchSupportedImage(_model_base.Model): + """A reference to the Azure Virtual Machines Marketplace Image and additional + information about the Image. - All required parameters must be populated in order to send to Azure. - - :param creation_time: The creation time is not returned for files on Linux - Compute Nodes. - :type creation_time: datetime - :param last_modified: Required. - :type last_modified: datetime - :param content_length: Required. The length of the file. 
- :type content_length: long - :param content_type: - :type content_type: str - :param file_mode: The file mode is returned only for files on Linux - Compute Nodes. - :type file_mode: str - """ - - _validation = { - 'last_modified': {'required': True}, - 'content_length': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, - 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, - 'content_length': {'key': 'contentLength', 'type': 'long'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'file_mode': {'key': 'fileMode', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(FileProperties, self).__init__(**kwargs) - self.creation_time = kwargs.get('creation_time', None) - self.last_modified = kwargs.get('last_modified', None) - self.content_length = kwargs.get('content_length', None) - self.content_type = kwargs.get('content_type', None) - self.file_mode = kwargs.get('file_mode', None) - - -class HttpHeader(Model): - """An HTTP header name-value pair. - All required parameters must be populated in order to send to Azure. + :ivar node_agent_sku_id: The ID of the Compute Node agent SKU which the Image supports. + Required. + :vartype node_agent_sku_id: str + :ivar image_reference: The reference to the Azure Virtual Machine's Marketplace Image. + Required. + :vartype image_reference: ~azure.batch.models.ImageReference + :ivar os_type: The type of operating system (e.g. Windows or Linux) of the Image. Required. + Known values are: "linux" and "windows". + :vartype os_type: str or ~azure.batch.models.OSType + :ivar capabilities: The capabilities or features which the Image supports. Not every capability + of the Image is listed. Capabilities in this list are considered of special interest and are + generally related to integration with other features in the Azure Batch service. + :vartype capabilities: list[str] + :ivar batch_support_end_of_life: The time when the Azure Batch service will stop accepting + create Pool requests for the Image. + :vartype batch_support_end_of_life: ~datetime.datetime + :ivar verification_type: Whether the Azure Batch service actively verifies that the Image is + compatible with the associated Compute Node agent SKU. Required. Known values are: "verified" + and "unverified". + :vartype verification_type: str or ~azure.batch.models.ImageVerificationType + """ + + node_agent_sku_id: str = rest_field(name="nodeAgentSKUId") + """The ID of the Compute Node agent SKU which the Image supports. Required.""" + image_reference: "_models.ImageReference" = rest_field(name="imageReference") + """The reference to the Azure Virtual Machine's Marketplace Image. Required.""" + os_type: Union[str, "_models.OSType"] = rest_field(name="osType") + """The type of operating system (e.g. Windows or Linux) of the Image. Required. Known values are: + \"linux\" and \"windows\".""" + capabilities: Optional[List[str]] = rest_field() + """The capabilities or features which the Image supports. Not every capability of the Image is + listed. 
Capabilities in this list are considered of special interest and are generally related + to integration with other features in the Azure Batch service.""" + batch_support_end_of_life: Optional[datetime.datetime] = rest_field(name="batchSupportEndOfLife", format="rfc3339") + """The time when the Azure Batch service will stop accepting create Pool requests for the Image.""" + verification_type: Union[str, "_models.ImageVerificationType"] = rest_field(name="verificationType") + """Whether the Azure Batch service actively verifies that the Image is compatible with the + associated Compute Node agent SKU. Required. Known values are: \"verified\" and \"unverified\".""" + + @overload + def __init__( + self, + *, + node_agent_sku_id: str, + image_reference: "_models.ImageReference", + os_type: Union[str, "_models.OSType"], + verification_type: Union[str, "_models.ImageVerificationType"], + capabilities: Optional[List[str]] = None, + batch_support_end_of_life: Optional[datetime.datetime] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchTask(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Batch will retry Tasks when a recovery operation is triggered on a Node. + Examples of recovery operations include (but are not limited to) when an + unhealthy Node is rebooted or a Compute Node disappeared due to host failure. + Retries due to recovery operations are independent of and are not counted + against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal + retry due to a recovery operation may occur. Because of this, all Tasks should + be idempotent. This means Tasks need to tolerate being interrupted and + restarted without causing any corruption or duplicate data. The best practice + for long running Tasks is to use some form of checkpointing. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: A string that uniquely identifies the Task within the Job. The ID can contain any + combination of alphanumeric characters including hyphens and underscores, and cannot contain + more than 64 characters. + :vartype id: str + :ivar display_name: A display name for the Task. The display name need not be unique and can + contain any Unicode characters up to a maximum length of 1024. + :vartype display_name: str + :ivar url: The URL of the Task. + :vartype url: str + :ivar e_tag: The ETag of the Task. This is an opaque string. You can use it to detect whether + the Task has changed between requests. In particular, you can pass the ETag when updating a + Task to specify that your changes should take effect only if nobody else has modified the Task + in the meantime. + :vartype e_tag: str + :ivar last_modified: The last modified time of the Task. + :vartype last_modified: ~datetime.datetime + :ivar creation_time: The creation time of the Task. + :vartype creation_time: ~datetime.datetime + :ivar exit_conditions: How the Batch service should respond when the Task completes. + :vartype exit_conditions: ~azure.batch.models.ExitConditions + :ivar state: The current state of the Task. Known values are: "active", "preparing", "running", + and "completed".
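# Illustrative usage sketch (not generated code): these generated models expose two
# construction overloads, shown here with BatchSupportedImage as declared above. Keyword
# construction uses the Python-level names, while the single-mapping overload accepts the
# raw wire JSON keyed by the rest_field(name=...) values. The concrete values are invented.
from azure.batch.models import BatchSupportedImage

raw = {
    "nodeAgentSKUId": "batch.node.ubuntu 22.04",
    "imageReference": {"publisher": "canonical", "offer": "ubuntu-server", "sku": "22_04-lts"},
    "osType": "linux",
    "verificationType": "verified",
}
image = BatchSupportedImage(raw)               # initialize straight from raw JSON
print(image.node_agent_sku_id, image.os_type)  # read back through the Python-style names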
+ :vartype state: str or ~azure.batch.models.BatchTaskState + :ivar state_transition_time: The time at which the Task entered its current state. + :vartype state_transition_time: ~datetime.datetime + :ivar previous_state: The previous state of the Task. This property is not set if the Task is + in its initial Active state. Known values are: "active", "preparing", "running", and + "completed". + :vartype previous_state: str or ~azure.batch.models.BatchTaskState + :ivar previous_state_transition_time: The time at which the Task entered its previous state. + This property is not set if the Task is in its initial Active state. + :vartype previous_state_transition_time: ~datetime.datetime + :ivar command_line: The command line of the Task. For multi-instance Tasks, the command line is + executed as the primary Task, after the primary Task and all subtasks have finished executing + the coordination command line. The command line does not run under a shell, and therefore + cannot take advantage of shell features such as environment variable expansion. If you want to + take advantage of such features, you should invoke the shell in the command line, for example + using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line + refers to file paths, it should use a relative path (relative to the Task working directory), + or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :vartype command_line: str + :ivar container_settings: The settings for the container under which the Task runs. If the Pool + that will run this Task has containerConfiguration set, this must be set as well. If the Pool + that will run this Task doesn't have containerConfiguration set, this must not be set. When + this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of + Azure Batch directories on the node) are mapped into the container, all Task environment + variables are mapped into the container, and the Task command line is executed in the + container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be + reflected to the host disk, meaning that Batch file APIs will not be able to access those + files. + :vartype container_settings: ~azure.batch.models.BatchTaskContainerSettings + :ivar resource_files: A list of files that the Batch service will download to the Compute Node + before running the command line. For multi-instance Tasks, the resource files will only be + downloaded to the Compute Node on which the primary Task is executed. There is a maximum size + for the list of resource files. When the max size is exceeded, the request will fail and the + response error code will be RequestEntityTooLarge. If this occurs, the collection of + ResourceFiles must be reduced in size. This can be achieved using .zip files, Application + Packages, or Docker Containers. + :vartype resource_files: list[~azure.batch.models.ResourceFile] + :ivar output_files: A list of files that the Batch service will upload from the Compute Node + after running the command line. For multi-instance Tasks, the files will only be uploaded from + the Compute Node on which the primary Task is executed. + :vartype output_files: list[~azure.batch.models.OutputFile] + :ivar environment_settings: A list of environment variable settings for the Task. 
+ :vartype environment_settings: list[~azure.batch.models.EnvironmentSetting] + :ivar affinity_info: A locality hint that can be used by the Batch service to select a Compute + Node on which to start the new Task. + :vartype affinity_info: ~azure.batch.models.AffinityInfo + :ivar constraints: The execution constraints that apply to this Task. + :vartype constraints: ~azure.batch.models.BatchTaskConstraints + :ivar required_slots: The number of scheduling slots that the Task requires to run. The default + is 1. A Task can only be scheduled to run on a compute node if the node has enough free + scheduling slots available. For multi-instance Tasks, this must be 1. + :vartype required_slots: int + :ivar user_identity: The user identity under which the Task runs. If omitted, the Task runs as + a non-administrative user unique to the Task. + :vartype user_identity: ~azure.batch.models.UserIdentity + :ivar execution_info: Information about the execution of the Task. + :vartype execution_info: ~azure.batch.models.BatchTaskExecutionInfo + :ivar node_info: Information about the Compute Node on which the Task ran. + :vartype node_info: ~azure.batch.models.BatchNodeInfo + :ivar multi_instance_settings: An object that indicates that the Task is a multi-instance Task, + and contains information about how to run the multi-instance Task. + :vartype multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :ivar stats: Resource usage statistics for the Task. + :vartype stats: ~azure.batch.models.BatchTaskStatistics + :ivar depends_on: The Tasks that this Task depends on. This Task will not be scheduled until + all Tasks that it depends on have completed successfully. If any of those Tasks fail and + exhaust their retry counts, this Task will never be scheduled. + :vartype depends_on: ~azure.batch.models.BatchTaskDependencies + :ivar application_package_references: A list of Packages that the Batch service will deploy to + the Compute Node before running the command line. Application packages are downloaded and + deployed to a shared directory, not the Task working directory. Therefore, if a referenced + package is already on the Node, and is up to date, then it is not re-downloaded; the existing + copy on the Compute Node is used. If a referenced Package cannot be installed, for example + because the package has been deleted or because download failed, the Task fails. + :vartype application_package_references: + list[~azure.batch.models.BatchApplicationPackageReference] + :ivar authentication_token_settings: The settings for an authentication token that the Task can + use to perform Batch service operations. If this property is set, the Batch service provides + the Task with an authentication token which can be used to authenticate Batch service + operations without requiring an Account access key. The token is provided via the + AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out + using the token depend on the settings. For example, a Task can request Job permissions in + order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the + Job. + :vartype authentication_token_settings: ~azure.batch.models.AuthenticationTokenSettings + """ + + id: Optional[str] = rest_field(visibility=["read"]) + """A string that uniquely identifies the Task within the Job. 
The ID can contain any combination + of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 + characters.""" + display_name: Optional[str] = rest_field(name="displayName", visibility=["read"]) + """A display name for the Task. The display name need not be unique and can contain any Unicode + characters up to a maximum length of 1024.""" + url: Optional[str] = rest_field(visibility=["read"]) + """The URL of the Task.""" + e_tag: Optional[str] = rest_field(name="eTag", visibility=["read"]) + """The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has + changed between requests. In particular, you can pass the ETag when updating a Task to + specify that your changes should take effect only if nobody else has modified the Task in the + meantime.""" + last_modified: Optional[datetime.datetime] = rest_field(name="lastModified", visibility=["read"], format="rfc3339") + """The last modified time of the Task.""" + creation_time: Optional[datetime.datetime] = rest_field(name="creationTime", visibility=["read"], format="rfc3339") + """The creation time of the Task.""" + exit_conditions: Optional["_models.ExitConditions"] = rest_field(name="exitConditions", visibility=["read"]) + """How the Batch service should respond when the Task completes.""" + state: Optional[Union[str, "_models.BatchTaskState"]] = rest_field(visibility=["read"]) + """The current state of the Task. Known values are: \"active\", \"preparing\", \"running\", and + \"completed\".""" + state_transition_time: Optional[datetime.datetime] = rest_field( + name="stateTransitionTime", visibility=["read"], format="rfc3339" + ) + """The time at which the Task entered its current state.""" + previous_state: Optional[Union[str, "_models.BatchTaskState"]] = rest_field( + name="previousState", visibility=["read"] + ) + """The previous state of the Task. This property is not set if the Task is in its initial Active + state. Known values are: \"active\", \"preparing\", \"running\", and \"completed\".""" + previous_state_transition_time: Optional[datetime.datetime] = rest_field( + name="previousStateTransitionTime", visibility=["read"], format="rfc3339" + ) + """The time at which the Task entered its previous state. This property is not set if the Task is + in its initial Active state.""" + command_line: Optional[str] = rest_field(name="commandLine", visibility=["read"]) + """The command line of the Task. For multi-instance Tasks, the command line is executed as the + primary Task, after the primary Task and all subtasks have finished executing the coordination + command line. The command line does not run under a shell, and therefore cannot take advantage + of shell features such as environment variable expansion. If you want to take advantage of such + features, you should invoke the shell in the command line, for example using \"cmd /c + MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line refers to file + paths, it should use a relative path (relative to the Task working directory), or use the Batch + provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).""" + container_settings: Optional["_models.BatchTaskContainerSettings"] = rest_field( + name="containerSettings", visibility=["read"] + ) + """The settings for the container under which the Task runs. If the Pool that will run this Task + has containerConfiguration set, this must be set as well.
If the Pool that will run this Task + doesn't have containerConfiguration set, this must not be set. When this is specified, all + directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories + on the node) are mapped into the container, all Task environment variables are mapped into the + container, and the Task command line is executed in the container. Files produced in the + container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files.""" + resource_files: Optional[List["_models.ResourceFile"]] = rest_field(name="resourceFiles", visibility=["read"]) + """A list of files that the Batch service will download to the Compute Node before running the + command line. For multi-instance Tasks, the resource files will only be downloaded to the + Compute Node on which the primary Task is executed. There is a maximum size for the list of + resource files. When the max size is exceeded, the request will fail and the response error + code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be + reduced in size. This can be achieved using .zip files, Application Packages, or Docker + Containers.""" + output_files: Optional[List["_models.OutputFile"]] = rest_field(name="outputFiles", visibility=["read"]) + """A list of files that the Batch service will upload from the Compute Node after running the + command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node + on which the primary Task is executed.""" + environment_settings: Optional[List["_models.EnvironmentSetting"]] = rest_field( + name="environmentSettings", visibility=["read"] + ) + """A list of environment variable settings for the Task.""" + affinity_info: Optional["_models.AffinityInfo"] = rest_field(name="affinityInfo", visibility=["read"]) + """A locality hint that can be used by the Batch service to select a Compute Node on which to + start the new Task.""" + constraints: Optional["_models.BatchTaskConstraints"] = rest_field() + """The execution constraints that apply to this Task.""" + required_slots: Optional[int] = rest_field(name="requiredSlots", visibility=["read"]) + """The number of scheduling slots that the Task requires to run. The default is 1. A Task can only + be scheduled to run on a compute node if the node has enough free scheduling slots available. + For multi-instance Tasks, this must be 1.""" + user_identity: Optional["_models.UserIdentity"] = rest_field(name="userIdentity", visibility=["read"]) + """The user identity under which the Task runs. 
If omitted, the Task runs as a non-administrative + user unique to the Task.""" + execution_info: Optional["_models.BatchTaskExecutionInfo"] = rest_field(name="executionInfo", visibility=["read"]) + """Information about the execution of the Task.""" + node_info: Optional["_models.BatchNodeInfo"] = rest_field(name="nodeInfo", visibility=["read"]) + """Information about the Compute Node on which the Task ran.""" + multi_instance_settings: Optional["_models.MultiInstanceSettings"] = rest_field( + name="multiInstanceSettings", visibility=["read"] + ) + """An object that indicates that the Task is a multi-instance Task, and contains information about + how to run the multi-instance Task.""" + stats: Optional["_models.BatchTaskStatistics"] = rest_field(visibility=["read"]) + """Resource usage statistics for the Task.""" + depends_on: Optional["_models.BatchTaskDependencies"] = rest_field(name="dependsOn", visibility=["read"]) + """The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it + depends on have completed successfully. If any of those Tasks fail and exhaust their retry + counts, this Task will never be scheduled.""" + application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = rest_field( + name="applicationPackageReferences", visibility=["read"] + ) + """A list of Packages that the Batch service will deploy to the Compute Node before running the + command line. Application packages are downloaded and deployed to a shared directory, not the + Task working directory. Therefore, if a referenced package is already on the Node, and is up to + date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a + referenced Package cannot be installed, for example because the package has been deleted or + because download failed, the Task fails.""" + authentication_token_settings: Optional["_models.AuthenticationTokenSettings"] = rest_field( + name="authenticationTokenSettings", visibility=["read"] + ) + """The settings for an authentication token that the Task can use to perform Batch service + operations. If this property is set, the Batch service provides the Task with an authentication + token which can be used to authenticate Batch service operations without requiring an Account + access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. + The operations that the Task can carry out using the token depend on the settings. For example, + a Task can request Job permissions in order to add other Tasks to the Job, or check the status + of the Job or of other Tasks under the Job.""" + + @overload + def __init__( + self, + *, + constraints: Optional["_models.BatchTaskConstraints"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchTaskAddCollectionResult(_model_base.Model): + """The result of adding a collection of Tasks to a Job. - :param name: Required. - :type name: str - :param value: - :type value: str + :ivar value: The results of the add Task collection operation. 
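# Illustrative usage sketch (not generated code): the e_tag docstring above describes an
# optimistic-concurrency pattern. Only BatchTask and BatchTaskConstraints are defined in
# this hunk; 'client', the get_task/replace_task method names, and the if_match keyword
# are assumptions made purely for illustration.
from azure.batch.models import BatchTask, BatchTaskConstraints

task = client.get_task(job_id="job-1", task_id="task-1")  # hypothetical client call
client.replace_task(                                      # hypothetical client call
    job_id="job-1",
    task_id="task-1",
    task=BatchTask(constraints=BatchTaskConstraints(max_task_retry_count=2)),
    if_match=task.e_tag,  # apply only if nobody else modified the Task in the meantime
)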
+ :vartype value: list[~azure.batch.models.BatchTaskAddResult] """ - _validation = { - 'name': {'required': True}, - } + value: Optional[List["_models.BatchTaskAddResult"]] = rest_field() + """The results of the add Task collection operation.""" - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } + @overload + def __init__( + self, + *, + value: Optional[List["_models.BatchTaskAddResult"]] = None, + ): ... - def __init__(self, **kwargs): - super(HttpHeader, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.value = kwargs.get('value', None) + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) -class ImageInformation(Model): - """A reference to the Azure Virtual Machines Marketplace Image and additional - information about the Image. - All required parameters must be populated in order to send to Azure. - - :param node_agent_sku_id: Required. - :type node_agent_sku_id: str - :param image_reference: Required. The reference to the Azure Virtual - Machine's Marketplace Image. - :type image_reference: ~azure.batch.models.ImageReference - :param os_type: Required. Possible values include: 'linux', 'windows' - :type os_type: str or ~azure.batch.models.OSType - :param capabilities: Not every capability of the Image is listed. - Capabilities in this list are considered of special interest and are - generally related to integration with other features in the Azure Batch - service. - :type capabilities: list[str] - :param batch_support_end_of_life: - :type batch_support_end_of_life: datetime - :param verification_type: Required. Possible values include: 'verified', - 'unverified' - :type verification_type: str or ~azure.batch.models.VerificationType - """ - - _validation = { - 'node_agent_sku_id': {'required': True}, - 'image_reference': {'required': True}, - 'os_type': {'required': True}, - 'verification_type': {'required': True}, - } - - _attribute_map = { - 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, - 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, - 'os_type': {'key': 'osType', 'type': 'OSType'}, - 'capabilities': {'key': 'capabilities', 'type': '[str]'}, - 'batch_support_end_of_life': {'key': 'batchSupportEndOfLife', 'type': 'iso-8601'}, - 'verification_type': {'key': 'verificationType', 'type': 'VerificationType'}, - } - - def __init__(self, **kwargs): - super(ImageInformation, self).__init__(**kwargs) - self.node_agent_sku_id = kwargs.get('node_agent_sku_id', None) - self.image_reference = kwargs.get('image_reference', None) - self.os_type = kwargs.get('os_type', None) - self.capabilities = kwargs.get('capabilities', None) - self.batch_support_end_of_life = kwargs.get('batch_support_end_of_life', None) - self.verification_type = kwargs.get('verification_type', None) - - -class ImageReference(Model): - """A reference to an Azure Virtual Machines Marketplace Image or a Azure - Compute Gallery Image. To get the list of all Azure Marketplace Image - references verified by Azure Batch, see the 'List Supported Images' - operation. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :param publisher: For example, Canonical or MicrosoftWindowsServer. 
- :type publisher: str - :param offer: For example, UbuntuServer or WindowsServer. - :type offer: str - :param sku: For example, 18.04-LTS or 2019-Datacenter. - :type sku: str - :param version: A value of 'latest' can be specified to select the latest - version of an Image. If omitted, the default is 'latest'. - :type version: str - :param virtual_machine_image_id: This property is mutually exclusive with - other ImageReference properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same subscription as the - Azure Batch account. If the image version is not specified in the imageId, - the latest version will be used. For information about the firewall - settings for the Batch Compute Node agent to communicate with the Batch - service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - :type virtual_machine_image_id: str - :ivar exact_version: The specific version of the platform image or - marketplace image used to create the node. This read-only field differs - from 'version' only if the value specified for 'version' when the pool was - created was 'latest'. - :vartype exact_version: str - """ - - _validation = { - 'exact_version': {'readonly': True}, - } - - _attribute_map = { - 'publisher': {'key': 'publisher', 'type': 'str'}, - 'offer': {'key': 'offer', 'type': 'str'}, - 'sku': {'key': 'sku', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'virtual_machine_image_id': {'key': 'virtualMachineImageId', 'type': 'str'}, - 'exact_version': {'key': 'exactVersion', 'type': 'str'}, - } +class BatchTaskAddResult(_model_base.Model): + """Result for a single Task added as part of an add Task collection operation. - def __init__(self, **kwargs): - super(ImageReference, self).__init__(**kwargs) - self.publisher = kwargs.get('publisher', None) - self.offer = kwargs.get('offer', None) - self.sku = kwargs.get('sku', None) - self.version = kwargs.get('version', None) - self.virtual_machine_image_id = kwargs.get('virtual_machine_image_id', None) - self.exact_version = None + :ivar status: The status of the add Task request. Required. Known values are: "success", + "clienterror", and "servererror". + :vartype status: str or ~azure.batch.models.BatchTaskAddStatus + :ivar task_id: The ID of the Task for which this is the result. Required. + :vartype task_id: str + :ivar e_tag: The ETag of the Task, if the Task was successfully added. You can use this to + detect whether the Task has changed between requests. In particular, you can pass the ETag + with an Update Task request to specify that your changes should take effect only if nobody else + has modified the Job in the meantime. + :vartype e_tag: str + :ivar last_modified: The last modified time of the Task. + :vartype last_modified: ~datetime.datetime + :ivar location: The URL of the Task, if the Task was successfully added. + :vartype location: str + :ivar error: The error encountered while attempting to add the Task. + :vartype error: ~azure.batch.models.BatchError + """ + + status: Union[str, "_models.BatchTaskAddStatus"] = rest_field() + """The status of the add Task request. Required. Known values are: \"success\", \"clienterror\", + and \"servererror\".""" + task_id: str = rest_field(name="taskId") + """The ID of the Task for which this is the result. Required.""" + e_tag: Optional[str] = rest_field(name="eTag") + """The ETag of the Task, if the Task was successfully added.
You can use this to detect whether + the Task has changed between requests. In particular, you can pass the ETag with an Update + Task request to specify that your changes should take effect only if nobody else has modified + the Job in the meantime.""" + last_modified: Optional[datetime.datetime] = rest_field(name="lastModified", format="rfc3339") + """The last modified time of the Task.""" + location: Optional[str] = rest_field() + """The URL of the Task, if the Task was successfully added.""" + error: Optional["_models.BatchError"] = rest_field() + """The error encountered while attempting to add the Task.""" + + @overload + def __init__( + self, + *, + status: Union[str, "_models.BatchTaskAddStatus"], + task_id: str, + e_tag: Optional[str] = None, + last_modified: Optional[datetime.datetime] = None, + location: Optional[str] = None, + error: Optional["_models.BatchError"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchTaskConstraints(_model_base.Model): + """Execution constraints to apply to a Task. -class InboundEndpoint(Model): - """An inbound endpoint on a Compute Node. + :ivar max_wall_clock_time: The maximum elapsed time that the Task may run, measured from the + time the Task starts. If the Task does not complete within the time limit, the Batch service + terminates it. If this is not specified, there is no time limit on how long the Task may run. + :vartype max_wall_clock_time: ~datetime.timedelta + :ivar retention_time: The minimum time to retain the Task directory on the Compute Node where + it ran, from the time it completes execution. After this time, the Batch service may delete the + Task directory and all its contents. The default is 7 days, i.e. the Task directory will be + retained for 7 days unless the Compute Node is removed or the Job is deleted. + :vartype retention_time: ~datetime.timedelta + :ivar max_task_retry_count: The maximum number of times the Task may be retried. The Batch + service retries a Task if its exit code is nonzero. Note that this value specifically controls + the number of retries for the Task executable due to a nonzero exit code. The Batch service + will try the Task once, and may then retry up to this limit. For example, if the maximum retry + count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum + retry count is 0, the Batch service does not retry the Task after the first attempt. If the + maximum retry count is -1, the Batch service retries the Task without limit, however this is + not recommended for a start task or any task. The default value is 0 (no retries). + :vartype max_task_retry_count: int + """ + + max_wall_clock_time: Optional[datetime.timedelta] = rest_field(name="maxWallClockTime") + """The maximum elapsed time that the Task may run, measured from the time the Task starts. If the + Task does not complete within the time limit, the Batch service terminates it. If this is not + specified, there is no time limit on how long the Task may run.""" + retention_time: Optional[datetime.timedelta] = rest_field(name="retentionTime") + """The minimum time to retain the Task directory on the Compute Node where it ran, from the time + it completes execution.
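# Illustrative usage sketch (not generated code): consuming the BatchTaskAddCollectionResult
# and BatchTaskAddResult models defined above. 'add_result' is assumed to be a
# BatchTaskAddCollectionResult returned by an add-task-collection call that is not part of
# this hunk; the status strings are the known values listed in the docstrings.
for item in add_result.value or []:
    if item.status == "success":
        print(f"added {item.task_id} at {item.location} (etag {item.e_tag})")
    elif item.status == "servererror":
        # a common pattern: retry server-side failures, fix and resubmit client errors
        print(f"will retry {item.task_id}: {item.error}")
    else:  # "clienterror"
        print(f"rejected {item.task_id}: {item.error}")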
After this time, the Batch service may delete the Task directory and + all its contents. The default is 7 days, i.e. the Task directory will be retained for 7 days + unless the Compute Node is removed or the Job is deleted.""" + max_task_retry_count: Optional[int] = rest_field(name="maxTaskRetryCount") + """The maximum number of times the Task may be retried. The Batch service retries a Task if its + exit code is nonzero. Note that this value specifically controls the number of retries for the + Task executable due to a nonzero exit code. The Batch service will try the Task once, and may + then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task + up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch + service does not retry the Task after the first attempt. If the maximum retry count is -1, the + Batch service retries the Task without limit, however this is not recommended for a start task + or any task. The default value is 0 (no retries).""" + + @overload + def __init__( + self, + *, + max_wall_clock_time: Optional[datetime.timedelta] = None, + retention_time: Optional[datetime.timedelta] = None, + max_task_retry_count: Optional[int] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchTaskContainerExecutionInfo(_model_base.Model): + """Contains information about the container which a Task is executing. - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param protocol: Required. The protocol of the endpoint. Possible values - include: 'tcp', 'udp' - :type protocol: str or ~azure.batch.models.InboundEndpointProtocol - :param public_ip_address: Required. - :type public_ip_address: str - :param public_fqdn: Required. - :type public_fqdn: str - :param frontend_port: Required. The public port number of the endpoint. - :type frontend_port: int - :param backend_port: Required. The backend port number of the endpoint. - :type backend_port: int - """ - - _validation = { - 'name': {'required': True}, - 'protocol': {'required': True}, - 'public_ip_address': {'required': True}, - 'public_fqdn': {'required': True}, - 'frontend_port': {'required': True}, - 'backend_port': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, - 'public_ip_address': {'key': 'publicIPAddress', 'type': 'str'}, - 'public_fqdn': {'key': 'publicFQDN', 'type': 'str'}, - 'frontend_port': {'key': 'frontendPort', 'type': 'int'}, - 'backend_port': {'key': 'backendPort', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(InboundEndpoint, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.protocol = kwargs.get('protocol', None) - self.public_ip_address = kwargs.get('public_ip_address', None) - self.public_fqdn = kwargs.get('public_fqdn', None) - self.frontend_port = kwargs.get('frontend_port', None) - self.backend_port = kwargs.get('backend_port', None) - - -class InboundNATPool(Model): - """A inbound NAT Pool that can be used to address specific ports on Compute - Nodes in a Batch Pool externally. - - All required parameters must be populated in order to send to Azure. 
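# Illustrative usage sketch (not generated code): a worked example of the constraint
# semantics documented above. With max_task_retry_count=3 the service makes at most four
# attempts (one initial try plus three retries); 0 disables retries and -1 retries without
# limit. Wall-clock and retention times are plain datetime.timedelta values.
from datetime import timedelta

from azure.batch.models import BatchTaskConstraints

constraints = BatchTaskConstraints(
    max_wall_clock_time=timedelta(hours=2),  # terminate the Task if it runs longer than 2 hours
    retention_time=timedelta(days=7),        # keep the Task directory for 7 days (the default)
    max_task_retry_count=3,                  # 1 initial attempt + up to 3 retries
)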
- - :param name: Required. The name must be unique within a Batch Pool, can - contain letters, numbers, underscores, periods, and hyphens. Names must - start with a letter or number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If any invalid values are - provided the request fails with HTTP status code 400. - :type name: str - :param protocol: Required. The protocol of the endpoint. Possible values - include: 'tcp', 'udp' - :type protocol: str or ~azure.batch.models.InboundEndpointProtocol - :param backend_port: Required. The port number on the Compute Node. This - must be unique within a Batch Pool. Acceptable values are between 1 and - 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any - reserved values are provided the request fails with HTTP status code 400. - :type backend_port: int - :param frontend_port_range_start: Required. The first port number in the - range of external ports that will be used to provide inbound access to the - backendPort on individual Compute Nodes. Acceptable values range between 1 - and 65534 except ports from 50000 to 55000 which are reserved. All ranges - within a Pool must be distinct and cannot overlap. Each range must contain - at least 40 ports. If any reserved or overlapping values are provided the - request fails with HTTP status code 400. - :type frontend_port_range_start: int - :param frontend_port_range_end: Required. The last port number in the - range of external ports that will be used to provide inbound access to the - backendPort on individual Compute Nodes. Acceptable values range between 1 - and 65534 except ports from 50000 to 55000 which are reserved by the Batch - service. All ranges within a Pool must be distinct and cannot overlap. - Each range must contain at least 40 ports. If any reserved or overlapping - values are provided the request fails with HTTP status code 400. - :type frontend_port_range_end: int - :param network_security_group_rules: The maximum number of rules that can - be specified across all the endpoints on a Batch Pool is 25. If no network - security group rules are specified, a default rule will be created to - allow inbound access to the specified backendPort. If the maximum number - of network security group rules is exceeded the request fails with HTTP - status code 400. 
- :type network_security_group_rules: - list[~azure.batch.models.NetworkSecurityGroupRule] - """ - - _validation = { - 'name': {'required': True}, - 'protocol': {'required': True}, - 'backend_port': {'required': True}, - 'frontend_port_range_start': {'required': True}, - 'frontend_port_range_end': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, - 'backend_port': {'key': 'backendPort', 'type': 'int'}, - 'frontend_port_range_start': {'key': 'frontendPortRangeStart', 'type': 'int'}, - 'frontend_port_range_end': {'key': 'frontendPortRangeEnd', 'type': 'int'}, - 'network_security_group_rules': {'key': 'networkSecurityGroupRules', 'type': '[NetworkSecurityGroupRule]'}, - } - - def __init__(self, **kwargs): - super(InboundNATPool, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.protocol = kwargs.get('protocol', None) - self.backend_port = kwargs.get('backend_port', None) - self.frontend_port_range_start = kwargs.get('frontend_port_range_start', None) - self.frontend_port_range_end = kwargs.get('frontend_port_range_end', None) - self.network_security_group_rules = kwargs.get('network_security_group_rules', None) - - -class InstanceViewStatus(Model): - """The instance view status. + :ivar container_id: The ID of the container. + :vartype container_id: str + :ivar state: The state of the container. This is the state of the container according to the + Docker service. It is equivalent to the status field returned by "docker inspect". + :vartype state: str + :ivar error: Detailed error information about the container. This is the detailed error string + from the Docker service, if available. It is equivalent to the error field returned by "docker + inspect". + :vartype error: str + """ + + container_id: Optional[str] = rest_field(name="containerId") + """The ID of the container.""" + state: Optional[str] = rest_field() + """The state of the container. This is the state of the container according to the Docker service. + It is equivalent to the status field returned by \"docker inspect\".""" + error: Optional[str] = rest_field() + """Detailed error information about the container. This is the detailed error string from the + Docker service, if available. It is equivalent to the error field returned by \"docker + inspect\".""" + + @overload + def __init__( + self, + *, + container_id: Optional[str] = None, + state: Optional[str] = None, + error: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchTaskContainerSettings(_model_base.Model): + """The container settings for a Task. - :param code: - :type code: str - :param display_status: - :type display_status: str - :param level: Possible values include: 'Error', 'Info', 'Warning' - :type level: str or ~azure.batch.models.StatusLevelTypes - :param message: - :type message: str - :param time: The time of the status. 
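# Illustrative usage sketch (not generated code): reading the container execution details
# described above. 'container_info' is assumed to be a BatchTaskContainerExecutionInfo
# taken from a completed container-based Task; how it is obtained is outside this hunk.
if container_info is not None:
    # 'state' mirrors the status field of "docker inspect"; 'error' carries the detailed
    # Docker error string when one is available
    print(f"container {container_info.container_id}: state={container_info.state}")
    if container_info.error:
        print(f"docker reported: {container_info.error}")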
- :type time: str - """ - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'display_status': {'key': 'displayStatus', 'type': 'str'}, - 'level': {'key': 'level', 'type': 'StatusLevelTypes'}, - 'message': {'key': 'message', 'type': 'str'}, - 'time': {'key': 'time', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(InstanceViewStatus, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.display_status = kwargs.get('display_status', None) - self.level = kwargs.get('level', None) - self.message = kwargs.get('message', None) - self.time = kwargs.get('time', None) - - -class JobAddOptions(Model): - """Additional parameters for add operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobAddOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class JobAddParameter(Model): - """An Azure Batch Job to add. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. The ID can contain any combination of alphanumeric - characters including hyphens and underscores, and cannot contain more than - 64 characters. The ID is case-preserving and case-insensitive (that is, - you may not have two IDs within an Account that differ only by case). - :type id: str - :param display_name: The display name need not be unique and can contain - any Unicode characters up to a maximum length of 1024. - :type display_name: str - :param priority: The priority of the Job. Priority values can range from - -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. The default value is 0. - :type priority: int - :param max_parallel_tasks: The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 - or greater than 0 if specified. If not specified, the default value is -1, - which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created - using the update job API. Default value: -1 . - :type max_parallel_tasks: int - :param allow_task_preemption: Whether Tasks in this job can be preempted - by other high priority jobs. 
If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be - able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - :type allow_task_preemption: bool - :param constraints: The execution constraints for the Job. - :type constraints: ~azure.batch.models.JobConstraints - :param job_manager_task: Details of a Job Manager Task to be launched when - the Job is started. If the Job does not specify a Job Manager Task, the - user must explicitly add Tasks to the Job. If the Job does specify a Job - Manager Task, the Batch service creates the Job Manager Task when the Job - is created, and will try to schedule the Job Manager Task before - scheduling other Tasks in the Job. The Job Manager Task's typical purpose - is to control and/or monitor Job execution, for example by deciding what - additional Tasks to run, determining when the work is complete, etc. - (However, a Job Manager Task is not restricted to these activities - it is - a fully-fledged Task in the system and perform whatever actions are - required for the Job.) For example, a Job Manager Task might download a - file specified as a parameter, analyze the contents of that file and - submit additional Tasks based on those contents. - :type job_manager_task: ~azure.batch.models.JobManagerTask - :param job_preparation_task: The Job Preparation Task. If a Job has a Job - Preparation Task, the Batch service will run the Job Preparation Task on a - Node before starting any Tasks of that Job on that Compute Node. - :type job_preparation_task: ~azure.batch.models.JobPreparationTask - :param job_release_task: The Job Release Task. A Job Release Task cannot - be specified without also specifying a Job Preparation Task for the Job. - The Batch service runs the Job Release Task on the Nodes that have run the - Job Preparation Task. The primary purpose of the Job Release Task is to - undo changes to Compute Nodes made by the Job Preparation Task. Example - activities include deleting local files, or shutting down services that - were started as part of Job preparation. - :type job_release_task: ~azure.batch.models.JobReleaseTask - :param common_environment_settings: Individual Tasks can override an - environment setting specified here by specifying the same setting name - with a different value. - :type common_environment_settings: - list[~azure.batch.models.EnvironmentSetting] - :param pool_info: Required. The Pool on which the Batch service runs the - Job's Tasks. - :type pool_info: ~azure.batch.models.PoolInformation - :param on_all_tasks_complete: The action the Batch service should take - when all Tasks in the Job are in the completed state. Note that if a Job - contains no Tasks, then all Tasks are considered complete. This option is - therefore most commonly used with a Job Manager task; if you want to use - automatic Job termination without a Job Manager, you should initially set - onAllTasksComplete to noaction and update the Job properties to set - onAllTasksComplete to terminatejob once you have finished adding Tasks. - The default is noaction. Possible values include: 'noAction', - 'terminateJob' - :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete - :param on_task_failure: The action the Batch service should take when any - Task in the Job fails. A Task is considered to have failed if has a - failureInfo. 
A failureInfo is set if the Task completes with a non-zero - exit code after exhausting its retry count, or if there was an error - starting the Task, for example due to a resource file download error. The - default is noaction. Possible values include: 'noAction', - 'performExitOptionsJobAction' - :type on_task_failure: str or ~azure.batch.models.OnTaskFailure - :param metadata: The Batch service does not assign any meaning to - metadata; it is solely for the use of user code. - :type metadata: list[~azure.batch.models.MetadataItem] - :param uses_task_dependencies: Whether Tasks in the Job can define - dependencies on each other. The default is false. - :type uses_task_dependencies: bool - :param network_configuration: The network configuration for the Job. - :type network_configuration: ~azure.batch.models.JobNetworkConfiguration - """ - - _validation = { - 'id': {'required': True}, - 'pool_info': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'priority': {'key': 'priority', 'type': 'int'}, - 'max_parallel_tasks': {'key': 'maxParallelTasks', 'type': 'int'}, - 'allow_task_preemption': {'key': 'allowTaskPreemption', 'type': 'bool'}, - 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, - 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, - 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, - 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, - 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, - 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, - 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, - 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, - 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, - } - - def __init__(self, **kwargs): - super(JobAddParameter, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.display_name = kwargs.get('display_name', None) - self.priority = kwargs.get('priority', None) - self.max_parallel_tasks = kwargs.get('max_parallel_tasks', -1) - self.allow_task_preemption = kwargs.get('allow_task_preemption', None) - self.constraints = kwargs.get('constraints', None) - self.job_manager_task = kwargs.get('job_manager_task', None) - self.job_preparation_task = kwargs.get('job_preparation_task', None) - self.job_release_task = kwargs.get('job_release_task', None) - self.common_environment_settings = kwargs.get('common_environment_settings', None) - self.pool_info = kwargs.get('pool_info', None) - self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) - self.on_task_failure = kwargs.get('on_task_failure', None) - self.metadata = kwargs.get('metadata', None) - self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None) - self.network_configuration = kwargs.get('network_configuration', None) - - -class JobConstraints(Model): - """The execution constraints for a Job. - :param max_wall_clock_time: If the Job does not complete within the time - limit, the Batch service terminates it and any Tasks that are still - running. In this case, the termination reason will be - MaxWallClockTimeExpiry. 
If this property is not specified, there is no - time limit on how long the Job may run. - :type max_wall_clock_time: timedelta - :param max_task_retry_count: The maximum number of times each Task may be - retried. The Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of retries. The - Batch service will try each Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries a Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is - 0, the Batch service does not retry Tasks. If the maximum retry count is - -1, the Batch service retries Tasks without limit. The default value is 0 - (no retries). - :type max_task_retry_count: int - """ - - _attribute_map = { - 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, - 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(JobConstraints, self).__init__(**kwargs) - self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None) - self.max_task_retry_count = kwargs.get('max_task_retry_count', None) - - -class JobDeleteOptions(Model): - """Additional parameters for delete operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobDeleteOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobDisableOptions(Model): - """Additional parameters for disable operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobDisableOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobDisableParameter(Model): - """Options when disabling a Job. - - All required parameters must be populated in order to send to Azure. - - :param disable_tasks: Required. Possible values include: 'requeue', - 'terminate', 'wait' - :type disable_tasks: str or ~azure.batch.models.DisableJobOption - """ - - _validation = { - 'disable_tasks': {'required': True}, - } - - _attribute_map = { - 'disable_tasks': {'key': 'disableTasks', 'type': 'DisableJobOption'}, - } - - def __init__(self, **kwargs): - super(JobDisableParameter, self).__init__(**kwargs) - self.disable_tasks = kwargs.get('disable_tasks', None) - - -class JobEnableOptions(Model): - """Additional parameters for enable operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. 
The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobEnableOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobExecutionInformation(Model): - """Contains information about the execution of a Job in the Azure Batch - service. - - All required parameters must be populated in order to send to Azure. - - :param start_time: Required. This is the time at which the Job was - created. - :type start_time: datetime - :param end_time: This property is set only if the Job is in the completed - state. - :type end_time: datetime - :param pool_id: This element contains the actual Pool where the Job is - assigned. When you get Job details from the service, they also contain a - poolInfo element, which contains the Pool configuration data from when the - Job was added or updated. That poolInfo element may also contain a poolId - element. If it does, the two IDs are the same. If it does not, it means - the Job ran on an auto Pool, and this property contains the ID of that - auto Pool. - :type pool_id: str - :param scheduling_error: Details of any error encountered by the service - in starting the Job. This property is not set if there was no error - starting the Job. - :type scheduling_error: ~azure.batch.models.JobSchedulingError - :param terminate_reason: This property is set only if the Job is in the - completed state. If the Batch service terminates the Job, it sets the - reason as follows: JMComplete - the Job Manager Task completed, and - killJobOnCompletion was set to true. MaxWallClockTimeExpiry - the Job - reached its maxWallClockTime constraint. TerminateJobSchedule - the Job - ran as part of a schedule, and the schedule terminated. AllTasksComplete - - the Job's onAllTasksComplete attribute is set to terminatejob, and all - Tasks in the Job are complete. TaskFailed - the Job's onTaskFailure - attribute is set to performExitOptionsJobAction, and a Task in the Job - failed with an exit condition that specified a jobAction of terminatejob. - Any other string is a user-defined reason specified in a call to the - 'Terminate a Job' operation. 
- :type terminate_reason: str - """ - - _validation = { - 'start_time': {'required': True}, - } - - _attribute_map = { - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'pool_id': {'key': 'poolId', 'type': 'str'}, - 'scheduling_error': {'key': 'schedulingError', 'type': 'JobSchedulingError'}, - 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(JobExecutionInformation, self).__init__(**kwargs) - self.start_time = kwargs.get('start_time', None) - self.end_time = kwargs.get('end_time', None) - self.pool_id = kwargs.get('pool_id', None) - self.scheduling_error = kwargs.get('scheduling_error', None) - self.terminate_reason = kwargs.get('terminate_reason', None) - - -class JobGetOptions(Model): - """Additional parameters for get operation. - - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobGetOptions, self).__init__(**kwargs) - self.select = kwargs.get('select', None) - self.expand = kwargs.get('expand', None) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobGetTaskCountsOptions(Model): - """Additional parameters for get_task_counts operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobGetTaskCountsOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class JobListFromJobScheduleOptions(Model): - """Additional parameters for list_from_job_schedule operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Jobs can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . 
- :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobListFromJobScheduleOptions, self).__init__(**kwargs) - self.filter = kwargs.get('filter', None) - self.select = kwargs.get('select', None) - self.expand = kwargs.get('expand', None) - self.max_results = kwargs.get('max_results', 1000) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class JobListOptions(Model): - """Additional parameters for list operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Jobs can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobListOptions, self).__init__(**kwargs) - self.filter = kwargs.get('filter', None) - self.select = kwargs.get('select', None) - self.expand = kwargs.get('expand', None) - self.max_results = kwargs.get('max_results', 1000) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class JobListPreparationAndReleaseTaskStatusOptions(Model): - """Additional parameters for list_preparation_and_release_task_status - operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Tasks can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobListPreparationAndReleaseTaskStatusOptions, self).__init__(**kwargs) - self.filter = kwargs.get('filter', None) - self.select = kwargs.get('select', None) - self.max_results = kwargs.get('max_results', 1000) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class JobManagerTask(Model): - """Specifies details of a Job Manager Task. + :ivar container_run_options: Additional options to the container create command. These + additional options are supplied as arguments to the "docker create" command, in addition to + those controlled by the Batch Service. 
+ :vartype container_run_options: str + :ivar image_name: The Image to use to create the container in which the Task will run. This is + the full Image reference, as would be specified to "docker pull". If no tag is provided as part + of the Image name, the tag ":latest" is used as a default. Required. + :vartype image_name: str + :ivar registry: The private registry which contains the container Image. This setting can be + omitted if was already provided at Pool creation. + :vartype registry: ~azure.batch.models.ContainerRegistryReference + :ivar working_directory: The location of the container Task working directory. The default is + 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and "containerImageDefault". + :vartype working_directory: str or ~azure.batch.models.ContainerWorkingDirectory + """ + + container_run_options: Optional[str] = rest_field(name="containerRunOptions") + """Additional options to the container create command. These additional options are supplied as + arguments to the \"docker create\" command, in addition to those controlled by the Batch + Service.""" + image_name: str = rest_field(name="imageName") + """The Image to use to create the container in which the Task will run. This is the full Image + reference, as would be specified to \"docker pull\". If no tag is provided as part of the Image + name, the tag \":latest\" is used as a default. Required.""" + registry: Optional["_models.ContainerRegistryReference"] = rest_field() + """The private registry which contains the container Image. This setting can be omitted if was + already provided at Pool creation.""" + working_directory: Optional[Union[str, "_models.ContainerWorkingDirectory"]] = rest_field(name="workingDirectory") + """The location of the container Task working directory. The default is 'taskWorkingDirectory'. + Known values are: \"taskWorkingDirectory\" and \"containerImageDefault\".""" + + @overload + def __init__( + self, + *, + image_name: str, + container_run_options: Optional[str] = None, + registry: Optional["_models.ContainerRegistryReference"] = None, + working_directory: Optional[Union[str, "_models.ContainerWorkingDirectory"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchTaskCounts(_model_base.Model): + """The Task counts for a Job. - The Job Manager Task is automatically started when the Job is created. The - Batch service tries to schedule the Job Manager Task before any other Tasks - in the Job. When shrinking a Pool, the Batch service tries to preserve - Nodes where Job Manager Tasks are running for as long as possible (that is, - Compute Nodes running 'normal' Tasks are removed before Compute Nodes - running Job Manager Tasks). When a Job Manager Task fails and needs to be - restarted, the system tries to schedule it at the highest priority. If - there are no idle Compute Nodes available, the system may terminate one of - the running Tasks in the Pool and return it to the queue in order to make - room for the Job Manager Task to restart. Note that a Job Manager Task in - one Job does not have priority over Tasks in other Jobs. Across Jobs, only - Job level priorities are observed. 
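# --- Editorial usage sketch (not part of this diff) ---------------------------
# The keyword-only overload above shows how the regenerated models are meant to
# be constructed. Assuming the class shown here is the BatchTaskContainerSettings
# model referenced later in this change, and that it is importable from
# azure.batch.models, a minimal construction looks like this. The registry host
# and image name are placeholders, not values taken from this repository.

from azure.batch import models

container_settings = models.BatchTaskContainerSettings(
    image_name="myregistry.azurecr.io/worker:1.0",  # full "docker pull" style reference
    container_run_options="--rm",                   # extra "docker create" arguments
    working_directory="taskWorkingDirectory",       # one of the documented known values
)
# -------------------------------------------------------------------------------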
For example, if a Job Manager in a - priority 0 Job needs to be restarted, it will not displace Tasks of a - priority 1 Job. Batch will retry Tasks when a recovery operation is - triggered on a Node. Examples of recovery operations include (but are not - limited to) when an unhealthy Node is rebooted or a Compute Node - disappeared due to host failure. Retries due to recovery operations are - independent of and are not counted against the maxTaskRetryCount. Even if - the maxTaskRetryCount is 0, an internal retry due to a recovery operation - may occur. Because of this, all Tasks should be idempotent. This means - Tasks need to tolerate being interrupted and restarted without causing any - corruption or duplicate data. The best practice for long running Tasks is - to use some form of checkpointing. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. The ID can contain any combination of alphanumeric - characters including hyphens and underscores and cannot contain more than - 64 characters. - :type id: str - :param display_name: It need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - :type display_name: str - :param command_line: Required. The command line does not run under a - shell, and therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of such - features, you should invoke the shell in the command line, for example - using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If - the command line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - :type command_line: str - :param container_settings: The settings for the container under which the - Job Manager Task runs. If the Pool that will run this Task has - containerConfiguration set, this must be set as well. If the Pool that - will run this Task doesn't have containerConfiguration set, this must not - be set. When this is specified, all directories recursively below the - AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) - are mapped into the container, all Task environment variables are mapped - into the container, and the Task command line is executed in the - container. Files produced in the container outside of - AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning - that Batch file APIs will not be able to access those files. - :type container_settings: ~azure.batch.models.TaskContainerSettings - :param resource_files: Files listed under this element are located in the - Task's working directory. There is a maximum size for the list of resource - files. When the max size is exceeded, the request will fail and the - response error code will be RequestEntityTooLarge. If this occurs, the - collection of ResourceFiles must be reduced in size. This can be achieved - using .zip files, Application Packages, or Docker Containers. - :type resource_files: list[~azure.batch.models.ResourceFile] - :param output_files: For multi-instance Tasks, the files will only be - uploaded from the Compute Node on which the primary Task is executed. 
- :type output_files: list[~azure.batch.models.OutputFile] - :param environment_settings: - :type environment_settings: list[~azure.batch.models.EnvironmentSetting] - :param constraints: Constraints that apply to the Job Manager Task. - :type constraints: ~azure.batch.models.TaskConstraints - :param required_slots: The number of scheduling slots that the Task - requires to run. The default is 1. A Task can only be scheduled to run on - a compute node if the node has enough free scheduling slots available. For - multi-instance Tasks, this property is not supported and must not be - specified. - :type required_slots: int - :param kill_job_on_completion: Whether completion of the Job Manager Task - signifies completion of the entire Job. If true, when the Job Manager Task - completes, the Batch service marks the Job as complete. If any Tasks are - still running at this time (other than Job Release), those Tasks are - terminated. If false, the completion of the Job Manager Task does not - affect the Job status. In this case, you should either use the - onAllTasksComplete attribute to terminate the Job, or have a client or - user terminate the Job explicitly. An example of this is if the Job - Manager creates a set of Tasks but then takes no further role in their - execution. The default value is true. If you are using the - onAllTasksComplete and onTaskFailure attributes to control Job lifetime, - and using the Job Manager Task only to create the Tasks for the Job (not - to monitor progress), then it is important to set killJobOnCompletion to - false. - :type kill_job_on_completion: bool - :param user_identity: The user identity under which the Job Manager Task - runs. If omitted, the Task runs as a non-administrative user unique to the - Task. - :type user_identity: ~azure.batch.models.UserIdentity - :param run_exclusive: Whether the Job Manager Task requires exclusive use - of the Compute Node where it runs. If true, no other Tasks will run on the - same Node for as long as the Job Manager is running. If false, other Tasks - can run simultaneously with the Job Manager on a Compute Node. The Job - Manager Task counts normally against the Compute Node's concurrent Task - limit, so this is only relevant if the Compute Node allows multiple - concurrent Tasks. The default value is true. - :type run_exclusive: bool - :param application_package_references: Application Packages are downloaded - and deployed to a shared directory, not the Task working directory. - Therefore, if a referenced Application Package is already on the Compute - Node, and is up to date, then it is not re-downloaded; the existing copy - on the Compute Node is used. If a referenced Application Package cannot be - installed, for example because the package has been deleted or because - download failed, the Task fails. - :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param authentication_token_settings: The settings for an authentication - token that the Task can use to perform Batch service operations. If this - property is set, the Batch service provides the Task with an - authentication token which can be used to authenticate Batch service - operations without requiring an Account access key. The token is provided - via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations - that the Task can carry out using the token depend on the settings. 
For - example, a Task can request Job permissions in order to add other Tasks to - the Job, or check the status of the Job or of other Tasks under the Job. - :type authentication_token_settings: - ~azure.batch.models.AuthenticationTokenSettings - :param allow_low_priority_node: Whether the Job Manager Task may run on a - Spot/Low-priority Compute Node. The default value is true. - :type allow_low_priority_node: bool - """ - - _validation = { - 'id': {'required': True}, - 'command_line': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'command_line': {'key': 'commandLine', 'type': 'str'}, - 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, - 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, - 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, - 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, - 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, - 'required_slots': {'key': 'requiredSlots', 'type': 'int'}, - 'kill_job_on_completion': {'key': 'killJobOnCompletion', 'type': 'bool'}, - 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, - 'run_exclusive': {'key': 'runExclusive', 'type': 'bool'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, - 'allow_low_priority_node': {'key': 'allowLowPriorityNode', 'type': 'bool'}, - } - - def __init__(self, **kwargs): - super(JobManagerTask, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.display_name = kwargs.get('display_name', None) - self.command_line = kwargs.get('command_line', None) - self.container_settings = kwargs.get('container_settings', None) - self.resource_files = kwargs.get('resource_files', None) - self.output_files = kwargs.get('output_files', None) - self.environment_settings = kwargs.get('environment_settings', None) - self.constraints = kwargs.get('constraints', None) - self.required_slots = kwargs.get('required_slots', None) - self.kill_job_on_completion = kwargs.get('kill_job_on_completion', None) - self.user_identity = kwargs.get('user_identity', None) - self.run_exclusive = kwargs.get('run_exclusive', None) - self.application_package_references = kwargs.get('application_package_references', None) - self.authentication_token_settings = kwargs.get('authentication_token_settings', None) - self.allow_low_priority_node = kwargs.get('allow_low_priority_node', None) - - -class JobNetworkConfiguration(Model): - """The network configuration for the Job. - All required parameters must be populated in order to send to Azure. - - :param subnet_id: Required. The virtual network must be in the same region - and subscription as the Azure Batch Account. The specified subnet should - have enough free IP addresses to accommodate the number of Compute Nodes - which will run Tasks from the Job. This can be up to the number of Compute - Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have - the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) - role for the specified VNet so that Azure Batch service can schedule Tasks - on the Nodes. This can be verified by checking if the specified VNet has - any associated Network Security Groups (NSG). 
If communication to the - Nodes in the specified subnet is denied by an NSG, then the Batch service - will set the state of the Compute Nodes to unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound communication - from the Azure Batch service. For Pools created with a Virtual Machine - configuration, enable ports 29876 and 29877, as well as port 22 for Linux - and port 3389 for Windows. Port 443 is also required to be open for - outbound connections for communications to Azure Storage. For more details - see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration - :type subnet_id: str - """ - - _validation = { - 'subnet_id': {'required': True}, - } - - _attribute_map = { - 'subnet_id': {'key': 'subnetId', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(JobNetworkConfiguration, self).__init__(**kwargs) - self.subnet_id = kwargs.get('subnet_id', None) - - -class JobPatchOptions(Model): - """Additional parameters for patch operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobPatchOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobPatchParameter(Model): - """The set of changes to be made to a Job. - - :param priority: The priority of the Job. Priority values can range from - -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. If omitted, the priority of the Job is left unchanged. - :type priority: int - :param max_parallel_tasks: The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 - or greater than 0 if specified. If not specified, the default value is -1, - which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created - using the update job API. - :type max_parallel_tasks: int - :param allow_task_preemption: Whether Tasks in this job can be preempted - by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be - able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - :type allow_task_preemption: bool - :param on_all_tasks_complete: The action the Batch service should take - when all Tasks in the Job are in the completed state. If omitted, the - completion behavior is left unchanged. You may not change the value from - terminatejob to noaction - that is, once you have engaged automatic Job - termination, you cannot turn it off again. If you try to do this, the - request fails with an 'invalid property value' error response; if you are - calling the REST API directly, the HTTP status code is 400 (Bad Request). - Possible values include: 'noAction', 'terminateJob' - :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete - :param constraints: The execution constraints for the Job. If omitted, the - existing execution constraints are left unchanged. - :type constraints: ~azure.batch.models.JobConstraints - :param pool_info: The Pool on which the Batch service runs the Job's - Tasks. You may change the Pool for a Job only when the Job is disabled. - The Patch Job call will fail if you include the poolInfo element and the - Job is not disabled. If you specify an autoPoolSpecification in the - poolInfo, only the keepAlive property of the autoPoolSpecification can be - updated, and then only if the autoPoolSpecification has a - poolLifetimeOption of Job (other job properties can be updated as normal). 
- If omitted, the Job continues to run on its current Pool. - :type pool_info: ~azure.batch.models.PoolInformation - :param metadata: If omitted, the existing Job metadata is left unchanged. - :type metadata: list[~azure.batch.models.MetadataItem] - """ - - _attribute_map = { - 'priority': {'key': 'priority', 'type': 'int'}, - 'max_parallel_tasks': {'key': 'maxParallelTasks', 'type': 'int'}, - 'allow_task_preemption': {'key': 'allowTaskPreemption', 'type': 'bool'}, - 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, - 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, - 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - } - - def __init__(self, **kwargs): - super(JobPatchParameter, self).__init__(**kwargs) - self.priority = kwargs.get('priority', None) - self.max_parallel_tasks = kwargs.get('max_parallel_tasks', None) - self.allow_task_preemption = kwargs.get('allow_task_preemption', None) - self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) - self.constraints = kwargs.get('constraints', None) - self.pool_info = kwargs.get('pool_info', None) - self.metadata = kwargs.get('metadata', None) - - -class JobPreparationAndReleaseTaskExecutionInformation(Model): - """The status of the Job Preparation and Job Release Tasks on a Compute Node. + :ivar active: The number of Tasks in the active state. Required. + :vartype active: int + :ivar running: The number of Tasks in the running or preparing state. Required. + :vartype running: int + :ivar completed: The number of Tasks in the completed state. Required. + :vartype completed: int + :ivar succeeded: The number of Tasks which succeeded. A Task succeeds if its result (found in + the executionInfo property) is 'success'. Required. + :vartype succeeded: int + :ivar failed: The number of Tasks which failed. A Task fails if its result (found in the + executionInfo property) is 'failure'. Required. + :vartype failed: int + """ + + active: int = rest_field() + """The number of Tasks in the active state. Required.""" + running: int = rest_field() + """The number of Tasks in the running or preparing state. Required.""" + completed: int = rest_field() + """The number of Tasks in the completed state. Required.""" + succeeded: int = rest_field() + """The number of Tasks which succeeded. A Task succeeds if its result (found in the executionInfo + property) is 'success'. Required.""" + failed: int = rest_field() + """The number of Tasks which failed. A Task fails if its result (found in the executionInfo + property) is 'failure'. Required.""" + + @overload + def __init__( + self, + *, + active: int, + running: int, + completed: int, + succeeded: int, + failed: int, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchTaskCountsResult(_model_base.Model): + """The Task and TaskSlot counts for a Job. - :param pool_id: - :type pool_id: str - :param node_id: - :type node_id: str - :param node_url: - :type node_url: str - :param job_preparation_task_execution_info: Information about the - execution status of the Job Preparation Task on this Compute Node. 
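# --- Editorial usage sketch (not part of this diff) ---------------------------
# BatchTaskCounts above is a read model; its second __init__ overload shows that
# it can also be hydrated directly from a raw JSON mapping. A minimal sketch,
# assuming the class is importable from azure.batch.models:

from azure.batch import models

counts = models.BatchTaskCounts(
    {"active": 3, "running": 1, "completed": 10, "succeeded": 9, "failed": 1}
)
assert counts.failed == 1  # attribute access maps onto the wire fields via rest_field
# -------------------------------------------------------------------------------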
- :type job_preparation_task_execution_info: - ~azure.batch.models.JobPreparationTaskExecutionInformation - :param job_release_task_execution_info: Information about the execution - status of the Job Release Task on this Compute Node. This property is set - only if the Job Release Task has run on the Compute Node. - :type job_release_task_execution_info: - ~azure.batch.models.JobReleaseTaskExecutionInformation - """ - - _attribute_map = { - 'pool_id': {'key': 'poolId', 'type': 'str'}, - 'node_id': {'key': 'nodeId', 'type': 'str'}, - 'node_url': {'key': 'nodeUrl', 'type': 'str'}, - 'job_preparation_task_execution_info': {'key': 'jobPreparationTaskExecutionInfo', 'type': 'JobPreparationTaskExecutionInformation'}, - 'job_release_task_execution_info': {'key': 'jobReleaseTaskExecutionInfo', 'type': 'JobReleaseTaskExecutionInformation'}, - } - - def __init__(self, **kwargs): - super(JobPreparationAndReleaseTaskExecutionInformation, self).__init__(**kwargs) - self.pool_id = kwargs.get('pool_id', None) - self.node_id = kwargs.get('node_id', None) - self.node_url = kwargs.get('node_url', None) - self.job_preparation_task_execution_info = kwargs.get('job_preparation_task_execution_info', None) - self.job_release_task_execution_info = kwargs.get('job_release_task_execution_info', None) - - -class JobPreparationTask(Model): - """A Job Preparation Task to run before any Tasks of the Job on any given - Compute Node. - You can use Job Preparation to prepare a Node to run Tasks for the Job. - Activities commonly performed in Job Preparation include: Downloading - common resource files used by all the Tasks in the Job. The Job Preparation - Task can download these common resource files to the shared location on the - Node. (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service on the - Node so that all Tasks of that Job can communicate with it. If the Job - Preparation Task fails (that is, exhausts its retry count before exiting - with exit code 0), Batch will not run Tasks of this Job on the Node. The - Compute Node remains ineligible to run Tasks of this Job until it is - reimaged. The Compute Node remains active and can be used for other Jobs. - The Job Preparation Task can run multiple times on the same Node. - Therefore, you should write the Job Preparation Task to handle - re-execution. If the Node is rebooted, the Job Preparation Task is run - again on the Compute Node before scheduling any other Task of the Job, if - rerunOnNodeRebootAfterSuccess is true or if the Job Preparation Task did - not previously complete. If the Node is reimaged, the Job Preparation Task - is run again before scheduling any Task of the Job. Batch will retry Tasks - when a recovery operation is triggered on a Node. Examples of recovery - operations include (but are not limited to) when an unhealthy Node is - rebooted or a Compute Node disappeared due to host failure. Retries due to - recovery operations are independent of and are not counted against the - maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry - due to a recovery operation may occur. Because of this, all Tasks should be - idempotent. This means Tasks need to tolerate being interrupted and - restarted without causing any corruption or duplicate data. The best - practice for long running Tasks is to use some form of checkpointing. - - All required parameters must be populated in order to send to Azure. 
- - :param id: The ID can contain any combination of alphanumeric characters - including hyphens and underscores and cannot contain more than 64 - characters. If you do not specify this property, the Batch service assigns - a default value of 'jobpreparation'. No other Task in the Job can have the - same ID as the Job Preparation Task. If you try to submit a Task with the - same id, the Batch service rejects the request with error code - TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, - the HTTP status code is 409 (Conflict). - :type id: str - :param command_line: Required. The command line does not run under a - shell, and therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of such - features, you should invoke the shell in the command line, for example - using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If - the command line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch provided - environment variable + :ivar task_counts: The number of Tasks per state. Required. + :vartype task_counts: ~azure.batch.models.BatchTaskCounts + :ivar task_slot_counts: The number of TaskSlots required by Tasks per state. Required. + :vartype task_slot_counts: ~azure.batch.models.BatchTaskSlotCounts + """ + + task_counts: "_models.BatchTaskCounts" = rest_field(name="taskCounts") + """The number of Tasks per state. Required.""" + task_slot_counts: "_models.BatchTaskSlotCounts" = rest_field(name="taskSlotCounts") + """The number of TaskSlots required by Tasks per state. Required.""" + + @overload + def __init__( + self, + *, + task_counts: "_models.BatchTaskCounts", + task_slot_counts: "_models.BatchTaskSlotCounts", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchTaskCreateContent(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Parameters for creating an Azure Batch Task. + + All required parameters must be populated in order to send to server. + + :ivar id: A string that uniquely identifies the Task within the Job. The ID can contain any + combination of alphanumeric characters including hyphens and underscores, and cannot contain + more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not + have two IDs within a Job that differ only by case). Required. + :vartype id: str + :ivar display_name: A display name for the Task. The display name need not be unique and can + contain any Unicode characters up to a maximum length of 1024. + :vartype display_name: str + :ivar exit_conditions: How the Batch service should respond when the Task completes. + :vartype exit_conditions: ~azure.batch.models.ExitConditions + :ivar command_line: The command line of the Task. For multi-instance Tasks, the command line is + executed as the primary Task, after the primary Task and all subtasks have finished executing + the coordination command line. The command line does not run under a shell, and therefore + cannot take advantage of shell features such as environment variable expansion. 
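# --- Editorial usage sketch (not part of this diff) ---------------------------
# BatchTaskCountsResult above nests two count models. In practice this object is
# returned by the service rather than built by callers; the sketch below assumes
# BatchTaskSlotCounts (defined elsewhere in this file) mirrors the field names of
# BatchTaskCounts.

from azure.batch import models

result = models.BatchTaskCountsResult(
    task_counts=models.BatchTaskCounts(
        active=0, running=0, completed=5, succeeded=5, failed=0
    ),
    task_slot_counts=models.BatchTaskSlotCounts(
        active=0, running=0, completed=5, succeeded=5, failed=0
    ),
)
# -------------------------------------------------------------------------------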
If you want to + take advantage of such features, you should invoke the shell in the command line, for example + using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line + refers to file paths, it should use a relative path (relative to the Task working directory), + or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - :type command_line: str - :param container_settings: The settings for the container under which the - Job Preparation Task runs. When this is specified, all directories - recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch - directories on the node) are mapped into the container, all Task - environment variables are mapped into the container, and the Task command - line is executed in the container. Files produced in the container outside - of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning - that Batch file APIs will not be able to access those files. - :type container_settings: ~azure.batch.models.TaskContainerSettings - :param resource_files: Files listed under this element are located in the - Task's working directory. There is a maximum size for the list of - resource files. When the max size is exceeded, the request will fail and - the response error code will be RequestEntityTooLarge. If this occurs, the - collection of ResourceFiles must be reduced in size. This can be achieved - using .zip files, Application Packages, or Docker Containers. - :type resource_files: list[~azure.batch.models.ResourceFile] - :param environment_settings: - :type environment_settings: list[~azure.batch.models.EnvironmentSetting] - :param constraints: Constraints that apply to the Job Preparation Task. - :type constraints: ~azure.batch.models.TaskConstraints - :param wait_for_success: Whether the Batch service should wait for the Job - Preparation Task to complete successfully before scheduling any other - Tasks of the Job on the Compute Node. A Job Preparation Task has completed - successfully if it exits with exit code 0. If true and the Job Preparation - Task fails on a Node, the Batch service retries the Job Preparation Task - up to its maximum retry count (as specified in the constraints element). - If the Task has still not completed successfully after all retries, then - the Batch service will not schedule Tasks of the Job to the Node. The Node - remains active and eligible to run Tasks of other Jobs. If false, the - Batch service will not wait for the Job Preparation Task to complete. In - this case, other Tasks of the Job can start executing on the Compute Node - while the Job Preparation Task is still running; and even if the Job - Preparation Task fails, new Tasks will continue to be scheduled on the - Compute Node. The default value is true. - :type wait_for_success: bool - :param user_identity: The user identity under which the Job Preparation - Task runs. If omitted, the Task runs as a non-administrative user unique - to the Task on Windows Compute Nodes, or a non-administrative user unique - to the Pool on Linux Compute Nodes. - :type user_identity: ~azure.batch.models.UserIdentity - :param rerun_on_node_reboot_after_success: Whether the Batch service - should rerun the Job Preparation Task after a Compute Node reboots. The - Job Preparation Task is always rerun if a Compute Node is reimaged, or if - the Job Preparation Task did not complete (e.g. because the reboot - occurred while the Task was running). 
Therefore, you should always write a - Job Preparation Task to be idempotent and to behave correctly if run - multiple times. The default value is true. - :type rerun_on_node_reboot_after_success: bool - """ - - _validation = { - 'command_line': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'command_line': {'key': 'commandLine', 'type': 'str'}, - 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, - 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, - 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, - 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, - 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, - 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, - 'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'}, - } - - def __init__(self, **kwargs): - super(JobPreparationTask, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.command_line = kwargs.get('command_line', None) - self.container_settings = kwargs.get('container_settings', None) - self.resource_files = kwargs.get('resource_files', None) - self.environment_settings = kwargs.get('environment_settings', None) - self.constraints = kwargs.get('constraints', None) - self.wait_for_success = kwargs.get('wait_for_success', None) - self.user_identity = kwargs.get('user_identity', None) - self.rerun_on_node_reboot_after_success = kwargs.get('rerun_on_node_reboot_after_success', None) - - -class JobPreparationTaskExecutionInformation(Model): - """Contains information about the execution of a Job Preparation Task on a - Compute Node. - - All required parameters must be populated in order to send to Azure. - - :param start_time: Required. If the Task has been restarted or retried, - this is the most recent time at which the Task started running. - :type start_time: datetime - :param end_time: This property is set only if the Task is in the Completed - state. - :type end_time: datetime - :param state: Required. Possible values include: 'running', 'completed' - :type state: str or ~azure.batch.models.JobPreparationTaskState - :param task_root_directory: - :type task_root_directory: str - :param task_root_directory_url: - :type task_root_directory_url: str - :param exit_code: The exit code of the program specified on the Task - command line. This parameter is returned only if the Task is in the - completed state. The exit code for a process reflects the specific - convention implemented by the application developer for that process. If - you use the exit code value to make decisions in your code, be sure that - you know the exit code convention used by the application process. Note - that the exit code may also be generated by the Compute Node operating - system, such as when a process is forcibly terminated. - :type exit_code: int - :param container_info: Information about the container under which the - Task is executing. This property is set only if the Task runs in a - container context. - :type container_info: - ~azure.batch.models.TaskContainerExecutionInformation - :param failure_info: Information describing the Task failure, if any. This - property is set only if the Task is in the completed state and encountered - a failure. - :type failure_info: ~azure.batch.models.TaskFailureInformation - :param retry_count: Required. The number of times the Task has been - retried by the Batch service. 
Task application failures (non-zero exit - code) are retried, pre-processing errors (the Task could not be run) and - file upload errors are not retried. The Batch service will retry the Task - up to the limit specified by the constraints. Task application failures - (non-zero exit code) are retried, pre-processing errors (the Task could - not be run) and file upload errors are not retried. The Batch service will - retry the Task up to the limit specified by the constraints. - :type retry_count: int - :param last_retry_time: This property is set only if the Task was retried - (i.e. retryCount is nonzero). If present, this is typically the same as - startTime, but may be different if the Task has been restarted for reasons - other than retry; for example, if the Compute Node was rebooted during a - retry, then the startTime is updated but the lastRetryTime is not. - :type last_retry_time: datetime - :param result: The result of the Task execution. If the value is 'failed', - then the details of the failure can be found in the failureInfo property. - Possible values include: 'success', 'failure' - :type result: str or ~azure.batch.models.TaskExecutionResult - """ - - _validation = { - 'start_time': {'required': True}, - 'state': {'required': True}, - 'retry_count': {'required': True}, - } - - _attribute_map = { - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'state': {'key': 'state', 'type': 'JobPreparationTaskState'}, - 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, - 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, - 'exit_code': {'key': 'exitCode', 'type': 'int'}, - 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, - 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, - 'retry_count': {'key': 'retryCount', 'type': 'int'}, - 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, - 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, - } - - def __init__(self, **kwargs): - super(JobPreparationTaskExecutionInformation, self).__init__(**kwargs) - self.start_time = kwargs.get('start_time', None) - self.end_time = kwargs.get('end_time', None) - self.state = kwargs.get('state', None) - self.task_root_directory = kwargs.get('task_root_directory', None) - self.task_root_directory_url = kwargs.get('task_root_directory_url', None) - self.exit_code = kwargs.get('exit_code', None) - self.container_info = kwargs.get('container_info', None) - self.failure_info = kwargs.get('failure_info', None) - self.retry_count = kwargs.get('retry_count', None) - self.last_retry_time = kwargs.get('last_retry_time', None) - self.result = kwargs.get('result', None) - - -class JobReleaseTask(Model): - """A Job Release Task to run on Job completion on any Compute Node where the - Job has run. - - The Job Release Task runs when the Job ends, because of one of the - following: The user calls the Terminate Job API, or the Delete Job API - while the Job is still active, the Job's maximum wall clock time constraint - is reached, and the Job is still active, or the Job's Job Manager Task - completed, and the Job is configured to terminate when the Job Manager - completes. The Job Release Task runs on each Node where Tasks of the Job - have run and the Job Preparation Task ran and completed. 
If you reimage a - Node after it has run the Job Preparation Task, and the Job ends without - any further Tasks of the Job running on that Node (and hence the Job - Preparation Task does not re-run), then the Job Release Task does not run - on that Compute Node. If a Node reboots while the Job Release Task is still - running, the Job Release Task runs again when the Compute Node starts up. - The Job is not marked as complete until all Job Release Tasks have - completed. The Job Release Task runs in the background. It does not occupy - a scheduling slot; that is, it does not count towards the taskSlotsPerNode - limit specified on the Pool. - - All required parameters must be populated in order to send to Azure. - - :param id: The ID can contain any combination of alphanumeric characters - including hyphens and underscores and cannot contain more than 64 - characters. If you do not specify this property, the Batch service assigns - a default value of 'jobrelease'. No other Task in the Job can have the - same ID as the Job Release Task. If you try to submit a Task with the same - id, the Batch service rejects the request with error code - TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - :type id: str - :param command_line: Required. The command line does not run under a - shell, and therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of such - features, you should invoke the shell in the command line, for example - using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If - the command line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch provided - environment variable + Required. + :vartype command_line: str + :ivar container_settings: The settings for the container under which the Task runs. If the Pool + that will run this Task has containerConfiguration set, this must be set as well. If the Pool + that will run this Task doesn't have containerConfiguration set, this must not be set. When + this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of + Azure Batch directories on the node) are mapped into the container, all Task environment + variables are mapped into the container, and the Task command line is executed in the + container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be + reflected to the host disk, meaning that Batch file APIs will not be able to access those + files. + :vartype container_settings: ~azure.batch.models.BatchTaskContainerSettings + :ivar resource_files: A list of files that the Batch service will download to the Compute Node + before running the command line. For multi-instance Tasks, the resource files will only be + downloaded to the Compute Node on which the primary Task is executed. There is a maximum size + for the list of resource files. When the max size is exceeded, the request will fail and the + response error code will be RequestEntityTooLarge. If this occurs, the collection of + ResourceFiles must be reduced in size. This can be achieved using .zip files, Application + Packages, or Docker Containers. + :vartype resource_files: list[~azure.batch.models.ResourceFile] + :ivar output_files: A list of files that the Batch service will upload from the Compute Node + after running the command line. 
For multi-instance Tasks, the files will only be uploaded from + the Compute Node on which the primary Task is executed. + :vartype output_files: list[~azure.batch.models.OutputFile] + :ivar environment_settings: A list of environment variable settings for the Task. + :vartype environment_settings: list[~azure.batch.models.EnvironmentSetting] + :ivar affinity_info: A locality hint that can be used by the Batch service to select a Compute + Node on which to start the new Task. + :vartype affinity_info: ~azure.batch.models.AffinityInfo + :ivar constraints: The execution constraints that apply to this Task. If you do not specify + constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the + maxWallClockTime is infinite, and the retentionTime is 7 days. + :vartype constraints: ~azure.batch.models.BatchTaskConstraints + :ivar required_slots: The number of scheduling slots that the Task required to run. The default + is 1. A Task can only be scheduled to run on a compute node if the node has enough free + scheduling slots available. For multi-instance Tasks, this must be 1. + :vartype required_slots: int + :ivar user_identity: The user identity under which the Task runs. If omitted, the Task runs as + a non-administrative user unique to the Task. + :vartype user_identity: ~azure.batch.models.UserIdentity + :ivar multi_instance_settings: An object that indicates that the Task is a multi-instance Task, + and contains information about how to run the multi-instance Task. + :vartype multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :ivar depends_on: The Tasks that this Task depends on. This Task will not be scheduled until + all Tasks that it depends on have completed successfully. If any of those Tasks fail and + exhaust their retry counts, this Task will never be scheduled. If the Job does not have + usesTaskDependencies set to true, and this element is present, the request fails with error + code TaskDependenciesNotSpecifiedOnJob. + :vartype depends_on: ~azure.batch.models.BatchTaskDependencies + :ivar application_package_references: A list of Packages that the Batch service will deploy to + the Compute Node before running the command line. Application packages are downloaded and + deployed to a shared directory, not the Task working directory. Therefore, if a referenced + package is already on the Node, and is up to date, then it is not re-downloaded; the existing + copy on the Compute Node is used. If a referenced Package cannot be installed, for example + because the package has been deleted or because download failed, the Task fails. + :vartype application_package_references: + list[~azure.batch.models.BatchApplicationPackageReference] + :ivar authentication_token_settings: The settings for an authentication token that the Task can + use to perform Batch service operations. If this property is set, the Batch service provides + the Task with an authentication token which can be used to authenticate Batch service + operations without requiring an Account access key. The token is provided via the + AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out + using the token depend on the settings. For example, a Task can request Job permissions in + order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the + Job. 
+ :vartype authentication_token_settings: ~azure.batch.models.AuthenticationTokenSettings + """ + + id: str = rest_field() + """A string that uniquely identifies the Task within the Job. The ID can contain any combination + of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs + within a Job that differ only by case). Required.""" + display_name: Optional[str] = rest_field(name="displayName") + """A display name for the Task. The display name need not be unique and can contain any Unicode + characters up to a maximum length of 1024.""" + exit_conditions: Optional["_models.ExitConditions"] = rest_field(name="exitConditions") + """How the Batch service should respond when the Task completes.""" + command_line: str = rest_field(name="commandLine") + """The command line of the Task. For multi-instance Tasks, the command line is executed as the + primary Task, after the primary Task and all subtasks have finished executing the coordination + command line. The command line does not run under a shell, and therefore cannot take advantage + of shell features such as environment variable expansion. If you want to take advantage of such + features, you should invoke the shell in the command line, for example using \"cmd /c + MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line refers to file + paths, it should use a relative path (relative to the Task working directory), or use the Batch + provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - :type command_line: str - :param container_settings: The settings for the container under which the - Job Release Task runs. When this is specified, all directories recursively - below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on - the node) are mapped into the container, all Task environment variables - are mapped into the container, and the Task command line is executed in - the container. Files produced in the container outside of - AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning - that Batch file APIs will not be able to access those files. - :type container_settings: ~azure.batch.models.TaskContainerSettings - :param resource_files: Files listed under this element are located in the - Task's working directory. - :type resource_files: list[~azure.batch.models.ResourceFile] - :param environment_settings: - :type environment_settings: list[~azure.batch.models.EnvironmentSetting] - :param max_wall_clock_time: - :type max_wall_clock_time: timedelta - :param retention_time: The default is 7 days, i.e. the Task directory will - be retained for 7 days unless the Compute Node is removed or the Job is - deleted. - :type retention_time: timedelta - :param user_identity: The user identity under which the Job Release Task - runs. If omitted, the Task runs as a non-administrative user unique to the - Task. 
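The field declarations above introduce the two required properties of the new task-creation model (id and commandLine) alongside its optional settings. A minimal construction sketch follows; it is illustrative only and not part of the generated diff. The class name BatchTaskCreateContent and the ResourceFile keyword names (http_url, file_path) are assumptions here, since the class statement itself appears earlier in the diff and ResourceFile is only referenced by name above.

from azure.batch import models as batch_models

# Keyword-only construction, mirroring the keyword __init__ overload generated for this
# model (shown further down in the diff). Illustrative sketch only.
task = batch_models.BatchTaskCreateContent(          # class name assumed
    id="task-1",                                     # required; unique within the Job, case-insensitive
    command_line="/bin/sh -c 'python process.py'",   # required; invoke a shell explicitly if you need expansion
    resource_files=[
        batch_models.ResourceFile(                   # keyword names assumed from the wire names httpUrl/filePath
            http_url="https://<account>.blob.core.windows.net/inputs/data.txt?<sas>",
            file_path="data.txt",                    # relative to the Task working directory
        )
    ],
    required_slots=1,                                # optional; must be 1 for multi-instance Tasks
)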
- :type user_identity: ~azure.batch.models.UserIdentity - """ - - _validation = { - 'command_line': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'command_line': {'key': 'commandLine', 'type': 'str'}, - 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, - 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, - 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, - 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, - 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, - 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, - } - - def __init__(self, **kwargs): - super(JobReleaseTask, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.command_line = kwargs.get('command_line', None) - self.container_settings = kwargs.get('container_settings', None) - self.resource_files = kwargs.get('resource_files', None) - self.environment_settings = kwargs.get('environment_settings', None) - self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None) - self.retention_time = kwargs.get('retention_time', None) - self.user_identity = kwargs.get('user_identity', None) - - -class JobReleaseTaskExecutionInformation(Model): - """Contains information about the execution of a Job Release Task on a Compute - Node. - - All required parameters must be populated in order to send to Azure. - - :param start_time: Required. If the Task has been restarted or retried, - this is the most recent time at which the Task started running. - :type start_time: datetime - :param end_time: This property is set only if the Task is in the Completed - state. - :type end_time: datetime - :param state: Required. Possible values include: 'running', 'completed' - :type state: str or ~azure.batch.models.JobReleaseTaskState - :param task_root_directory: - :type task_root_directory: str - :param task_root_directory_url: - :type task_root_directory_url: str - :param exit_code: The exit code of the program specified on the Task - command line. This parameter is returned only if the Task is in the - completed state. The exit code for a process reflects the specific - convention implemented by the application developer for that process. If - you use the exit code value to make decisions in your code, be sure that - you know the exit code convention used by the application process. Note - that the exit code may also be generated by the Compute Node operating - system, such as when a process is forcibly terminated. - :type exit_code: int - :param container_info: Information about the container under which the - Task is executing. This property is set only if the Task runs in a - container context. - :type container_info: - ~azure.batch.models.TaskContainerExecutionInformation - :param failure_info: Information describing the Task failure, if any. This - property is set only if the Task is in the completed state and encountered - a failure. - :type failure_info: ~azure.batch.models.TaskFailureInformation - :param result: The result of the Task execution. If the value is 'failed', - then the details of the failure can be found in the failureInfo property. 
- Possible values include: 'success', 'failure' - :type result: str or ~azure.batch.models.TaskExecutionResult - """ - - _validation = { - 'start_time': {'required': True}, - 'state': {'required': True}, - } - - _attribute_map = { - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'state': {'key': 'state', 'type': 'JobReleaseTaskState'}, - 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, - 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, - 'exit_code': {'key': 'exitCode', 'type': 'int'}, - 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, - 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, - 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, - } - - def __init__(self, **kwargs): - super(JobReleaseTaskExecutionInformation, self).__init__(**kwargs) - self.start_time = kwargs.get('start_time', None) - self.end_time = kwargs.get('end_time', None) - self.state = kwargs.get('state', None) - self.task_root_directory = kwargs.get('task_root_directory', None) - self.task_root_directory_url = kwargs.get('task_root_directory_url', None) - self.exit_code = kwargs.get('exit_code', None) - self.container_info = kwargs.get('container_info', None) - self.failure_info = kwargs.get('failure_info', None) - self.result = kwargs.get('result', None) - - -class JobScheduleAddOptions(Model): - """Additional parameters for add operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobScheduleAddOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class JobScheduleAddParameter(Model): - """A Job Schedule that allows recurring Jobs by specifying when to run Jobs - and a specification used to create each Job. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. The ID can contain any combination of alphanumeric - characters including hyphens and underscores, and cannot contain more than - 64 characters. The ID is case-preserving and case-insensitive (that is, - you may not have two IDs within an Account that differ only by case). 
- :type id: str - :param display_name: The display name need not be unique and can contain - any Unicode characters up to a maximum length of 1024. - :type display_name: str - :param schedule: Required. The schedule according to which Jobs will be - created. All times are fixed respective to UTC and are not impacted by - daylight saving time. - :type schedule: ~azure.batch.models.Schedule - :param job_specification: Required. The details of the Jobs to be created - on this schedule. - :type job_specification: ~azure.batch.models.JobSpecification - :param metadata: The Batch service does not assign any meaning to - metadata; it is solely for the use of user code. - :type metadata: list[~azure.batch.models.MetadataItem] - """ - - _validation = { - 'id': {'required': True}, - 'schedule': {'required': True}, - 'job_specification': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'schedule': {'key': 'schedule', 'type': 'Schedule'}, - 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - } - - def __init__(self, **kwargs): - super(JobScheduleAddParameter, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.display_name = kwargs.get('display_name', None) - self.schedule = kwargs.get('schedule', None) - self.job_specification = kwargs.get('job_specification', None) - self.metadata = kwargs.get('metadata', None) - - -class JobScheduleDeleteOptions(Model): - """Additional parameters for delete operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobScheduleDeleteOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobScheduleDisableOptions(Model): - """Additional parameters for disable operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobScheduleDisableOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobScheduleEnableOptions(Model): - """Additional parameters for enable operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobScheduleEnableOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobScheduleExecutionInformation(Model): - """Contains information about Jobs that have been and will be run under a Job - Schedule. - - :param next_run_time: This property is meaningful only if the schedule is - in the active state when the time comes around. For example, if the - schedule is disabled, no Job will be created at nextRunTime unless the Job - is enabled before then. - :type next_run_time: datetime - :param recent_job: Information about the most recent Job under the Job - Schedule. This property is present only if the at least one Job has run - under the schedule. - :type recent_job: ~azure.batch.models.RecentJob - :param end_time: This property is set only if the Job Schedule is in the - completed state. - :type end_time: datetime - """ - - _attribute_map = { - 'next_run_time': {'key': 'nextRunTime', 'type': 'iso-8601'}, - 'recent_job': {'key': 'recentJob', 'type': 'RecentJob'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - } - - def __init__(self, **kwargs): - super(JobScheduleExecutionInformation, self).__init__(**kwargs) - self.next_run_time = kwargs.get('next_run_time', None) - self.recent_job = kwargs.get('recent_job', None) - self.end_time = kwargs.get('end_time', None) - - -class JobScheduleExistsOptions(Model): - """Additional parameters for exists operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. 
The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobScheduleExistsOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobScheduleGetOptions(Model): - """Additional parameters for get operation. - - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. 
- :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobScheduleGetOptions, self).__init__(**kwargs) - self.select = kwargs.get('select', None) - self.expand = kwargs.get('expand', None) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobScheduleListOptions(Model): - """Additional parameters for list operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Job Schedules can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobScheduleListOptions, self).__init__(**kwargs) - self.filter = kwargs.get('filter', None) - self.select = kwargs.get('select', None) - self.expand = kwargs.get('expand', None) - self.max_results = kwargs.get('max_results', 1000) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class JobSchedulePatchOptions(Model): - """Additional parameters for patch operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobSchedulePatchOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobSchedulePatchParameter(Model): - """The set of changes to be made to a Job Schedule. - - :param schedule: The schedule according to which Jobs will be created. All - times are fixed respective to UTC and are not impacted by daylight saving - time. If you do not specify this element, the existing schedule is left - unchanged. - :type schedule: ~azure.batch.models.Schedule - :param job_specification: The details of the Jobs to be created on this - schedule. Updates affect only Jobs that are started after the update has - taken place. Any currently active Job continues with the older - specification. - :type job_specification: ~azure.batch.models.JobSpecification - :param metadata: If you do not specify this element, existing metadata is - left unchanged. - :type metadata: list[~azure.batch.models.MetadataItem] - """ + Required.""" + container_settings: Optional["_models.BatchTaskContainerSettings"] = rest_field(name="containerSettings") + """The settings for the container under which the Task runs. If the Pool that will run this Task + has containerConfiguration set, this must be set as well. If the Pool that will run this Task + doesn't have containerConfiguration set, this must not be set. When this is specified, all + directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories + on the node) are mapped into the container, all Task environment variables are mapped into the + container, and the Task command line is executed in the container. Files produced in the + container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files.""" + resource_files: Optional[List["_models.ResourceFile"]] = rest_field(name="resourceFiles") + """A list of files that the Batch service will download to the Compute Node before running the + command line. For multi-instance Tasks, the resource files will only be downloaded to the + Compute Node on which the primary Task is executed. There is a maximum size for the list of + resource files. When the max size is exceeded, the request will fail and the response error + code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be + reduced in size. 
This can be achieved using .zip files, Application Packages, or Docker + Containers.""" + output_files: Optional[List["_models.OutputFile"]] = rest_field(name="outputFiles") + """A list of files that the Batch service will upload from the Compute Node after running the + command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node + on which the primary Task is executed.""" + environment_settings: Optional[List["_models.EnvironmentSetting"]] = rest_field(name="environmentSettings") + """A list of environment variable settings for the Task.""" + affinity_info: Optional["_models.AffinityInfo"] = rest_field(name="affinityInfo") + """A locality hint that can be used by the Batch service to select a Compute Node on which to + start the new Task.""" + constraints: Optional["_models.BatchTaskConstraints"] = rest_field() + """The execution constraints that apply to this Task. If you do not specify constraints, the + maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the maxWallClockTime is + infinite, and the retentionTime is 7 days.""" + required_slots: Optional[int] = rest_field(name="requiredSlots") + """The number of scheduling slots that the Task required to run. The default is 1. A Task can only + be scheduled to run on a compute node if the node has enough free scheduling slots available. + For multi-instance Tasks, this must be 1.""" + user_identity: Optional["_models.UserIdentity"] = rest_field(name="userIdentity") + """The user identity under which the Task runs. If omitted, the Task runs as a non-administrative + user unique to the Task.""" + multi_instance_settings: Optional["_models.MultiInstanceSettings"] = rest_field(name="multiInstanceSettings") + """An object that indicates that the Task is a multi-instance Task, and contains information about + how to run the multi-instance Task.""" + depends_on: Optional["_models.BatchTaskDependencies"] = rest_field(name="dependsOn") + """The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it + depends on have completed successfully. If any of those Tasks fail and exhaust their retry + counts, this Task will never be scheduled. If the Job does not have usesTaskDependencies set to + true, and this element is present, the request fails with error code + TaskDependenciesNotSpecifiedOnJob.""" + application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = rest_field( + name="applicationPackageReferences" + ) + """A list of Packages that the Batch service will deploy to the Compute Node before running the + command line. Application packages are downloaded and deployed to a shared directory, not the + Task working directory. Therefore, if a referenced package is already on the Node, and is up to + date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a + referenced Package cannot be installed, for example because the package has been deleted or + because download failed, the Task fails.""" + authentication_token_settings: Optional["_models.AuthenticationTokenSettings"] = rest_field( + name="authenticationTokenSettings" + ) + """The settings for an authentication token that the Task can use to perform Batch service + operations. If this property is set, the Batch service provides the Task with an authentication + token which can be used to authenticate Batch service operations without requiring an Account + access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. 
+ The operations that the Task can carry out using the token depend on the settings. For example, + a Task can request Job permissions in order to add other Tasks to the Job, or check the status + of the Job or of other Tasks under the Job.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + command_line: str, + display_name: Optional[str] = None, + exit_conditions: Optional["_models.ExitConditions"] = None, + container_settings: Optional["_models.BatchTaskContainerSettings"] = None, + resource_files: Optional[List["_models.ResourceFile"]] = None, + output_files: Optional[List["_models.OutputFile"]] = None, + environment_settings: Optional[List["_models.EnvironmentSetting"]] = None, + affinity_info: Optional["_models.AffinityInfo"] = None, + constraints: Optional["_models.BatchTaskConstraints"] = None, + required_slots: Optional[int] = None, + user_identity: Optional["_models.UserIdentity"] = None, + multi_instance_settings: Optional["_models.MultiInstanceSettings"] = None, + depends_on: Optional["_models.BatchTaskDependencies"] = None, + application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = None, + authentication_token_settings: Optional["_models.AuthenticationTokenSettings"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchTaskDependencies(_model_base.Model): + """Specifies any dependencies of a Task. Any Task that is explicitly specified or + within a dependency range must complete before the dependant Task will be + scheduled. - _attribute_map = { - 'schedule': {'key': 'schedule', 'type': 'Schedule'}, - 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - } + :ivar task_ids: The list of Task IDs that this Task depends on. All Tasks in this list must + complete successfully before the dependent Task can be scheduled. The taskIds collection is + limited to 64000 characters total (i.e. the combined length of all Task IDs). If the taskIds + collection exceeds the maximum length, the Add Task request fails with error code + TaskDependencyListTooLong. In this case consider using Task ID ranges instead. + :vartype task_ids: list[str] + :ivar task_id_ranges: The list of Task ID ranges that this Task depends on. All Tasks in all + ranges must complete successfully before the dependent Task can be scheduled. + :vartype task_id_ranges: list[~azure.batch.models.BatchTaskIdRange] + """ + + task_ids: Optional[List[str]] = rest_field(name="taskIds") + """The list of Task IDs that this Task depends on. All Tasks in this list must complete + successfully before the dependent Task can be scheduled. The taskIds collection is limited to + 64000 characters total (i.e. the combined length of all Task IDs). If the taskIds collection + exceeds the maximum length, the Add Task request fails with error code + TaskDependencyListTooLong. In this case consider using Task ID ranges instead.""" + task_id_ranges: Optional[List["_models.BatchTaskIdRange"]] = rest_field(name="taskIdRanges") + """The list of Task ID ranges that this Task depends on. 
All Tasks in all ranges must complete + successfully before the dependent Task can be scheduled.""" + + @overload + def __init__( + self, + *, + task_ids: Optional[List[str]] = None, + task_id_ranges: Optional[List["_models.BatchTaskIdRange"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchTaskExecutionInfo(_model_base.Model): + """Information about the execution of a Task. - def __init__(self, **kwargs): - super(JobSchedulePatchParameter, self).__init__(**kwargs) - self.schedule = kwargs.get('schedule', None) - self.job_specification = kwargs.get('job_specification', None) - self.metadata = kwargs.get('metadata', None) + :ivar start_time: The time at which the Task started running. 'Running' corresponds to the + running state, so if the Task specifies resource files or Packages, then the start time + reflects the time at which the Task started downloading or deploying these. If the Task has + been restarted or retried, this is the most recent time at which the Task started running. This + property is present only for Tasks that are in the running or completed state. + :vartype start_time: ~datetime.datetime + :ivar end_time: The time at which the Task completed. This property is set only if the Task is + in the Completed state. + :vartype end_time: ~datetime.datetime + :ivar exit_code: The exit code of the program specified on the Task command line. This property + is set only if the Task is in the completed state. In general, the exit code for a process + reflects the specific convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that you know the exit code + convention used by the application process. However, if the Batch service terminates the Task + (due to timeout, or user termination via the API) you may see an operating system-defined exit + code. + :vartype exit_code: int + :ivar container_info: Information about the container under which the Task is executing. This + property is set only if the Task runs in a container context. + :vartype container_info: ~azure.batch.models.BatchTaskContainerExecutionInfo + :ivar failure_info: Information describing the Task failure, if any. This property is set only + if the Task is in the completed state and encountered a failure. + :vartype failure_info: ~azure.batch.models.BatchTaskFailureInfo + :ivar retry_count: The number of times the Task has been retried by the Batch service. Task + application failures (non-zero exit code) are retried, pre-processing errors (the Task could + not be run) and file upload errors are not retried. The Batch service will retry the Task up to + the limit specified by the constraints. Required. + :vartype retry_count: int + :ivar last_retry_time: The most recent time at which a retry of the Task started running. This + element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this + is typically the same as startTime, but may be different if the Task has been restarted for + reasons other than retry; for example, if the Compute Node was rebooted during a retry, then + the startTime is updated but the lastRetryTime is not. 
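For the BatchTaskDependencies model completed above, here is a short illustrative sketch (not part of the generated diff) of both construction styles: keyword arguments matching the task_ids / task_id_ranges fields, and the raw wire-format mapping accepted by the second __init__ overload. BatchTaskIdRange taking start/end keywords is an assumption based on the REST taskIdRanges shape.

from azure.batch import models as batch_models

deps = batch_models.BatchTaskDependencies(
    task_ids=["setup-task"],                                           # explicit upstream Task IDs
    task_id_ranges=[batch_models.BatchTaskIdRange(start=1, end=100)],  # start/end keywords assumed
)

# The generated models also accept the raw JSON (wire-name) mapping directly:
deps_from_json = batch_models.BatchTaskDependencies({"taskIds": ["setup-task"]})

As documented in the depends_on field above, the Job must have usesTaskDependencies set to true; otherwise adding a Task that carries dependencies fails with error code TaskDependenciesNotSpecifiedOnJob.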
+ :vartype last_retry_time: ~datetime.datetime + :ivar requeue_count: The number of times the Task has been requeued by the Batch service as the + result of a user request. When the user removes Compute Nodes from a Pool (by + resizing/shrinking the pool) or when the Job is being disabled, the user can specify that + running Tasks on the Compute Nodes be requeued for execution. This count tracks how many times + the Task has been requeued for these reasons. Required. + :vartype requeue_count: int + :ivar last_requeue_time: The most recent time at which the Task has been requeued by the Batch + service as the result of a user request. This property is set only if the requeueCount is + nonzero. + :vartype last_requeue_time: ~datetime.datetime + :ivar result: The result of the Task execution. If the value is 'failed', then the details of + the failure can be found in the failureInfo property. Known values are: "success" and + "failure". + :vartype result: str or ~azure.batch.models.BatchTaskExecutionResult + """ + + start_time: Optional[datetime.datetime] = rest_field(name="startTime", format="rfc3339") + """The time at which the Task started running. 'Running' corresponds to the running state, so if + the Task specifies resource files or Packages, then the start time reflects the time at which + the Task started downloading or deploying these. If the Task has been restarted or retried, + this is the most recent time at which the Task started running. This property is present only + for Tasks that are in the running or completed state.""" + end_time: Optional[datetime.datetime] = rest_field(name="endTime", format="rfc3339") + """The time at which the Task completed. This property is set only if the Task is in the Completed + state.""" + exit_code: Optional[int] = rest_field(name="exitCode") + """The exit code of the program specified on the Task command line. This property is set only if + the Task is in the completed state. In general, the exit code for a process reflects the + specific convention implemented by the application developer for that process. If you use the + exit code value to make decisions in your code, be sure that you know the exit code convention + used by the application process. However, if the Batch service terminates the Task (due to + timeout, or user termination via the API) you may see an operating system-defined exit code.""" + container_info: Optional["_models.BatchTaskContainerExecutionInfo"] = rest_field(name="containerInfo") + """Information about the container under which the Task is executing. This property is set only if + the Task runs in a container context.""" + failure_info: Optional["_models.BatchTaskFailureInfo"] = rest_field(name="failureInfo") + """Information describing the Task failure, if any. This property is set only if the Task is in + the completed state and encountered a failure.""" + retry_count: int = rest_field(name="retryCount") + """The number of times the Task has been retried by the Batch service. Task application failures + (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file + upload errors are not retried. The Batch service will retry the Task up to the limit specified + by the constraints. Required.""" + last_retry_time: Optional[datetime.datetime] = rest_field(name="lastRetryTime", format="rfc3339") + """The most recent time at which a retry of the Task started running. This element is present only + if the Task was retried (i.e. retryCount is nonzero). 
If present, this is typically the same as + startTime, but may be different if the Task has been restarted for reasons other than retry; + for example, if the Compute Node was rebooted during a retry, then the startTime is updated but + the lastRetryTime is not.""" + requeue_count: int = rest_field(name="requeueCount") + """The number of times the Task has been requeued by the Batch service as the result of a user + request. When the user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or + when the Job is being disabled, the user can specify that running Tasks on the Compute Nodes be + requeued for execution. This count tracks how many times the Task has been requeued for these + reasons. Required.""" + last_requeue_time: Optional[datetime.datetime] = rest_field(name="lastRequeueTime", format="rfc3339") + """The most recent time at which the Task has been requeued by the Batch service as the result of + a user request. This property is set only if the requeueCount is nonzero.""" + result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = rest_field() + """The result of the Task execution. If the value is 'failed', then the details of the failure can + be found in the failureInfo property. Known values are: \"success\" and \"failure\".""" + + @overload + def __init__( + self, + *, + retry_count: int, + requeue_count: int, + start_time: Optional[datetime.datetime] = None, + end_time: Optional[datetime.datetime] = None, + exit_code: Optional[int] = None, + container_info: Optional["_models.BatchTaskContainerExecutionInfo"] = None, + failure_info: Optional["_models.BatchTaskFailureInfo"] = None, + last_retry_time: Optional[datetime.datetime] = None, + last_requeue_time: Optional[datetime.datetime] = None, + result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchTaskFailureInfo(_model_base.Model): + """Information about a Task failure. -class JobScheduleStatistics(Model): - """Resource usage statistics for a Job Schedule. - All required parameters must be populated in order to send to Azure. - - :param url: Required. - :type url: str - :param start_time: Required. - :type start_time: datetime - :param last_update_time: Required. - :type last_update_time: datetime - :param user_cpu_time: Required. - :type user_cpu_time: timedelta - :param kernel_cpu_time: Required. - :type kernel_cpu_time: timedelta - :param wall_clock_time: Required. The wall clock time is the elapsed time - from when the Task started running on a Compute Node to when it finished - (or to the last time the statistics were updated, if the Task had not - finished by then). If a Task was retried, this includes the wall clock - time of all the Task retries. - :type wall_clock_time: timedelta - :param read_iops: Required. The total number of disk read operations made - by all Tasks in all Jobs created under the schedule. - :type read_iops: long - :param write_iops: Required. The total number of disk write operations - made by all Tasks in all Jobs created under the schedule. - :type write_iops: long - :param read_io_gi_b: Required. The total gibibytes read from disk by all - Tasks in all Jobs created under the schedule. 
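The BatchTaskExecutionInfo model defined above is read-only state reported by the service. A hedged sketch of inspecting it follows (illustrative, not part of the generated diff); the BatchClient constructor arguments, the get_task call, and the execution_info attribute name are assumptions based on the surrounding generated surface.

from azure.batch import BatchClient                  # import path assumed for the new client
from azure.identity import DefaultAzureCredential

client = BatchClient(
    endpoint="https://<account>.<region>.batch.azure.com",  # constructor signature assumed
    credential=DefaultAzureCredential(),
)

task = client.get_task(job_id="job-1", task_id="task-1")    # method name assumed
info = task.execution_info                                   # attribute name assumed
if info is not None and info.result == "failure":
    # failure_info is only populated for completed Tasks that encountered a failure
    print(info.exit_code, info.failure_info)
elif info is not None:
    print(f"retries={info.retry_count}, requeues={info.requeue_count}")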
- :type read_io_gi_b: float - :param write_io_gi_b: Required. The total gibibytes written to disk by all - Tasks in all Jobs created under the schedule. - :type write_io_gi_b: float - :param num_succeeded_tasks: Required. The total number of Tasks - successfully completed during the given time range in Jobs created under - the schedule. A Task completes successfully if it returns exit code 0. - :type num_succeeded_tasks: long - :param num_failed_tasks: Required. The total number of Tasks that failed - during the given time range in Jobs created under the schedule. A Task - fails if it exhausts its maximum retry count without returning exit code - 0. - :type num_failed_tasks: long - :param num_task_retries: Required. The total number of retries during the - given time range on all Tasks in all Jobs created under the schedule. - :type num_task_retries: long - :param wait_time: Required. This value is only reported in the Account - lifetime statistics; it is not included in the Job statistics. - :type wait_time: timedelta - """ - - _validation = { - 'url': {'required': True}, - 'start_time': {'required': True}, - 'last_update_time': {'required': True}, - 'user_cpu_time': {'required': True}, - 'kernel_cpu_time': {'required': True}, - 'wall_clock_time': {'required': True}, - 'read_iops': {'required': True}, - 'write_iops': {'required': True}, - 'read_io_gi_b': {'required': True}, - 'write_io_gi_b': {'required': True}, - 'num_succeeded_tasks': {'required': True}, - 'num_failed_tasks': {'required': True}, - 'num_task_retries': {'required': True}, - 'wait_time': {'required': True}, - } - - _attribute_map = { - 'url': {'key': 'url', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, - 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, - 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, - 'read_iops': {'key': 'readIOps', 'type': 'long'}, - 'write_iops': {'key': 'writeIOps', 'type': 'long'}, - 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, - 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, - 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, - 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, - 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, - 'wait_time': {'key': 'waitTime', 'type': 'duration'}, - } - - def __init__(self, **kwargs): - super(JobScheduleStatistics, self).__init__(**kwargs) - self.url = kwargs.get('url', None) - self.start_time = kwargs.get('start_time', None) - self.last_update_time = kwargs.get('last_update_time', None) - self.user_cpu_time = kwargs.get('user_cpu_time', None) - self.kernel_cpu_time = kwargs.get('kernel_cpu_time', None) - self.wall_clock_time = kwargs.get('wall_clock_time', None) - self.read_iops = kwargs.get('read_iops', None) - self.write_iops = kwargs.get('write_iops', None) - self.read_io_gi_b = kwargs.get('read_io_gi_b', None) - self.write_io_gi_b = kwargs.get('write_io_gi_b', None) - self.num_succeeded_tasks = kwargs.get('num_succeeded_tasks', None) - self.num_failed_tasks = kwargs.get('num_failed_tasks', None) - self.num_task_retries = kwargs.get('num_task_retries', None) - self.wait_time = kwargs.get('wait_time', None) - - -class JobScheduleTerminateOptions(Model): - """Additional parameters for terminate operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. 
The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobScheduleTerminateOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobScheduleUpdateOptions(Model): - """Additional parameters for update operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . 
- :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobScheduleUpdateOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobScheduleUpdateParameter(Model): - """The set of changes to be made to a Job Schedule. - - All required parameters must be populated in order to send to Azure. - - :param schedule: Required. The schedule according to which Jobs will be - created. All times are fixed respective to UTC and are not impacted by - daylight saving time. If you do not specify this element, it is equivalent - to passing the default schedule: that is, a single Job scheduled to run - immediately. - :type schedule: ~azure.batch.models.Schedule - :param job_specification: Required. Details of the Jobs to be created on - this schedule. Updates affect only Jobs that are started after the update - has taken place. Any currently active Job continues with the older - specification. - :type job_specification: ~azure.batch.models.JobSpecification - :param metadata: If you do not specify this element, it takes the default - value of an empty list; in effect, any existing metadata is deleted. 
- :type metadata: list[~azure.batch.models.MetadataItem] - """ - - _validation = { - 'schedule': {'required': True}, - 'job_specification': {'required': True}, - } - - _attribute_map = { - 'schedule': {'key': 'schedule', 'type': 'Schedule'}, - 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - } - - def __init__(self, **kwargs): - super(JobScheduleUpdateParameter, self).__init__(**kwargs) - self.schedule = kwargs.get('schedule', None) - self.job_specification = kwargs.get('job_specification', None) - self.metadata = kwargs.get('metadata', None) - - -class JobSchedulingError(Model): - """An error encountered by the Batch service when scheduling a Job. + :ivar category: The category of the Task error. Required. Known values are: "usererror" and + "servererror". + :vartype category: str or ~azure.batch.models.ErrorCategory + :ivar code: An identifier for the Task error. Codes are invariant and are intended to be + consumed programmatically. + :vartype code: str + :ivar message: A message describing the Task error, intended to be suitable for display in a + user interface. + :vartype message: str + :ivar details: A list of additional details related to the error. + :vartype details: list[~azure.batch.models.NameValuePair] + """ + + category: Union[str, "_models.ErrorCategory"] = rest_field() + """The category of the Task error. Required. Known values are: \"usererror\" and \"servererror\".""" + code: Optional[str] = rest_field() + """An identifier for the Task error. Codes are invariant and are intended to be consumed + programmatically.""" + message: Optional[str] = rest_field() + """A message describing the Task error, intended to be suitable for display in a user interface.""" + details: Optional[List["_models.NameValuePair"]] = rest_field() + """A list of additional details related to the error.""" + + @overload + def __init__( + self, + *, + category: Union[str, "_models.ErrorCategory"], + code: Optional[str] = None, + message: Optional[str] = None, + details: Optional[List["_models.NameValuePair"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchTaskGroup(_model_base.Model): + """A collection of Azure Batch Tasks to add. - All required parameters must be populated in order to send to Azure. + All required parameters must be populated in order to send to server. - :param category: Required. The category of the Job scheduling error. - Possible values include: 'userError', 'serverError' - :type category: str or ~azure.batch.models.ErrorCategory - :param code: - :type code: str - :param message: - :type message: str - :param details: - :type details: list[~azure.batch.models.NameValuePair] + :ivar value: The collection of Tasks to add. The maximum count of Tasks is 100. The total + serialized size of this collection must be less than 1MB. If it is greater than 1MB (for + example if each Task has 100's of resource files or environment variables), the request will + fail with code 'RequestBodyTooLarge' and should be retried again with fewer Tasks. Required. 
+ :vartype value: list[~azure.batch.models.BatchTaskCreateContent] """ - _validation = { - 'category': {'required': True}, - } + value: List["_models.BatchTaskCreateContent"] = rest_field() + """The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of + this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has + 100's of resource files or environment variables), the request will fail with code + 'RequestBodyTooLarge' and should be retried again with fewer Tasks. Required.""" - _attribute_map = { - 'category': {'key': 'category', 'type': 'ErrorCategory'}, - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'details': {'key': 'details', 'type': '[NameValuePair]'}, - } + @overload + def __init__( + self, + *, + value: List["_models.BatchTaskCreateContent"], + ): ... - def __init__(self, **kwargs): - super(JobSchedulingError, self).__init__(**kwargs) - self.category = kwargs.get('category', None) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) - self.details = kwargs.get('details', None) + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) -class JobSpecification(Model): - """Specifies details of the Jobs to be created on a schedule. - All required parameters must be populated in order to send to Azure. - - :param priority: The priority of Jobs created under this schedule. - Priority values can range from -1000 to 1000, with -1000 being the lowest - priority and 1000 being the highest priority. The default value is 0. This - priority is used as the default for all Jobs under the Job Schedule. You - can update a Job's priority after it has been created using by using the - update Job API. - :type priority: int - :param allow_task_preemption: Whether Tasks in this job can be preempted - by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be - able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - :type allow_task_preemption: bool - :param max_parallel_tasks: The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 - or greater than 0 if specified. If not specified, the default value is -1, - which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created - using the update job API. Default value: -1 . - :type max_parallel_tasks: int - :param display_name: The name need not be unique and can contain any - Unicode characters up to a maximum length of 1024. - :type display_name: str - :param uses_task_dependencies: Whether Tasks in the Job can define - dependencies on each other. The default is false. - :type uses_task_dependencies: bool - :param on_all_tasks_complete: The action the Batch service should take - when all Tasks in a Job created under this schedule are in the completed - state. Note that if a Job contains no Tasks, then all Tasks are considered - complete. 
This option is therefore most commonly used with a Job Manager - task; if you want to use automatic Job termination without a Job Manager, - you should initially set onAllTasksComplete to noaction and update the Job - properties to set onAllTasksComplete to terminatejob once you have - finished adding Tasks. The default is noaction. Possible values include: - 'noAction', 'terminateJob' - :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete - :param on_task_failure: The action the Batch service should take when any - Task fails in a Job created under this schedule. A Task is considered to - have failed if it has a failureInfo. A failureInfo is set - if the Task completes with a non-zero exit code after exhausting its retry - count, or if there was an error starting the Task, for example due to a - resource file download error. The default is noaction. Possible values - include: 'noAction', 'performExitOptionsJobAction' - :type on_task_failure: str or ~azure.batch.models.OnTaskFailure - :param network_configuration: The network configuration for the Job. - :type network_configuration: ~azure.batch.models.JobNetworkConfiguration - :param constraints: The execution constraints for Jobs created under this - schedule. - :type constraints: ~azure.batch.models.JobConstraints - :param job_manager_task: The details of a Job Manager Task to be launched - when a Job is started under this schedule. If the Job does not specify a - Job Manager Task, the user must explicitly add Tasks to the Job using the - Task API. If the Job does specify a Job Manager Task, the Batch service - creates the Job Manager Task when the Job is created, and will try to - schedule the Job Manager Task before scheduling other Tasks in the Job. - :type job_manager_task: ~azure.batch.models.JobManagerTask - :param job_preparation_task: The Job Preparation Task for Jobs created - under this schedule. If a Job has a Job Preparation Task, the Batch - service will run the Job Preparation Task on a Node before starting any - Tasks of that Job on that Compute Node. - :type job_preparation_task: ~azure.batch.models.JobPreparationTask - :param job_release_task: The Job Release Task for Jobs created under this - schedule. The primary purpose of the Job Release Task is to undo changes - to Nodes made by the Job Preparation Task. Example activities include - deleting local files, or shutting down services that were started as part - of Job preparation. A Job Release Task cannot be specified without also - specifying a Job Preparation Task for the Job. The Batch service runs the - Job Release Task on the Compute Nodes that have run the Job Preparation - Task. - :type job_release_task: ~azure.batch.models.JobReleaseTask - :param common_environment_settings: Individual Tasks can override an - environment setting specified here by specifying the same setting name - with a different value. - :type common_environment_settings: - list[~azure.batch.models.EnvironmentSetting] - :param pool_info: Required. The Pool on which the Batch service runs the - Tasks of Jobs created under this schedule. - :type pool_info: ~azure.batch.models.PoolInformation - :param metadata: The Batch service does not assign any meaning to - metadata; it is solely for the use of user code. 
- :type metadata: list[~azure.batch.models.MetadataItem] - """ - - _validation = { - 'pool_info': {'required': True}, - } - - _attribute_map = { - 'priority': {'key': 'priority', 'type': 'int'}, - 'allow_task_preemption': {'key': 'allowTaskPreemption', 'type': 'bool'}, - 'max_parallel_tasks': {'key': 'maxParallelTasks', 'type': 'int'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, - 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, - 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, - 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, - 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, - 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, - 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, - 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, - 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, - 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - } - - def __init__(self, **kwargs): - super(JobSpecification, self).__init__(**kwargs) - self.priority = kwargs.get('priority', None) - self.allow_task_preemption = kwargs.get('allow_task_preemption', None) - self.max_parallel_tasks = kwargs.get('max_parallel_tasks', -1) - self.display_name = kwargs.get('display_name', None) - self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None) - self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) - self.on_task_failure = kwargs.get('on_task_failure', None) - self.network_configuration = kwargs.get('network_configuration', None) - self.constraints = kwargs.get('constraints', None) - self.job_manager_task = kwargs.get('job_manager_task', None) - self.job_preparation_task = kwargs.get('job_preparation_task', None) - self.job_release_task = kwargs.get('job_release_task', None) - self.common_environment_settings = kwargs.get('common_environment_settings', None) - self.pool_info = kwargs.get('pool_info', None) - self.metadata = kwargs.get('metadata', None) - - -class JobStatistics(Model): - """Resource usage statistics for a Job. +class BatchTaskIdRange(_model_base.Model): + """The start and end of the range are inclusive. For example, if a range has start + 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. - All required parameters must be populated in order to send to Azure. - - :param url: Required. - :type url: str - :param start_time: Required. - :type start_time: datetime - :param last_update_time: Required. - :type last_update_time: datetime - :param user_cpu_time: Required. - :type user_cpu_time: timedelta - :param kernel_cpu_time: Required. - :type kernel_cpu_time: timedelta - :param wall_clock_time: Required. The wall clock time is the elapsed time - from when the Task started running on a Compute Node to when it finished - (or to the last time the statistics were updated, if the Task had not - finished by then). If a Task was retried, this includes the wall clock - time of all the Task retries. - :type wall_clock_time: timedelta - :param read_iops: Required. The total number of disk read operations made - by all Tasks in the Job. - :type read_iops: long - :param write_iops: Required. The total number of disk write operations - made by all Tasks in the Job. 
- :type write_iops: long - :param read_io_gi_b: Required. The total amount of data in GiB read from - disk by all Tasks in the Job. - :type read_io_gi_b: float - :param write_io_gi_b: Required. The total amount of data in GiB written to - disk by all Tasks in the Job. - :type write_io_gi_b: float - :param num_succeeded_tasks: Required. The total number of Tasks - successfully completed in the Job during the given time range. A Task - completes successfully if it returns exit code 0. - :type num_succeeded_tasks: long - :param num_failed_tasks: Required. The total number of Tasks in the Job - that failed during the given time range. A Task fails if it exhausts its - maximum retry count without returning exit code 0. - :type num_failed_tasks: long - :param num_task_retries: Required. The total number of retries on all the - Tasks in the Job during the given time range. - :type num_task_retries: long - :param wait_time: Required. The wait time for a Task is defined as the - elapsed time between the creation of the Task and the start of Task - execution. (If the Task is retried due to failures, the wait time is the - time to the most recent Task execution.) This value is only reported in - the Account lifetime statistics; it is not included in the Job statistics. - :type wait_time: timedelta - """ - - _validation = { - 'url': {'required': True}, - 'start_time': {'required': True}, - 'last_update_time': {'required': True}, - 'user_cpu_time': {'required': True}, - 'kernel_cpu_time': {'required': True}, - 'wall_clock_time': {'required': True}, - 'read_iops': {'required': True}, - 'write_iops': {'required': True}, - 'read_io_gi_b': {'required': True}, - 'write_io_gi_b': {'required': True}, - 'num_succeeded_tasks': {'required': True}, - 'num_failed_tasks': {'required': True}, - 'num_task_retries': {'required': True}, - 'wait_time': {'required': True}, - } - - _attribute_map = { - 'url': {'key': 'url', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, - 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, - 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, - 'read_iops': {'key': 'readIOps', 'type': 'long'}, - 'write_iops': {'key': 'writeIOps', 'type': 'long'}, - 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, - 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, - 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, - 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, - 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, - 'wait_time': {'key': 'waitTime', 'type': 'duration'}, - } - - def __init__(self, **kwargs): - super(JobStatistics, self).__init__(**kwargs) - self.url = kwargs.get('url', None) - self.start_time = kwargs.get('start_time', None) - self.last_update_time = kwargs.get('last_update_time', None) - self.user_cpu_time = kwargs.get('user_cpu_time', None) - self.kernel_cpu_time = kwargs.get('kernel_cpu_time', None) - self.wall_clock_time = kwargs.get('wall_clock_time', None) - self.read_iops = kwargs.get('read_iops', None) - self.write_iops = kwargs.get('write_iops', None) - self.read_io_gi_b = kwargs.get('read_io_gi_b', None) - self.write_io_gi_b = kwargs.get('write_io_gi_b', None) - self.num_succeeded_tasks = kwargs.get('num_succeeded_tasks', None) - self.num_failed_tasks = kwargs.get('num_failed_tasks', None) - self.num_task_retries = 
kwargs.get('num_task_retries', None) - self.wait_time = kwargs.get('wait_time', None) - - -class JobTerminateOptions(Model): - """Additional parameters for terminate operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobTerminateOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobTerminateParameter(Model): - """Options when terminating a Job. - - :param terminate_reason: - :type terminate_reason: str - """ - - _attribute_map = { - 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(JobTerminateParameter, self).__init__(**kwargs) - self.terminate_reason = kwargs.get('terminate_reason', None) - - -class JobUpdateOptions(Model): - """Additional parameters for update operation. 
- - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(JobUpdateOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class JobUpdateParameter(Model): - """The set of changes to be made to a Job. - - All required parameters must be populated in order to send to Azure. - - :param priority: The priority of the Job. Priority values can range from - -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. If omitted, it is set to the default value 0. - :type priority: int - :param max_parallel_tasks: The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 - or greater than 0 if specified. 
If not specified, the default value is -1, - which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created - using the update job API. Default value: -1 . - :type max_parallel_tasks: int - :param allow_task_preemption: Whether Tasks in this job can be preempted - by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be - able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - :type allow_task_preemption: bool - :param constraints: The execution constraints for the Job. If omitted, the - constraints are cleared. - :type constraints: ~azure.batch.models.JobConstraints - :param pool_info: Required. The Pool on which the Batch service runs the - Job's Tasks. You may change the Pool for a Job only when the Job is - disabled. The Update Job call will fail if you include the poolInfo - element and the Job is not disabled. If you specify an - autoPoolSpecification in the poolInfo, only the keepAlive property of the - autoPoolSpecification can be updated, and then only if the - autoPoolSpecification has a poolLifetimeOption of Job (other job - properties can be updated as normal). - :type pool_info: ~azure.batch.models.PoolInformation - :param metadata: If omitted, it takes the default value of an empty list; - in effect, any existing metadata is deleted. - :type metadata: list[~azure.batch.models.MetadataItem] - :param on_all_tasks_complete: The action the Batch service should take - when all Tasks in the Job are in the completed state. If omitted, the - completion behavior is set to noaction. If the current value is - terminatejob, this is an error because a Job's completion behavior may not - be changed from terminatejob to noaction. You may not change the value - from terminatejob to noaction - that is, once you have engaged automatic - Job termination, you cannot turn it off again. If you try to do this, the - request fails and Batch returns status code 400 (Bad Request) and an - 'invalid property value' error response. If you do not specify this - element in a PUT request, it is equivalent to passing noaction. This is an - error if the current value is terminatejob. 
Possible values include: - 'noAction', 'terminateJob' - :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete - """ - - _validation = { - 'pool_info': {'required': True}, - } - - _attribute_map = { - 'priority': {'key': 'priority', 'type': 'int'}, - 'max_parallel_tasks': {'key': 'maxParallelTasks', 'type': 'int'}, - 'allow_task_preemption': {'key': 'allowTaskPreemption', 'type': 'bool'}, - 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, - 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, - } - - def __init__(self, **kwargs): - super(JobUpdateParameter, self).__init__(**kwargs) - self.priority = kwargs.get('priority', None) - self.max_parallel_tasks = kwargs.get('max_parallel_tasks', -1) - self.allow_task_preemption = kwargs.get('allow_task_preemption', None) - self.constraints = kwargs.get('constraints', None) - self.pool_info = kwargs.get('pool_info', None) - self.metadata = kwargs.get('metadata', None) - self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) - - -class LinuxUserConfiguration(Model): - """Properties used to create a user Account on a Linux Compute Node. - :param uid: The user ID of the user Account. The uid and gid properties - must be specified together or not at all. If not specified the underlying - operating system picks the uid. - :type uid: int - :param gid: The group ID for the user Account. The uid and gid properties - must be specified together or not at all. If not specified the underlying - operating system picks the gid. - :type gid: int - :param ssh_private_key: The private key must not be password protected. - The private key is used to automatically configure asymmetric-key based - authentication for SSH between Compute Nodes in a Linux Pool when the - Pool's enableInterNodeCommunication property is true (it is ignored if - enableInterNodeCommunication is false). It does this by placing the key - pair into the user's .ssh directory. If not specified, password-less SSH - is not configured between Compute Nodes (no modification of the user's - .ssh directory is done). - :type ssh_private_key: str + :ivar start: The first Task ID in the range. Required. + :vartype start: int + :ivar end: The last Task ID in the range. Required. + :vartype end: int """ - _attribute_map = { - 'uid': {'key': 'uid', 'type': 'int'}, - 'gid': {'key': 'gid', 'type': 'int'}, - 'ssh_private_key': {'key': 'sshPrivateKey', 'type': 'str'}, - } + start: int = rest_field() + """The first Task ID in the range. Required.""" + end: int = rest_field() + """The last Task ID in the range. Required.""" - def __init__(self, **kwargs): - super(LinuxUserConfiguration, self).__init__(**kwargs) - self.uid = kwargs.get('uid', None) - self.gid = kwargs.get('gid', None) - self.ssh_private_key = kwargs.get('ssh_private_key', None) + @overload + def __init__( + self, + *, + start: int, + end: int, + ): ... + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ -class ManagedDisk(Model): - """ManagedDisk. + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) - :param storage_account_type: The storage account type for managed disk. 
- Possible values include: 'StandardLRS', 'PremiumLRS', 'StandardSSDLRS' - :type storage_account_type: str or ~azure.batch.models.StorageAccountType - """ - _attribute_map = { - 'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'}, - } +class BatchTaskInfo(_model_base.Model): + """Information about a Task running on a Compute Node. - def __init__(self, **kwargs): - super(ManagedDisk, self).__init__(**kwargs) - self.storage_account_type = kwargs.get('storage_account_type', None) + :ivar task_url: The URL of the Task. + :vartype task_url: str + :ivar job_id: The ID of the Job to which the Task belongs. + :vartype job_id: str + :ivar task_id: The ID of the Task. + :vartype task_id: str + :ivar subtask_id: The ID of the subtask if the Task is a multi-instance Task. + :vartype subtask_id: int + :ivar task_state: The current state of the Task. Required. Known values are: "active", + "preparing", "running", and "completed". + :vartype task_state: str or ~azure.batch.models.BatchTaskState + :ivar execution_info: Information about the execution of the Task. + :vartype execution_info: ~azure.batch.models.BatchTaskExecutionInfo + """ + + task_url: Optional[str] = rest_field(name="taskUrl") + """The URL of the Task.""" + job_id: Optional[str] = rest_field(name="jobId") + """The ID of the Job to which the Task belongs.""" + task_id: Optional[str] = rest_field(name="taskId") + """The ID of the Task.""" + subtask_id: Optional[int] = rest_field(name="subtaskId") + """The ID of the subtask if the Task is a multi-instance Task.""" + task_state: Union[str, "_models.BatchTaskState"] = rest_field(name="taskState") + """The current state of the Task. Required. Known values are: \"active\", \"preparing\", + \"running\", and \"completed\".""" + execution_info: Optional["_models.BatchTaskExecutionInfo"] = rest_field(name="executionInfo") + """Information about the execution of the Task.""" + + @overload + def __init__( + self, + *, + task_state: Union[str, "_models.BatchTaskState"], + task_url: Optional[str] = None, + job_id: Optional[str] = None, + task_id: Optional[str] = None, + subtask_id: Optional[int] = None, + execution_info: Optional["_models.BatchTaskExecutionInfo"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchTaskSchedulingPolicy(_model_base.Model): + """Specifies how Tasks should be distributed across Compute Nodes. -class MetadataItem(Model): - """A name-value pair associated with a Batch service resource. - The Batch service does not assign any meaning to this metadata; it is - solely for the use of user code. + :ivar node_fill_type: How Tasks are distributed across Compute Nodes in a Pool. If not + specified, the default is spread. Required. Known values are: "spread" and "pack". + :vartype node_fill_type: str or ~azure.batch.models.BatchNodeFillType + """ - All required parameters must be populated in order to send to Azure. + node_fill_type: Union[str, "_models.BatchNodeFillType"] = rest_field(name="nodeFillType") + """How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is + spread. Required. Known values are: \"spread\" and \"pack\".""" - :param name: Required. - :type name: str - :param value: Required. 
- :type value: str - """ + @overload + def __init__( + self, + *, + node_fill_type: Union[str, "_models.BatchNodeFillType"], + ): ... - _validation = { - 'name': {'required': True}, - 'value': {'required': True}, - } + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) - def __init__(self, **kwargs): - super(MetadataItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.value = kwargs.get('value', None) +class BatchTaskSlotCounts(_model_base.Model): + """The TaskSlot counts for a Job. -class MountConfiguration(Model): - """The file system to mount on each node. - :param azure_blob_file_system_configuration: The Azure Storage Container - to mount using blob FUSE on each node. This property is mutually exclusive - with all other properties. - :type azure_blob_file_system_configuration: - ~azure.batch.models.AzureBlobFileSystemConfiguration - :param nfs_mount_configuration: The NFS file system to mount on each node. - This property is mutually exclusive with all other properties. - :type nfs_mount_configuration: ~azure.batch.models.NFSMountConfiguration - :param cifs_mount_configuration: The CIFS/SMB file system to mount on each - node. This property is mutually exclusive with all other properties. - :type cifs_mount_configuration: ~azure.batch.models.CIFSMountConfiguration - :param azure_file_share_configuration: The Azure File Share to mount on - each node. This property is mutually exclusive with all other properties. - :type azure_file_share_configuration: - ~azure.batch.models.AzureFileShareConfiguration - """ - - _attribute_map = { - 'azure_blob_file_system_configuration': {'key': 'azureBlobFileSystemConfiguration', 'type': 'AzureBlobFileSystemConfiguration'}, - 'nfs_mount_configuration': {'key': 'nfsMountConfiguration', 'type': 'NFSMountConfiguration'}, - 'cifs_mount_configuration': {'key': 'cifsMountConfiguration', 'type': 'CIFSMountConfiguration'}, - 'azure_file_share_configuration': {'key': 'azureFileShareConfiguration', 'type': 'AzureFileShareConfiguration'}, - } - - def __init__(self, **kwargs): - super(MountConfiguration, self).__init__(**kwargs) - self.azure_blob_file_system_configuration = kwargs.get('azure_blob_file_system_configuration', None) - self.nfs_mount_configuration = kwargs.get('nfs_mount_configuration', None) - self.cifs_mount_configuration = kwargs.get('cifs_mount_configuration', None) - self.azure_file_share_configuration = kwargs.get('azure_file_share_configuration', None) - - -class MultiInstanceSettings(Model): - """Settings which specify how to run a multi-instance Task. - - Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI - case, if any of the subtasks fail (for example due to exiting with a - non-zero exit code) the entire multi-instance Task fails. The - multi-instance Task is then terminated and retried, up to its retry limit. - - All required parameters must be populated in order to send to Azure. - - :param number_of_instances: The number of Compute Nodes required by the - Task. If omitted, the default is 1. - :type number_of_instances: int - :param coordination_command_line: Required. 
A typical coordination command - line launches a background service and verifies that the service is ready - to process inter-node messages. - :type coordination_command_line: str - :param common_resource_files: The difference between common resource files - and Task resource files is that common resource files are downloaded for - all subtasks including the primary, whereas Task resource files are - downloaded only for the primary. Also note that these resource files are - not downloaded to the Task working directory, but instead are downloaded - to the Task root directory (one directory above the working directory). - There is a maximum size for the list of resource files. When the max size - is exceeded, the request will fail and the response error code will be - RequestEntityTooLarge. If this occurs, the collection of ResourceFiles - must be reduced in size. This can be achieved using .zip files, - Application Packages, or Docker Containers. - :type common_resource_files: list[~azure.batch.models.ResourceFile] - """ - - _validation = { - 'coordination_command_line': {'required': True}, - } - - _attribute_map = { - 'number_of_instances': {'key': 'numberOfInstances', 'type': 'int'}, - 'coordination_command_line': {'key': 'coordinationCommandLine', 'type': 'str'}, - 'common_resource_files': {'key': 'commonResourceFiles', 'type': '[ResourceFile]'}, - } - - def __init__(self, **kwargs): - super(MultiInstanceSettings, self).__init__(**kwargs) - self.number_of_instances = kwargs.get('number_of_instances', None) - self.coordination_command_line = kwargs.get('coordination_command_line', None) - self.common_resource_files = kwargs.get('common_resource_files', None) - - -class NameValuePair(Model): - """Represents a name-value pair. + :ivar active: The number of TaskSlots for active Tasks. Required. + :vartype active: int + :ivar running: The number of TaskSlots for running Tasks. Required. + :vartype running: int + :ivar completed: The number of TaskSlots for completed Tasks. Required. + :vartype completed: int + :ivar succeeded: The number of TaskSlots for succeeded Tasks. Required. + :vartype succeeded: int + :ivar failed: The number of TaskSlots for failed Tasks. Required. + :vartype failed: int + """ + + active: int = rest_field() + """The number of TaskSlots for active Tasks. Required.""" + running: int = rest_field() + """The number of TaskSlots for running Tasks. Required.""" + completed: int = rest_field() + """The number of TaskSlots for completed Tasks. Required.""" + succeeded: int = rest_field() + """The number of TaskSlots for succeeded Tasks. Required.""" + failed: int = rest_field() + """The number of TaskSlots for failed Tasks. Required.""" + + @overload + def __init__( + self, + *, + active: int, + running: int, + completed: int, + succeeded: int, + failed: int, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class BatchTaskStatistics(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Resource usage statistics for a Task. - :param name: - :type name: str - :param value: - :type value: str - """ - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } + :ivar url: The URL of the statistics. Required. 
+ :vartype url: str + :ivar start_time: The start time of the time range covered by the statistics. Required. + :vartype start_time: ~datetime.datetime + :ivar last_update_time: The time at which the statistics were last updated. All statistics are + limited to the range between startTime and lastUpdateTime. Required. + :vartype last_update_time: ~datetime.datetime + :ivar user_cpu_time: The total user mode CPU time (summed across all cores and all Compute + Nodes) consumed by the Task. Required. + :vartype user_cpu_time: ~datetime.timedelta + :ivar kernel_cpu_time: The total kernel mode CPU time (summed across all cores and all Compute + Nodes) consumed by the Task. Required. + :vartype kernel_cpu_time: ~datetime.timedelta + :ivar wall_clock_time: The total wall clock time of the Task. The wall clock time is the + elapsed time from when the Task started running on a Compute Node to when it finished (or to + the last time the statistics were updated, if the Task had not finished by then). If the Task + was retried, this includes the wall clock time of all the Task retries. Required. + :vartype wall_clock_time: ~datetime.timedelta + :ivar read_i_ops: The total number of disk read operations made by the Task. Required. + :vartype read_i_ops: int + :ivar write_i_ops: The total number of disk write operations made by the Task. Required. + :vartype write_i_ops: int + :ivar read_i_o_gi_b: The total gibibytes read from disk by the Task. Required. + :vartype read_i_o_gi_b: float + :ivar write_i_o_gi_b: The total gibibytes written to disk by the Task. Required. + :vartype write_i_o_gi_b: float + :ivar wait_time: The total wait time of the Task. The wait time for a Task is defined as the + elapsed time between the creation of the Task and the start of Task execution. (If the Task is + retried due to failures, the wait time is the time to the most recent Task execution.). + Required. + :vartype wait_time: ~datetime.timedelta + """ + + url: str = rest_field() + """The URL of the statistics. Required.""" + start_time: datetime.datetime = rest_field(name="startTime", format="rfc3339") + """The start time of the time range covered by the statistics. Required.""" + last_update_time: datetime.datetime = rest_field(name="lastUpdateTime", format="rfc3339") + """The time at which the statistics were last updated. All statistics are limited to the range + between startTime and lastUpdateTime. Required.""" + user_cpu_time: datetime.timedelta = rest_field(name="userCPUTime") + """The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by the + Task. Required.""" + kernel_cpu_time: datetime.timedelta = rest_field(name="kernelCPUTime") + """The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by the + Task. Required.""" + wall_clock_time: datetime.timedelta = rest_field(name="wallClockTime") + """The total wall clock time of the Task. The wall clock time is the elapsed time from when the + Task started running on a Compute Node to when it finished (or to the last time the statistics + were updated, if the Task had not finished by then). If the Task was retried, this includes the + wall clock time of all the Task retries. Required.""" + read_i_ops: int = rest_field(name="readIOps") + """The total number of disk read operations made by the Task. Required.""" + write_i_ops: int = rest_field(name="writeIOps") + """The total number of disk write operations made by the Task. 
Required.""" + read_i_o_gi_b: float = rest_field(name="readIOGiB") + """The total gibibytes read from disk by the Task. Required.""" + write_i_o_gi_b: float = rest_field(name="writeIOGiB") + """The total gibibytes written to disk by the Task. Required.""" + wait_time: datetime.timedelta = rest_field(name="waitTime") + """The total wait time of the Task. The wait time for a Task is defined as the elapsed time + between the creation of the Task and the start of Task execution. (If the Task is retried due + to failures, the wait time is the time to the most recent Task execution.). Required.""" + + @overload + def __init__( + self, + *, + url: str, + start_time: datetime.datetime, + last_update_time: datetime.datetime, + user_cpu_time: datetime.timedelta, + kernel_cpu_time: datetime.timedelta, + wall_clock_time: datetime.timedelta, + read_i_ops: int, + write_i_ops: int, + read_i_o_gi_b: float, + write_i_o_gi_b: float, + wait_time: datetime.timedelta, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class CifsMountConfiguration(_model_base.Model): + """Information used to connect to a CIFS file system. - def __init__(self, **kwargs): - super(NameValuePair, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.value = kwargs.get('value', None) + :ivar username: The user to use for authentication against the CIFS file system. Required. + :vartype username: str + :ivar source: The URI of the file system to mount. Required. + :vartype source: str + :ivar relative_mount_path: The relative path on the compute node where the file system will be + mounted. All file systems are mounted relative to the Batch mounts directory, accessible via + the AZ_BATCH_NODE_MOUNTS_DIR environment variable. Required. + :vartype relative_mount_path: str + :ivar mount_options: Additional command line options to pass to the mount command. These are + 'net use' options in Windows and 'mount' options in Linux. + :vartype mount_options: str + :ivar password: The password to use for authentication against the CIFS file system. Required. + :vartype password: str + """ + + username: str = rest_field() + """The user to use for authentication against the CIFS file system. Required.""" + source: str = rest_field() + """The URI of the file system to mount. Required.""" + relative_mount_path: str = rest_field(name="relativeMountPath") + """The relative path on the compute node where the file system will be mounted. All file systems + are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR + environment variable. Required.""" + mount_options: Optional[str] = rest_field(name="mountOptions") + """Additional command line options to pass to the mount command. These are 'net use' options in + Windows and 'mount' options in Linux.""" + password: str = rest_field() + """The password to use for authentication against the CIFS file system. Required.""" + + @overload + def __init__( + self, + *, + username: str, + source: str, + relative_mount_path: str, + password: str, + mount_options: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ContainerConfiguration(_model_base.Model): + """The configuration for container-enabled Pools. -class NetworkConfiguration(Model): - """The network configuration for a Pool. - :param subnet_id: The virtual network must be in the same region and - subscription as the Azure Batch Account. The specified subnet should have - enough free IP addresses to accommodate the number of Compute Nodes in the - Pool. If the subnet doesn't have enough free IP addresses, the Pool will - partially allocate Nodes and a resize error will occur. The - 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual - Machine Contributor' Role-Based Access Control (RBAC) role for the - specified VNet. The specified subnet must allow communication from the - Azure Batch service to be able to schedule Tasks on the Nodes. This can be - verified by checking if the specified VNet has any associated Network - Security Groups (NSG). If communication to the Nodes in the specified - subnet is denied by an NSG, then the Batch service will set the state of - the Compute Nodes to unusable. For Pools created with - virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported, but for Pools created - with cloudServiceConfiguration both ARM and classic virtual networks are - supported. If the specified VNet has any associated Network Security - Groups (NSG), then a few reserved system ports must be enabled for inbound - communication. For Pools created with a virtual machine configuration, - enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 - for Windows. For Pools created with a cloud service configuration, enable - ports 10100, 20100, and 30100. Also enable outbound connections to Azure - Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration - :type subnet_id: str - :param dynamic_vnet_assignment_scope: Possible values include: 'none', - 'job' - :type dynamic_vnet_assignment_scope: str or - ~azure.batch.models.DynamicVNetAssignmentScope - :param endpoint_configuration: The configuration for endpoints on Compute - Nodes in the Batch Pool. Pool endpoint configuration is only supported on - Pools with the virtualMachineConfiguration property. - :type endpoint_configuration: - ~azure.batch.models.PoolEndpointConfiguration - :param public_ip_address_configuration: The Public IPAddress configuration - for Compute Nodes in the Batch Pool. Public IP configuration property is - only supported on Pools with the virtualMachineConfiguration property. - :type public_ip_address_configuration: - ~azure.batch.models.PublicIPAddressConfiguration - :param enable_accelerated_networking: Whether this pool should enable - accelerated networking. Accelerated networking enables single root I/O - virtualization (SR-IOV) to a VM, which may lead to improved networking - performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - :type enable_accelerated_networking: bool + :ivar type: The container technology to be used. Required. Known values are: "dockerCompatible" + and "criCompatible". + :vartype type: str or ~azure.batch.models.ContainerType + :ivar container_image_names: The collection of container Image names. 
This is the full Image + reference, as would be specified to "docker pull". An Image will be sourced from the default + Docker registry unless the Image is fully qualified with an alternative registry. + :vartype container_image_names: list[str] + :ivar container_registries: Additional private registries from which containers can be pulled. + If any Images must be downloaded from a private registry which requires credentials, then those + credentials must be provided here. + :vartype container_registries: list[~azure.batch.models.ContainerRegistryReference] + """ + + type: Union[str, "_models.ContainerType"] = rest_field() + """The container technology to be used. Required. Known values are: \"dockerCompatible\" and + \"criCompatible\".""" + container_image_names: Optional[List[str]] = rest_field(name="containerImageNames") + """The collection of container Image names. This is the full Image reference, as would be + specified to \"docker pull\". An Image will be sourced from the default Docker registry unless + the Image is fully qualified with an alternative registry.""" + container_registries: Optional[List["_models.ContainerRegistryReference"]] = rest_field(name="containerRegistries") + """Additional private registries from which containers can be pulled. If any Images must be + downloaded from a private registry which requires credentials, then those credentials must be + provided here.""" + + @overload + def __init__( + self, + *, + type: Union[str, "_models.ContainerType"], + container_image_names: Optional[List[str]] = None, + container_registries: Optional[List["_models.ContainerRegistryReference"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ContainerRegistryReference(_model_base.Model): + """A private container registry. + + :ivar username: The user name to log into the registry server. + :vartype username: str + :ivar password: The password to log into the registry server. + :vartype password: str + :ivar registry_server: The registry URL. If omitted, the default is "docker.io". + :vartype registry_server: str + :ivar identity_reference: The reference to the user assigned identity to use to access an Azure + Container Registry instead of username and password. + :vartype identity_reference: ~azure.batch.models.BatchNodeIdentityReference + """ + + username: Optional[str] = rest_field() + """The user name to log into the registry server.""" + password: Optional[str] = rest_field() + """The password to log into the registry server.""" + registry_server: Optional[str] = rest_field(name="registryServer") + """The registry URL. If omitted, the default is \"docker.io\".""" + identity_reference: Optional["_models.BatchNodeIdentityReference"] = rest_field(name="identityReference") + """The reference to the user assigned identity to use to access an Azure Container Registry + instead of username and password.""" + + @overload + def __init__( + self, + *, + username: Optional[str] = None, + password: Optional[str] = None, + registry_server: Optional[str] = None, + identity_reference: Optional["_models.BatchNodeIdentityReference"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class DataDisk(_model_base.Model): + """Settings which will be used by the data disks associated to Compute Nodes in + the Pool. When using attached data disks, you need to mount and format the + disks from within a VM to use them. + + + :ivar logical_unit_number: The logical unit number. The logicalUnitNumber is used to uniquely + identify each data disk. If attaching multiple disks, each should have a distinct + logicalUnitNumber. The value must be between 0 and 63, inclusive. Required. + :vartype logical_unit_number: int + :ivar caching: The type of caching to be enabled for the data disks. The default value for + caching is readwrite. For information about the caching options see: + https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + Known values are: "none", "readonly", and "readwrite". + :vartype caching: str or ~azure.batch.models.CachingType + :ivar disk_size_gb: The initial disk size in gigabytes. Required. + :vartype disk_size_gb: int + :ivar storage_account_type: The storage Account type to be used for the data disk. If omitted, + the default is "standard_lrs". Known values are: "standard_lrs", "premium_lrs", and + "standardssd_lrs". + :vartype storage_account_type: str or ~azure.batch.models.StorageAccountType + """ + + logical_unit_number: int = rest_field(name="lun") + """The logical unit number. The logicalUnitNumber is used to uniquely identify each data disk. If + attaching multiple disks, each should have a distinct logicalUnitNumber. The value must be + between 0 and 63, inclusive. Required.""" + caching: Optional[Union[str, "_models.CachingType"]] = rest_field() + """The type of caching to be enabled for the data disks. The default value for caching is + readwrite. For information about the caching options see: + https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + Known values are: \"none\", \"readonly\", and \"readwrite\".""" + disk_size_gb: int = rest_field(name="diskSizeGB") + """The initial disk size in gigabytes. Required.""" + storage_account_type: Optional[Union[str, "_models.StorageAccountType"]] = rest_field(name="storageAccountType") + """The storage Account type to be used for the data disk. If omitted, the default is + \"standard_lrs\". Known values are: \"standard_lrs\", \"premium_lrs\", and \"standardssd_lrs\".""" + + @overload + def __init__( + self, + *, + logical_unit_number: int, + disk_size_gb: int, + caching: Optional[Union[str, "_models.CachingType"]] = None, + storage_account_type: Optional[Union[str, "_models.StorageAccountType"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class DiffDiskSettings(_model_base.Model): + """Specifies the ephemeral Disk Settings for the operating system disk used by the + compute node (VM). + + :ivar placement: Specifies the ephemeral disk placement for operating system disk for all VMs + in the pool. This property can be used by user in the request to choose the location e.g., + cache disk space for Ephemeral OS disk provisioning. 
For more information on Ephemeral OS disk + size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at + https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements + and Linux VMs at + https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. + "cachedisk" + :vartype placement: str or ~azure.batch.models.DiffDiskPlacement """ - _attribute_map = { - 'subnet_id': {'key': 'subnetId', 'type': 'str'}, - 'dynamic_vnet_assignment_scope': {'key': 'dynamicVNetAssignmentScope', 'type': 'DynamicVNetAssignmentScope'}, - 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'}, - 'public_ip_address_configuration': {'key': 'publicIPAddressConfiguration', 'type': 'PublicIPAddressConfiguration'}, - 'enable_accelerated_networking': {'key': 'enableAcceleratedNetworking', 'type': 'bool'}, - } + placement: Optional[Union[str, "_models.DiffDiskPlacement"]] = rest_field() + """Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This + property can be used by user in the request to choose the location e.g., cache disk space for + Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, + please refer to Ephemeral OS disk size requirements for Windows VMs at + https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements + and Linux VMs at + https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. + \"cachedisk\"""" - def __init__(self, **kwargs): - super(NetworkConfiguration, self).__init__(**kwargs) - self.subnet_id = kwargs.get('subnet_id', None) - self.dynamic_vnet_assignment_scope = kwargs.get('dynamic_vnet_assignment_scope', None) - self.endpoint_configuration = kwargs.get('endpoint_configuration', None) - self.public_ip_address_configuration = kwargs.get('public_ip_address_configuration', None) - self.enable_accelerated_networking = kwargs.get('enable_accelerated_networking', None) + @overload + def __init__( + self, + *, + placement: Optional[Union[str, "_models.DiffDiskPlacement"]] = None, + ): ... + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ -class NetworkSecurityGroupRule(Model): - """A network security group rule to apply to an inbound endpoint. + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) - All required parameters must be populated in order to send to Azure. - - :param priority: Required. The priority for this rule. Priorities within a - Pool must be unique and are evaluated in order of priority. The lower the - number the higher the priority. For example, rules could be specified with - order numbers of 150, 250, and 350. The rule with the order number of 150 - takes precedence over the rule that has an order of 250. Allowed - priorities are 150 to 4096. If any reserved or duplicate values are - provided the request fails with HTTP status code 400. - :type priority: int - :param access: Required. Possible values include: 'allow', 'deny' - :type access: str or ~azure.batch.models.NetworkSecurityGroupRuleAccess - :param source_address_prefix: Required. Valid values are a single IP - address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, - or * (for all addresses). 
If any other values are provided the request - fails with HTTP status code 400. - :type source_address_prefix: str - :param source_port_ranges: Valid values are '*' (for all ports 0 - 65535), - a specific port (i.e. 22), or a port range (i.e. 100-200). The ports must - be in the range of 0 to 65535. Each entry in this collection must not - overlap any other entry (either a range or an individual port). If any - other values are provided the request fails with HTTP status code 400. The - default value is '*'. - :type source_port_ranges: list[str] - """ - - _validation = { - 'priority': {'required': True}, - 'access': {'required': True}, - 'source_address_prefix': {'required': True}, - } - - _attribute_map = { - 'priority': {'key': 'priority', 'type': 'int'}, - 'access': {'key': 'access', 'type': 'NetworkSecurityGroupRuleAccess'}, - 'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'}, - 'source_port_ranges': {'key': 'sourcePortRanges', 'type': '[str]'}, - } - - def __init__(self, **kwargs): - super(NetworkSecurityGroupRule, self).__init__(**kwargs) - self.priority = kwargs.get('priority', None) - self.access = kwargs.get('access', None) - self.source_address_prefix = kwargs.get('source_address_prefix', None) - self.source_port_ranges = kwargs.get('source_port_ranges', None) - - -class NFSMountConfiguration(Model): - """Information used to connect to an NFS file system. - All required parameters must be populated in order to send to Azure. +class DiskEncryptionConfiguration(_model_base.Model): + """The disk encryption configuration applied on compute nodes in the pool. + Disk encryption configuration is not supported on Linux pool created with + Azure Compute Gallery Image. - :param source: Required. - :type source: str - :param relative_mount_path: Required. All file systems are mounted - relative to the Batch mounts directory, accessible via the - AZ_BATCH_NODE_MOUNTS_DIR environment variable. - :type relative_mount_path: str - :param mount_options: These are 'net use' options in Windows and 'mount' - options in Linux. - :type mount_options: str + :ivar targets: The list of disk targets Batch Service will encrypt on the compute node. If + omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only + "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. + :vartype targets: list[str or ~azure.batch.models.DiskEncryptionTarget] """ - _validation = { - 'source': {'required': True}, - 'relative_mount_path': {'required': True}, - } + targets: Optional[List[Union[str, "_models.DiskEncryptionTarget"]]] = rest_field() + """The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks + on the compute nodes in the pool will be encrypted. On Linux pool, only \"TemporaryDisk\" is + supported; on Windows pool, \"OsDisk\" and \"TemporaryDisk\" must be specified.""" - _attribute_map = { - 'source': {'key': 'source', 'type': 'str'}, - 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, - 'mount_options': {'key': 'mountOptions', 'type': 'str'}, - } + @overload + def __init__( + self, + *, + targets: Optional[List[Union[str, "_models.DiskEncryptionTarget"]]] = None, + ): ... 
- def __init__(self, **kwargs): - super(NFSMountConfiguration, self).__init__(**kwargs) - self.source = kwargs.get('source', None) - self.relative_mount_path = kwargs.get('relative_mount_path', None) - self.mount_options = kwargs.get('mount_options', None) + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) -class NodeAgentInformation(Model): - """Information about the Compute Node agent. - The Batch Compute Node agent is a program that runs on each Compute Node in - the Pool and provides Batch capability on the Compute Node. +class EnvironmentSetting(_model_base.Model): + """An environment variable to be set on a Task process. - All required parameters must be populated in order to send to Azure. - :param version: Required. This version number can be checked against the - Compute Node agent release notes located at - https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. - :type version: str - :param last_update_time: Required. This is the most recent time that the - Compute Node agent was updated to a new version. - :type last_update_time: datetime + :ivar name: The name of the environment variable. Required. + :vartype name: str + :ivar value: The value of the environment variable. + :vartype value: str """ - _validation = { - 'version': {'required': True}, - 'last_update_time': {'required': True}, - } + name: str = rest_field() + """The name of the environment variable. Required.""" + value: Optional[str] = rest_field() + """The value of the environment variable.""" - _attribute_map = { - 'version': {'key': 'version', 'type': 'str'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - } + @overload + def __init__( + self, + *, + name: str, + value: Optional[str] = None, + ): ... - def __init__(self, **kwargs): - super(NodeAgentInformation, self).__init__(**kwargs) - self.version = kwargs.get('version', None) - self.last_update_time = kwargs.get('last_update_time', None) + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) -class NodeCounts(Model): - """The number of Compute Nodes in each Compute Node state. - All required parameters must be populated in order to send to Azure. - - :param creating: Required. The number of Compute Nodes in the creating - state. - :type creating: int - :param idle: Required. The number of Compute Nodes in the idle state. - :type idle: int - :param offline: Required. The number of Compute Nodes in the offline - state. - :type offline: int - :param preempted: Required. The number of Compute Nodes in the preempted - state. - :type preempted: int - :param rebooting: Required. The count of Compute Nodes in the rebooting - state. - :type rebooting: int - :param reimaging: Required. The number of Compute Nodes in the reimaging - state. - :type reimaging: int - :param running: Required. The number of Compute Nodes in the running - state. - :type running: int - :param starting: Required. The number of Compute Nodes in the starting - state. - :type starting: int - :param start_task_failed: Required. 
The number of Compute Nodes in the - startTaskFailed state. - :type start_task_failed: int - :param leaving_pool: Required. The number of Compute Nodes in the - leavingPool state. - :type leaving_pool: int - :param unknown: Required. The number of Compute Nodes in the unknown - state. - :type unknown: int - :param unusable: Required. The number of Compute Nodes in the unusable - state. - :type unusable: int - :param waiting_for_start_task: Required. The number of Compute Nodes in - the waitingForStartTask state. - :type waiting_for_start_task: int - :param upgrading_os: Required. The number of Compute Nodes in the - upgradingOS state. - :type upgrading_os: int - :param total: Required. The total number of Compute Nodes. - :type total: int - """ - - _validation = { - 'creating': {'required': True}, - 'idle': {'required': True}, - 'offline': {'required': True}, - 'preempted': {'required': True}, - 'rebooting': {'required': True}, - 'reimaging': {'required': True}, - 'running': {'required': True}, - 'starting': {'required': True}, - 'start_task_failed': {'required': True}, - 'leaving_pool': {'required': True}, - 'unknown': {'required': True}, - 'unusable': {'required': True}, - 'waiting_for_start_task': {'required': True}, - 'upgrading_os': {'required': True}, - 'total': {'required': True}, - } - - _attribute_map = { - 'creating': {'key': 'creating', 'type': 'int'}, - 'idle': {'key': 'idle', 'type': 'int'}, - 'offline': {'key': 'offline', 'type': 'int'}, - 'preempted': {'key': 'preempted', 'type': 'int'}, - 'rebooting': {'key': 'rebooting', 'type': 'int'}, - 'reimaging': {'key': 'reimaging', 'type': 'int'}, - 'running': {'key': 'running', 'type': 'int'}, - 'starting': {'key': 'starting', 'type': 'int'}, - 'start_task_failed': {'key': 'startTaskFailed', 'type': 'int'}, - 'leaving_pool': {'key': 'leavingPool', 'type': 'int'}, - 'unknown': {'key': 'unknown', 'type': 'int'}, - 'unusable': {'key': 'unusable', 'type': 'int'}, - 'waiting_for_start_task': {'key': 'waitingForStartTask', 'type': 'int'}, - 'upgrading_os': {'key': 'upgradingOS', 'type': 'int'}, - 'total': {'key': 'total', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(NodeCounts, self).__init__(**kwargs) - self.creating = kwargs.get('creating', None) - self.idle = kwargs.get('idle', None) - self.offline = kwargs.get('offline', None) - self.preempted = kwargs.get('preempted', None) - self.rebooting = kwargs.get('rebooting', None) - self.reimaging = kwargs.get('reimaging', None) - self.running = kwargs.get('running', None) - self.starting = kwargs.get('starting', None) - self.start_task_failed = kwargs.get('start_task_failed', None) - self.leaving_pool = kwargs.get('leaving_pool', None) - self.unknown = kwargs.get('unknown', None) - self.unusable = kwargs.get('unusable', None) - self.waiting_for_start_task = kwargs.get('waiting_for_start_task', None) - self.upgrading_os = kwargs.get('upgrading_os', None) - self.total = kwargs.get('total', None) - - -class NodeDisableSchedulingParameter(Model): - """Options for disabling scheduling on a Compute Node. - - :param node_disable_scheduling_option: The default value is requeue. 
- Possible values include: 'requeue', 'terminate', 'taskCompletion' - :type node_disable_scheduling_option: str or - ~azure.batch.models.DisableComputeNodeSchedulingOption - """ - - _attribute_map = { - 'node_disable_scheduling_option': {'key': 'nodeDisableSchedulingOption', 'type': 'DisableComputeNodeSchedulingOption'}, - } - - def __init__(self, **kwargs): - super(NodeDisableSchedulingParameter, self).__init__(**kwargs) - self.node_disable_scheduling_option = kwargs.get('node_disable_scheduling_option', None) - - -class NodeFile(Model): - """Information about a file or directory on a Compute Node. - - :param name: - :type name: str - :param url: - :type url: str - :param is_directory: Whether the object represents a directory. - :type is_directory: bool - :param properties: The file properties. - :type properties: ~azure.batch.models.FileProperties - """ +class ExitCodeMapping(_model_base.Model): + """How the Batch service should respond if a Task exits with a particular exit + code. - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'url': {'key': 'url', 'type': 'str'}, - 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, - 'properties': {'key': 'properties', 'type': 'FileProperties'}, - } - def __init__(self, **kwargs): - super(NodeFile, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.url = kwargs.get('url', None) - self.is_directory = kwargs.get('is_directory', None) - self.properties = kwargs.get('properties', None) + :ivar code: A process exit code. Required. + :vartype code: int + :ivar exit_options: How the Batch service should respond if the Task exits with this exit code. + Required. + :vartype exit_options: ~azure.batch.models.ExitOptions + """ + + code: int = rest_field() + """A process exit code. Required.""" + exit_options: "_models.ExitOptions" = rest_field(name="exitOptions") + """How the Batch service should respond if the Task exits with this exit code. Required.""" + + @overload + def __init__( + self, + *, + code: int, + exit_options: "_models.ExitOptions", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ExitCodeRangeMapping(_model_base.Model): + """A range of exit codes and how the Batch service should respond to exit codes + within that range. + + + :ivar start: The first exit code in the range. Required. + :vartype start: int + :ivar end: The last exit code in the range. Required. + :vartype end: int + :ivar exit_options: How the Batch service should respond if the Task exits with an exit code in + the range start to end (inclusive). Required. + :vartype exit_options: ~azure.batch.models.ExitOptions + """ + + start: int = rest_field() + """The first exit code in the range. Required.""" + end: int = rest_field() + """The last exit code in the range. Required.""" + exit_options: "_models.ExitOptions" = rest_field(name="exitOptions") + """How the Batch service should respond if the Task exits with an exit code in the range start to + end (inclusive). Required.""" + + @overload + def __init__( + self, + *, + start: int, + end: int, + exit_options: "_models.ExitOptions", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ExitConditions(_model_base.Model): + """Specifies how the Batch service should respond when the Task completes. + :ivar exit_codes: A list of individual Task exit codes and how the Batch service should respond + to them. + :vartype exit_codes: list[~azure.batch.models.ExitCodeMapping] + :ivar exit_code_ranges: A list of Task exit code ranges and how the Batch service should + respond to them. + :vartype exit_code_ranges: list[~azure.batch.models.ExitCodeRangeMapping] + :ivar pre_processing_error: How the Batch service should respond if the Task fails to start due + to an error. + :vartype pre_processing_error: ~azure.batch.models.ExitOptions + :ivar file_upload_error: How the Batch service should respond if a file upload error occurs. If + the Task exited with an exit code that was specified via exitCodes or exitCodeRanges, and then + encountered a file upload error, then the action specified by the exit code takes precedence. + :vartype file_upload_error: ~azure.batch.models.ExitOptions + :ivar default: How the Batch service should respond if the Task fails with an exit condition + not covered by any of the other properties. This value is used if the Task exits with any + nonzero exit code not listed in the exitCodes or exitCodeRanges collection, with a + pre-processing error if the preProcessingError property is not present, or with a file upload + error if the fileUploadError property is not present. If you want non-default behavior on exit + code 0, you must list it explicitly using the exitCodes or exitCodeRanges collection. + :vartype default: ~azure.batch.models.ExitOptions + """ + + exit_codes: Optional[List["_models.ExitCodeMapping"]] = rest_field(name="exitCodes") + """A list of individual Task exit codes and how the Batch service should respond to them.""" + exit_code_ranges: Optional[List["_models.ExitCodeRangeMapping"]] = rest_field(name="exitCodeRanges") + """A list of Task exit code ranges and how the Batch service should respond to them.""" + pre_processing_error: Optional["_models.ExitOptions"] = rest_field(name="preProcessingError") + """How the Batch service should respond if the Task fails to start due to an error.""" + file_upload_error: Optional["_models.ExitOptions"] = rest_field(name="fileUploadError") + """How the Batch service should respond if a file upload error occurs. If the Task exited with an + exit code that was specified via exitCodes or exitCodeRanges, and then encountered a file + upload error, then the action specified by the exit code takes precedence.""" + default: Optional["_models.ExitOptions"] = rest_field() + """How the Batch service should respond if the Task fails with an exit condition not covered by + any of the other properties. This value is used if the Task exits with any nonzero exit code + not listed in the exitCodes or exitCodeRanges collection, with a pre-processing error if the + preProcessingError property is not present, or with a file upload error if the fileUploadError + property is not present. 
If you want non-default behavior on exit code 0, you must list it + explicitly using the exitCodes or exitCodeRanges collection.""" + + @overload + def __init__( + self, + *, + exit_codes: Optional[List["_models.ExitCodeMapping"]] = None, + exit_code_ranges: Optional[List["_models.ExitCodeRangeMapping"]] = None, + pre_processing_error: Optional["_models.ExitOptions"] = None, + file_upload_error: Optional["_models.ExitOptions"] = None, + default: Optional["_models.ExitOptions"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ExitOptions(_model_base.Model): + """Specifies how the Batch service responds to a particular exit condition. -class NodePlacementConfiguration(Model): - """Node placement configuration for a pool. + :ivar job_action: An action to take on the Job containing the Task, if the Task completes with + the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. + The default is none for exit code 0 and terminate for all other exit conditions. If the Job's + onTaskFailed property is noaction, then specifying this property returns an error and the add + Task request fails with an invalid property value error; if you are calling the REST API + directly, the HTTP status code is 400 (Bad Request). Known values are: "none", "disable", and + "terminate". + :vartype job_action: str or ~azure.batch.models.BatchJobAction + :ivar dependency_action: An action that the Batch service performs on Tasks that depend on this + Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' + (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. + Known values are: "satisfy" and "block". + :vartype dependency_action: str or ~azure.batch.models.DependencyAction + """ + + job_action: Optional[Union[str, "_models.BatchJobAction"]] = rest_field(name="jobAction") + """An action to take on the Job containing the Task, if the Task completes with the given exit + condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is + none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed + property is noaction, then specifying this property returns an error and the add Task request + fails with an invalid property value error; if you are calling the REST API directly, the HTTP + status code is 400 (Bad Request). Known values are: \"none\", \"disable\", and \"terminate\".""" + dependency_action: Optional[Union[str, "_models.DependencyAction"]] = rest_field(name="dependencyAction") + """An action that the Batch service performs on Tasks that depend on this Task. Possible values + are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to + wait). Batch does not yet support cancellation of dependent tasks. Known values are: + \"satisfy\" and \"block\".""" + + @overload + def __init__( + self, + *, + job_action: Optional[Union[str, "_models.BatchJobAction"]] = None, + dependency_action: Optional[Union[str, "_models.DependencyAction"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class FileProperties(_model_base.Model): + """The properties of a file on a Compute Node. - For regional placement, nodes in the pool will be allocated in the same - region. For zonal placement, nodes in the pool will be spread across - different zones with best effort balancing. - :param policy: Node placement Policy type on Batch Pools. Allocation - policy used by Batch Service to provision the nodes. If not specified, - Batch will use the regional policy. Possible values include: 'regional', - 'zonal' - :type policy: str or ~azure.batch.models.NodePlacementPolicyType - """ + :ivar creation_time: The file creation time. The creation time is not returned for files on + Linux Compute Nodes. + :vartype creation_time: ~datetime.datetime + :ivar last_modified: The time at which the file was last modified. Required. + :vartype last_modified: ~datetime.datetime + :ivar content_length: The length of the file. Required. + :vartype content_length: str + :ivar content_type: The content type of the file. + :vartype content_type: str + :ivar file_mode: The file mode attribute in octal format. The file mode is returned only for + files on Linux Compute Nodes. + :vartype file_mode: str + """ + + creation_time: Optional[datetime.datetime] = rest_field(name="creationTime", format="rfc3339") + """The file creation time. The creation time is not returned for files on Linux Compute Nodes.""" + last_modified: datetime.datetime = rest_field(name="lastModified", format="rfc3339") + """The time at which the file was last modified. Required.""" + content_length: str = rest_field(name="contentLength") + """The length of the file. Required.""" + content_type: Optional[str] = rest_field(name="contentType") + """The content type of the file.""" + file_mode: Optional[str] = rest_field(name="fileMode") + """The file mode attribute in octal format. The file mode is returned only for files on Linux + Compute Nodes.""" + + @overload + def __init__( + self, + *, + last_modified: datetime.datetime, + content_length: str, + creation_time: Optional[datetime.datetime] = None, + content_type: Optional[str] = None, + file_mode: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class HttpHeader(_model_base.Model): + """An HTTP header name-value pair. - _attribute_map = { - 'policy': {'key': 'policy', 'type': 'NodePlacementPolicyType'}, - } - def __init__(self, **kwargs): - super(NodePlacementConfiguration, self).__init__(**kwargs) - self.policy = kwargs.get('policy', None) + :ivar name: The case-insensitive name of the header to be used while uploading output files. + Required. + :vartype name: str + :ivar value: The value of the header to be used while uploading output files. + :vartype value: str + """ + + name: str = rest_field() + """The case-insensitive name of the header to be used while uploading output files. Required.""" + value: Optional[str] = rest_field() + """The value of the header to be used while uploading output files.""" + + @overload + def __init__( + self, + *, + name: str, + value: Optional[str] = None, + ): ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ImageReference(_model_base.Model): + """A reference to an Azure Virtual Machines Marketplace Image or a Azure Compute Gallery Image. + To get the list of all Azure Marketplace Image references verified by Azure Batch, see the + ' List Supported Images ' operation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar publisher: The publisher of the Azure Virtual Machines Marketplace Image. For example, + Canonical or MicrosoftWindowsServer. + :vartype publisher: str + :ivar offer: The offer type of the Azure Virtual Machines Marketplace Image. For example, + UbuntuServer or WindowsServer. + :vartype offer: str + :ivar sku: The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or + 2019-Datacenter. + :vartype sku: str + :ivar version: The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' + can be specified to select the latest version of an Image. If omitted, the default is 'latest'. + :vartype version: str + :ivar virtual_machine_image_id: The ARM resource identifier of the Azure Compute Gallery Image. + Compute Nodes in the Pool will be created using this Image Id. This is of the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} # pylint: disable=line-too-long + or + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} # pylint: disable=line-too-long + for always defaulting to the latest image version. This property is mutually exclusive with + other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same + region and must be in the same subscription as the Azure Batch account. If the image version is + not specified in the imageId, the latest version will be used. For information about the + firewall settings for the Batch Compute Node agent to communicate with the Batch service see + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + :vartype virtual_machine_image_id: str + :ivar exact_version: The specific version of the platform image or marketplace image used to + create the node. This read-only field differs from 'version' only if the value specified for + 'version' when the pool was created was 'latest'. + :vartype exact_version: str + """ + publisher: Optional[str] = rest_field() + """The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or + MicrosoftWindowsServer.""" + offer: Optional[str] = rest_field() + """The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or + WindowsServer.""" + sku: Optional[str] = rest_field() + """The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or + 2019-Datacenter.""" + version: Optional[str] = rest_field() + """The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be + specified to select the latest version of an Image. 
If omitted, the default is 'latest'.""" + virtual_machine_image_id: Optional[str] = rest_field(name="virtualMachineImageId") + """The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will + be created using this Image Id. This is of the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} # pylint: disable=line-too-long + or + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} # pylint: disable=line-too-long + for always defaulting to the latest image version. This property is mutually exclusive with + other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same + region and must be in the same subscription as the Azure Batch account. If the image version is + not specified in the imageId, the latest version will be used. For information about the + firewall settings for the Batch Compute Node agent to communicate with the Batch service see + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.""" + exact_version: Optional[str] = rest_field(name="exactVersion", visibility=["read"]) + """The specific version of the platform image or marketplace image used to create the node. This + read-only field differs from 'version' only if the value specified for 'version' when the pool + was created was 'latest'.""" + + @overload + def __init__( + self, + *, + publisher: Optional[str] = None, + offer: Optional[str] = None, + sku: Optional[str] = None, + version: Optional[str] = None, + virtual_machine_image_id: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class InboundEndpoint(_model_base.Model): + """An inbound endpoint on a Compute Node. -class NodeRebootParameter(Model): - """Options for rebooting a Compute Node. - :param node_reboot_option: The default value is requeue. Possible values - include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' - :type node_reboot_option: str or - ~azure.batch.models.ComputeNodeRebootOption - """ + :ivar name: The name of the endpoint. Required. + :vartype name: str + :ivar protocol: The protocol of the endpoint. Required. Known values are: "tcp" and "udp". + :vartype protocol: str or ~azure.batch.models.InboundEndpointProtocol + :ivar public_ip_address: The public IP address of the Compute Node. Required. + :vartype public_ip_address: str + :ivar public_f_q_d_n: The public fully qualified domain name for the Compute Node. Required. + :vartype public_f_q_d_n: str + :ivar frontend_port: The public port number of the endpoint. Required. + :vartype frontend_port: int + :ivar backend_port: The backend port number of the endpoint. Required. + :vartype backend_port: int + """ + + name: str = rest_field() + """The name of the endpoint. Required.""" + protocol: Union[str, "_models.InboundEndpointProtocol"] = rest_field() + """The protocol of the endpoint. Required. Known values are: \"tcp\" and \"udp\".""" + public_ip_address: str = rest_field(name="publicIPAddress") + """The public IP address of the Compute Node. 
Required.""" + public_f_q_d_n: str = rest_field(name="publicFQDN") + """The public fully qualified domain name for the Compute Node. Required.""" + frontend_port: int = rest_field(name="frontendPort") + """The public port number of the endpoint. Required.""" + backend_port: int = rest_field(name="backendPort") + """The backend port number of the endpoint. Required.""" + + @overload + def __init__( + self, + *, + name: str, + protocol: Union[str, "_models.InboundEndpointProtocol"], + public_ip_address: str, + public_f_q_d_n: str, + frontend_port: int, + backend_port: int, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class InboundNatPool(_model_base.Model): + """A inbound NAT Pool that can be used to address specific ports on Compute Nodes + in a Batch Pool externally. + + + :ivar name: The name of the endpoint. The name must be unique within a Batch Pool, can contain + letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, + must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid + values are provided the request fails with HTTP status code 400. Required. + :vartype name: str + :ivar protocol: The protocol of the endpoint. Required. Known values are: "tcp" and "udp". + :vartype protocol: str or ~azure.batch.models.InboundEndpointProtocol + :ivar backend_port: The port number on the Compute Node. This must be unique within a Batch + Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these + are reserved. If any reserved values are provided the request fails with HTTP status code 400. + Required. + :vartype backend_port: int + :ivar frontend_port_range_start: The first port number in the range of external ports that will + be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable + values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All + ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 + ports. If any reserved or overlapping values are provided the request fails with HTTP status + code 400. Required. + :vartype frontend_port_range_start: int + :ivar frontend_port_range_end: The last port number in the range of external ports that will be + used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable + values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the + Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must + contain at least 40 ports. If any reserved or overlapping values are provided the request fails + with HTTP status code 400. Required. + :vartype frontend_port_range_end: int + :ivar network_security_group_rules: A list of network security group rules that will be applied + to the endpoint. The maximum number of rules that can be specified across all the endpoints on + a Batch Pool is 25. If no network security group rules are specified, a default rule will be + created to allow inbound access to the specified backendPort. If the maximum number of network + security group rules is exceeded the request fails with HTTP status code 400. 
+ :vartype network_security_group_rules: list[~azure.batch.models.NetworkSecurityGroupRule] + """ + + name: str = rest_field() + """The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, + numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end + with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values + are provided the request fails with HTTP status code 400. Required.""" + protocol: Union[str, "_models.InboundEndpointProtocol"] = rest_field() + """The protocol of the endpoint. Required. Known values are: \"tcp\" and \"udp\".""" + backend_port: int = rest_field(name="backendPort") + """The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values + are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any + reserved values are provided the request fails with HTTP status code 400. Required.""" + frontend_port_range_start: int = rest_field(name="frontendPortRangeStart") + """The first port number in the range of external ports that will be used to provide inbound + access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and + 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be + distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or + overlapping values are provided the request fails with HTTP status code 400. Required.""" + frontend_port_range_end: int = rest_field(name="frontendPortRangeEnd") + """The last port number in the range of external ports that will be used to provide inbound access + to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 + except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a + Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any + reserved or overlapping values are provided the request fails with HTTP status code 400. + Required.""" + network_security_group_rules: Optional[List["_models.NetworkSecurityGroupRule"]] = rest_field( + name="networkSecurityGroupRules" + ) + """A list of network security group rules that will be applied to the endpoint. The maximum number + of rules that can be specified across all the endpoints on a Batch Pool is 25. If no network + security group rules are specified, a default rule will be created to allow inbound access to + the specified backendPort. If the maximum number of network security group rules is exceeded + the request fails with HTTP status code 400.""" + + @overload + def __init__( + self, + *, + name: str, + protocol: Union[str, "_models.InboundEndpointProtocol"], + backend_port: int, + frontend_port_range_start: int, + frontend_port_range_end: int, + network_security_group_rules: Optional[List["_models.NetworkSecurityGroupRule"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class InstanceViewStatus(_model_base.Model): + """The instance view status. - _attribute_map = { - 'node_reboot_option': {'key': 'nodeRebootOption', 'type': 'ComputeNodeRebootOption'}, - } + :ivar code: The status code. 
+ :vartype code: str + :ivar display_status: The localized label for the status. + :vartype display_status: str + :ivar level: Level code. Known values are: "Error", "Info", and "Warning". + :vartype level: str or ~azure.batch.models.StatusLevelTypes + :ivar message: The detailed status message. + :vartype message: str + :ivar time: The time of the status. + :vartype time: ~datetime.datetime + """ + + code: Optional[str] = rest_field() + """The status code.""" + display_status: Optional[str] = rest_field(name="displayStatus") + """The localized label for the status.""" + level: Optional[Union[str, "_models.StatusLevelTypes"]] = rest_field() + """Level code. Known values are: \"Error\", \"Info\", and \"Warning\".""" + message: Optional[str] = rest_field() + """The detailed status message.""" + time: Optional[datetime.datetime] = rest_field(format="rfc3339") + """The time of the status.""" + + @overload + def __init__( + self, + *, + code: Optional[str] = None, + display_status: Optional[str] = None, + level: Optional[Union[str, "_models.StatusLevelTypes"]] = None, + message: Optional[str] = None, + time: Optional[datetime.datetime] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LinuxUserConfiguration(_model_base.Model): + """Properties used to create a user Account on a Linux Compute Node. - def __init__(self, **kwargs): - super(NodeRebootParameter, self).__init__(**kwargs) - self.node_reboot_option = kwargs.get('node_reboot_option', None) + :ivar uid: The user ID of the user Account. The uid and gid properties must be specified + together or not at all. If not specified the underlying operating system picks the uid. + :vartype uid: int + :ivar gid: The group ID for the user Account. The uid and gid properties must be specified + together or not at all. If not specified the underlying operating system picks the gid. + :vartype gid: int + :ivar ssh_private_key: The SSH private key for the user Account. The private key must not be + password protected. The private key is used to automatically configure asymmetric-key based + authentication for SSH between Compute Nodes in a Linux Pool when the Pool's + enableInterNodeCommunication property is true (it is ignored if enableInterNodeCommunication is + false). It does this by placing the key pair into the user's .ssh directory. If not specified, + password-less SSH is not configured between Compute Nodes (no modification of the user's .ssh + directory is done). + :vartype ssh_private_key: str + """ + + uid: Optional[int] = rest_field() + """The user ID of the user Account. The uid and gid properties must be specified together or not + at all. If not specified the underlying operating system picks the uid.""" + gid: Optional[int] = rest_field() + """The group ID for the user Account. The uid and gid properties must be specified together or not + at all. If not specified the underlying operating system picks the gid.""" + ssh_private_key: Optional[str] = rest_field(name="sshPrivateKey") + """The SSH private key for the user Account. The private key must not be password protected. 
The + private key is used to automatically configure asymmetric-key based authentication for SSH + between Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication property is + true (it is ignored if enableInterNodeCommunication is false). It does this by placing the key + pair into the user's .ssh directory. If not specified, password-less SSH is not configured + between Compute Nodes (no modification of the user's .ssh directory is done).""" + + @overload + def __init__( + self, + *, + uid: Optional[int] = None, + gid: Optional[int] = None, + ssh_private_key: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ManagedDisk(_model_base.Model): + """The managed disk parameters. + + + :ivar storage_account_type: The storage account type for managed disk. Required. Known values + are: "standard_lrs", "premium_lrs", and "standardssd_lrs". + :vartype storage_account_type: str or ~azure.batch.models.StorageAccountType + """ + + storage_account_type: Union[str, "_models.StorageAccountType"] = rest_field(name="storageAccountType") + """The storage account type for managed disk. Required. Known values are: \"standard_lrs\", + \"premium_lrs\", and \"standardssd_lrs\".""" + + @overload + def __init__( + self, + *, + storage_account_type: Union[str, "_models.StorageAccountType"], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class MetadataItem(_model_base.Model): + """The Batch service does not assign any meaning to this metadata; it is solely + for the use of user code. + + + :ivar name: The name of the metadata item. Required. + :vartype name: str + :ivar value: The value of the metadata item. Required. + :vartype value: str + """ + + name: str = rest_field() + """The name of the metadata item. Required.""" + value: str = rest_field() + """The value of the metadata item. Required.""" + + @overload + def __init__( + self, + *, + name: str, + value: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) -class NodeReimageParameter(Model): - """Options for reimaging a Compute Node. +class MountConfiguration(_model_base.Model): + """The file system to mount on each node. + + :ivar azure_blob_file_system_configuration: The Azure Storage Container to mount using blob + FUSE on each node. This property is mutually exclusive with all other properties. + :vartype azure_blob_file_system_configuration: + ~azure.batch.models.AzureBlobFileSystemConfiguration + :ivar nfs_mount_configuration: The NFS file system to mount on each node. This property is + mutually exclusive with all other properties. + :vartype nfs_mount_configuration: ~azure.batch.models.NfsMountConfiguration + :ivar cifs_mount_configuration: The CIFS/SMB file system to mount on each node. 
This property + is mutually exclusive with all other properties. + :vartype cifs_mount_configuration: ~azure.batch.models.CifsMountConfiguration + :ivar azure_file_share_configuration: The Azure File Share to mount on each node. This property + is mutually exclusive with all other properties. + :vartype azure_file_share_configuration: ~azure.batch.models.AzureFileShareConfiguration + """ + + azure_blob_file_system_configuration: Optional["_models.AzureBlobFileSystemConfiguration"] = rest_field( + name="azureBlobFileSystemConfiguration" + ) + """The Azure Storage Container to mount using blob FUSE on each node. This property is mutually + exclusive with all other properties.""" + nfs_mount_configuration: Optional["_models.NfsMountConfiguration"] = rest_field(name="nfsMountConfiguration") + """The NFS file system to mount on each node. This property is mutually exclusive with all other + properties.""" + cifs_mount_configuration: Optional["_models.CifsMountConfiguration"] = rest_field(name="cifsMountConfiguration") + """The CIFS/SMB file system to mount on each node. This property is mutually exclusive with all + other properties.""" + azure_file_share_configuration: Optional["_models.AzureFileShareConfiguration"] = rest_field( + name="azureFileShareConfiguration" + ) + """The Azure File Share to mount on each node. This property is mutually exclusive with all other + properties.""" + + @overload + def __init__( + self, + *, + azure_blob_file_system_configuration: Optional["_models.AzureBlobFileSystemConfiguration"] = None, + nfs_mount_configuration: Optional["_models.NfsMountConfiguration"] = None, + cifs_mount_configuration: Optional["_models.CifsMountConfiguration"] = None, + azure_file_share_configuration: Optional["_models.AzureFileShareConfiguration"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class MultiInstanceSettings(_model_base.Model): + """Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, + if any of the subtasks fail (for example due to exiting with a non-zero exit + code) the entire multi-instance Task fails. The multi-instance Task is then + terminated and retried, up to its retry limit. + + + :ivar number_of_instances: The number of Compute Nodes required by the Task. If omitted, the + default is 1. + :vartype number_of_instances: int + :ivar coordination_command_line: The command line to run on all the Compute Nodes to enable + them to coordinate when the primary runs the main Task command. A typical coordination command + line launches a background service and verifies that the service is ready to process inter-node + messages. Required. + :vartype coordination_command_line: str + :ivar common_resource_files: A list of files that the Batch service will download before + running the coordination command line. The difference between common resource files and Task + resource files is that common resource files are downloaded for all subtasks including the + primary, whereas Task resource files are downloaded only for the primary. Also note that these + resource files are not downloaded to the Task working directory, but instead are downloaded to + the Task root directory (one directory above the working directory). There is a maximum size + for the list of resource files. 
When the max size is exceeded, the request will fail and the + response error code will be RequestEntityTooLarge. If this occurs, the collection of + ResourceFiles must be reduced in size. This can be achieved using .zip files, Application + Packages, or Docker Containers. + :vartype common_resource_files: list[~azure.batch.models.ResourceFile] + """ + + number_of_instances: Optional[int] = rest_field(name="numberOfInstances") + """The number of Compute Nodes required by the Task. If omitted, the default is 1.""" + coordination_command_line: str = rest_field(name="coordinationCommandLine") + """The command line to run on all the Compute Nodes to enable them to coordinate when the primary + runs the main Task command. A typical coordination command line launches a background service + and verifies that the service is ready to process inter-node messages. Required.""" + common_resource_files: Optional[List["_models.ResourceFile"]] = rest_field(name="commonResourceFiles") + """A list of files that the Batch service will download before running the coordination command + line. The difference between common resource files and Task resource files is that common + resource files are downloaded for all subtasks including the primary, whereas Task resource + files are downloaded only for the primary. Also note that these resource files are not + downloaded to the Task working directory, but instead are downloaded to the Task root directory + (one directory above the working directory). There is a maximum size for the list of resource + files. When the max size is exceeded, the request will fail and the response error code will + be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in + size. This can be achieved using .zip files, Application Packages, or Docker Containers.""" + + @overload + def __init__( + self, + *, + coordination_command_line: str, + number_of_instances: Optional[int] = None, + common_resource_files: Optional[List["_models.ResourceFile"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class NameValuePair(_model_base.Model): + """Represents a name-value pair. - :param node_reimage_option: The default value is requeue. Possible values - include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' - :type node_reimage_option: str or - ~azure.batch.models.ComputeNodeReimageOption + :ivar name: The name in the name-value pair. + :vartype name: str + :ivar value: The value in the name-value pair. + :vartype value: str """ - _attribute_map = { - 'node_reimage_option': {'key': 'nodeReimageOption', 'type': 'ComputeNodeReimageOption'}, - } + name: Optional[str] = rest_field() + """The name in the name-value pair.""" + value: Optional[str] = rest_field() + """The value in the name-value pair.""" - def __init__(self, **kwargs): - super(NodeReimageParameter, self).__init__(**kwargs) - self.node_reimage_option = kwargs.get('node_reimage_option', None) + @overload + def __init__( + self, + *, + name: Optional[str] = None, + value: Optional[str] = None, + ): ... + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
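To show how the required/optional split in `MultiInstanceSettings` plays out in practice, a hedged sketch of an MPI-style task configuration; the coordination command line is only an illustrative MS-MPI example, not part of this change:

```python
from azure.batch.models import MultiInstanceSettings

# coordination_command_line is required; number_of_instances defaults to 1 when omitted.
mpi_settings = MultiInstanceSettings(
    coordination_command_line="cmd /c start cmd /c smpd.exe -d",  # illustrative only
    number_of_instances=3,
)
```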
+ :type mapping: Mapping[str, Any] + """ -class NodeRemoveParameter(Model): - """Options for removing Compute Nodes from a Pool. + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) - All required parameters must be populated in order to send to Azure. - :param node_list: Required. A maximum of 100 nodes may be removed per - request. - :type node_list: list[str] - :param resize_timeout: The default value is 15 minutes. The minimum value - is 5 minutes. If you specify a value less than 5 minutes, the Batch - service returns an error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - :type resize_timeout: timedelta - :param node_deallocation_option: Determines what to do with a Compute Node - and its running task(s) after it has been selected for deallocation. The - default value is requeue. Possible values include: 'requeue', 'terminate', - 'taskCompletion', 'retainedData' - :type node_deallocation_option: str or - ~azure.batch.models.ComputeNodeDeallocationOption - """ - - _validation = { - 'node_list': {'required': True}, - } - - _attribute_map = { - 'node_list': {'key': 'nodeList', 'type': '[str]'}, - 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, - 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, - } - - def __init__(self, **kwargs): - super(NodeRemoveParameter, self).__init__(**kwargs) - self.node_list = kwargs.get('node_list', None) - self.resize_timeout = kwargs.get('resize_timeout', None) - self.node_deallocation_option = kwargs.get('node_deallocation_option', None) - - -class NodeUpdateUserParameter(Model): - """The set of changes to be made to a user Account on a Compute Node. - - :param password: The password is required for Windows Compute Nodes (those - created with 'cloudServiceConfiguration', or created with - 'virtualMachineConfiguration' using a Windows Image reference). For Linux - Compute Nodes, the password can optionally be specified along with the - sshPublicKey property. If omitted, any existing password is removed. - :type password: str - :param expiry_time: If omitted, the default is 1 day from the current - time. For Linux Compute Nodes, the expiryTime has a precision up to a day. - :type expiry_time: datetime - :param ssh_public_key: The public key should be compatible with OpenSSH - encoding and should be base 64 encoded. This property can be specified - only for Linux Compute Nodes. If this is specified for a Windows Compute - Node, then the Batch service rejects the request; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). If omitted, - any existing SSH public key is removed. - :type ssh_public_key: str - """ - - _attribute_map = { - 'password': {'key': 'password', 'type': 'str'}, - 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, - 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(NodeUpdateUserParameter, self).__init__(**kwargs) - self.password = kwargs.get('password', None) - self.expiry_time = kwargs.get('expiry_time', None) - self.ssh_public_key = kwargs.get('ssh_public_key', None) - - -class NodeVMExtension(Model): - """The configuration for virtual machine extension instance view. +class NetworkConfiguration(_model_base.Model): + """The network configuration for a Pool. 
- :param provisioning_state: - :type provisioning_state: str - :param vm_extension: The virtual machine extension. - :type vm_extension: ~azure.batch.models.VMExtension - :param instance_view: The vm extension instance view. - :type instance_view: ~azure.batch.models.VMExtensionInstanceView - """ + :ivar subnet_id: The ARM resource identifier of the virtual network subnet which the Compute + Nodes of the Pool will join. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. # pylint: disable=line-too-long + The virtual network must be in the same region and subscription as the Azure Batch Account. The + specified subnet should have enough free IP addresses to accommodate the number of Compute + Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially + allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must + have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the + specified VNet. The specified subnet must allow communication from the Azure Batch service to + be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet + has any associated Network Security Groups (NSG). If communication to the Nodes in the + specified subnet is denied by an NSG, then the Batch service will set the state of the Compute + Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks + ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated + Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound + communication. For Pools created with a virtual machine configuration, enable ports 29876 and + 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections + to Azure Storage on port 443. For more details see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + :vartype subnet_id: str + :ivar dynamic_v_net_assignment_scope: The scope of dynamic vnet assignment. Known values are: + "none" and "job". + :vartype dynamic_v_net_assignment_scope: str or ~azure.batch.models.DynamicVNetAssignmentScope + :ivar endpoint_configuration: The configuration for endpoints on Compute Nodes in the Batch + Pool. Pool endpoint configuration is only supported on Pools with the + virtualMachineConfiguration property. + :vartype endpoint_configuration: ~azure.batch.models.BatchPoolEndpointConfiguration + :ivar public_ip_address_configuration: The Public IPAddress configuration for Compute Nodes in + the Batch Pool. Public IP configuration property is only supported on Pools with the + virtualMachineConfiguration property. + :vartype public_ip_address_configuration: ~azure.batch.models.PublicIpAddressConfiguration + :ivar enable_accelerated_networking: Whether this pool should enable accelerated networking. + Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead + to improved networking performance. For more details, see: + https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. + :vartype enable_accelerated_networking: bool + """ + + subnet_id: Optional[str] = rest_field(name="subnetId") + """The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool + will join. 
This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. # pylint: disable=line-too-long + The virtual network must be in the same region and subscription as the Azure Batch Account. The + specified subnet should have enough free IP addresses to accommodate the number of Compute + Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially + allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must + have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the + specified VNet. The specified subnet must allow communication from the Azure Batch service to + be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet + has any associated Network Security Groups (NSG). If communication to the Nodes in the + specified subnet is denied by an NSG, then the Batch service will set the state of the Compute + Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks + ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated + Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound + communication. For Pools created with a virtual machine configuration, enable ports 29876 and + 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections + to Azure Storage on port 443. For more details see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.""" + dynamic_v_net_assignment_scope: Optional[Union[str, "_models.DynamicVNetAssignmentScope"]] = rest_field( + name="dynamicVNetAssignmentScope" + ) + """The scope of dynamic vnet assignment. Known values are: \"none\" and \"job\".""" + endpoint_configuration: Optional["_models.BatchPoolEndpointConfiguration"] = rest_field( + name="endpointConfiguration" + ) + """The configuration for endpoints on Compute Nodes in the Batch Pool. Pool endpoint configuration + is only supported on Pools with the virtualMachineConfiguration property.""" + public_ip_address_configuration: Optional["_models.PublicIpAddressConfiguration"] = rest_field( + name="publicIPAddressConfiguration" + ) + """The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration + property is only supported on Pools with the virtualMachineConfiguration property.""" + enable_accelerated_networking: Optional[bool] = rest_field(name="enableAcceleratedNetworking") + """Whether this pool should enable accelerated networking. Accelerated networking enables single + root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. + For more details, see: + https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview.""" + + @overload + def __init__( + self, + *, + subnet_id: Optional[str] = None, + dynamic_v_net_assignment_scope: Optional[Union[str, "_models.DynamicVNetAssignmentScope"]] = None, + endpoint_configuration: Optional["_models.BatchPoolEndpointConfiguration"] = None, + public_ip_address_configuration: Optional["_models.PublicIpAddressConfiguration"] = None, + enable_accelerated_networking: Optional[bool] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class NetworkSecurityGroupRule(_model_base.Model): + """A network security group rule to apply to an inbound endpoint. - _attribute_map = { - 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, - 'vm_extension': {'key': 'vmExtension', 'type': 'VMExtension'}, - 'instance_view': {'key': 'instanceView', 'type': 'VMExtensionInstanceView'}, - } - def __init__(self, **kwargs): - super(NodeVMExtension, self).__init__(**kwargs) - self.provisioning_state = kwargs.get('provisioning_state', None) - self.vm_extension = kwargs.get('vm_extension', None) - self.instance_view = kwargs.get('instance_view', None) + :ivar priority: The priority for this rule. Priorities within a Pool must be unique and are + evaluated in order of priority. The lower the number the higher the priority. For example, + rules could be specified with order numbers of 150, 250, and 350. The rule with the order + number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are + 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP + status code 400. Required. + :vartype priority: int + :ivar access: The action that should be taken for a specified IP address, subnet range or tag. + Required. Known values are: "allow" and "deny". + :vartype access: str or ~azure.batch.models.NetworkSecurityGroupRuleAccess + :ivar source_address_prefix: The source address prefix or tag to match for the rule. Valid + values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default + tag, or * (for all addresses). If any other values are provided the request fails with HTTP + status code 400. Required. + :vartype source_address_prefix: str + :ivar source_port_ranges: The source port ranges to match for the rule. Valid values are '\\ *' + (for all ports 0 - 65535), a specific port (i.e. 22), or a port range (i.e. 100-200). The ports + must be in the range of 0 to 65535. Each entry in this collection must not overlap any other + entry (either a range or an individual port). If any other values are provided the request + fails with HTTP status code 400. The default value is '*\\ '. + :vartype source_port_ranges: list[str] + """ + + priority: int = rest_field() + """The priority for this rule. Priorities within a Pool must be unique and are evaluated in order + of priority. The lower the number the higher the priority. For example, rules could be + specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes + precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any + reserved or duplicate values are provided the request fails with HTTP status code 400. + Required.""" + access: Union[str, "_models.NetworkSecurityGroupRuleAccess"] = rest_field() + """The action that should be taken for a specified IP address, subnet range or tag. Required. + Known values are: \"allow\" and \"deny\".""" + source_address_prefix: str = rest_field(name="sourceAddressPrefix") + """The source address prefix or tag to match for the rule. Valid values are a single IP address + (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If + any other values are provided the request fails with HTTP status code 400. 
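A minimal sketch of a `NetworkConfiguration`, assuming the subnet already satisfies the port and RBAC requirements described above; the subscription, resource group, VNet, and subnet names are placeholders:

```python
from azure.batch.models import NetworkConfiguration

# The subnet must be in the same region and subscription as the Batch account
# and must allow the reserved inbound ports listed in the docstring above.
network_config = NetworkConfiguration(
    subnet_id=(
        "/subscriptions/<subscription-id>/resourceGroups/<resource-group>"
        "/providers/Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>"
    ),
    dynamic_v_net_assignment_scope="none",
    enable_accelerated_networking=True,
)
```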
Required.""" + source_port_ranges: Optional[List[str]] = rest_field(name="sourcePortRanges") + """The source port ranges to match for the rule. Valid values are '\ *' (for all ports 0 - 65535), + a specific port (i.e. 22), or a port range (i.e. 100-200). The ports must be in the range of 0 + to 65535. Each entry in this collection must not overlap any other entry (either a range or an + individual port). If any other values are provided the request fails with HTTP status code 400. + The default value is '*\ '.""" + + @overload + def __init__( + self, + *, + priority: int, + access: Union[str, "_models.NetworkSecurityGroupRuleAccess"], + source_address_prefix: str, + source_port_ranges: Optional[List[str]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class NfsMountConfiguration(_model_base.Model): + """Information used to connect to an NFS file system. -class OSDisk(Model): + :ivar source: The URI of the file system to mount. Required. + :vartype source: str + :ivar relative_mount_path: The relative path on the compute node where the file system will be + mounted. All file systems are mounted relative to the Batch mounts directory, accessible via + the AZ_BATCH_NODE_MOUNTS_DIR environment variable. Required. + :vartype relative_mount_path: str + :ivar mount_options: Additional command line options to pass to the mount command. These are + 'net use' options in Windows and 'mount' options in Linux. + :vartype mount_options: str + """ + + source: str = rest_field() + """The URI of the file system to mount. Required.""" + relative_mount_path: str = rest_field(name="relativeMountPath") + """The relative path on the compute node where the file system will be mounted. All file systems + are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR + environment variable. Required.""" + mount_options: Optional[str] = rest_field(name="mountOptions") + """Additional command line options to pass to the mount command. These are 'net use' options in + Windows and 'mount' options in Linux.""" + + @overload + def __init__( + self, + *, + source: str, + relative_mount_path: str, + mount_options: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class OSDisk(_model_base.Model): """Settings for the operating system disk of the compute node (VM). - :param ephemeral_os_disk_settings: Specifies the ephemeral Disk Settings - for the operating system disk used by the compute node (VM). - :type ephemeral_os_disk_settings: ~azure.batch.models.DiffDiskSettings - :param caching: Specifies the caching requirements. Possible values are: - None, ReadOnly, ReadWrite. The default values are: None for Standard - storage. ReadOnly for Premium storage. Possible values include: 'none', - 'readOnly', 'readWrite' - :type caching: str or ~azure.batch.models.CachingType - :param managed_disk: The managed disk parameters. 
- :type managed_disk: ~azure.batch.models.ManagedDisk - :param disk_size_gb: The initial disk size in GB when creating new OS - disk. - :type disk_size_gb: int - :param write_accelerator_enabled: Specifies whether writeAccelerator - should be enabled or disabled on the disk. - :type write_accelerator_enabled: bool - """ - - _attribute_map = { - 'ephemeral_os_disk_settings': {'key': 'ephemeralOSDiskSettings', 'type': 'DiffDiskSettings'}, - 'caching': {'key': 'caching', 'type': 'CachingType'}, - 'managed_disk': {'key': 'managedDisk', 'type': 'ManagedDisk'}, - 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, - 'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'}, - } - - def __init__(self, **kwargs): - super(OSDisk, self).__init__(**kwargs) - self.ephemeral_os_disk_settings = kwargs.get('ephemeral_os_disk_settings', None) - self.caching = kwargs.get('caching', None) - self.managed_disk = kwargs.get('managed_disk', None) - self.disk_size_gb = kwargs.get('disk_size_gb', None) - self.write_accelerator_enabled = kwargs.get('write_accelerator_enabled', None) - - -class OutputFile(Model): - """A specification for uploading files from an Azure Batch Compute Node to - another location after the Batch service has finished executing the Task - process. - - On every file uploads, Batch service writes two log files to the compute - node, 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used - to learn more about a specific failure. - - All required parameters must be populated in order to send to Azure. - - :param file_pattern: Required. Both relative and absolute paths are - supported. Relative paths are relative to the Task working directory. The - following wildcards are supported: * matches 0 or more characters (for - example pattern abc* would match abc or abcdef), ** matches any directory, - ? matches any single character, [abc] matches one character in the - brackets, and [a-c] matches one character in the range. Brackets can - include a negation to match any character not specified (for example - [!abc] matches any character but a, b, or c). If a file name starts with - "." it is ignored by default but may be matched by specifying it - explicitly (for example *.gif will not match .a.gif, but .*.gif will). A - simple example: **\\*.txt matches any file that does not start in '.' and - ends with .txt in the Task working directory or any subdirectory. If the - filename contains a wildcard character it can be escaped using brackets - (for example abc[*] would match a file named abc*). Note that both \\ and - / are treated as directory separators on Windows, but only / is on Linux. - Environment variables (%var% on Windows or $var on Linux) are expanded - prior to the pattern being applied. - :type file_pattern: str - :param destination: Required. The destination for the output file(s). - :type destination: ~azure.batch.models.OutputFileDestination - :param upload_options: Required. Additional options for the upload - operation, including under what conditions to perform the upload. 
- :type upload_options: ~azure.batch.models.OutputFileUploadOptions - """ - - _validation = { - 'file_pattern': {'required': True}, - 'destination': {'required': True}, - 'upload_options': {'required': True}, - } - - _attribute_map = { - 'file_pattern': {'key': 'filePattern', 'type': 'str'}, - 'destination': {'key': 'destination', 'type': 'OutputFileDestination'}, - 'upload_options': {'key': 'uploadOptions', 'type': 'OutputFileUploadOptions'}, - } - - def __init__(self, **kwargs): - super(OutputFile, self).__init__(**kwargs) - self.file_pattern = kwargs.get('file_pattern', None) - self.destination = kwargs.get('destination', None) - self.upload_options = kwargs.get('upload_options', None) - - -class OutputFileBlobContainerDestination(Model): + :ivar ephemeral_o_s_disk_settings: Specifies the ephemeral Disk Settings for the operating + system disk used by the compute node (VM). + :vartype ephemeral_o_s_disk_settings: ~azure.batch.models.DiffDiskSettings + :ivar caching: Specifies the caching requirements. Possible values are: None, ReadOnly, + ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. + Known values are: "none", "readonly", and "readwrite". + :vartype caching: str or ~azure.batch.models.CachingType + :ivar disk_size_g_b: The initial disk size in GB when creating new OS disk. + :vartype disk_size_g_b: int + :ivar managed_disk: The managed disk parameters. + :vartype managed_disk: ~azure.batch.models.ManagedDisk + :ivar write_accelerator_enabled: Specifies whether writeAccelerator should be enabled or + disabled on the disk. + :vartype write_accelerator_enabled: bool + """ + + ephemeral_o_s_disk_settings: Optional["_models.DiffDiskSettings"] = rest_field(name="ephemeralOSDiskSettings") + """Specifies the ephemeral Disk Settings for the operating system disk used by the compute node + (VM).""" + caching: Optional[Union[str, "_models.CachingType"]] = rest_field() + """Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default + values are: None for Standard storage. ReadOnly for Premium storage. Known values are: + \"none\", \"readonly\", and \"readwrite\".""" + disk_size_g_b: Optional[int] = rest_field(name="diskSizeGB") + """The initial disk size in GB when creating new OS disk.""" + managed_disk: Optional["_models.ManagedDisk"] = rest_field(name="managedDisk") + """The managed disk parameters.""" + write_accelerator_enabled: Optional[bool] = rest_field(name="writeAcceleratorEnabled") + """Specifies whether writeAccelerator should be enabled or disabled on the disk.""" + + @overload + def __init__( + self, + *, + ephemeral_o_s_disk_settings: Optional["_models.DiffDiskSettings"] = None, + caching: Optional[Union[str, "_models.CachingType"]] = None, + disk_size_g_b: Optional[int] = None, + managed_disk: Optional["_models.ManagedDisk"] = None, + write_accelerator_enabled: Optional[bool] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class OutputFile(_model_base.Model): + """On every file uploads, Batch service writes two log files to the compute node, + 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn more about a + specific failure. + + + :ivar file_pattern: A pattern indicating which file(s) to upload. 
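A hedged sketch of the reworked `OSDisk` model above together with the required `ManagedDisk`; the disk size and storage tier are illustrative choices, not defaults:

```python
from azure.batch.models import ManagedDisk, OSDisk

# caching accepts "none", "readonly", or "readwrite"; storage_account_type is
# required on ManagedDisk (the values below are illustrative, not defaults).
os_disk = OSDisk(
    caching="readwrite",
    disk_size_g_b=128,
    managed_disk=ManagedDisk(storage_account_type="premium_lrs"),
    write_accelerator_enabled=False,
)
```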
Both relative and absolute + paths are supported. Relative paths are relative to the Task working directory. The following + wildcards are supported: * matches 0 or more characters (for example pattern abc* would match + abc or abcdef), ** matches any directory, ? matches any single character, [abc] matches one + character in the brackets, and [a-c] matches one character in the range. Brackets can include a + negation to match any character not specified (for example [!abc] matches any character but a, + b, or c). If a file name starts with "." it is ignored by default but may be matched by + specifying it explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple + example: **\\ *.txt matches any file that does not start in '.' and ends with .txt in the Task + working directory or any subdirectory. If the filename contains a wildcard character it can be + escaped using brackets (for example abc[\\ *] would match a file named abc*\\ ). Note that both + and / are treated as directory separators on Windows, but only / is on Linux. Environment + variables (%var% on Windows or $var on Linux) are expanded prior to the pattern being applied. + Required. + :vartype file_pattern: str + :ivar destination: The destination for the output file(s). Required. + :vartype destination: ~azure.batch.models.OutputFileDestination + :ivar upload_options: Additional options for the upload operation, including under what + conditions to perform the upload. Required. + :vartype upload_options: ~azure.batch.models.OutputFileUploadConfig + """ + + file_pattern: str = rest_field(name="filePattern") + """A pattern indicating which file(s) to upload. Both relative and absolute paths are supported. + Relative paths are relative to the Task working directory. The following wildcards are + supported: * matches 0 or more characters (for example pattern abc* would match abc or abcdef), + ** matches any directory, ? matches any single character, [abc] matches one character in the + brackets, and [a-c] matches one character in the range. Brackets can include a negation to + match any character not specified (for example [!abc] matches any character but a, b, or c). If + a file name starts with \".\" it is ignored by default but may be matched by specifying it + explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple example: **\ + *.txt matches any file that does not start in '.' and ends with .txt in the Task working + directory or any subdirectory. If the filename contains a wildcard character it can be escaped + using brackets (for example abc[\ *] would match a file named abc*\ ). Note that both and / are + treated as directory separators on Windows, but only / is on Linux. Environment variables + (%var% on Windows or $var on Linux) are expanded prior to the pattern being applied. Required.""" + destination: "_models.OutputFileDestination" = rest_field() + """The destination for the output file(s). Required.""" + upload_options: "_models.OutputFileUploadConfig" = rest_field(name="uploadOptions") + """Additional options for the upload operation, including under what conditions to perform the + upload. Required.""" + + @overload + def __init__( + self, + *, + file_pattern: str, + destination: "_models.OutputFileDestination", + upload_options: "_models.OutputFileUploadConfig", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class OutputFileBlobContainerDestination(_model_base.Model): """Specifies a file upload destination within an Azure blob storage container. - All required parameters must be populated in order to send to Azure. - - :param path: If filePattern refers to a specific file (i.e. contains no - wildcards), then path is the name of the blob to which to upload that - file. If filePattern contains one or more wildcards (and therefore may - match multiple files), then path is the name of the blob virtual directory - (which is prepended to each blob name) to which to upload the file(s). If - omitted, file(s) are uploaded to the root of the container with a blob - name matching their file name. - :type path: str - :param container_url: Required. If not using a managed identity, the URL - must include a Shared Access Signature (SAS) granting write permissions to - the container. - :type container_url: str - :param identity_reference: The reference to the user assigned identity to - use to access Azure Blob Storage specified by containerUrl. The identity - must have write access to the Azure Blob Storage container - :type identity_reference: ~azure.batch.models.ComputeNodeIdentityReference - :param upload_headers: These headers will be specified when uploading - files to Azure Storage. Official document on allowed headers when + + :ivar path: The destination blob or virtual directory within the Azure Storage container. If + filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of + the blob to which to upload that file. If filePattern contains one or more wildcards (and + therefore may match multiple files), then path is the name of the blob virtual directory (which + is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are + uploaded to the root of the container with a blob name matching their file name. + :vartype path: str + :ivar container_url: The URL of the container within Azure Blob Storage to which to upload the + file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) + granting write permissions to the container. Required. + :vartype container_url: str + :ivar identity_reference: The reference to the user assigned identity to use to access Azure + Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob + Storage container. + :vartype identity_reference: ~azure.batch.models.BatchNodeIdentityReference + :ivar upload_headers: A list of name-value pairs for headers to be used in uploading output + files. These headers will be specified when uploading files to Azure Storage. Official document + on allowed headers when uploading blobs: + https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. + :vartype upload_headers: list[~azure.batch.models.HttpHeader] + """ + + path: Optional[str] = rest_field() + """The destination blob or virtual directory within the Azure Storage container. If filePattern + refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to + which to upload that file. If filePattern contains one or more wildcards (and therefore may + match multiple files), then path is the name of the blob virtual directory (which is prepended + to each blob name) to which to upload the file(s). 
If omitted, file(s) are uploaded to the root + of the container with a blob name matching their file name.""" + container_url: str = rest_field(name="containerUrl") + """The URL of the container within Azure Blob Storage to which to upload the file(s). If not using + a managed identity, the URL must include a Shared Access Signature (SAS) granting write + permissions to the container. Required.""" + identity_reference: Optional["_models.BatchNodeIdentityReference"] = rest_field(name="identityReference") + """The reference to the user assigned identity to use to access Azure Blob Storage specified by + containerUrl. The identity must have write access to the Azure Blob Storage container.""" + upload_headers: Optional[List["_models.HttpHeader"]] = rest_field(name="uploadHeaders") + """A list of name-value pairs for headers to be used in uploading output files. These headers will + be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: - https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types - :type upload_headers: list[~azure.batch.models.HttpHeader] + https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types.""" + + @overload + def __init__( + self, + *, + container_url: str, + path: Optional[str] = None, + identity_reference: Optional["_models.BatchNodeIdentityReference"] = None, + upload_headers: Optional[List["_models.HttpHeader"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class OutputFileDestination(_model_base.Model): + """The destination to which a file should be uploaded. + + :ivar container: A location in Azure blob storage to which files are uploaded. + :vartype container: ~azure.batch.models.OutputFileBlobContainerDestination """ - _validation = { - 'container_url': {'required': True}, - } + container: Optional["_models.OutputFileBlobContainerDestination"] = rest_field() + """A location in Azure blob storage to which files are uploaded.""" - _attribute_map = { - 'path': {'key': 'path', 'type': 'str'}, - 'container_url': {'key': 'containerUrl', 'type': 'str'}, - 'identity_reference': {'key': 'identityReference', 'type': 'ComputeNodeIdentityReference'}, - 'upload_headers': {'key': 'uploadHeaders', 'type': '[HttpHeader]'}, - } + @overload + def __init__( + self, + *, + container: Optional["_models.OutputFileBlobContainerDestination"] = None, + ): ... - def __init__(self, **kwargs): - super(OutputFileBlobContainerDestination, self).__init__(**kwargs) - self.path = kwargs.get('path', None) - self.container_url = kwargs.get('container_url', None) - self.identity_reference = kwargs.get('identity_reference', None) - self.upload_headers = kwargs.get('upload_headers', None) + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) -class OutputFileDestination(Model): - """The destination to which a file should be uploaded. - :param container: A location in Azure blob storage to which files are - uploaded. 
- :type container: ~azure.batch.models.OutputFileBlobContainerDestination - """ - - _attribute_map = { - 'container': {'key': 'container', 'type': 'OutputFileBlobContainerDestination'}, - } - - def __init__(self, **kwargs): - super(OutputFileDestination, self).__init__(**kwargs) - self.container = kwargs.get('container', None) - - -class OutputFileUploadOptions(Model): - """Details about an output file upload operation, including under what - conditions to perform the upload. - - All required parameters must be populated in order to send to Azure. - - :param upload_condition: Required. The conditions under which the Task - output file or set of files should be uploaded. The default is - taskcompletion. Possible values include: 'taskSuccess', 'taskFailure', - 'taskCompletion' - :type upload_condition: str or - ~azure.batch.models.OutputFileUploadCondition - """ - - _validation = { - 'upload_condition': {'required': True}, - } - - _attribute_map = { - 'upload_condition': {'key': 'uploadCondition', 'type': 'OutputFileUploadCondition'}, - } - - def __init__(self, **kwargs): - super(OutputFileUploadOptions, self).__init__(**kwargs) - self.upload_condition = kwargs.get('upload_condition', None) - - -class PoolAddOptions(Model): - """Additional parameters for add operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolAddOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class PoolAddParameter(Model): - """A Pool in the Azure Batch service to add. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. The ID can contain any combination of alphanumeric - characters including hyphens and underscores, and cannot contain more than - 64 characters. The ID is case-preserving and case-insensitive (that is, - you may not have two Pool IDs within an Account that differ only by case). - :type id: str - :param display_name: The display name need not be unique and can contain - any Unicode characters up to a maximum length of 1024. - :type display_name: str - :param vm_size: Required. 
For information about available sizes of virtual - machines for Cloud Services Pools (pools created with - cloudServiceConfiguration), see Sizes for Cloud Services - (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). - Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and - A2V2. For information about available VM sizes for Pools using Images from - the Virtual Machines Marketplace (pools created with - virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) - (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) - or Sizes for Virtual Machines (Windows) - (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). - Batch supports all Azure VM sizes except STANDARD_A0 and those with - premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). - :type vm_size: str - :param cloud_service_configuration: The cloud service configuration for - the Pool. This property and virtualMachineConfiguration are mutually - exclusive and one of the properties must be specified. This property - cannot be specified if the Batch Account was created with its - poolAllocationMode property set to 'UserSubscription'. - :type cloud_service_configuration: - ~azure.batch.models.CloudServiceConfiguration - :param virtual_machine_configuration: The virtual machine configuration - for the Pool. This property and cloudServiceConfiguration are mutually - exclusive and one of the properties must be specified. - :type virtual_machine_configuration: - ~azure.batch.models.VirtualMachineConfiguration - :param resize_timeout: This timeout applies only to manual scaling; it has - no effect when enableAutoScale is set to true. The default value is 15 - minutes. The minimum value is 5 minutes. If you specify a value less than - 5 minutes, the Batch service returns an error; if you are calling the REST - API directly, the HTTP status code is 400 (Bad Request). - :type resize_timeout: timedelta - :param target_dedicated_nodes: The desired number of dedicated Compute - Nodes in the Pool. This property must not be specified if enableAutoScale - is set to true. If enableAutoScale is set to false, then you must set - either targetDedicatedNodes, targetLowPriorityNodes, or both. - :type target_dedicated_nodes: int - :param target_low_priority_nodes: The desired number of Spot/Low-priority - Compute Nodes in the Pool. This property must not be specified if - enableAutoScale is set to true. If enableAutoScale is set to false, then - you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. - :type target_low_priority_nodes: int - :param enable_auto_scale: Whether the Pool size should automatically - adjust over time. If false, at least one of targetDedicatedNodes and - targetLowPriorityNodes must be specified. If true, the autoScaleFormula - property is required and the Pool automatically resizes according to the - formula. The default value is false. - :type enable_auto_scale: bool - :param auto_scale_formula: This property must not be specified if - enableAutoScale is set to false. It is required if enableAutoScale is set - to true. The formula is checked for validity before the Pool is created. - If the formula is not valid, the Batch service rejects the request with - detailed error information. For more information about specifying this - formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' - (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). 
- :type auto_scale_formula: str - :param auto_scale_evaluation_interval: The default value is 15 minutes. - The minimum and maximum value are 5 minutes and 168 hours respectively. If - you specify a value less than 5 minutes or greater than 168 hours, the - Batch service returns an error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). - :type auto_scale_evaluation_interval: timedelta - :param enable_inter_node_communication: Whether the Pool permits direct - communication between Compute Nodes. Enabling inter-node communication - limits the maximum size of the Pool due to deployment restrictions on the - Compute Nodes of the Pool. This may result in the Pool not reaching its - desired size. The default value is false. - :type enable_inter_node_communication: bool - :param network_configuration: The network configuration for the Pool. - :type network_configuration: ~azure.batch.models.NetworkConfiguration - :param start_task: A Task specified to run on each Compute Node as it - joins the Pool. The Task runs when the Compute Node is added to the Pool - or when the Compute Node is restarted. - :type start_task: ~azure.batch.models.StartTask - :param certificate_references: For Windows Nodes, the Batch service - installs the Certificates to the specified Certificate store and location. - For Linux Compute Nodes, the Certificates are stored in a directory inside - the Task working directory and an environment variable - AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - location. For Certificates with visibility of 'remoteUser', a 'certs' - directory is created in the user's home directory (e.g., - /home/{user-name}/certs) and Certificates are placed in that directory. - Warning: This property is deprecated and will be removed after February, - 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - :type certificate_references: - list[~azure.batch.models.CertificateReference] - :param application_package_references: When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Changes to Package references affect all new Nodes joining the Pool, but - do not affect Compute Nodes that are already in the Pool until they are - rebooted or reimaged. There is a maximum of 10 Package references on any - given Pool. - :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param application_licenses: The list of application licenses must be a - subset of available Batch service application licenses. If a license is - requested which is not supported, Pool creation will fail. - :type application_licenses: list[str] - :param task_slots_per_node: The number of task slots that can be used to - run concurrent tasks on a single compute node in the pool. The default - value is 1. The maximum value is the smaller of 4 times the number of - cores of the vmSize of the pool or 256. - :type task_slots_per_node: int - :param task_scheduling_policy: How Tasks are distributed across Compute - Nodes in a Pool. If not specified, the default is spread. 
- :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy - :param user_accounts: - :type user_accounts: list[~azure.batch.models.UserAccount] - :param metadata: The Batch service does not assign any meaning to - metadata; it is solely for the use of user code. - :type metadata: list[~azure.batch.models.MetadataItem] - :param mount_configuration: Mount the storage using Azure fileshare, NFS, - CIFS or Blobfuse based file system. - :type mount_configuration: list[~azure.batch.models.MountConfiguration] - :param target_node_communication_mode: The desired node communication mode - for the pool. If omitted, the default value is Default. Possible values - include: 'default', 'classic', 'simplified' - :type target_node_communication_mode: str or - ~azure.batch.models.NodeCommunicationMode - :param upgrade_policy: The upgrade policy for the Pool. Describes an - upgrade policy - automatic, manual, or rolling. - :type upgrade_policy: ~azure.batch.models.UpgradePolicy - :param resource_tags: The user-defined tags to be associated with the - Azure Batch Pool. When specified, these tags are propagated to the backing - Azure resources associated with the pool. This property can only be - specified when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. - :type resource_tags: dict[str, str] - """ - - _validation = { - 'id': {'required': True}, - 'vm_size': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'vm_size': {'key': 'vmSize', 'type': 'str'}, - 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, - 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, - 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, - 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, - 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, - 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, - 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, - 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, - 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, - 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, - 'start_task': {'key': 'startTask', 'type': 'StartTask'}, - 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, - 'task_slots_per_node': {'key': 'taskSlotsPerNode', 'type': 'int'}, - 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, - 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'}, - 'target_node_communication_mode': {'key': 'targetNodeCommunicationMode', 'type': 'NodeCommunicationMode'}, - 'upgrade_policy': {'key': 'upgradePolicy', 'type': 'UpgradePolicy'}, - 'resource_tags': {'key': 'resourceTags', 'type': '{str}'}, - } - - def __init__(self, **kwargs): - super(PoolAddParameter, self).__init__(**kwargs) - self.id = 
kwargs.get('id', None) - self.display_name = kwargs.get('display_name', None) - self.vm_size = kwargs.get('vm_size', None) - self.cloud_service_configuration = kwargs.get('cloud_service_configuration', None) - self.virtual_machine_configuration = kwargs.get('virtual_machine_configuration', None) - self.resize_timeout = kwargs.get('resize_timeout', None) - self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) - self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) - self.enable_auto_scale = kwargs.get('enable_auto_scale', None) - self.auto_scale_formula = kwargs.get('auto_scale_formula', None) - self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) - self.enable_inter_node_communication = kwargs.get('enable_inter_node_communication', None) - self.network_configuration = kwargs.get('network_configuration', None) - self.start_task = kwargs.get('start_task', None) - self.certificate_references = kwargs.get('certificate_references', None) - self.application_package_references = kwargs.get('application_package_references', None) - self.application_licenses = kwargs.get('application_licenses', None) - self.task_slots_per_node = kwargs.get('task_slots_per_node', None) - self.task_scheduling_policy = kwargs.get('task_scheduling_policy', None) - self.user_accounts = kwargs.get('user_accounts', None) - self.metadata = kwargs.get('metadata', None) - self.mount_configuration = kwargs.get('mount_configuration', None) - self.target_node_communication_mode = kwargs.get('target_node_communication_mode', None) - self.upgrade_policy = kwargs.get('upgrade_policy', None) - self.resource_tags = kwargs.get('resource_tags', None) - - -class PoolDeleteOptions(Model): - """Additional parameters for delete operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. 
The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolDeleteOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class PoolDisableAutoScaleOptions(Model): - """Additional parameters for disable_auto_scale operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolDisableAutoScaleOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class PoolEnableAutoScaleOptions(Model): - """Additional parameters for enable_auto_scale operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. 
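# Illustrative sketch (not generated code): conditional-delete options built from the
# msrest-based models above. `client` is assumed to be an already configured
# BatchServiceClient, "mypool" a placeholder Pool id, and the ETag a placeholder value.
import datetime

from azure.batch import models as batch_models

delete_options = batch_models.PoolDeleteOptions(
    timeout=30,
    ocp_date=datetime.datetime.utcnow(),
    if_match='"0x8D1234ABCD"',  # delete only if the Pool's current ETag still matches
)
# The options object is then passed to the pool delete operation, e.g.
# client.pool.delete("mypool", pool_delete_options=delete_options)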
Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolEnableAutoScaleOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class PoolEnableAutoScaleParameter(Model): - """Options for enabling automatic scaling on a Pool. - - :param auto_scale_formula: The formula is checked for validity before it - is applied to the Pool. If the formula is not valid, the Batch service - rejects the request with detailed error information. For more information - about specifying this formula, see Automatically scale Compute Nodes in an - Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - :type auto_scale_formula: str - :param auto_scale_evaluation_interval: The default value is 15 minutes. - The minimum and maximum value are 5 minutes and 168 hours respectively. If - you specify a value less than 5 minutes or greater than 168 hours, the - Batch service rejects the request with an invalid property value error; if - you are calling the REST API directly, the HTTP status code is 400 (Bad - Request). If you specify a new interval, then the existing autoscale - evaluation schedule will be stopped and a new autoscale evaluation - schedule will be started, with its starting time being the time when this - request was issued. 
- :type auto_scale_evaluation_interval: timedelta - """ - - _attribute_map = { - 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, - 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, - } - - def __init__(self, **kwargs): - super(PoolEnableAutoScaleParameter, self).__init__(**kwargs) - self.auto_scale_formula = kwargs.get('auto_scale_formula', None) - self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) - - -class PoolEndpointConfiguration(Model): - """The endpoint configuration for a Pool. +class OutputFileUploadConfig(_model_base.Model): + """Options for an output file upload operation, including under what conditions + to perform the upload. - All required parameters must be populated in order to send to Azure. - - :param inbound_nat_pools: Required. The maximum number of inbound NAT - Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is - exceeded the request fails with HTTP status code 400. This cannot be - specified if the IPAddressProvisioningType is NoPublicIPAddresses. - :type inbound_nat_pools: list[~azure.batch.models.InboundNATPool] - """ - - _validation = { - 'inbound_nat_pools': {'required': True}, - } - - _attribute_map = { - 'inbound_nat_pools': {'key': 'inboundNATPools', 'type': '[InboundNATPool]'}, - } - - def __init__(self, **kwargs): - super(PoolEndpointConfiguration, self).__init__(**kwargs) - self.inbound_nat_pools = kwargs.get('inbound_nat_pools', None) - - -class PoolEvaluateAutoScaleOptions(Model): - """Additional parameters for evaluate_auto_scale operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolEvaluateAutoScaleOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class PoolEvaluateAutoScaleParameter(Model): - """Options for evaluating an automatic scaling formula on a Pool. - - All required parameters must be populated in order to send to Azure. - - :param auto_scale_formula: Required. The formula is validated and its - results calculated, but it is not applied to the Pool. To apply the - formula to the Pool, 'Enable automatic scaling on a Pool'. 
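# Illustrative sketch: an enable-autoscale request body built from the models above.
# The formula text is only an example; any valid autoscale formula can be supplied.
import datetime

from azure.batch import models as batch_models

enable_autoscale = batch_models.PoolEnableAutoScaleParameter(
    auto_scale_formula="$TargetDedicatedNodes = 2;",
    # Must be between 5 minutes and 168 hours; values outside that range are rejected.
    auto_scale_evaluation_interval=datetime.timedelta(minutes=15),
)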
For more - information about specifying this formula, see Automatically scale Compute - Nodes in an Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - :type auto_scale_formula: str - """ - - _validation = { - 'auto_scale_formula': {'required': True}, - } - - _attribute_map = { - 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(PoolEvaluateAutoScaleParameter, self).__init__(**kwargs) - self.auto_scale_formula = kwargs.get('auto_scale_formula', None) - - -class PoolExistsOptions(Model): - """Additional parameters for exists operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
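# Illustrative sketch: the evaluate variant only validates the formula and computes
# its results; it does not change the Pool. The formula string is an example.
from azure.batch import models as batch_models

evaluate_autoscale = batch_models.PoolEvaluateAutoScaleParameter(
    auto_scale_formula="$TargetLowPriorityNodes = max(0, $TargetLowPriorityNodes - 1);"
)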
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolExistsOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class PoolGetOptions(Model): - """Additional parameters for get operation. - - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolGetOptions, self).__init__(**kwargs) - self.select = kwargs.get('select', None) - self.expand = kwargs.get('expand', None) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class PoolInformation(Model): - """Specifies how a Job should be assigned to a Pool. - :param pool_id: You must ensure that the Pool referenced by this property - exists. If the Pool does not exist at the time the Batch service tries to - schedule a Job, no Tasks for the Job will run until you create a Pool with - that id. Note that the Batch service will not reject the Job request; it - will simply not run Tasks until the Pool exists. You must specify either - the Pool ID or the auto Pool specification, but not both. - :type pool_id: str - :param auto_pool_specification: Characteristics for a temporary 'auto - pool'. The Batch service will create this auto Pool when the Job is - submitted. If auto Pool creation fails, the Batch service moves the Job to - a completed state, and the Pool creation error is set in the Job's - scheduling error property. The Batch service manages the lifetime (both - creation and, unless keepAlive is specified, deletion) of the auto Pool. - Any user actions that affect the lifetime of the auto Pool while the Job - is active will result in unexpected behavior. You must specify either the - Pool ID or the auto Pool specification, but not both. - :type auto_pool_specification: ~azure.batch.models.AutoPoolSpecification - """ - - _attribute_map = { - 'pool_id': {'key': 'poolId', 'type': 'str'}, - 'auto_pool_specification': {'key': 'autoPoolSpecification', 'type': 'AutoPoolSpecification'}, - } - - def __init__(self, **kwargs): - super(PoolInformation, self).__init__(**kwargs) - self.pool_id = kwargs.get('pool_id', None) - self.auto_pool_specification = kwargs.get('auto_pool_specification', None) - - -class PoolListOptions(Model): - """Additional parameters for list operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Pools can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. 
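# Illustrative sketch: a Job is bound either to an existing Pool by id or to an
# auto Pool specification, never both on the same PoolInformation instance.
from azure.batch import models as batch_models

pool_info = batch_models.PoolInformation(pool_id="mypool")  # "mypool" is a placeholder id
# Alternatively: PoolInformation(auto_pool_specification=some_auto_pool_specification)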
The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolListOptions, self).__init__(**kwargs) - self.filter = kwargs.get('filter', None) - self.select = kwargs.get('select', None) - self.expand = kwargs.get('expand', None) - self.max_results = kwargs.get('max_results', 1000) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class PoolListUsageMetricsOptions(Model): - """Additional parameters for list_usage_metrics operation. - - :param start_time: The earliest time from which to include metrics. This - must be at least two and a half hours before the current time. If not - specified this defaults to the start time of the last aggregation interval - currently available. - :type start_time: datetime - :param end_time: The latest time from which to include metrics. This must - be at least two hours before the current time. If not specified this - defaults to the end time of the last aggregation interval currently - available. - :type end_time: datetime - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. - :type filter: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 results will be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
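# Illustrative sketch: list options with an OData filter and projection. The filter
# and select strings are examples; max_results caps each response page at 100 items.
from azure.batch import models as batch_models

list_options = batch_models.PoolListOptions(
    filter="state eq 'active'",
    select="id,state,vmSize",
    max_results=100,
)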
- :type ocp_date: datetime - """ - - _attribute_map = { - 'start_time': {'key': '', 'type': 'iso-8601'}, - 'end_time': {'key': '', 'type': 'iso-8601'}, - 'filter': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolListUsageMetricsOptions, self).__init__(**kwargs) - self.start_time = kwargs.get('start_time', None) - self.end_time = kwargs.get('end_time', None) - self.filter = kwargs.get('filter', None) - self.max_results = kwargs.get('max_results', 1000) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class PoolNodeCounts(Model): - """The number of Compute Nodes in each state for a Pool. + :ivar upload_condition: The conditions under which the Task output file or set of files should + be uploaded. The default is taskcompletion. Required. Known values are: "tasksuccess", + "taskfailure", and "taskcompletion". + :vartype upload_condition: str or ~azure.batch.models.OutputFileUploadCondition + """ - All required parameters must be populated in order to send to Azure. - - :param pool_id: Required. - :type pool_id: str - :param dedicated: The number of dedicated Compute Nodes in each state. - :type dedicated: ~azure.batch.models.NodeCounts - :param low_priority: The number of Spot/Low-priority Compute Nodes in each - state. - :type low_priority: ~azure.batch.models.NodeCounts - """ - - _validation = { - 'pool_id': {'required': True}, - } - - _attribute_map = { - 'pool_id': {'key': 'poolId', 'type': 'str'}, - 'dedicated': {'key': 'dedicated', 'type': 'NodeCounts'}, - 'low_priority': {'key': 'lowPriority', 'type': 'NodeCounts'}, - } - - def __init__(self, **kwargs): - super(PoolNodeCounts, self).__init__(**kwargs) - self.pool_id = kwargs.get('pool_id', None) - self.dedicated = kwargs.get('dedicated', None) - self.low_priority = kwargs.get('low_priority', None) - - -class PoolPatchOptions(Model): - """Additional parameters for patch operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. 
The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolPatchOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class PoolPatchParameter(Model): - """The set of changes to be made to a Pool. - - :param start_task: A Task to run on each Compute Node as it joins the - Pool. The Task runs when the Compute Node is added to the Pool or when the - Compute Node is restarted. If this element is present, it overwrites any - existing StartTask. If omitted, any existing StartTask is left unchanged. - :type start_task: ~azure.batch.models.StartTask - :param certificate_references: If this element is present, it replaces any - existing Certificate references configured on the Pool. If omitted, any - existing Certificate references are left unchanged. For Windows Nodes, the - Batch service installs the Certificates to the specified Certificate store - and location. For Linux Compute Nodes, the Certificates are stored in a - directory inside the Task working directory and an environment variable - AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - location. For Certificates with visibility of 'remoteUser', a 'certs' - directory is created in the user's home directory (e.g., - /home/{user-name}/certs) and Certificates are placed in that directory. - Warning: This property is deprecated and will be removed after February, - 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - :type certificate_references: - list[~azure.batch.models.CertificateReference] - :param application_package_references: Changes to Package references - affect all new Nodes joining the Pool, but do not affect Compute Nodes - that are already in the Pool until they are rebooted or reimaged. If this - element is present, it replaces any existing Package references. If you - specify an empty collection, then all Package references are removed from - the Pool. 
If omitted, any existing Package references are left unchanged. - :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param metadata: If this element is present, it replaces any existing - metadata configured on the Pool. If you specify an empty collection, any - metadata is removed from the Pool. If omitted, any existing metadata is - left unchanged. - :type metadata: list[~azure.batch.models.MetadataItem] - :param target_node_communication_mode: The desired node communication mode - for the pool. If this element is present, it replaces the existing - targetNodeCommunicationMode configured on the Pool. If omitted, any - existing metadata is left unchanged. Possible values include: 'default', - 'classic', 'simplified' - :type target_node_communication_mode: str or - ~azure.batch.models.NodeCommunicationMode - """ - - _attribute_map = { - 'start_task': {'key': 'startTask', 'type': 'StartTask'}, - 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'target_node_communication_mode': {'key': 'targetNodeCommunicationMode', 'type': 'NodeCommunicationMode'}, - } - - def __init__(self, **kwargs): - super(PoolPatchParameter, self).__init__(**kwargs) - self.start_task = kwargs.get('start_task', None) - self.certificate_references = kwargs.get('certificate_references', None) - self.application_package_references = kwargs.get('application_package_references', None) - self.metadata = kwargs.get('metadata', None) - self.target_node_communication_mode = kwargs.get('target_node_communication_mode', None) - - -class PoolRemoveNodesOptions(Model): - """Additional parameters for remove_nodes operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. 
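# Illustrative sketch: a patch carries only the elements to replace; anything omitted
# (for example startTask) is left unchanged on the Pool.
from azure.batch import models as batch_models

patch_pool = batch_models.PoolPatchParameter(
    metadata=[batch_models.MetadataItem(name="department", value="finance")],
    target_node_communication_mode="simplified",
)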
- :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolRemoveNodesOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class PoolResizeOptions(Model): - """Additional parameters for resize operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolResizeOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class PoolResizeParameter(Model): - """Options for changing the size of a Pool. - - :param target_dedicated_nodes: The desired number of dedicated Compute - Nodes in the Pool. - :type target_dedicated_nodes: int - :param target_low_priority_nodes: The desired number of Spot/Low-priority - Compute Nodes in the Pool. - :type target_low_priority_nodes: int - :param resize_timeout: The default value is 15 minutes. The minimum value - is 5 minutes. If you specify a value less than 5 minutes, the Batch - service returns an error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - :type resize_timeout: timedelta - :param node_deallocation_option: Determines what to do with a Compute Node - and its running task(s) if the Pool size is decreasing. The default value - is requeue. Possible values include: 'requeue', 'terminate', - 'taskCompletion', 'retainedData' - :type node_deallocation_option: str or - ~azure.batch.models.ComputeNodeDeallocationOption - """ - - _attribute_map = { - 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, - 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, - 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, - 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, - } - - def __init__(self, **kwargs): - super(PoolResizeParameter, self).__init__(**kwargs) - self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) - self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) - self.resize_timeout = kwargs.get('resize_timeout', None) - self.node_deallocation_option = kwargs.get('node_deallocation_option', None) - - -class PoolSpecification(Model): - """Specification for creating a new Pool. + upload_condition: Union[str, "_models.OutputFileUploadCondition"] = rest_field(name="uploadCondition") + """The conditions under which the Task output file or set of files should be uploaded. The default + is taskcompletion. Required. Known values are: \"tasksuccess\", \"taskfailure\", and + \"taskcompletion\".""" - All required parameters must be populated in order to send to Azure. - - :param display_name: The display name need not be unique and can contain - any Unicode characters up to a maximum length of 1024. - :type display_name: str - :param vm_size: Required. 
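# Illustrative sketch: shrink a Pool to two dedicated nodes and requeue any running
# Tasks; the resize timeout must be at least 5 minutes.
import datetime

from azure.batch import models as batch_models

resize_pool = batch_models.PoolResizeParameter(
    target_dedicated_nodes=2,
    target_low_priority_nodes=0,
    resize_timeout=datetime.timedelta(minutes=15),
    node_deallocation_option="requeue",
)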
For information about available sizes of virtual - machines in Pools, see Choose a VM size for Compute Nodes in an Azure - Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - :type vm_size: str - :param cloud_service_configuration: The cloud service configuration for - the Pool. This property must be specified if the Pool needs to be created - with Azure PaaS VMs. This property and virtualMachineConfiguration are - mutually exclusive and one of the properties must be specified. If neither - is specified then the Batch service returns an error; if you are calling - the REST API directly, the HTTP status code is 400 (Bad Request). This - property cannot be specified if the Batch Account was created with its - poolAllocationMode property set to 'UserSubscription'. - :type cloud_service_configuration: - ~azure.batch.models.CloudServiceConfiguration - :param virtual_machine_configuration: The virtual machine configuration - for the Pool. This property must be specified if the Pool needs to be - created with Azure IaaS VMs. This property and cloudServiceConfiguration - are mutually exclusive and one of the properties must be specified. If - neither is specified then the Batch service returns an error; if you are - calling the REST API directly, the HTTP status code is 400 (Bad Request). - :type virtual_machine_configuration: - ~azure.batch.models.VirtualMachineConfiguration - :param task_slots_per_node: The number of task slots that can be used to - run concurrent tasks on a single compute node in the pool. The default - value is 1. The maximum value is the smaller of 4 times the number of - cores of the vmSize of the pool or 256. - :type task_slots_per_node: int - :param task_scheduling_policy: How Tasks are distributed across Compute - Nodes in a Pool. If not specified, the default is spread. - :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy - :param resize_timeout: This timeout applies only to manual scaling; it has - no effect when enableAutoScale is set to true. The default value is 15 - minutes. The minimum value is 5 minutes. If you specify a value less than - 5 minutes, the Batch service rejects the request with an error; if you are - calling the REST API directly, the HTTP status code is 400 (Bad Request). - :type resize_timeout: timedelta - :param target_dedicated_nodes: The desired number of dedicated Compute - Nodes in the Pool. This property must not be specified if enableAutoScale - is set to true. If enableAutoScale is set to false, then you must set - either targetDedicatedNodes, targetLowPriorityNodes, or both. - :type target_dedicated_nodes: int - :param target_low_priority_nodes: The desired number of Spot/Low-priority - Compute Nodes in the Pool. This property must not be specified if - enableAutoScale is set to true. If enableAutoScale is set to false, then - you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. - :type target_low_priority_nodes: int - :param enable_auto_scale: Whether the Pool size should automatically - adjust over time. If false, at least one of targetDedicatedNodes and - targetLowPriorityNodes must be specified. If true, the autoScaleFormula - element is required. The Pool automatically resizes according to the - formula. The default value is false. - :type enable_auto_scale: bool - :param auto_scale_formula: This property must not be specified if - enableAutoScale is set to false. It is required if enableAutoScale is set - to true. 
The formula is checked for validity before the Pool is created. - If the formula is not valid, the Batch service rejects the request with - detailed error information. - :type auto_scale_formula: str - :param auto_scale_evaluation_interval: The default value is 15 minutes. - The minimum and maximum value are 5 minutes and 168 hours respectively. If - you specify a value less than 5 minutes or greater than 168 hours, the - Batch service rejects the request with an invalid property value error; if - you are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - :type auto_scale_evaluation_interval: timedelta - :param enable_inter_node_communication: Whether the Pool permits direct - communication between Compute Nodes. Enabling inter-node communication - limits the maximum size of the Pool due to deployment restrictions on the - Compute Nodes of the Pool. This may result in the Pool not reaching its - desired size. The default value is false. - :type enable_inter_node_communication: bool - :param network_configuration: The network configuration for the Pool. - :type network_configuration: ~azure.batch.models.NetworkConfiguration - :param start_task: A Task to run on each Compute Node as it joins the - Pool. The Task runs when the Compute Node is added to the Pool or when the - Compute Node is restarted. - :type start_task: ~azure.batch.models.StartTask - :param certificate_references: For Windows Nodes, the Batch service - installs the Certificates to the specified Certificate store and location. - For Linux Compute Nodes, the Certificates are stored in a directory inside - the Task working directory and an environment variable - AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - location. For Certificates with visibility of 'remoteUser', a 'certs' - directory is created in the user's home directory (e.g., - /home/{user-name}/certs) and Certificates are placed in that directory. - Warning: This property is deprecated and will be removed after February, - 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - :type certificate_references: - list[~azure.batch.models.CertificateReference] - :param application_package_references: When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Changes to Package references affect all new Nodes joining the Pool, but - do not affect Compute Nodes that are already in the Pool until they are - rebooted or reimaged. There is a maximum of 10 Package references on any - given Pool. - :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param application_licenses: The list of application licenses must be a - subset of available Batch service application licenses. If a license is - requested which is not supported, Pool creation will fail. The permitted - licenses available on the Pool are 'maya', 'vray', '3dsmax', 'arnold'. An - additional charge applies for each application license added to the Pool. - :type application_licenses: list[str] - :param user_accounts: - :type user_accounts: list[~azure.batch.models.UserAccount] - :param metadata: The Batch service does not assign any meaning to - metadata; it is solely for the use of user code. 
- :type metadata: list[~azure.batch.models.MetadataItem] - :param mount_configuration: This supports Azure Files, NFS, CIFS/SMB, and - Blobfuse. - :type mount_configuration: list[~azure.batch.models.MountConfiguration] - :param target_node_communication_mode: The desired node communication mode - for the pool. If omitted, the default value is Default. Possible values - include: 'default', 'classic', 'simplified' - :type target_node_communication_mode: str or - ~azure.batch.models.NodeCommunicationMode - :param upgrade_policy: The upgrade policy for the pool. - :type upgrade_policy: ~azure.batch.models.UpgradePolicy - :param resource_tags: The user-defined tags to be associated with the - Azure Batch Pool. When specified, these tags are propagated to the backing - Azure resources associated with the pool. This property can only be - specified when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. - :type resource_tags: dict[str, str] - """ - - _validation = { - 'vm_size': {'required': True}, - } - - _attribute_map = { - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'vm_size': {'key': 'vmSize', 'type': 'str'}, - 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, - 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, - 'task_slots_per_node': {'key': 'taskSlotsPerNode', 'type': 'int'}, - 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, - 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, - 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, - 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, - 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, - 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, - 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, - 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, - 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, - 'start_task': {'key': 'startTask', 'type': 'StartTask'}, - 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, - 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'}, - 'target_node_communication_mode': {'key': 'targetNodeCommunicationMode', 'type': 'NodeCommunicationMode'}, - 'upgrade_policy': {'key': 'upgradePolicy', 'type': 'UpgradePolicy'}, - 'resource_tags': {'key': 'resourceTags', 'type': '{str}'}, - } - - def __init__(self, **kwargs): - super(PoolSpecification, self).__init__(**kwargs) - self.display_name = kwargs.get('display_name', None) - self.vm_size = kwargs.get('vm_size', None) - self.cloud_service_configuration = kwargs.get('cloud_service_configuration', None) - self.virtual_machine_configuration = kwargs.get('virtual_machine_configuration', None) - self.task_slots_per_node = kwargs.get('task_slots_per_node', None) - self.task_scheduling_policy = kwargs.get('task_scheduling_policy', None) - self.resize_timeout = 
kwargs.get('resize_timeout', None) - self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) - self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) - self.enable_auto_scale = kwargs.get('enable_auto_scale', None) - self.auto_scale_formula = kwargs.get('auto_scale_formula', None) - self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) - self.enable_inter_node_communication = kwargs.get('enable_inter_node_communication', None) - self.network_configuration = kwargs.get('network_configuration', None) - self.start_task = kwargs.get('start_task', None) - self.certificate_references = kwargs.get('certificate_references', None) - self.application_package_references = kwargs.get('application_package_references', None) - self.application_licenses = kwargs.get('application_licenses', None) - self.user_accounts = kwargs.get('user_accounts', None) - self.metadata = kwargs.get('metadata', None) - self.mount_configuration = kwargs.get('mount_configuration', None) - self.target_node_communication_mode = kwargs.get('target_node_communication_mode', None) - self.upgrade_policy = kwargs.get('upgrade_policy', None) - self.resource_tags = kwargs.get('resource_tags', None) - - -class PoolStatistics(Model): - """Contains utilization and resource usage statistics for the lifetime of a - Pool. - - All required parameters must be populated in order to send to Azure. - - :param url: Required. - :type url: str - :param start_time: Required. - :type start_time: datetime - :param last_update_time: Required. - :type last_update_time: datetime - :param usage_stats: Statistics related to Pool usage, such as the amount - of core-time used. - :type usage_stats: ~azure.batch.models.UsageStatistics - :param resource_stats: Statistics related to resource consumption by - Compute Nodes in the Pool. - :type resource_stats: ~azure.batch.models.ResourceStatistics - """ - - _validation = { - 'url': {'required': True}, - 'start_time': {'required': True}, - 'last_update_time': {'required': True}, - } - - _attribute_map = { - 'url': {'key': 'url', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - 'usage_stats': {'key': 'usageStats', 'type': 'UsageStatistics'}, - 'resource_stats': {'key': 'resourceStats', 'type': 'ResourceStatistics'}, - } - - def __init__(self, **kwargs): - super(PoolStatistics, self).__init__(**kwargs) - self.url = kwargs.get('url', None) - self.start_time = kwargs.get('start_time', None) - self.last_update_time = kwargs.get('last_update_time', None) - self.usage_stats = kwargs.get('usage_stats', None) - self.resource_stats = kwargs.get('resource_stats', None) - - -class PoolStopResizeOptions(Model): - """Additional parameters for stop_resize operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. 
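# Illustrative sketch of a fixed-size Pool specification. A real specification also
# needs either virtual_machine_configuration or cloud_service_configuration, which is
# elided here; the VM size shown is only an example.
from azure.batch import models as batch_models

pool_spec = batch_models.PoolSpecification(
    vm_size="standard_d2s_v3",
    target_dedicated_nodes=3,
    enable_auto_scale=False,
    task_slots_per_node=2,
)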
Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolStopResizeOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class PoolUpdatePropertiesOptions(Model): - """Additional parameters for update_properties operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolUpdatePropertiesOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class PoolUpdatePropertiesParameter(Model): - """The set of changes to be made to a Pool. - - All required parameters must be populated in order to send to Azure. - - :param start_task: A Task to run on each Compute Node as it joins the - Pool. The Task runs when the Compute Node is added to the Pool or when the - Compute Node is restarted. If this element is present, it overwrites any - existing StartTask. If omitted, any existing StartTask is removed from the - Pool. - :type start_task: ~azure.batch.models.StartTask - :param certificate_references: Required. This list replaces any existing - Certificate references configured on the Pool. If you specify an empty - collection, any existing Certificate references are removed from the Pool. - For Windows Nodes, the Batch service installs the Certificates to the - specified Certificate store and location. For Linux Compute Nodes, the - Certificates are stored in a directory inside the Task working directory - and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the - Task to query for this location. For Certificates with visibility of - 'remoteUser', a 'certs' directory is created in the user's home directory - (e.g., /home/{user-name}/certs) and Certificates are placed in that - directory. - Warning: This property is deprecated and will be removed after February, - 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - :type certificate_references: - list[~azure.batch.models.CertificateReference] - :param application_package_references: Required. The list replaces any - existing Application Package references on the Pool. Changes to - Application Package references affect all new Compute Nodes joining the - Pool, but do not affect Compute Nodes that are already in the Pool until - they are rebooted or reimaged. There is a maximum of 10 Application - Package references on any given Pool. If omitted, or if you specify an - empty collection, any existing Application Packages references are removed - from the Pool. A maximum of 10 references may be specified on a given - Pool. - :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param metadata: Required. This list replaces any existing metadata - configured on the Pool. If omitted, or if you specify an empty collection, - any existing metadata is removed from the Pool. - :type metadata: list[~azure.batch.models.MetadataItem] - :param target_node_communication_mode: The desired node communication mode - for the pool. This setting replaces any existing targetNodeCommunication - setting on the Pool. If omitted, the existing setting is default. 
Possible - values include: 'default', 'classic', 'simplified' - :type target_node_communication_mode: str or - ~azure.batch.models.NodeCommunicationMode - """ - - _validation = { - 'certificate_references': {'required': True}, - 'application_package_references': {'required': True}, - 'metadata': {'required': True}, - } - - _attribute_map = { - 'start_task': {'key': 'startTask', 'type': 'StartTask'}, - 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'target_node_communication_mode': {'key': 'targetNodeCommunicationMode', 'type': 'NodeCommunicationMode'}, - } - - def __init__(self, **kwargs): - super(PoolUpdatePropertiesParameter, self).__init__(**kwargs) - self.start_task = kwargs.get('start_task', None) - self.certificate_references = kwargs.get('certificate_references', None) - self.application_package_references = kwargs.get('application_package_references', None) - self.metadata = kwargs.get('metadata', None) - self.target_node_communication_mode = kwargs.get('target_node_communication_mode', None) - - -class PoolUsageMetrics(Model): - """Usage metrics for a Pool across an aggregation interval. + @overload + def __init__( + self, + *, + upload_condition: Union[str, "_models.OutputFileUploadCondition"], + ): ... - All required parameters must be populated in order to send to Azure. - - :param pool_id: Required. - :type pool_id: str - :param start_time: Required. - :type start_time: datetime - :param end_time: Required. - :type end_time: datetime - :param vm_size: Required. For information about available sizes of virtual - machines in Pools, see Choose a VM size for Compute Nodes in an Azure - Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - :type vm_size: str - :param total_core_hours: Required. The total core hours used in the Pool - during this aggregation interval. - :type total_core_hours: float - """ - - _validation = { - 'pool_id': {'required': True}, - 'start_time': {'required': True}, - 'end_time': {'required': True}, - 'vm_size': {'required': True}, - 'total_core_hours': {'required': True}, - } - - _attribute_map = { - 'pool_id': {'key': 'poolId', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'vm_size': {'key': 'vmSize', 'type': 'str'}, - 'total_core_hours': {'key': 'totalCoreHours', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(PoolUsageMetrics, self).__init__(**kwargs) - self.pool_id = kwargs.get('pool_id', None) - self.start_time = kwargs.get('start_time', None) - self.end_time = kwargs.get('end_time', None) - self.vm_size = kwargs.get('vm_size', None) - self.total_core_hours = kwargs.get('total_core_hours', None) - - -class PublicIPAddressConfiguration(Model): - """The public IP Address configuration of the networking configuration of a - Pool. - - :param provision: The provisioning type for Public IP Addresses for the - Pool. The default value is BatchManaged. Possible values include: - 'batchManaged', 'userManaged', 'noPublicIPAddresses' - :type provision: str or ~azure.batch.models.IPAddressProvisioningType - :param ip_address_ids: The number of IPs specified here limits the maximum - size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can - be allocated for each public IP. 
For example, a pool needing 250 dedicated - VMs would need at least 3 public IPs specified. Each element of this - collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - :type ip_address_ids: list[str] - """ + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ - _attribute_map = { - 'provision': {'key': 'provision', 'type': 'IPAddressProvisioningType'}, - 'ip_address_ids': {'key': 'ipAddressIds', 'type': '[str]'}, - } + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) - def __init__(self, **kwargs): - super(PublicIPAddressConfiguration, self).__init__(**kwargs) - self.provision = kwargs.get('provision', None) - self.ip_address_ids = kwargs.get('ip_address_ids', None) +class PublicIpAddressConfiguration(_model_base.Model): + """The public IP Address configuration of the networking configuration of a Pool. -class RecentJob(Model): + :ivar ip_address_provisioning_type: The provisioning type for Public IP Addresses for the Pool. + The default value is BatchManaged. Known values are: "batchmanaged", "usermanaged", and + "nopublicipaddresses". + :vartype ip_address_provisioning_type: str or ~azure.batch.models.IpAddressProvisioningType + :ivar ip_address_ids: The list of public IPs which the Batch service will use when provisioning + Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 + dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For + example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each + element of this collection is of the form: + /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. + :vartype ip_address_ids: list[str] + """ + + ip_address_provisioning_type: Optional[Union[str, "_models.IpAddressProvisioningType"]] = rest_field( + name="provision" + ) + """The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. + Known values are: \"batchmanaged\", \"usermanaged\", and \"nopublicipaddresses\".""" + ip_address_ids: Optional[List[str]] = rest_field(name="ipAddressIds") + """The list of public IPs which the Batch service will use when provisioning Compute Nodes. The + number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 + Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 + dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of + the form: + /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}.""" + + @overload + def __init__( + self, + *, + ip_address_provisioning_type: Optional[Union[str, "_models.IpAddressProvisioningType"]] = None, + ip_address_ids: Optional[List[str]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RecentBatchJob(_model_base.Model): """Information about the most recent Job to run under the Job Schedule. 
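A minimal construction sketch for the regenerated PublicIpAddressConfiguration defined above, assuming the azure.batch.models namespace used throughout these docstrings; the resource IDs are placeholders. Both overloads shown in the diff are illustrated: keyword arguments with the Python attribute names, and raw JSON with the wire names declared via rest_field.

    from azure.batch import models

    # Keyword overload: Python attribute names from the __init__ overload above.
    public_ips = models.PublicIpAddressConfiguration(
        ip_address_provisioning_type="usermanaged",
        ip_address_ids=[
            "/subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}",
        ],
    )

    # Mapping overload: raw JSON using the wire names ("provision", "ipAddressIds").
    public_ips = models.PublicIpAddressConfiguration(
        {"provision": "usermanaged", "ipAddressIds": ["/subscriptions/{subscription}/.../publicIPAddresses/{ip}"]}
    )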
- :param id: - :type id: str - :param url: - :type url: str + :ivar id: The ID of the Job. + :vartype id: str + :ivar url: The URL of the Job. + :vartype url: str """ - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'url': {'key': 'url', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(RecentJob, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.url = kwargs.get('url', None) + id: Optional[str] = rest_field() + """The ID of the Job.""" + url: Optional[str] = rest_field() + """The URL of the Job.""" + @overload + def __init__( + self, + *, + id: Optional[str] = None, # pylint: disable=redefined-builtin + url: Optional[str] = None, + ): ... -class ResizeError(Model): - """An error that occurred when resizing a Pool. + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ - :param code: - :type code: str - :param message: - :type message: str - :param values: - :type values: list[~azure.batch.models.NameValuePair] - """ + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'values': {'key': 'values', 'type': '[NameValuePair]'}, - } - - def __init__(self, **kwargs): - super(ResizeError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) - self.values = kwargs.get('values', None) +class ResizeError(_model_base.Model): + """An error that occurred when resizing a Pool. -class ResourceFile(Model): + :ivar code: An identifier for the Pool resize error. Codes are invariant and are intended to be + consumed programmatically. + :vartype code: str + :ivar message: A message describing the Pool resize error, intended to be suitable for display + in a user interface. + :vartype message: str + :ivar values_property: A list of additional error details related to the Pool resize error. + :vartype values_property: list[~azure.batch.models.NameValuePair] + """ + + code: Optional[str] = rest_field() + """An identifier for the Pool resize error. Codes are invariant and are intended to be consumed + programmatically.""" + message: Optional[str] = rest_field() + """A message describing the Pool resize error, intended to be suitable for display in a user + interface.""" + values_property: Optional[List["_models.NameValuePair"]] = rest_field(name="values") + """A list of additional error details related to the Pool resize error.""" + + @overload + def __init__( + self, + *, + code: Optional[str] = None, + message: Optional[str] = None, + values_property: Optional[List["_models.NameValuePair"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ResourceFile(_model_base.Model): """A single file or multiple files to be downloaded to a Compute Node. - :param auto_storage_container_name: The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. 
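ResizeError above is populated by the service rather than by callers; the notable rename is that the wire property "values" surfaces in Python as values_property. A hedged logging helper, assuming NameValuePair keeps its name/value attributes as in the previous generation:

    from azure.batch import models

    def format_resize_error(err: models.ResizeError) -> str:
        # values_property maps to the JSON "values" array of NameValuePair details.
        details = ", ".join(f"{pair.name}={pair.value}" for pair in (err.values_property or []))
        return f"{err.code}: {err.message}" + (f" ({details})" if details else "")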
- :type auto_storage_container_name: str - :param storage_container_url: The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. This URL must be readable and listable from - compute nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting read and - list permissions on the container, use a managed identity with read and - list permissions, or set the ACL for the container to allow public access. - :type storage_container_url: str - :param http_url: The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be readable - from compute nodes. There are three ways to get such a URL for a blob in - Azure storage: include a Shared Access Signature (SAS) granting read - permissions on the blob, use a managed identity with read permission, or - set the ACL for the blob or its container to allow public access. - :type http_url: str - :param blob_prefix: The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This prefix can - be a partial filename or a subdirectory. If a prefix is not specified, all - the files in the container will be downloaded. - :type blob_prefix: str - :param file_path: If the httpUrl property is specified, the filePath is - required and describes the path which the file will be downloaded to, - including the filename. Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is optional and is the - directory to download the files to. In the case where filePath is used as - a directory, any directory structure already associated with the input - data will be retained in full and appended to the specified filePath - directory. The specified relative path cannot break out of the Task's - working directory (for example by using '..'). - :type file_path: str - :param file_mode: This property applies only to files being downloaded to - Linux Compute Nodes. It will be ignored if it is specified for a - resourceFile which will be downloaded to a Windows Compute Node. If this - property is not specified for a Linux Compute Node, then a default value - of 0770 is applied to the file. - :type file_mode: str - :param identity_reference: The reference to the user assigned identity to - use to access Azure Blob Storage specified by storageContainerUrl or - httpUrl. 
- :type identity_reference: ~azure.batch.models.ComputeNodeIdentityReference - """ - - _attribute_map = { - 'auto_storage_container_name': {'key': 'autoStorageContainerName', 'type': 'str'}, - 'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'}, - 'http_url': {'key': 'httpUrl', 'type': 'str'}, - 'blob_prefix': {'key': 'blobPrefix', 'type': 'str'}, - 'file_path': {'key': 'filePath', 'type': 'str'}, - 'file_mode': {'key': 'fileMode', 'type': 'str'}, - 'identity_reference': {'key': 'identityReference', 'type': 'ComputeNodeIdentityReference'}, - } - - def __init__(self, **kwargs): - super(ResourceFile, self).__init__(**kwargs) - self.auto_storage_container_name = kwargs.get('auto_storage_container_name', None) - self.storage_container_url = kwargs.get('storage_container_url', None) - self.http_url = kwargs.get('http_url', None) - self.blob_prefix = kwargs.get('blob_prefix', None) - self.file_path = kwargs.get('file_path', None) - self.file_mode = kwargs.get('file_mode', None) - self.identity_reference = kwargs.get('identity_reference', None) - - -class ResourceStatistics(Model): - """Statistics related to resource consumption by Compute Nodes in a Pool. - - All required parameters must be populated in order to send to Azure. - - :param start_time: Required. - :type start_time: datetime - :param last_update_time: Required. - :type last_update_time: datetime - :param avg_cpu_percentage: Required. The average CPU usage across all - Compute Nodes in the Pool (percentage per node). - :type avg_cpu_percentage: float - :param avg_memory_gi_b: Required. The average memory usage in GiB across - all Compute Nodes in the Pool. - :type avg_memory_gi_b: float - :param peak_memory_gi_b: Required. The peak memory usage in GiB across all - Compute Nodes in the Pool. - :type peak_memory_gi_b: float - :param avg_disk_gi_b: Required. The average used disk space in GiB across - all Compute Nodes in the Pool. - :type avg_disk_gi_b: float - :param peak_disk_gi_b: Required. The peak used disk space in GiB across - all Compute Nodes in the Pool. - :type peak_disk_gi_b: float - :param disk_read_iops: Required. The total number of disk read operations - across all Compute Nodes in the Pool. - :type disk_read_iops: long - :param disk_write_iops: Required. The total number of disk write - operations across all Compute Nodes in the Pool. - :type disk_write_iops: long - :param disk_read_gi_b: Required. The total amount of data in GiB of disk - reads across all Compute Nodes in the Pool. - :type disk_read_gi_b: float - :param disk_write_gi_b: Required. The total amount of data in GiB of disk - writes across all Compute Nodes in the Pool. - :type disk_write_gi_b: float - :param network_read_gi_b: Required. The total amount of data in GiB of - network reads across all Compute Nodes in the Pool. - :type network_read_gi_b: float - :param network_write_gi_b: Required. The total amount of data in GiB of - network writes across all Compute Nodes in the Pool. 
- :type network_write_gi_b: float - """ - - _validation = { - 'start_time': {'required': True}, - 'last_update_time': {'required': True}, - 'avg_cpu_percentage': {'required': True}, - 'avg_memory_gi_b': {'required': True}, - 'peak_memory_gi_b': {'required': True}, - 'avg_disk_gi_b': {'required': True}, - 'peak_disk_gi_b': {'required': True}, - 'disk_read_iops': {'required': True}, - 'disk_write_iops': {'required': True}, - 'disk_read_gi_b': {'required': True}, - 'disk_write_gi_b': {'required': True}, - 'network_read_gi_b': {'required': True}, - 'network_write_gi_b': {'required': True}, - } - - _attribute_map = { - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - 'avg_cpu_percentage': {'key': 'avgCPUPercentage', 'type': 'float'}, - 'avg_memory_gi_b': {'key': 'avgMemoryGiB', 'type': 'float'}, - 'peak_memory_gi_b': {'key': 'peakMemoryGiB', 'type': 'float'}, - 'avg_disk_gi_b': {'key': 'avgDiskGiB', 'type': 'float'}, - 'peak_disk_gi_b': {'key': 'peakDiskGiB', 'type': 'float'}, - 'disk_read_iops': {'key': 'diskReadIOps', 'type': 'long'}, - 'disk_write_iops': {'key': 'diskWriteIOps', 'type': 'long'}, - 'disk_read_gi_b': {'key': 'diskReadGiB', 'type': 'float'}, - 'disk_write_gi_b': {'key': 'diskWriteGiB', 'type': 'float'}, - 'network_read_gi_b': {'key': 'networkReadGiB', 'type': 'float'}, - 'network_write_gi_b': {'key': 'networkWriteGiB', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(ResourceStatistics, self).__init__(**kwargs) - self.start_time = kwargs.get('start_time', None) - self.last_update_time = kwargs.get('last_update_time', None) - self.avg_cpu_percentage = kwargs.get('avg_cpu_percentage', None) - self.avg_memory_gi_b = kwargs.get('avg_memory_gi_b', None) - self.peak_memory_gi_b = kwargs.get('peak_memory_gi_b', None) - self.avg_disk_gi_b = kwargs.get('avg_disk_gi_b', None) - self.peak_disk_gi_b = kwargs.get('peak_disk_gi_b', None) - self.disk_read_iops = kwargs.get('disk_read_iops', None) - self.disk_write_iops = kwargs.get('disk_write_iops', None) - self.disk_read_gi_b = kwargs.get('disk_read_gi_b', None) - self.disk_write_gi_b = kwargs.get('disk_write_gi_b', None) - self.network_read_gi_b = kwargs.get('network_read_gi_b', None) - self.network_write_gi_b = kwargs.get('network_write_gi_b', None) - - -class RollingUpgradePolicy(Model): + :ivar auto_storage_container_name: The storage container name in the auto storage Account. The + autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and + one of them must be specified. + :vartype auto_storage_container_name: str + :ivar storage_container_url: The URL of the blob container within Azure Blob Storage. The + autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and + one of them must be specified. This URL must be readable and listable from compute nodes. There + are three ways to get such a URL for a container in Azure storage: include a Shared Access + Signature (SAS) granting read and list permissions on the container, use a managed identity + with read and list permissions, or set the ACL for the container to allow public access. + :vartype storage_container_url: str + :ivar http_url: The URL of the file to download. The autoStorageContainerName, + storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be + specified. If the URL points to Azure Blob Storage, it must be readable from compute nodes. 
+ There are three ways to get such a URL for a blob in Azure storage: include a Shared Access + Signature (SAS) granting read permissions on the blob, use a managed identity with read + permission, or set the ACL for the blob or its container to allow public access. + :vartype http_url: str + :ivar blob_prefix: The blob prefix to use when downloading blobs from an Azure Storage + container. Only the blobs whose names begin with the specified prefix will be downloaded. The + property is valid only when autoStorageContainerName or storageContainerUrl is used. This + prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files + in the container will be downloaded. + :vartype blob_prefix: str + :ivar file_path: The location on the Compute Node to which to download the file(s), relative to + the Task's working directory. If the httpUrl property is specified, the filePath is required + and describes the path which the file will be downloaded to, including the filename. Otherwise, + if the autoStorageContainerName or storageContainerUrl property is specified, filePath is + optional and is the directory to download the files to. In the case where filePath is used as a + directory, any directory structure already associated with the input data will be retained in + full and appended to the specified filePath directory. The specified relative path cannot break + out of the Task's working directory (for example by using '..'). + :vartype file_path: str + :ivar file_mode: The file permission mode attribute in octal format. This property applies only + to files being downloaded to Linux Compute Nodes. It will be ignored if it is specified for a + resourceFile which will be downloaded to a Windows Compute Node. If this property is not + specified for a Linux Compute Node, then a default value of 0770 is applied to the file. + :vartype file_mode: str + :ivar identity_reference: The reference to the user assigned identity to use to access Azure + Blob Storage specified by storageContainerUrl or httpUrl. + :vartype identity_reference: ~azure.batch.models.BatchNodeIdentityReference + """ + + auto_storage_container_name: Optional[str] = rest_field(name="autoStorageContainerName") + """The storage container name in the auto storage Account. The autoStorageContainerName, + storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be + specified.""" + storage_container_url: Optional[str] = rest_field(name="storageContainerUrl") + """The URL of the blob container within Azure Blob Storage. The autoStorageContainerName, + storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be + specified. This URL must be readable and listable from compute nodes. There are three ways to + get such a URL for a container in Azure storage: include a Shared Access Signature (SAS) + granting read and list permissions on the container, use a managed identity with read and list + permissions, or set the ACL for the container to allow public access.""" + http_url: Optional[str] = rest_field(name="httpUrl") + """The URL of the file to download. The autoStorageContainerName, storageContainerUrl and httpUrl + properties are mutually exclusive and one of them must be specified. If the URL points to Azure + Blob Storage, it must be readable from compute nodes. 
There are three ways to get such a URL + for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions + on the blob, use a managed identity with read permission, or set the ACL for the blob or its + container to allow public access.""" + blob_prefix: Optional[str] = rest_field(name="blobPrefix") + """The blob prefix to use when downloading blobs from an Azure Storage container. Only the blobs + whose names begin with the specified prefix will be downloaded. The property is valid only when + autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename + or a subdirectory. If a prefix is not specified, all the files in the container will be + downloaded.""" + file_path: Optional[str] = rest_field(name="filePath") + """The location on the Compute Node to which to download the file(s), relative to the Task's + working directory. If the httpUrl property is specified, the filePath is required and describes + the path which the file will be downloaded to, including the filename. Otherwise, if the + autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and + is the directory to download the files to. In the case where filePath is used as a directory, + any directory structure already associated with the input data will be retained in full and + appended to the specified filePath directory. The specified relative path cannot break out of + the Task's working directory (for example by using '..').""" + file_mode: Optional[str] = rest_field(name="fileMode") + """The file permission mode attribute in octal format. This property applies only to files being + downloaded to Linux Compute Nodes. It will be ignored if it is specified for a resourceFile + which will be downloaded to a Windows Compute Node. If this property is not specified for a + Linux Compute Node, then a default value of 0770 is applied to the file.""" + identity_reference: Optional["_models.BatchNodeIdentityReference"] = rest_field(name="identityReference") + """The reference to the user assigned identity to use to access Azure Blob Storage specified by + storageContainerUrl or httpUrl.""" + + @overload + def __init__( + self, + *, + auto_storage_container_name: Optional[str] = None, + storage_container_url: Optional[str] = None, + http_url: Optional[str] = None, + blob_prefix: Optional[str] = None, + file_path: Optional[str] = None, + file_mode: Optional[str] = None, + identity_reference: Optional["_models.BatchNodeIdentityReference"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RollingUpgradePolicy(_model_base.Model): """The configuration parameters used while performing a rolling upgrade. - :param enable_cross_zone_upgrade: Allow VMSS to ignore AZ boundaries when - constructing upgrade batches. Take into consideration the Update Domain - and maxBatchInstancePercent to determine the batch size. This field is - able to be set to true or false only when using NodePlacementConfiguration - as Zonal. - :type enable_cross_zone_upgrade: bool - :param max_batch_instance_percent: The maximum percent of total virtual - machine instances that will be upgraded simultaneously by the rolling - upgrade in one batch. 
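The regenerated ResourceFile keeps the mutual-exclusion rules spelled out above: exactly one of auto_storage_container_name, storage_container_url, or http_url must be set, and filePath is a full destination path for an httpUrl source but an optional directory for container sources. A sketch with placeholder names and URLs:

    from azure.batch import models

    # Single blob: file_path must include the destination filename.
    single_file = models.ResourceFile(
        http_url="https://account.blob.core.windows.net/inputs/data.csv?sv=...",  # placeholder SAS URL
        file_path="inputs/data.csv",
    )

    # Whole container from auto storage: file_path is an optional destination directory.
    container_files = models.ResourceFile(
        auto_storage_container_name="task-inputs",  # hypothetical container name
        blob_prefix="run-01/",
        file_path="inputs",
        file_mode="0644",  # octal mode, Linux nodes only; ignored on Windows nodes
    )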
As this is a maximum, unhealthy instances in - previous or future batches can cause the percentage of instances in a - batch to decrease to ensure higher reliability. The value of this field - should be between 5 and 100, inclusive. If both maxBatchInstancePercent - and maxUnhealthyInstancePercent are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - :type max_batch_instance_percent: int - :param max_unhealthy_instance_percent: The maximum percentage of the total - virtual machine instances in the scale set that can be simultaneously - unhealthy, either as a result of being upgraded, or by being found in an - unhealthy state by the virtual machine health checks before the rolling - upgrade aborts. This constraint will be checked prior to starting any - batch. The value of this field should be between 5 and 100, inclusive. If - both maxBatchInstancePercent and maxUnhealthyInstancePercent are assigned + :ivar enable_cross_zone_upgrade: Allow VMSS to ignore AZ boundaries when constructing upgrade + batches. Take into consideration the Update Domain and maxBatchInstancePercent to determine the + batch size. This field is able to be set to true or false only when using + NodePlacementConfiguration as Zonal. + :vartype enable_cross_zone_upgrade: bool + :ivar max_batch_instance_percent: The maximum percent of total virtual machine instances that + will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, + unhealthy instances in previous or future batches can cause the percentage of instances in a + batch to decrease to ensure higher reliability. The value of this field should be between 5 and + 100, inclusive. If both maxBatchInstancePercent and maxUnhealthyInstancePercent are assigned with value, the value of maxBatchInstancePercent should not be more than maxUnhealthyInstancePercent. - :type max_unhealthy_instance_percent: int - :param max_unhealthy_upgraded_instance_percent: The maximum percentage of - upgraded virtual machine instances that can be found to be in an unhealthy - state. This check will happen after each batch is upgraded. If this - percentage is ever exceeded, the rolling update aborts. The value of this - field should be between 0 and 100, inclusive. - :type max_unhealthy_upgraded_instance_percent: int - :param pause_time_between_batches: The wait time between completing the - update for all virtual machines in one batch and starting the next batch. - The time duration should be specified in ISO 8601 format. - :type pause_time_between_batches: timedelta - :param prioritize_unhealthy_instances: Upgrade all unhealthy instances in - a scale set before any healthy instances. - :type prioritize_unhealthy_instances: bool - :param rollback_failed_instances_on_policy_breach: Rollback failed - instances to previous model if the Rolling Upgrade policy is violated. 
- :type rollback_failed_instances_on_policy_breach: bool - """ - - _validation = { - 'max_batch_instance_percent': {'maximum': 100, 'minimum': 5}, - 'max_unhealthy_instance_percent': {'maximum': 100, 'minimum': 5}, - 'max_unhealthy_upgraded_instance_percent': {'maximum': 100, 'minimum': 0}, - } - - _attribute_map = { - 'enable_cross_zone_upgrade': {'key': 'enableCrossZoneUpgrade', 'type': 'bool'}, - 'max_batch_instance_percent': {'key': 'maxBatchInstancePercent', 'type': 'int'}, - 'max_unhealthy_instance_percent': {'key': 'maxUnhealthyInstancePercent', 'type': 'int'}, - 'max_unhealthy_upgraded_instance_percent': {'key': 'maxUnhealthyUpgradedInstancePercent', 'type': 'int'}, - 'pause_time_between_batches': {'key': 'pauseTimeBetweenBatches', 'type': 'duration'}, - 'prioritize_unhealthy_instances': {'key': 'prioritizeUnhealthyInstances', 'type': 'bool'}, - 'rollback_failed_instances_on_policy_breach': {'key': 'rollbackFailedInstancesOnPolicyBreach', 'type': 'bool'}, - } - - def __init__(self, **kwargs): - super(RollingUpgradePolicy, self).__init__(**kwargs) - self.enable_cross_zone_upgrade = kwargs.get('enable_cross_zone_upgrade', None) - self.max_batch_instance_percent = kwargs.get('max_batch_instance_percent', None) - self.max_unhealthy_instance_percent = kwargs.get('max_unhealthy_instance_percent', None) - self.max_unhealthy_upgraded_instance_percent = kwargs.get('max_unhealthy_upgraded_instance_percent', None) - self.pause_time_between_batches = kwargs.get('pause_time_between_batches', None) - self.prioritize_unhealthy_instances = kwargs.get('prioritize_unhealthy_instances', None) - self.rollback_failed_instances_on_policy_breach = kwargs.get('rollback_failed_instances_on_policy_breach', None) - - -class Schedule(Model): - """The schedule according to which Jobs will be created. All times are fixed - respective to UTC and are not impacted by daylight saving time. - - :param do_not_run_until: If you do not specify a doNotRunUntil time, the - schedule becomes ready to create Jobs immediately. - :type do_not_run_until: datetime - :param do_not_run_after: If you do not specify a doNotRunAfter time, and - you are creating a recurring Job Schedule, the Job Schedule will remain - active until you explicitly terminate it. - :type do_not_run_after: datetime - :param start_window: If a Job is not created within the startWindow - interval, then the 'opportunity' is lost; no Job will be created until the - next recurrence of the schedule. If the schedule is recurring, and the - startWindow is longer than the recurrence interval, then this is - equivalent to an infinite startWindow, because the Job that is 'due' in - one recurrenceInterval is not carried forward into the next recurrence - interval. The default is infinite. The minimum value is 1 minute. If you - specify a lower value, the Batch service rejects the schedule with an - error; if you are calling the REST API directly, the HTTP status code is - 400 (Bad Request). - :type start_window: timedelta - :param recurrence_interval: Because a Job Schedule can have at most one - active Job under it at any given time, if it is time to create a new Job - under a Job Schedule, but the previous Job is still running, the Batch - service will not create the new Job until the previous Job finishes. If - the previous Job does not finish within the startWindow period of the new - recurrenceInterval, then no new Job will be scheduled for that interval. - For recurring Jobs, you should normally specify a jobManagerTask in the - jobSpecification. 
If you do not use jobManagerTask, you will need an - external process to monitor when Jobs are created, add Tasks to the Jobs - and terminate the Jobs ready for the next recurrence. The default is that - the schedule does not recur: one Job is created, within the startWindow - after the doNotRunUntil time, and the schedule is complete as soon as that - Job finishes. The minimum value is 1 minute. If you specify a lower value, - the Batch service rejects the schedule with an error; if you are calling - the REST API directly, the HTTP status code is 400 (Bad Request). - :type recurrence_interval: timedelta - """ - - _attribute_map = { - 'do_not_run_until': {'key': 'doNotRunUntil', 'type': 'iso-8601'}, - 'do_not_run_after': {'key': 'doNotRunAfter', 'type': 'iso-8601'}, - 'start_window': {'key': 'startWindow', 'type': 'duration'}, - 'recurrence_interval': {'key': 'recurrenceInterval', 'type': 'duration'}, - } - - def __init__(self, **kwargs): - super(Schedule, self).__init__(**kwargs) - self.do_not_run_until = kwargs.get('do_not_run_until', None) - self.do_not_run_after = kwargs.get('do_not_run_after', None) - self.start_window = kwargs.get('start_window', None) - self.recurrence_interval = kwargs.get('recurrence_interval', None) - - -class SecurityProfile(Model): - """Specifies the security profile settings for the virtual machine or virtual - machine scale set. - - :param security_type: Possible values include: 'trustedLaunch' - :type security_type: str or ~azure.batch.models.SecurityTypes - :param encryption_at_host: This property can be used by user in the - request to enable or disable the Host Encryption for the virtual machine - or virtual machine scale set. This will enable the encryption for all the - disks including Resource/Temp disk at host itself. - :type encryption_at_host: bool - :param uefi_settings: Specifies the security settings like secure boot and - vTPM used while creating the virtual machine. Specifies the security - settings like secure boot and vTPM used while creating the virtual - machine. - :type uefi_settings: ~azure.batch.models.UefiSettings - """ - - _attribute_map = { - 'security_type': {'key': 'securityType', 'type': 'SecurityTypes'}, - 'encryption_at_host': {'key': 'encryptionAtHost', 'type': 'bool'}, - 'uefi_settings': {'key': 'uefiSettings', 'type': 'UefiSettings'}, - } - - def __init__(self, **kwargs): - super(SecurityProfile, self).__init__(**kwargs) - self.security_type = kwargs.get('security_type', None) - self.encryption_at_host = kwargs.get('encryption_at_host', None) - self.uefi_settings = kwargs.get('uefi_settings', None) - - -class ServiceArtifactReference(Model): + :vartype max_batch_instance_percent: int + :ivar max_unhealthy_instance_percent: The maximum percentage of the total virtual machine + instances in the scale set that can be simultaneously unhealthy, either as a result of being + upgraded, or by being found in an unhealthy state by the virtual machine health checks before + the rolling upgrade aborts. This constraint will be checked prior to starting any batch. The + value of this field should be between 5 and 100, inclusive. If both maxBatchInstancePercent and + maxUnhealthyInstancePercent are assigned with value, the value of maxBatchInstancePercent + should not be more than maxUnhealthyInstancePercent. + :vartype max_unhealthy_instance_percent: int + :ivar max_unhealthy_upgraded_instance_percent: The maximum percentage of upgraded virtual + machine instances that can be found to be in an unhealthy state. 
This check will happen after + each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. The + value of this field should be between 0 and 100, inclusive. + :vartype max_unhealthy_upgraded_instance_percent: int + :ivar pause_time_between_batches: The wait time between completing the update for all virtual + machines in one batch and starting the next batch. The time duration should be specified in ISO + 8601 format.. + :vartype pause_time_between_batches: ~datetime.timedelta + :ivar prioritize_unhealthy_instances: Upgrade all unhealthy instances in a scale set before any + healthy instances. + :vartype prioritize_unhealthy_instances: bool + :ivar rollback_failed_instances_on_policy_breach: Rollback failed instances to previous model + if the Rolling Upgrade policy is violated. + :vartype rollback_failed_instances_on_policy_breach: bool + """ + + enable_cross_zone_upgrade: Optional[bool] = rest_field(name="enableCrossZoneUpgrade") + """Allow VMSS to ignore AZ boundaries when constructing upgrade batches. Take into consideration + the Update Domain and maxBatchInstancePercent to determine the batch size. This field is able + to be set to true or false only when using NodePlacementConfiguration as Zonal.""" + max_batch_instance_percent: Optional[int] = rest_field(name="maxBatchInstancePercent") + """The maximum percent of total virtual machine instances that will be upgraded simultaneously by + the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or + future batches can cause the percentage of instances in a batch to decrease to ensure higher + reliability. The value of this field should be between 5 and 100, inclusive. If both + maxBatchInstancePercent and maxUnhealthyInstancePercent are assigned with value, the value of + maxBatchInstancePercent should not be more than maxUnhealthyInstancePercent.""" + max_unhealthy_instance_percent: Optional[int] = rest_field(name="maxUnhealthyInstancePercent") + """The maximum percentage of the total virtual machine instances in the scale set that can be + simultaneously unhealthy, either as a result of being upgraded, or by being found in an + unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This + constraint will be checked prior to starting any batch. The value of this field should be + between 5 and 100, inclusive. If both maxBatchInstancePercent and maxUnhealthyInstancePercent + are assigned with value, the value of maxBatchInstancePercent should not be more than + maxUnhealthyInstancePercent.""" + max_unhealthy_upgraded_instance_percent: Optional[int] = rest_field(name="maxUnhealthyUpgradedInstancePercent") + """The maximum percentage of upgraded virtual machine instances that can be found to be in an + unhealthy state. This check will happen after each batch is upgraded. If this percentage is + ever exceeded, the rolling update aborts. The value of this field should be between 0 and 100, + inclusive.""" + pause_time_between_batches: Optional[datetime.timedelta] = rest_field(name="pauseTimeBetweenBatches") + """The wait time between completing the update for all virtual machines in one batch and starting + the next batch. 
The time duration should be specified in ISO 8601 format..""" + prioritize_unhealthy_instances: Optional[bool] = rest_field(name="prioritizeUnhealthyInstances") + """Upgrade all unhealthy instances in a scale set before any healthy instances.""" + rollback_failed_instances_on_policy_breach: Optional[bool] = rest_field( + name="rollbackFailedInstancesOnPolicyBreach" + ) + """Rollback failed instances to previous model if the Rolling Upgrade policy is violated.""" + + @overload + def __init__( + self, + *, + enable_cross_zone_upgrade: Optional[bool] = None, + max_batch_instance_percent: Optional[int] = None, + max_unhealthy_instance_percent: Optional[int] = None, + max_unhealthy_upgraded_instance_percent: Optional[int] = None, + pause_time_between_batches: Optional[datetime.timedelta] = None, + prioritize_unhealthy_instances: Optional[bool] = None, + rollback_failed_instances_on_policy_breach: Optional[bool] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class SecurityProfile(_model_base.Model): + """Specifies the security profile settings for the virtual machine or virtual machine scale set. + + + :ivar encryption_at_host: This property can be used by user in the request to enable or disable + the Host Encryption for the virtual machine or virtual machine scale set. This will enable the + encryption for all the disks including Resource/Temp disk at host itself. Required. + :vartype encryption_at_host: bool + :ivar security_type: Specifies the SecurityType of the virtual machine. It has to be set to any + specified value to enable UefiSettings. Required. "trustedLaunch" + :vartype security_type: str or ~azure.batch.models.SecurityTypes + :ivar uefi_settings: Specifies the security settings like secure boot and vTPM used while + creating the virtual machine. Specifies the security settings like secure boot and vTPM used + while creating the virtual machine. Required. + :vartype uefi_settings: ~azure.batch.models.UefiSettings + """ + + encryption_at_host: bool = rest_field(name="encryptionAtHost") + """This property can be used by user in the request to enable or disable the Host Encryption for + the virtual machine or virtual machine scale set. This will enable the encryption for all the + disks including Resource/Temp disk at host itself. Required.""" + security_type: Union[str, "_models.SecurityTypes"] = rest_field(name="securityType") + """Specifies the SecurityType of the virtual machine. It has to be set to any specified value to + enable UefiSettings. Required. \"trustedLaunch\"""" + uefi_settings: "_models.UefiSettings" = rest_field(name="uefiSettings") + """Specifies the security settings like secure boot and vTPM used while creating the virtual + machine. Specifies the security settings like secure boot and vTPM used while creating the + virtual machine. Required.""" + + @overload + def __init__( + self, + *, + encryption_at_host: bool, + security_type: Union[str, "_models.SecurityTypes"], + uefi_settings: "_models.UefiSettings", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ServiceArtifactReference(_model_base.Model): """Specifies the service artifact reference id used to set same image version - for all virtual machines in the scale set when using 'latest' image - version. + for all virtual machines in the scale set when using 'latest' image version. - All required parameters must be populated in order to send to Azure. - :param id: Required. The service artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName} - :type id: str - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(ServiceArtifactReference, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - - -class StartTask(Model): - """A Task which is run when a Node joins a Pool in the Azure Batch service, or - when the Compute Node is rebooted or reimaged. - - Batch will retry Tasks when a recovery operation is triggered on a Node. - Examples of recovery operations include (but are not limited to) when an - unhealthy Node is rebooted or a Compute Node disappeared due to host - failure. Retries due to recovery operations are independent of and are not - counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, - an internal retry due to a recovery operation may occur. Because of this, - all Tasks should be idempotent. This means Tasks need to tolerate being - interrupted and restarted without causing any corruption or duplicate data. - The best practice for long running Tasks is to use some form of - checkpointing. In some cases the StartTask may be re-run even though the - Compute Node was not rebooted. Special care should be taken to avoid - StartTasks which create breakaway process or install/launch services from - the StartTask working directory, as this will block Batch from being able - to re-run the StartTask. - - All required parameters must be populated in order to send to Azure. - - :param command_line: Required. The command line does not run under a - shell, and therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of such - features, you should invoke the shell in the command line, for example - using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If - the command line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - :type command_line: str - :param container_settings: The settings for the container under which the - StartTask runs. When this is specified, all directories recursively below - the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the - node) are mapped into the container, all Task environment variables are - mapped into the container, and the Task command line is executed in the - container. Files produced in the container outside of - AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning - that Batch file APIs will not be able to access those files. 
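A construction sketch for the two models completed above (RollingUpgradePolicy and SecurityProfile). The percentage bounds (5-100 and 0-100) come from the docstrings and are enforced by the service; pause_time_between_batches is an ISO 8601 duration, typed here as a timedelta. The UefiSettings field names are assumed from the previous generation and are not shown in this hunk.

    import datetime
    from azure.batch import models

    rolling_upgrade = models.RollingUpgradePolicy(
        max_batch_instance_percent=20,
        max_unhealthy_instance_percent=20,
        max_unhealthy_upgraded_instance_percent=10,
        pause_time_between_batches=datetime.timedelta(seconds=30),
        prioritize_unhealthy_instances=True,
    )

    # All three SecurityProfile fields are required in the regenerated model.
    security = models.SecurityProfile(
        security_type="trustedLaunch",
        encryption_at_host=True,
        uefi_settings=models.UefiSettings(
            secure_boot_enabled=True,  # assumed field names, not confirmed by this hunk
            v_tpm_enabled=True,
        ),
    )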
- :type container_settings: ~azure.batch.models.TaskContainerSettings - :param resource_files: Files listed under this element are located in the - Task's working directory. - :type resource_files: list[~azure.batch.models.ResourceFile] - :param environment_settings: - :type environment_settings: list[~azure.batch.models.EnvironmentSetting] - :param user_identity: The user identity under which the StartTask runs. If - omitted, the Task runs as a non-administrative user unique to the Task. - :type user_identity: ~azure.batch.models.UserIdentity - :param max_task_retry_count: The maximum number of times the Task may be - retried. The Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of retries. The - Batch service will try the Task once, and may then retry up to this limit. - For example, if the maximum retry count is 3, Batch tries the Task up to 4 - times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). - :type max_task_retry_count: int - :param wait_for_success: Whether the Batch service should wait for the - StartTask to complete successfully (that is, to exit with exit code 0) - before scheduling any Tasks on the Compute Node. If true and the StartTask - fails on a Node, the Batch service retries the StartTask up to its maximum - retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be - detected via the Compute Node state and failure info details. If false, - the Batch service will not wait for the StartTask to complete. In this - case, other Tasks can start executing on the Compute Node while the - StartTask is still running; and even if the StartTask fails, new Tasks - will continue to be scheduled on the Compute Node. The default is true. - :type wait_for_success: bool - """ - - _validation = { - 'command_line': {'required': True}, - } - - _attribute_map = { - 'command_line': {'key': 'commandLine', 'type': 'str'}, - 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, - 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, - 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, - 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, - 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, - 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, - } - - def __init__(self, **kwargs): - super(StartTask, self).__init__(**kwargs) - self.command_line = kwargs.get('command_line', None) - self.container_settings = kwargs.get('container_settings', None) - self.resource_files = kwargs.get('resource_files', None) - self.environment_settings = kwargs.get('environment_settings', None) - self.user_identity = kwargs.get('user_identity', None) - self.max_task_retry_count = kwargs.get('max_task_retry_count', None) - self.wait_for_success = kwargs.get('wait_for_success', None) - - -class StartTaskInformation(Model): - """Information about a StartTask running on a Compute Node. - - All required parameters must be populated in order to send to Azure. - - :param state: Required. 
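The command_line guidance in the StartTask docstring above (no implicit shell, so invoke one explicitly if you need environment-variable expansion or command chaining) carries over unchanged to the regenerated models. Hedged examples of the two forms the docstring mentions; the script names are placeholders, and AZ_BATCH_NODE_ROOT_DIR is the Batch-provided variable referenced above:

    # Linux: wrap the command in /bin/sh -c to get shell expansion.
    linux_command_line = '/bin/sh -c "printenv AZ_BATCH_NODE_ROOT_DIR && ./setup.sh"'

    # Windows: wrap the command in cmd /c.
    windows_command_line = 'cmd /c "set && setup.cmd"'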
Possible values include: 'running', 'completed' - :type state: str or ~azure.batch.models.StartTaskState - :param start_time: Required. This value is reset every time the Task is - restarted or retried (that is, this is the most recent time at which the - StartTask started running). - :type start_time: datetime - :param end_time: This is the end time of the most recent run of the - StartTask, if that run has completed (even if that run failed and a retry - is pending). This element is not present if the StartTask is currently - running. - :type end_time: datetime - :param exit_code: The exit code of the program specified on the StartTask - command line. This property is set only if the StartTask is in the - completed state. In general, the exit code for a process reflects the - specific convention implemented by the application developer for that - process. If you use the exit code value to make decisions in your code, be - sure that you know the exit code convention used by the application - process. However, if the Batch service terminates the StartTask (due to - timeout, or user termination via the API) you may see an operating - system-defined exit code. - :type exit_code: int - :param container_info: Information about the container under which the - Task is executing. This property is set only if the Task runs in a - container context. - :type container_info: - ~azure.batch.models.TaskContainerExecutionInformation - :param failure_info: Information describing the Task failure, if any. This - property is set only if the Task is in the completed state and encountered - a failure. - :type failure_info: ~azure.batch.models.TaskFailureInformation - :param retry_count: Required. The number of times the Task has been - retried by the Batch service. Task application failures (non-zero exit - code) are retried, pre-processing errors (the Task could not be run) and - file upload errors are not retried. The Batch service will retry the Task - up to the limit specified by the constraints. - :type retry_count: int - :param last_retry_time: This element is present only if the Task was - retried (i.e. retryCount is nonzero). If present, this is typically the - same as startTime, but may be different if the Task has been restarted for - reasons other than retry; for example, if the Compute Node was rebooted - during a retry, then the startTime is updated but the lastRetryTime is - not. - :type last_retry_time: datetime - :param result: The result of the Task execution. If the value is 'failed', - then the details of the failure can be found in the failureInfo property. 
- Possible values include: 'success', 'failure' - :type result: str or ~azure.batch.models.TaskExecutionResult - """ - - _validation = { - 'state': {'required': True}, - 'start_time': {'required': True}, - 'retry_count': {'required': True}, - } - - _attribute_map = { - 'state': {'key': 'state', 'type': 'StartTaskState'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'exit_code': {'key': 'exitCode', 'type': 'int'}, - 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, - 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, - 'retry_count': {'key': 'retryCount', 'type': 'int'}, - 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, - 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, - } - - def __init__(self, **kwargs): - super(StartTaskInformation, self).__init__(**kwargs) - self.state = kwargs.get('state', None) - self.start_time = kwargs.get('start_time', None) - self.end_time = kwargs.get('end_time', None) - self.exit_code = kwargs.get('exit_code', None) - self.container_info = kwargs.get('container_info', None) - self.failure_info = kwargs.get('failure_info', None) - self.retry_count = kwargs.get('retry_count', None) - self.last_retry_time = kwargs.get('last_retry_time', None) - self.result = kwargs.get('result', None) - - -class SubtaskInformation(Model): - """Information about an Azure Batch subtask. - - :param id: The ID of the subtask. - :type id: int - :param node_info: Information about the Compute Node on which the subtask - ran. - :type node_info: ~azure.batch.models.ComputeNodeInformation - :param start_time: - :type start_time: datetime - :param end_time: This property is set only if the subtask is in the - Completed state. - :type end_time: datetime - :param exit_code: The exit code of the program specified on the subtask - command line. This property is set only if the subtask is in the completed - state. In general, the exit code for a process reflects the specific - convention implemented by the application developer for that process. If - you use the exit code value to make decisions in your code, be sure that - you know the exit code convention used by the application process. - However, if the Batch service terminates the subtask (due to timeout, or - user termination via the API) you may see an operating system-defined exit - code. - :type exit_code: int - :param container_info: Information about the container under which the - Task is executing. This property is set only if the Task runs in a - container context. - :type container_info: - ~azure.batch.models.TaskContainerExecutionInformation - :param failure_info: Information describing the Task failure, if any. This - property is set only if the Task is in the completed state and encountered - a failure. - :type failure_info: ~azure.batch.models.TaskFailureInformation - :param state: The current state of the subtask. Possible values include: - 'preparing', 'running', 'completed' - :type state: str or ~azure.batch.models.SubtaskState - :param state_transition_time: - :type state_transition_time: datetime - :param previous_state: The previous state of the subtask. This property is - not set if the subtask is in its initial running state. 
Possible values - include: 'preparing', 'running', 'completed' - :type previous_state: str or ~azure.batch.models.SubtaskState - :param previous_state_transition_time: This property is not set if the - subtask is in its initial running state. - :type previous_state_transition_time: datetime - :param result: The result of the Task execution. If the value is 'failed', - then the details of the failure can be found in the failureInfo property. - Possible values include: 'success', 'failure' - :type result: str or ~azure.batch.models.TaskExecutionResult - """ - - _attribute_map = { - 'id': {'key': 'id', 'type': 'int'}, - 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'exit_code': {'key': 'exitCode', 'type': 'int'}, - 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, - 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, - 'state': {'key': 'state', 'type': 'SubtaskState'}, - 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, - 'previous_state': {'key': 'previousState', 'type': 'SubtaskState'}, - 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, - 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, - } - - def __init__(self, **kwargs): - super(SubtaskInformation, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.node_info = kwargs.get('node_info', None) - self.start_time = kwargs.get('start_time', None) - self.end_time = kwargs.get('end_time', None) - self.exit_code = kwargs.get('exit_code', None) - self.container_info = kwargs.get('container_info', None) - self.failure_info = kwargs.get('failure_info', None) - self.state = kwargs.get('state', None) - self.state_transition_time = kwargs.get('state_transition_time', None) - self.previous_state = kwargs.get('previous_state', None) - self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) - self.result = kwargs.get('result', None) - - -class TaskAddCollectionOptions(Model): - """Additional parameters for add_collection operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 2 minutes. If the value is larger than - 120, the default will be used instead. Default value: 120 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(TaskAddCollectionOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 120) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class TaskAddCollectionParameter(Model): - """A collection of Azure Batch Tasks to add. - - All required parameters must be populated in order to send to Azure. - - :param value: Required. The total serialized size of this collection must - be less than 1MB. If it is greater than 1MB (for example if each Task has - 100's of resource files or environment variables), the request will fail - with code 'RequestBodyTooLarge' and should be retried again with fewer - Tasks. - :type value: list[~azure.batch.models.TaskAddParameter] - """ - - _validation = { - 'value': {'required': True}, - } - - _attribute_map = { - 'value': {'key': 'value', 'type': '[TaskAddParameter]'}, - } - - def __init__(self, **kwargs): - super(TaskAddCollectionParameter, self).__init__(**kwargs) - self.value = kwargs.get('value', None) - - -class TaskAddCollectionResult(Model): - """The result of adding a collection of Tasks to a Job. - - :param value: - :type value: list[~azure.batch.models.TaskAddResult] - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[TaskAddResult]'}, - } - - def __init__(self, **kwargs): - super(TaskAddCollectionResult, self).__init__(**kwargs) - self.value = kwargs.get('value', None) - - -class TaskAddOptions(Model): - """Additional parameters for add operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(TaskAddOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class TaskAddParameter(Model): - """An Azure Batch Task to add. - - Batch will retry Tasks when a recovery operation is triggered on a Node. 
- Examples of recovery operations include (but are not limited to) when an - unhealthy Node is rebooted or a Compute Node disappeared due to host - failure. Retries due to recovery operations are independent of and are not - counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, - an internal retry due to a recovery operation may occur. Because of this, - all Tasks should be idempotent. This means Tasks need to tolerate being - interrupted and restarted without causing any corruption or duplicate data. - The best practice for long running Tasks is to use some form of - checkpointing. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. The ID can contain any combination of alphanumeric - characters including hyphens and underscores, and cannot contain more than - 64 characters. The ID is case-preserving and case-insensitive (that is, - you may not have two IDs within a Job that differ only by case). - :type id: str - :param display_name: The display name need not be unique and can contain - any Unicode characters up to a maximum length of 1024. - :type display_name: str - :param command_line: Required. For multi-instance Tasks, the command line - is executed as the primary Task, after the primary Task and all subtasks - have finished executing the coordination command line. The command line - does not run under a shell, and therefore cannot take advantage of shell - features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command - line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should - use a relative path (relative to the Task working directory), or use the - Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - :type command_line: str - :param container_settings: The settings for the container under which the - Task runs. If the Pool that will run this Task has containerConfiguration - set, this must be set as well. If the Pool that will run this Task doesn't - have containerConfiguration set, this must not be set. When this is - specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR - (the root of Azure Batch directories on the node) are mapped into the - container, all Task environment variables are mapped into the container, - and the Task command line is executed in the container. Files produced in - the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to - the host disk, meaning that Batch file APIs will not be able to access - those files. - :type container_settings: ~azure.batch.models.TaskContainerSettings - :param exit_conditions: How the Batch service should respond when the Task - completes. - :type exit_conditions: ~azure.batch.models.ExitConditions - :param resource_files: For multi-instance Tasks, the resource files will - only be downloaded to the Compute Node on which the primary Task is - executed. There is a maximum size for the list of resource files. When - the max size is exceeded, the request will fail and the response error - code will be RequestEntityTooLarge. If this occurs, the collection of - ResourceFiles must be reduced in size. This can be achieved using .zip - files, Application Packages, or Docker Containers. 
- :type resource_files: list[~azure.batch.models.ResourceFile] - :param output_files: For multi-instance Tasks, the files will only be - uploaded from the Compute Node on which the primary Task is executed. - :type output_files: list[~azure.batch.models.OutputFile] - :param environment_settings: - :type environment_settings: list[~azure.batch.models.EnvironmentSetting] - :param affinity_info: A locality hint that can be used by the Batch - service to select a Compute Node on which to start the new Task. - :type affinity_info: ~azure.batch.models.AffinityInformation - :param constraints: The execution constraints that apply to this Task. If - you do not specify constraints, the maxTaskRetryCount is the - maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, - and the retentionTime is 7 days. - :type constraints: ~azure.batch.models.TaskConstraints - :param required_slots: The number of scheduling slots that the Task - required to run. The default is 1. A Task can only be scheduled to run on - a compute node if the node has enough free scheduling slots available. For - multi-instance Tasks, this must be 1. - :type required_slots: int - :param user_identity: The user identity under which the Task runs. If - omitted, the Task runs as a non-administrative user unique to the Task. - :type user_identity: ~azure.batch.models.UserIdentity - :param multi_instance_settings: An object that indicates that the Task is - a multi-instance Task, and contains information about how to run the - multi-instance Task. - :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings - :param depends_on: The Tasks that this Task depends on. This Task will not - be scheduled until all Tasks that it depends on have completed - successfully. If any of those Tasks fail and exhaust their retry counts, - this Task will never be scheduled. If the Job does not have - usesTaskDependencies set to true, and this element is present, the request - fails with error code TaskDependenciesNotSpecifiedOnJob. - :type depends_on: ~azure.batch.models.TaskDependencies - :param application_package_references: Application packages are downloaded - and deployed to a shared directory, not the Task working directory. - Therefore, if a referenced package is already on the Node, and is up to - date, then it is not re-downloaded; the existing copy on the Compute Node - is used. If a referenced Package cannot be installed, for example because - the package has been deleted or because download failed, the Task fails. - :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param authentication_token_settings: The settings for an authentication - token that the Task can use to perform Batch service operations. If this - property is set, the Batch service provides the Task with an - authentication token which can be used to authenticate Batch service - operations without requiring an Account access key. The token is provided - via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations - that the Task can carry out using the token depend on the settings. For - example, a Task can request Job permissions in order to add other Tasks to - the Job, or check the status of the Job or of other Tasks under the Job. 
- :type authentication_token_settings: - ~azure.batch.models.AuthenticationTokenSettings - """ - - _validation = { - 'id': {'required': True}, - 'command_line': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'command_line': {'key': 'commandLine', 'type': 'str'}, - 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, - 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, - 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, - 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, - 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, - 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, - 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, - 'required_slots': {'key': 'requiredSlots', 'type': 'int'}, - 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, - 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, - 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, - } - - def __init__(self, **kwargs): - super(TaskAddParameter, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.display_name = kwargs.get('display_name', None) - self.command_line = kwargs.get('command_line', None) - self.container_settings = kwargs.get('container_settings', None) - self.exit_conditions = kwargs.get('exit_conditions', None) - self.resource_files = kwargs.get('resource_files', None) - self.output_files = kwargs.get('output_files', None) - self.environment_settings = kwargs.get('environment_settings', None) - self.affinity_info = kwargs.get('affinity_info', None) - self.constraints = kwargs.get('constraints', None) - self.required_slots = kwargs.get('required_slots', None) - self.user_identity = kwargs.get('user_identity', None) - self.multi_instance_settings = kwargs.get('multi_instance_settings', None) - self.depends_on = kwargs.get('depends_on', None) - self.application_package_references = kwargs.get('application_package_references', None) - self.authentication_token_settings = kwargs.get('authentication_token_settings', None) - - -class TaskAddResult(Model): - """Result for a single Task added as part of an add Task collection operation. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. Possible values include: 'success', - 'clientError', 'serverError' - :type status: str or ~azure.batch.models.TaskAddStatus - :param task_id: Required. - :type task_id: str - :param e_tag: You can use this to detect whether the Task has changed - between requests. In particular, you can be pass the ETag with an Update - Task request to specify that your changes should take effect only if - nobody else has modified the Job in the meantime. - :type e_tag: str - :param last_modified: - :type last_modified: datetime - :param location: - :type location: str - :param error: The error encountered while attempting to add the Task. 
- :type error: ~azure.batch.models.BatchError - """ - - _validation = { - 'status': {'required': True}, - 'task_id': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'status', 'type': 'TaskAddStatus'}, - 'task_id': {'key': 'taskId', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, - 'location': {'key': 'location', 'type': 'str'}, - 'error': {'key': 'error', 'type': 'BatchError'}, - } - - def __init__(self, **kwargs): - super(TaskAddResult, self).__init__(**kwargs) - self.status = kwargs.get('status', None) - self.task_id = kwargs.get('task_id', None) - self.e_tag = kwargs.get('e_tag', None) - self.last_modified = kwargs.get('last_modified', None) - self.location = kwargs.get('location', None) - self.error = kwargs.get('error', None) - - -class TaskConstraints(Model): - """Execution constraints to apply to a Task. - - :param max_wall_clock_time: If this is not specified, there is no time - limit on how long the Task may run. - :type max_wall_clock_time: timedelta - :param retention_time: The default is 7 days, i.e. the Task directory will - be retained for 7 days unless the Compute Node is removed or the Job is - deleted. - :type retention_time: timedelta - :param max_task_retry_count: The maximum number of times the Task may be - retried. The Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of retries for the - Task executable due to a nonzero exit code. The Batch service will try the - Task once, and may then retry up to this limit. For example, if the - maximum retry count is 3, Batch tries the Task up to 4 times (one initial - try and 3 retries). If the maximum retry count is 0, the Batch service - does not retry the Task after the first attempt. If the maximum retry - count is -1, the Batch service retries the Task without limit, however - this is not recommended for a start task or any task. The default value is - 0 (no retries). - :type max_task_retry_count: int - """ - - _attribute_map = { - 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, - 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, - 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(TaskConstraints, self).__init__(**kwargs) - self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None) - self.retention_time = kwargs.get('retention_time', None) - self.max_task_retry_count = kwargs.get('max_task_retry_count', None) - - -class TaskContainerExecutionInformation(Model): - """Contains information about the container which a Task is executing. - - :param container_id: - :type container_id: str - :param state: This is the state of the container according to the Docker - service. It is equivalent to the status field returned by "docker - inspect". - :type state: str - :param error: This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". 
- :type error: str - """ - - _attribute_map = { - 'container_id': {'key': 'containerId', 'type': 'str'}, - 'state': {'key': 'state', 'type': 'str'}, - 'error': {'key': 'error', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(TaskContainerExecutionInformation, self).__init__(**kwargs) - self.container_id = kwargs.get('container_id', None) - self.state = kwargs.get('state', None) - self.error = kwargs.get('error', None) - - -class TaskContainerSettings(Model): - """The container settings for a Task. - - All required parameters must be populated in order to send to Azure. - - :param container_run_options: These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - :type container_run_options: str - :param image_name: Required. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part of the Image - name, the tag ":latest" is used as a default. - :type image_name: str - :param registry: The private registry which contains the container Image. - This setting can be omitted if was already provided at Pool creation. - :type registry: ~azure.batch.models.ContainerRegistry - :param working_directory: The default is 'taskWorkingDirectory'. Possible - values include: 'taskWorkingDirectory', 'containerImageDefault' - :type working_directory: str or - ~azure.batch.models.ContainerWorkingDirectory - """ - - _validation = { - 'image_name': {'required': True}, - } - - _attribute_map = { - 'container_run_options': {'key': 'containerRunOptions', 'type': 'str'}, - 'image_name': {'key': 'imageName', 'type': 'str'}, - 'registry': {'key': 'registry', 'type': 'ContainerRegistry'}, - 'working_directory': {'key': 'workingDirectory', 'type': 'ContainerWorkingDirectory'}, - } - - def __init__(self, **kwargs): - super(TaskContainerSettings, self).__init__(**kwargs) - self.container_run_options = kwargs.get('container_run_options', None) - self.image_name = kwargs.get('image_name', None) - self.registry = kwargs.get('registry', None) - self.working_directory = kwargs.get('working_directory', None) - - -class TaskCounts(Model): - """The Task counts for a Job. - - All required parameters must be populated in order to send to Azure. - - :param active: Required. The number of Tasks in the active state. - :type active: int - :param running: Required. The number of Tasks in the running or preparing - state. - :type running: int - :param completed: Required. The number of Tasks in the completed state. - :type completed: int - :param succeeded: Required. The number of Tasks which succeeded. A Task - succeeds if its result (found in the executionInfo property) is 'success'. - :type succeeded: int - :param failed: Required. The number of Tasks which failed. A Task fails if - its result (found in the executionInfo property) is 'failure'. 
- :type failed: int - """ - - _validation = { - 'active': {'required': True}, - 'running': {'required': True}, - 'completed': {'required': True}, - 'succeeded': {'required': True}, - 'failed': {'required': True}, - } - - _attribute_map = { - 'active': {'key': 'active', 'type': 'int'}, - 'running': {'key': 'running', 'type': 'int'}, - 'completed': {'key': 'completed', 'type': 'int'}, - 'succeeded': {'key': 'succeeded', 'type': 'int'}, - 'failed': {'key': 'failed', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(TaskCounts, self).__init__(**kwargs) - self.active = kwargs.get('active', None) - self.running = kwargs.get('running', None) - self.completed = kwargs.get('completed', None) - self.succeeded = kwargs.get('succeeded', None) - self.failed = kwargs.get('failed', None) - - -class TaskCountsResult(Model): - """The Task and TaskSlot counts for a Job. - - All required parameters must be populated in order to send to Azure. - - :param task_counts: Required. The number of Tasks per state. - :type task_counts: ~azure.batch.models.TaskCounts - :param task_slot_counts: Required. The number of TaskSlots required by - Tasks per state. - :type task_slot_counts: ~azure.batch.models.TaskSlotCounts - """ - - _validation = { - 'task_counts': {'required': True}, - 'task_slot_counts': {'required': True}, - } - - _attribute_map = { - 'task_counts': {'key': 'taskCounts', 'type': 'TaskCounts'}, - 'task_slot_counts': {'key': 'taskSlotCounts', 'type': 'TaskSlotCounts'}, - } - - def __init__(self, **kwargs): - super(TaskCountsResult, self).__init__(**kwargs) - self.task_counts = kwargs.get('task_counts', None) - self.task_slot_counts = kwargs.get('task_slot_counts', None) - - -class TaskDeleteOptions(Model): - """Additional parameters for delete operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. 
The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(TaskDeleteOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class TaskDependencies(Model): - """Specifies any dependencies of a Task. Any Task that is explicitly specified - or within a dependency range must complete before the dependant Task will - be scheduled. - - :param task_ids: The taskIds collection is limited to 64000 characters - total (i.e. the combined length of all Task IDs). If the taskIds - collection exceeds the maximum length, the Add Task request fails with - error code TaskDependencyListTooLong. In this case consider using Task ID - ranges instead. - :type task_ids: list[str] - :param task_id_ranges: - :type task_id_ranges: list[~azure.batch.models.TaskIdRange] - """ - - _attribute_map = { - 'task_ids': {'key': 'taskIds', 'type': '[str]'}, - 'task_id_ranges': {'key': 'taskIdRanges', 'type': '[TaskIdRange]'}, - } - - def __init__(self, **kwargs): - super(TaskDependencies, self).__init__(**kwargs) - self.task_ids = kwargs.get('task_ids', None) - self.task_id_ranges = kwargs.get('task_id_ranges', None) - - -class TaskExecutionInformation(Model): - """Information about the execution of a Task. - - All required parameters must be populated in order to send to Azure. - - :param start_time: 'Running' corresponds to the running state, so if the - Task specifies resource files or Packages, then the start time reflects - the time at which the Task started downloading or deploying these. If the - Task has been restarted or retried, this is the most recent time at which - the Task started running. This property is present only for Tasks that are - in the running or completed state. - :type start_time: datetime - :param end_time: This property is set only if the Task is in the Completed - state. - :type end_time: datetime - :param exit_code: The exit code of the program specified on the Task - command line. This property is set only if the Task is in the completed - state. In general, the exit code for a process reflects the specific - convention implemented by the application developer for that process. If - you use the exit code value to make decisions in your code, be sure that - you know the exit code convention used by the application process. - However, if the Batch service terminates the Task (due to timeout, or user - termination via the API) you may see an operating system-defined exit - code. 
- :type exit_code: int - :param container_info: Information about the container under which the - Task is executing. This property is set only if the Task runs in a - container context. - :type container_info: - ~azure.batch.models.TaskContainerExecutionInformation - :param failure_info: Information describing the Task failure, if any. This - property is set only if the Task is in the completed state and encountered - a failure. - :type failure_info: ~azure.batch.models.TaskFailureInformation - :param retry_count: Required. The number of times the Task has been - retried by the Batch service. Task application failures (non-zero exit - code) are retried, pre-processing errors (the Task could not be run) and - file upload errors are not retried. The Batch service will retry the Task - up to the limit specified by the constraints. - :type retry_count: int - :param last_retry_time: This element is present only if the Task was - retried (i.e. retryCount is nonzero). If present, this is typically the - same as startTime, but may be different if the Task has been restarted for - reasons other than retry; for example, if the Compute Node was rebooted - during a retry, then the startTime is updated but the lastRetryTime is - not. - :type last_retry_time: datetime - :param requeue_count: Required. The number of times the Task has been - requeued by the Batch service as the result of a user request. When the - user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or - when the Job is being disabled, the user can specify that running Tasks on - the Compute Nodes be requeued for execution. This count tracks how many - times the Task has been requeued for these reasons. - :type requeue_count: int - :param last_requeue_time: This property is set only if the requeueCount is - nonzero. - :type last_requeue_time: datetime - :param result: The result of the Task execution. If the value is 'failed', - then the details of the failure can be found in the failureInfo property. 
- Possible values include: 'success', 'failure' - :type result: str or ~azure.batch.models.TaskExecutionResult - """ - - _validation = { - 'retry_count': {'required': True}, - 'requeue_count': {'required': True}, - } - - _attribute_map = { - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'exit_code': {'key': 'exitCode', 'type': 'int'}, - 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, - 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, - 'retry_count': {'key': 'retryCount', 'type': 'int'}, - 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, - 'requeue_count': {'key': 'requeueCount', 'type': 'int'}, - 'last_requeue_time': {'key': 'lastRequeueTime', 'type': 'iso-8601'}, - 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, - } - - def __init__(self, **kwargs): - super(TaskExecutionInformation, self).__init__(**kwargs) - self.start_time = kwargs.get('start_time', None) - self.end_time = kwargs.get('end_time', None) - self.exit_code = kwargs.get('exit_code', None) - self.container_info = kwargs.get('container_info', None) - self.failure_info = kwargs.get('failure_info', None) - self.retry_count = kwargs.get('retry_count', None) - self.last_retry_time = kwargs.get('last_retry_time', None) - self.requeue_count = kwargs.get('requeue_count', None) - self.last_requeue_time = kwargs.get('last_requeue_time', None) - self.result = kwargs.get('result', None) - - -class TaskFailureInformation(Model): - """Information about a Task failure. - - All required parameters must be populated in order to send to Azure. - - :param category: Required. The category of the Task error. Possible values - include: 'userError', 'serverError' - :type category: str or ~azure.batch.models.ErrorCategory - :param code: - :type code: str - :param message: - :type message: str - :param details: - :type details: list[~azure.batch.models.NameValuePair] - """ - - _validation = { - 'category': {'required': True}, - } - - _attribute_map = { - 'category': {'key': 'category', 'type': 'ErrorCategory'}, - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'details': {'key': 'details', 'type': '[NameValuePair]'}, - } - - def __init__(self, **kwargs): - super(TaskFailureInformation, self).__init__(**kwargs) - self.category = kwargs.get('category', None) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) - self.details = kwargs.get('details', None) - - -class TaskGetOptions(Model): - """Additional parameters for get operation. - - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. 
Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(TaskGetOptions, self).__init__(**kwargs) - self.select = kwargs.get('select', None) - self.expand = kwargs.get('expand', None) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class TaskIdRange(Model): - """A range of Task IDs that a Task can depend on. All Tasks with IDs in the - range must complete successfully before the dependent Task can be - scheduled. - - The start and end of the range are inclusive. For example, if a range has - start 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The first Task ID in the range. - :type start: int - :param end: Required. The last Task ID in the range. - :type end: int + :ivar id: The service artifact reference id of ServiceArtifactReference. The service artifact + reference id in the form of + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. # pylint: disable=line-too-long + Required. + :vartype id: str """ - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } + id: str = rest_field() + """The service artifact reference id of ServiceArtifactReference. 
The service artifact reference + id in the form of + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. # pylint: disable=line-too-long + Required.""" - _attribute_map = { - 'start': {'key': 'start', 'type': 'int'}, - 'end': {'key': 'end', 'type': 'int'}, - } + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + ): ... - def __init__(self, **kwargs): - super(TaskIdRange, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) -class TaskInformation(Model): - """Information about a Task running on a Compute Node. - - All required parameters must be populated in order to send to Azure. - :param task_url: - :type task_url: str - :param job_id: - :type job_id: str - :param task_id: - :type task_id: str - :param subtask_id: The ID of the subtask if the Task is a multi-instance - Task. - :type subtask_id: int - :param task_state: Required. The current state of the Task. Possible - values include: 'active', 'preparing', 'running', 'completed' - :type task_state: str or ~azure.batch.models.TaskState - :param execution_info: Information about the execution of the Task. - :type execution_info: ~azure.batch.models.TaskExecutionInformation - """ - - _validation = { - 'task_state': {'required': True}, - } - - _attribute_map = { - 'task_url': {'key': 'taskUrl', 'type': 'str'}, - 'job_id': {'key': 'jobId', 'type': 'str'}, - 'task_id': {'key': 'taskId', 'type': 'str'}, - 'subtask_id': {'key': 'subtaskId', 'type': 'int'}, - 'task_state': {'key': 'taskState', 'type': 'TaskState'}, - 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, - } - - def __init__(self, **kwargs): - super(TaskInformation, self).__init__(**kwargs) - self.task_url = kwargs.get('task_url', None) - self.job_id = kwargs.get('job_id', None) - self.task_id = kwargs.get('task_id', None) - self.subtask_id = kwargs.get('subtask_id', None) - self.task_state = kwargs.get('task_state', None) - self.execution_info = kwargs.get('execution_info', None) - - -class TaskListOptions(Model): - """Additional parameters for list operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Tasks can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
- :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(TaskListOptions, self).__init__(**kwargs) - self.filter = kwargs.get('filter', None) - self.select = kwargs.get('select', None) - self.expand = kwargs.get('expand', None) - self.max_results = kwargs.get('max_results', 1000) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class TaskListSubtasksOptions(Model): - """Additional parameters for list_subtasks operation. - - :param select: An OData $select clause. - :type select: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(TaskListSubtasksOptions, self).__init__(**kwargs) - self.select = kwargs.get('select', None) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - - -class TaskReactivateOptions(Model): - """Additional parameters for reactivate operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
- :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(TaskReactivateOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class TaskSchedulingPolicy(Model): - """Specifies how Tasks should be distributed across Compute Nodes. +class UefiSettings(_model_base.Model): + """Specifies the security settings like secure boot and vTPM used while creating the virtual + machine. - All required parameters must be populated in order to send to Azure. - - :param node_fill_type: Required. If not specified, the default is spread. - Possible values include: 'spread', 'pack' - :type node_fill_type: str or ~azure.batch.models.ComputeNodeFillType + :ivar secure_boot_enabled: Specifies whether secure boot should be enabled on the virtual + machine. + :vartype secure_boot_enabled: bool + :ivar v_tpm_enabled: Specifies whether vTPM should be enabled on the virtual machine. 
+ :vartype v_tpm_enabled: bool """ - _validation = { - 'node_fill_type': {'required': True}, - } + secure_boot_enabled: Optional[bool] = rest_field(name="secureBootEnabled") + """Specifies whether secure boot should be enabled on the virtual machine.""" + v_tpm_enabled: Optional[bool] = rest_field(name="vTpmEnabled") + """Specifies whether vTPM should be enabled on the virtual machine.""" - _attribute_map = { - 'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'}, - } + @overload + def __init__( + self, + *, + secure_boot_enabled: Optional[bool] = None, + v_tpm_enabled: Optional[bool] = None, + ): ... - def __init__(self, **kwargs): - super(TaskSchedulingPolicy, self).__init__(**kwargs) - self.node_fill_type = kwargs.get('node_fill_type', None) + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) -class TaskSlotCounts(Model): - """The TaskSlot counts for a Job. - All required parameters must be populated in order to send to Azure. - - :param active: Required. The number of TaskSlots for active Tasks. - :type active: int - :param running: Required. The number of TaskSlots for running Tasks. - :type running: int - :param completed: Required. The number of TaskSlots for completed Tasks. - :type completed: int - :param succeeded: Required. The number of TaskSlots for succeeded Tasks. - :type succeeded: int - :param failed: Required. The number of TaskSlots for failed Tasks. - :type failed: int - """ - - _validation = { - 'active': {'required': True}, - 'running': {'required': True}, - 'completed': {'required': True}, - 'succeeded': {'required': True}, - 'failed': {'required': True}, - } - - _attribute_map = { - 'active': {'key': 'active', 'type': 'int'}, - 'running': {'key': 'running', 'type': 'int'}, - 'completed': {'key': 'completed', 'type': 'int'}, - 'succeeded': {'key': 'succeeded', 'type': 'int'}, - 'failed': {'key': 'failed', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(TaskSlotCounts, self).__init__(**kwargs) - self.active = kwargs.get('active', None) - self.running = kwargs.get('running', None) - self.completed = kwargs.get('completed', None) - self.succeeded = kwargs.get('succeeded', None) - self.failed = kwargs.get('failed', None) - - -class TaskStatistics(Model): - """Resource usage statistics for a Task. - - All required parameters must be populated in order to send to Azure. - - :param url: Required. - :type url: str - :param start_time: Required. - :type start_time: datetime - :param last_update_time: Required. - :type last_update_time: datetime - :param user_cpu_time: Required. - :type user_cpu_time: timedelta - :param kernel_cpu_time: Required. - :type kernel_cpu_time: timedelta - :param wall_clock_time: Required. The wall clock time is the elapsed time - from when the Task started running on a Compute Node to when it finished - (or to the last time the statistics were updated, if the Task had not - finished by then). If the Task was retried, this includes the wall clock - time of all the Task retries. - :type wall_clock_time: timedelta - :param read_iops: Required. The total number of disk read operations made - by the Task. - :type read_iops: long - :param write_iops: Required. The total number of disk write operations - made by the Task. - :type write_iops: long - :param read_io_gi_b: Required. 
The total gibibytes read from disk by the - Task. - :type read_io_gi_b: float - :param write_io_gi_b: Required. The total gibibytes written to disk by the - Task. - :type write_io_gi_b: float - :param wait_time: Required. - :type wait_time: timedelta - """ - - _validation = { - 'url': {'required': True}, - 'start_time': {'required': True}, - 'last_update_time': {'required': True}, - 'user_cpu_time': {'required': True}, - 'kernel_cpu_time': {'required': True}, - 'wall_clock_time': {'required': True}, - 'read_iops': {'required': True}, - 'write_iops': {'required': True}, - 'read_io_gi_b': {'required': True}, - 'write_io_gi_b': {'required': True}, - 'wait_time': {'required': True}, - } - - _attribute_map = { - 'url': {'key': 'url', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, - 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, - 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, - 'read_iops': {'key': 'readIOps', 'type': 'long'}, - 'write_iops': {'key': 'writeIOps', 'type': 'long'}, - 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, - 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, - 'wait_time': {'key': 'waitTime', 'type': 'duration'}, - } - - def __init__(self, **kwargs): - super(TaskStatistics, self).__init__(**kwargs) - self.url = kwargs.get('url', None) - self.start_time = kwargs.get('start_time', None) - self.last_update_time = kwargs.get('last_update_time', None) - self.user_cpu_time = kwargs.get('user_cpu_time', None) - self.kernel_cpu_time = kwargs.get('kernel_cpu_time', None) - self.wall_clock_time = kwargs.get('wall_clock_time', None) - self.read_iops = kwargs.get('read_iops', None) - self.write_iops = kwargs.get('write_iops', None) - self.read_io_gi_b = kwargs.get('read_io_gi_b', None) - self.write_io_gi_b = kwargs.get('write_io_gi_b', None) - self.wait_time = kwargs.get('wait_time', None) - - -class TaskTerminateOptions(Model): - """Additional parameters for terminate operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. 
- :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(TaskTerminateOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class TaskUpdateOptions(Model): - """Additional parameters for update operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(TaskUpdateOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - - -class TaskUpdateParameter(Model): - """The set of changes to be made to a Task. - - :param constraints: Constraints that apply to this Task. If omitted, the - Task is given the default constraints. For multi-instance Tasks, updating - the retention time applies only to the primary Task and not subtasks. - :type constraints: ~azure.batch.models.TaskConstraints - """ - - _attribute_map = { - 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, - } - - def __init__(self, **kwargs): - super(TaskUpdateParameter, self).__init__(**kwargs) - self.constraints = kwargs.get('constraints', None) - - -class UefiSettings(Model): - """Specifies the security settings like secure boot and vTPM used while - creating the virtual machine. - - :param secure_boot_enabled: Specifies whether secure boot should be - enabled on the virtual machine. - :type secure_boot_enabled: bool - :param v_tpm_enabled: Specifies whether vTPM should be enabled on the - virtual machine. - :type v_tpm_enabled: bool - """ - - _attribute_map = { - 'secure_boot_enabled': {'key': 'secureBootEnabled', 'type': 'bool'}, - 'v_tpm_enabled': {'key': 'vTpmEnabled', 'type': 'bool'}, - } - - def __init__(self, **kwargs): - super(UefiSettings, self).__init__(**kwargs) - self.secure_boot_enabled = kwargs.get('secure_boot_enabled', None) - self.v_tpm_enabled = kwargs.get('v_tpm_enabled', None) - - -class UpgradePolicy(Model): +class UpgradePolicy(_model_base.Model): """Describes an upgrade policy - automatic, manual, or rolling. - All required parameters must be populated in order to send to Azure. - - :param mode: Required. Possible values include: 'automatic', 'manual', - 'rolling' - :type mode: str or ~azure.batch.models.UpgradeMode - :param automatic_os_upgrade_policy: Configuration parameters used for - performing automatic OS Upgrade. The configuration parameters used for - performing automatic OS upgrade. - :type automatic_os_upgrade_policy: - ~azure.batch.models.AutomaticOSUpgradePolicy - :param rolling_upgrade_policy: The configuration parameters used while - performing a rolling upgrade. This property is only supported on Pools - with the virtualMachineConfiguration property. 
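A pool upgrade policy, in both the removed and the regenerated shape, always carries a mode ("automatic", "manual", or "rolling") and may attach mode-specific settings. A minimal sketch of building the regenerated model with its keyword overload, using only names declared in this diff (the rolling settings are left empty rather than guessed):

from azure.batch import models

# "rolling": the scale set is updated in batches, optionally with a pause between them.
policy = models.UpgradePolicy(
    mode="rolling",
    rolling_upgrade_policy=models.RollingUpgradePolicy(),  # batch-size / pause settings omitted here
)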
- :type rolling_upgrade_policy: ~azure.batch.models.RollingUpgradePolicy - """ - - _validation = { - 'mode': {'required': True}, - } - - _attribute_map = { - 'mode': {'key': 'mode', 'type': 'UpgradeMode'}, - 'automatic_os_upgrade_policy': {'key': 'automaticOSUpgradePolicy', 'type': 'AutomaticOSUpgradePolicy'}, - 'rolling_upgrade_policy': {'key': 'rollingUpgradePolicy', 'type': 'RollingUpgradePolicy'}, - } - - def __init__(self, **kwargs): - super(UpgradePolicy, self).__init__(**kwargs) - self.mode = kwargs.get('mode', None) - self.automatic_os_upgrade_policy = kwargs.get('automatic_os_upgrade_policy', None) - self.rolling_upgrade_policy = kwargs.get('rolling_upgrade_policy', None) - - -class UploadBatchServiceLogsConfiguration(Model): - """The Azure Batch service log files upload configuration for a Compute Node. - - All required parameters must be populated in order to send to Azure. - - :param container_url: Required. If a user assigned managed identity is not - being used, the URL must include a Shared Access Signature (SAS) granting - write permissions to the container. The SAS duration must allow enough - time for the upload to finish. The start time for SAS is optional and - recommended to not be specified. - :type container_url: str - :param start_time: Required. Any log file containing a log message in the - time range will be uploaded. This means that the operation might retrieve - more logs than have been requested since the entire log file is always - uploaded, but the operation should not retrieve fewer logs than have been - requested. - :type start_time: datetime - :param end_time: Any log file containing a log message in the time range - will be uploaded. This means that the operation might retrieve more logs - than have been requested since the entire log file is always uploaded, but - the operation should not retrieve fewer logs than have been requested. If - omitted, the default is to upload all logs available after the startTime. - :type end_time: datetime - :param identity_reference: The reference to the user assigned identity to - use to access Azure Blob Storage specified by containerUrl. The identity - must have write access to the Azure Blob Storage container. - :type identity_reference: ~azure.batch.models.ComputeNodeIdentityReference - """ - - _validation = { - 'container_url': {'required': True}, - 'start_time': {'required': True}, - } - - _attribute_map = { - 'container_url': {'key': 'containerUrl', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'identity_reference': {'key': 'identityReference', 'type': 'ComputeNodeIdentityReference'}, - } - - def __init__(self, **kwargs): - super(UploadBatchServiceLogsConfiguration, self).__init__(**kwargs) - self.container_url = kwargs.get('container_url', None) - self.start_time = kwargs.get('start_time', None) - self.end_time = kwargs.get('end_time', None) - self.identity_reference = kwargs.get('identity_reference', None) - - -class UploadBatchServiceLogsResult(Model): - """The result of uploading Batch service log files from a specific Compute - Node. - - All required parameters must be populated in order to send to Azure. - - :param virtual_directory_name: Required. The virtual directory name is - part of the blob name for each log file uploaded, and it is built based - poolId, nodeId and a unique identifier. - :type virtual_directory_name: str - :param number_of_files_uploaded: Required. The number of log files which - will be uploaded. 
- :type number_of_files_uploaded: int - """ - - _validation = { - 'virtual_directory_name': {'required': True}, - 'number_of_files_uploaded': {'required': True}, - } - - _attribute_map = { - 'virtual_directory_name': {'key': 'virtualDirectoryName', 'type': 'str'}, - 'number_of_files_uploaded': {'key': 'numberOfFilesUploaded', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(UploadBatchServiceLogsResult, self).__init__(**kwargs) - self.virtual_directory_name = kwargs.get('virtual_directory_name', None) - self.number_of_files_uploaded = kwargs.get('number_of_files_uploaded', None) - - -class UsageStatistics(Model): - """Statistics related to Pool usage information. - - All required parameters must be populated in order to send to Azure. - - :param start_time: Required. - :type start_time: datetime - :param last_update_time: Required. - :type last_update_time: datetime - :param dedicated_core_time: Required. - :type dedicated_core_time: timedelta - """ - - _validation = { - 'start_time': {'required': True}, - 'last_update_time': {'required': True}, - 'dedicated_core_time': {'required': True}, - } - _attribute_map = { - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - 'dedicated_core_time': {'key': 'dedicatedCoreTime', 'type': 'duration'}, - } - - def __init__(self, **kwargs): - super(UsageStatistics, self).__init__(**kwargs) - self.start_time = kwargs.get('start_time', None) - self.last_update_time = kwargs.get('last_update_time', None) - self.dedicated_core_time = kwargs.get('dedicated_core_time', None) - - -class UserAccount(Model): + :ivar mode: Specifies the mode of an upgrade to virtual machines in the scale set.:code:`
<br />`:code:`<br />` Possible values are::code:`<br />`:code:`<br />` **Manual** - You control + the application of updates to virtual machines in the scale set. You do this by using the + manualUpgrade action.:code:`<br />`:code:`<br />` **Automatic** - All virtual machines in the + scale set are automatically updated at the same time.:code:`<br />`:code:`<br />` **Rolling** - + Scale set performs updates in batches with an optional pause time in between. Required. Known + values are: "automatic", "manual", and "rolling". + :vartype mode: str or ~azure.batch.models.UpgradeMode + :ivar automatic_os_upgrade_policy: Configuration parameters used for performing automatic OS + Upgrade. The configuration parameters used for performing automatic OS upgrade. + :vartype automatic_os_upgrade_policy: ~azure.batch.models.AutomaticOsUpgradePolicy + :ivar rolling_upgrade_policy: The configuration parameters used while performing a rolling + upgrade. This property is only supported on Pools with the virtualMachineConfiguration + property. + :vartype rolling_upgrade_policy: ~azure.batch.models.RollingUpgradePolicy + """ + + mode: Union[str, "_models.UpgradeMode"] = rest_field() + """Specifies the mode of an upgrade to virtual machines in the scale set.:code:`<br />`:code:`<br />` Possible values are::code:`<br />`:code:`<br />` **Manual** - You control the application + of updates to virtual machines in the scale set. You do this by using the manualUpgrade + action.:code:`<br />`:code:`<br />` **Automatic** - All virtual machines in the scale set are + automatically updated at the same time.:code:`<br />`:code:`<br />
` **Rolling** - Scale set + performs updates in batches with an optional pause time in between. Required. Known values are: + \"automatic\", \"manual\", and \"rolling\".""" + automatic_os_upgrade_policy: Optional["_models.AutomaticOsUpgradePolicy"] = rest_field( + name="automaticOSUpgradePolicy" + ) + """Configuration parameters used for performing automatic OS Upgrade. The configuration parameters + used for performing automatic OS upgrade.""" + rolling_upgrade_policy: Optional["_models.RollingUpgradePolicy"] = rest_field(name="rollingUpgradePolicy") + """The configuration parameters used while performing a rolling upgrade. This property is only + supported on Pools with the virtualMachineConfiguration property.""" + + @overload + def __init__( + self, + *, + mode: Union[str, "_models.UpgradeMode"], + automatic_os_upgrade_policy: Optional["_models.AutomaticOsUpgradePolicy"] = None, + rolling_upgrade_policy: Optional["_models.RollingUpgradePolicy"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class UploadBatchServiceLogsContent(_model_base.Model): + """The Azure Batch service log files upload parameters for a Compute Node. + + All required parameters must be populated in order to send to server. + + :ivar container_url: The URL of the container within Azure Blob Storage to which to upload the + Batch Service log file(s). If a user assigned managed identity is not being used, the URL must + include a Shared Access Signature (SAS) granting write permissions to the container. The SAS + duration must allow enough time for the upload to finish. The start time for SAS is optional + and recommended to not be specified. Required. + :vartype container_url: str + :ivar start_time: The start of the time range from which to upload Batch Service log file(s). + Any log file containing a log message in the time range will be uploaded. This means that the + operation might retrieve more logs than have been requested since the entire log file is always + uploaded, but the operation should not retrieve fewer logs than have been requested. Required. + :vartype start_time: ~datetime.datetime + :ivar end_time: The end of the time range from which to upload Batch Service log file(s). Any + log file containing a log message in the time range will be uploaded. This means that the + operation might retrieve more logs than have been requested since the entire log file is always + uploaded, but the operation should not retrieve fewer logs than have been requested. If + omitted, the default is to upload all logs available after the startTime. + :vartype end_time: ~datetime.datetime + :ivar identity_reference: The reference to the user assigned identity to use to access Azure + Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob + Storage container. + :vartype identity_reference: ~azure.batch.models.BatchNodeIdentityReference + """ + + container_url: str = rest_field(name="containerUrl") + """The URL of the container within Azure Blob Storage to which to upload the Batch Service log + file(s). If a user assigned managed identity is not being used, the URL must include a Shared + Access Signature (SAS) granting write permissions to the container. 
The SAS duration must allow + enough time for the upload to finish. The start time for SAS is optional and recommended to not + be specified. Required.""" + start_time: datetime.datetime = rest_field(name="startTime", format="rfc3339") + """The start of the time range from which to upload Batch Service log file(s). Any log file + containing a log message in the time range will be uploaded. This means that the operation + might retrieve more logs than have been requested since the entire log file is always uploaded, + but the operation should not retrieve fewer logs than have been requested. Required.""" + end_time: Optional[datetime.datetime] = rest_field(name="endTime", format="rfc3339") + """The end of the time range from which to upload Batch Service log file(s). Any log file + containing a log message in the time range will be uploaded. This means that the operation + might retrieve more logs than have been requested since the entire log file is always uploaded, + but the operation should not retrieve fewer logs than have been requested. If omitted, the + default is to upload all logs available after the startTime.""" + identity_reference: Optional["_models.BatchNodeIdentityReference"] = rest_field(name="identityReference") + """The reference to the user assigned identity to use to access Azure Blob Storage specified by + containerUrl. The identity must have write access to the Azure Blob Storage container.""" + + @overload + def __init__( + self, + *, + container_url: str, + start_time: datetime.datetime, + end_time: Optional[datetime.datetime] = None, + identity_reference: Optional["_models.BatchNodeIdentityReference"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class UploadBatchServiceLogsResult(_model_base.Model): + """The result of uploading Batch service log files from a specific Compute Node. + + + :ivar virtual_directory_name: The virtual directory within Azure Blob Storage container to + which the Batch Service log file(s) will be uploaded. The virtual directory name is part of the + blob name for each log file uploaded, and it is built based poolId, nodeId and a unique + identifier. Required. + :vartype virtual_directory_name: str + :ivar number_of_files_uploaded: The number of log files which will be uploaded. Required. + :vartype number_of_files_uploaded: int + """ + + virtual_directory_name: str = rest_field(name="virtualDirectoryName") + """The virtual directory within Azure Blob Storage container to which the Batch Service log + file(s) will be uploaded. The virtual directory name is part of the blob name for each log file + uploaded, and it is built based poolId, nodeId and a unique identifier. Required.""" + number_of_files_uploaded: int = rest_field(name="numberOfFilesUploaded") + """The number of log files which will be uploaded. Required.""" + + @overload + def __init__( + self, + *, + virtual_directory_name: str, + number_of_files_uploaded: int, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
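The two upload-log models pair up: UploadBatchServiceLogsContent names a writable container and a time window, and the service answers with UploadBatchServiceLogsResult (virtual directory name plus file count). A hedged sketch of the request side; the SAS URL is a placeholder:

import datetime
from azure.batch import models

upload_request = models.UploadBatchServiceLogsContent(
    # Without a managed identity, the URL must carry a write-capable SAS token.
    container_url="https://<account>.blob.core.windows.net/batch-logs?<sas-token>",
    # Upload every log file that contains a message from the last two hours.
    start_time=datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(hours=2),
    # end_time omitted: all logs after start_time are included.
)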
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class UserAccount(_model_base.Model): """Properties used to create a user used to execute Tasks on an Azure Batch Compute Node. - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param password: Required. - :type password: str - :param elevation_level: The elevation level of the user Account. The - default value is nonAdmin. Possible values include: 'nonAdmin', 'admin' - :type elevation_level: str or ~azure.batch.models.ElevationLevel - :param linux_user_configuration: The Linux-specific user configuration for - the user Account. This property is ignored if specified on a Windows Pool. - If not specified, the user is created with the default options. - :type linux_user_configuration: ~azure.batch.models.LinuxUserConfiguration - :param windows_user_configuration: The Windows-specific user configuration - for the user Account. This property can only be specified if the user is - on a Windows Pool. If not specified and on a Windows Pool, the user is - created with the default options. - :type windows_user_configuration: - ~azure.batch.models.WindowsUserConfiguration - """ - - _validation = { - 'name': {'required': True}, - 'password': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'password': {'key': 'password', 'type': 'str'}, - 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, - 'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'}, - 'windows_user_configuration': {'key': 'windowsUserConfiguration', 'type': 'WindowsUserConfiguration'}, - } - - def __init__(self, **kwargs): - super(UserAccount, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.password = kwargs.get('password', None) - self.elevation_level = kwargs.get('elevation_level', None) - self.linux_user_configuration = kwargs.get('linux_user_configuration', None) - self.windows_user_configuration = kwargs.get('windows_user_configuration', None) - - -class UserAssignedIdentity(Model): + + :ivar name: The name of the user Account. Names can contain any Unicode characters up to a + maximum length of 20. Required. + :vartype name: str + :ivar password: The password for the user Account. Required. + :vartype password: str + :ivar elevation_level: The elevation level of the user Account. The default value is nonAdmin. + Known values are: "nonadmin" and "admin". + :vartype elevation_level: str or ~azure.batch.models.ElevationLevel + :ivar linux_user_configuration: The Linux-specific user configuration for the user Account. + This property is ignored if specified on a Windows Pool. If not specified, the user is created + with the default options. + :vartype linux_user_configuration: ~azure.batch.models.LinuxUserConfiguration + :ivar windows_user_configuration: The Windows-specific user configuration for the user Account. + This property can only be specified if the user is on a Windows Pool. If not specified and on a + Windows Pool, the user is created with the default options. + :vartype windows_user_configuration: ~azure.batch.models.WindowsUserConfiguration + """ + + name: str = rest_field() + """The name of the user Account. Names can contain any Unicode characters up to a maximum length + of 20. 
Required.""" + password: str = rest_field() + """The password for the user Account. Required.""" + elevation_level: Optional[Union[str, "_models.ElevationLevel"]] = rest_field(name="elevationLevel") + """The elevation level of the user Account. The default value is nonAdmin. Known values are: + \"nonadmin\" and \"admin\".""" + linux_user_configuration: Optional["_models.LinuxUserConfiguration"] = rest_field(name="linuxUserConfiguration") + """The Linux-specific user configuration for the user Account. This property is ignored if + specified on a Windows Pool. If not specified, the user is created with the default options.""" + windows_user_configuration: Optional["_models.WindowsUserConfiguration"] = rest_field( + name="windowsUserConfiguration" + ) + """The Windows-specific user configuration for the user Account. This property can only be + specified if the user is on a Windows Pool. If not specified and on a Windows Pool, the user is + created with the default options.""" + + @overload + def __init__( + self, + *, + name: str, + password: str, + elevation_level: Optional[Union[str, "_models.ElevationLevel"]] = None, + linux_user_configuration: Optional["_models.LinuxUserConfiguration"] = None, + windows_user_configuration: Optional["_models.WindowsUserConfiguration"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class UserAssignedIdentity(_model_base.Model): """The user assigned Identity. - Variables are only populated by the server, and will be ignored when - sending a request. + Readonly variables are only populated by the server, and will be ignored when sending a request. - All required parameters must be populated in order to send to Azure. - :param resource_id: Required. The ARM resource id of the user assigned - identity - :type resource_id: str + :ivar resource_id: The ARM resource id of the user assigned identity. Required. + :vartype resource_id: str :ivar client_id: The client id of the user assigned identity. :vartype client_id: str :ivar principal_id: The principal id of the user assigned identity. :vartype principal_id: str """ - _validation = { - 'resource_id': {'required': True}, - 'client_id': {'readonly': True}, - 'principal_id': {'readonly': True}, - } - - _attribute_map = { - 'resource_id': {'key': 'resourceId', 'type': 'str'}, - 'client_id': {'key': 'clientId', 'type': 'str'}, - 'principal_id': {'key': 'principalId', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(UserAssignedIdentity, self).__init__(**kwargs) - self.resource_id = kwargs.get('resource_id', None) - self.client_id = None - self.principal_id = None - - -class UserIdentity(Model): - """The definition of the user identity under which the Task is run. - - Specify either the userName or autoUser property, but not both. - - :param user_name: The userName and autoUser properties are mutually - exclusive; you must specify one but not both. - :type user_name: str - :param auto_user: The auto user under which the Task is run. The userName - and autoUser properties are mutually exclusive; you must specify one but - not both. 
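UserAccount and UserIdentity are easy to conflate: a UserAccount is created on the Compute Node (name, password, optional elevation), while a UserIdentity only names which account a Task runs under. A short sketch using just the fields declared in this diff:

from azure.batch import models

admin_account = models.UserAccount(
    name="pool-admin",        # up to 20 Unicode characters
    password="<password>",
    elevation_level="admin",  # known values: "nonadmin", "admin"
)

# username and auto_user are mutually exclusive; here the Task runs as the account above.
run_as_admin = models.UserIdentity(username="pool-admin")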
- :type auto_user: ~azure.batch.models.AutoUserSpecification - """ - - _attribute_map = { - 'user_name': {'key': 'username', 'type': 'str'}, - 'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'}, - } - - def __init__(self, **kwargs): - super(UserIdentity, self).__init__(**kwargs) - self.user_name = kwargs.get('user_name', None) - self.auto_user = kwargs.get('auto_user', None) - - -class VirtualMachineConfiguration(Model): + resource_id: str = rest_field(name="resourceId") + """The ARM resource id of the user assigned identity. Required.""" + client_id: Optional[str] = rest_field(name="clientId", visibility=["read"]) + """The client id of the user assigned identity.""" + principal_id: Optional[str] = rest_field(name="principalId", visibility=["read"]) + """The principal id of the user assigned identity.""" + + @overload + def __init__( + self, + *, + resource_id: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class UserIdentity(_model_base.Model): + """The definition of the user identity under which the Task is run. Specify either the userName or + autoUser property, but not both. + + :ivar username: The name of the user identity under which the Task is run. The userName and + autoUser properties are mutually exclusive; you must specify one but not both. + :vartype username: str + :ivar auto_user: The auto user under which the Task is run. The userName and autoUser + properties are mutually exclusive; you must specify one but not both. + :vartype auto_user: ~azure.batch.models.AutoUserSpecification + """ + + username: Optional[str] = rest_field() + """The name of the user identity under which the Task is run. The userName and autoUser properties + are mutually exclusive; you must specify one but not both.""" + auto_user: Optional["_models.AutoUserSpecification"] = rest_field(name="autoUser") + """The auto user under which the Task is run. The userName and autoUser properties are mutually + exclusive; you must specify one but not both.""" + + @overload + def __init__( + self, + *, + username: Optional[str] = None, + auto_user: Optional["_models.AutoUserSpecification"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class VirtualMachineConfiguration(_model_base.Model): # pylint: disable=too-many-instance-attributes """The configuration for Compute Nodes in a Pool based on the Azure Virtual Machines infrastructure. - All required parameters must be populated in order to send to Azure. - - :param image_reference: Required. A reference to the Azure Virtual - Machines Marketplace Image or the custom Virtual Machine Image to use. - :type image_reference: ~azure.batch.models.ImageReference - :param node_agent_sku_id: Required. The Batch Compute Node agent is a - program that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the Batch - service. There are different implementations of the Compute Node agent, - known as SKUs, for different operating systems. 
You must specify a Compute - Node agent SKU which matches the selected Image reference. To get the list - of supported Compute Node agent SKUs along with their list of verified - Image references, see the 'List supported Compute Node agent SKUs' - operation. - :type node_agent_sku_id: str - :param windows_configuration: Windows operating system settings on the - virtual machine. This property must not be specified if the imageReference - property specifies a Linux OS Image. - :type windows_configuration: ~azure.batch.models.WindowsConfiguration - :param data_disks: This property must be specified if the Compute Nodes in - the Pool need to have empty data disks attached to them. This cannot be - updated. Each Compute Node gets its own disk (the disk is not a file - share). Existing disks cannot be attached, each attached disk is empty. - When the Compute Node is removed from the Pool, the disk and all data - associated with it is also deleted. The disk is not formatted after being - attached, it must be formatted before use - for more information see - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux + + :ivar image_reference: A reference to the Azure Virtual Machines Marketplace Image or the + custom Virtual Machine Image to use. Required. + :vartype image_reference: ~azure.batch.models.ImageReference + :ivar node_agent_sku_id: The SKU of the Batch Compute Node agent to be provisioned on Compute + Nodes in the Pool. The Batch Compute Node agent is a program that runs on each Compute Node in + the Pool, and provides the command-and-control interface between the Compute Node and the Batch + service. There are different implementations of the Compute Node agent, known as SKUs, for + different operating systems. You must specify a Compute Node agent SKU which matches the + selected Image reference. To get the list of supported Compute Node agent SKUs along with their + list of verified Image references, see the 'List supported Compute Node agent SKUs' operation. + Required. + :vartype node_agent_sku_id: str + :ivar windows_configuration: Windows operating system settings on the virtual machine. This + property must not be specified if the imageReference property specifies a Linux OS Image. + :vartype windows_configuration: ~azure.batch.models.WindowsConfiguration + :ivar data_disks: The configuration for data disks attached to the Compute Nodes in the Pool. + This property must be specified if the Compute Nodes in the Pool need to have empty data disks + attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not + a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute + Node is removed from the Pool, the disk and all data associated with it is also deleted. The + disk is not formatted after being attached, it must be formatted before use - for more + information see + https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux # pylint: disable=line-too-long and - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. - :type data_disks: list[~azure.batch.models.DataDisk] - :param license_type: This only applies to Images that contain the Windows - operating system, and should only be used when you hold valid on-premises - licenses for the Compute Nodes which will be deployed. 
If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - :type license_type: str - :param container_configuration: The container configuration for the Pool. - If specified, setup is performed on each Compute Node in the Pool to allow - Tasks to run in containers. All regular Tasks and Job manager Tasks run on - this Pool must specify the containerSettings property, and all other Tasks - may specify it. - :type container_configuration: ~azure.batch.models.ContainerConfiguration - :param disk_encryption_configuration: The disk encryption configuration - for the pool. If specified, encryption is performed on each node in the - pool during node provisioning. - :type disk_encryption_configuration: - ~azure.batch.models.DiskEncryptionConfiguration - :param node_placement_configuration: The node placement configuration for - the pool. This configuration will specify rules on how nodes in the pool - will be physically allocated. - :type node_placement_configuration: - ~azure.batch.models.NodePlacementConfiguration - :param extensions: If specified, the extensions mentioned in this - configuration will be installed on each node. - :type extensions: list[~azure.batch.models.VMExtension] - :param os_disk: Settings for the operating system disk of the Virtual - Machine. - :type os_disk: ~azure.batch.models.OSDisk - :param security_profile: Specifies the security profile settings for the - virtual machine or virtual machine scale set. - :type security_profile: ~azure.batch.models.SecurityProfile - :param service_artifact_reference: Specifies the service artifact - reference id used to set same image version for all virtual machines in - the scale set when using 'latest' image version. 
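The pairing rule spelled out here survives in the regenerated VirtualMachineConfiguration below: the image reference and the node agent SKU must match. A hedged sketch; the ImageReference field names (publisher/offer/sku) are the usual Marketplace triplet and are assumed, since that model is not part of this hunk:

from azure.batch import models

vm_config = models.VirtualMachineConfiguration(
    image_reference=models.ImageReference(        # publisher/offer/sku field names assumed
        publisher="canonical",
        offer="0001-com-ubuntu-server-jammy",
        sku="22_04-lts",
    ),
    node_agent_sku_id="batch.node.ubuntu 22.04",  # must correspond to the image above
)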
The service artifact - reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName} - :type service_artifact_reference: - ~azure.batch.models.ServiceArtifactReference - """ - - _validation = { - 'image_reference': {'required': True}, - 'node_agent_sku_id': {'required': True}, - } - - _attribute_map = { - 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, - 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, - 'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'}, - 'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'}, - 'license_type': {'key': 'licenseType', 'type': 'str'}, - 'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'}, - 'disk_encryption_configuration': {'key': 'diskEncryptionConfiguration', 'type': 'DiskEncryptionConfiguration'}, - 'node_placement_configuration': {'key': 'nodePlacementConfiguration', 'type': 'NodePlacementConfiguration'}, - 'extensions': {'key': 'extensions', 'type': '[VMExtension]'}, - 'os_disk': {'key': 'osDisk', 'type': 'OSDisk'}, - 'security_profile': {'key': 'securityProfile', 'type': 'SecurityProfile'}, - 'service_artifact_reference': {'key': 'serviceArtifactReference', 'type': 'ServiceArtifactReference'}, - } - - def __init__(self, **kwargs): - super(VirtualMachineConfiguration, self).__init__(**kwargs) - self.image_reference = kwargs.get('image_reference', None) - self.node_agent_sku_id = kwargs.get('node_agent_sku_id', None) - self.windows_configuration = kwargs.get('windows_configuration', None) - self.data_disks = kwargs.get('data_disks', None) - self.license_type = kwargs.get('license_type', None) - self.container_configuration = kwargs.get('container_configuration', None) - self.disk_encryption_configuration = kwargs.get('disk_encryption_configuration', None) - self.node_placement_configuration = kwargs.get('node_placement_configuration', None) - self.extensions = kwargs.get('extensions', None) - self.os_disk = kwargs.get('os_disk', None) - self.security_profile = kwargs.get('security_profile', None) - self.service_artifact_reference = kwargs.get('service_artifact_reference', None) - - -class VirtualMachineInfo(Model): + https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. # pylint: disable=line-too-long + :vartype data_disks: list[~azure.batch.models.DataDisk] + :ivar license_type: This only applies to Images that contain the Windows operating system, and + should only be used when you hold valid on-premises licenses for the Compute + Nodes which will be deployed. If omitted, no on-premises licensing discount is + applied. Values are: + + Windows_Server - The on-premises license is for Windows + Server. + Windows_Client - The on-premises license is for Windows Client. + :vartype license_type: str + :ivar container_configuration: The container configuration for the Pool. If specified, setup is + performed on each Compute Node in the Pool to allow Tasks to run in containers. All regular + Tasks and Job manager Tasks run on this Pool must specify the containerSettings property, and + all other Tasks may specify it. + :vartype container_configuration: ~azure.batch.models.ContainerConfiguration + :ivar disk_encryption_configuration: The disk encryption configuration for the pool. 
If + specified, encryption is performed on each node in the pool during node provisioning. + :vartype disk_encryption_configuration: ~azure.batch.models.DiskEncryptionConfiguration + :ivar node_placement_configuration: The node placement configuration for the pool. This + configuration will specify rules on how nodes in the pool will be physically allocated. + :vartype node_placement_configuration: ~azure.batch.models.BatchNodePlacementConfiguration + :ivar extensions: The virtual machine extension for the pool. If specified, the extensions + mentioned in this configuration will be installed on each node. + :vartype extensions: list[~azure.batch.models.VMExtension] + :ivar os_disk: Settings for the operating system disk of the Virtual Machine. + :vartype os_disk: ~azure.batch.models.OSDisk + :ivar security_profile: Specifies the security profile settings for the virtual machine or + virtual machine scale set. + :vartype security_profile: ~azure.batch.models.SecurityProfile + :ivar service_artifact_reference: Specifies the service artifact reference id used to set same + image version for all virtual machines in the scale set when using 'latest' image version. The + service artifact reference id in the form of + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. # pylint: disable=line-too-long + :vartype service_artifact_reference: ~azure.batch.models.ServiceArtifactReference + """ + + image_reference: "_models.ImageReference" = rest_field(name="imageReference") + """A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image + to use. Required.""" + node_agent_sku_id: str = rest_field(name="nodeAgentSKUId") + """The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. The + Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides + the command-and-control interface between the Compute Node and the Batch service. There are + different implementations of the Compute Node agent, known as SKUs, for different operating + systems. You must specify a Compute Node agent SKU which matches the selected Image reference. + To get the list of supported Compute Node agent SKUs along with their list of verified Image + references, see the 'List supported Compute Node agent SKUs' operation. Required.""" + windows_configuration: Optional["_models.WindowsConfiguration"] = rest_field(name="windowsConfiguration") + """Windows operating system settings on the virtual machine. This property must not be specified + if the imageReference property specifies a Linux OS Image.""" + data_disks: Optional[List["_models.DataDisk"]] = rest_field(name="dataDisks") + """The configuration for data disks attached to the Compute Nodes in the Pool. This property must + be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. + This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). + Existing disks cannot be attached, each attached disk is empty. When the Compute Node is + removed from the Pool, the disk and all data associated with it is also deleted. 
The disk is + not formatted after being attached, it must be formatted before use - for more information see + https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux # pylint: disable=line-too-long + and + https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. # pylint: disable=line-too-long""" + license_type: Optional[str] = rest_field(name="licenseType") + """This only applies to Images that contain the Windows operating system, and + should only be used when you hold valid on-premises licenses for the Compute + Nodes which will be deployed. If omitted, no on-premises licensing discount is + applied. Values are: + + Windows_Server - The on-premises license is for Windows + Server. + Windows_Client - The on-premises license is for Windows Client.""" + container_configuration: Optional["_models.ContainerConfiguration"] = rest_field(name="containerConfiguration") + """The container configuration for the Pool. If specified, setup is performed on each Compute Node + in the Pool to allow Tasks to run in containers. All regular Tasks and Job manager Tasks run on + this Pool must specify the containerSettings property, and all other Tasks may specify it.""" + disk_encryption_configuration: Optional["_models.DiskEncryptionConfiguration"] = rest_field( + name="diskEncryptionConfiguration" + ) + """The disk encryption configuration for the pool. If specified, encryption is performed on each + node in the pool during node provisioning.""" + node_placement_configuration: Optional["_models.BatchNodePlacementConfiguration"] = rest_field( + name="nodePlacementConfiguration" + ) + """The node placement configuration for the pool. This configuration will specify rules on how + nodes in the pool will be physically allocated.""" + extensions: Optional[List["_models.VMExtension"]] = rest_field() + """The virtual machine extension for the pool. If specified, the extensions mentioned in this + configuration will be installed on each node.""" + os_disk: Optional["_models.OSDisk"] = rest_field(name="osDisk") + """Settings for the operating system disk of the Virtual Machine.""" + security_profile: Optional["_models.SecurityProfile"] = rest_field(name="securityProfile") + """Specifies the security profile settings for the virtual machine or virtual machine scale set.""" + service_artifact_reference: Optional["_models.ServiceArtifactReference"] = rest_field( + name="serviceArtifactReference" + ) + """Specifies the service artifact reference id used to set same image version for all virtual + machines in the scale set when using 'latest' image version. The service artifact reference id + in the form of + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. 
# pylint: disable=line-too-long""" + + @overload + def __init__( + self, + *, + image_reference: "_models.ImageReference", + node_agent_sku_id: str, + windows_configuration: Optional["_models.WindowsConfiguration"] = None, + data_disks: Optional[List["_models.DataDisk"]] = None, + license_type: Optional[str] = None, + container_configuration: Optional["_models.ContainerConfiguration"] = None, + disk_encryption_configuration: Optional["_models.DiskEncryptionConfiguration"] = None, + node_placement_configuration: Optional["_models.BatchNodePlacementConfiguration"] = None, + extensions: Optional[List["_models.VMExtension"]] = None, + os_disk: Optional["_models.OSDisk"] = None, + security_profile: Optional["_models.SecurityProfile"] = None, + service_artifact_reference: Optional["_models.ServiceArtifactReference"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class VirtualMachineInfo(_model_base.Model): """Info about the current state of the virtual machine. - :param image_reference: The reference to the Azure Virtual Machine's - Marketplace Image. - :type image_reference: ~azure.batch.models.ImageReference - :param scale_set_vm_resource_id: - :type scale_set_vm_resource_id: str + :ivar image_reference: The reference to the Azure Virtual Machine's Marketplace Image. + :vartype image_reference: ~azure.batch.models.ImageReference + :ivar scale_set_vm_resource_id: The resource ID of the Compute Node's current Virtual Machine + Scale Set VM. Only defined if the Batch Account was created with its poolAllocationMode + property set to 'UserSubscription'. + :vartype scale_set_vm_resource_id: str """ - _attribute_map = { - 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, - 'scale_set_vm_resource_id': {'key': 'scaleSetVmResourceId', 'type': 'str'}, - } + image_reference: Optional["_models.ImageReference"] = rest_field(name="imageReference") + """The reference to the Azure Virtual Machine's Marketplace Image.""" + scale_set_vm_resource_id: Optional[str] = rest_field(name="scaleSetVmResourceId") + """The resource ID of the Compute Node's current Virtual Machine Scale Set VM. Only defined if the + Batch Account was created with its poolAllocationMode property set to 'UserSubscription'.""" - def __init__(self, **kwargs): - super(VirtualMachineInfo, self).__init__(**kwargs) - self.image_reference = kwargs.get('image_reference', None) - self.scale_set_vm_resource_id = kwargs.get('scale_set_vm_resource_id', None) + @overload + def __init__( + self, + *, + image_reference: Optional["_models.ImageReference"] = None, + scale_set_vm_resource_id: Optional[str] = None, + ): ... + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ -class VMExtension(Model): - """The configuration for virtual machine extensions. + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param publisher: Required. - :type publisher: str - :param type: Required. 
- :type type: str - :param type_handler_version: - :type type_handler_version: str - :param auto_upgrade_minor_version: Indicates whether the extension should - use a newer minor version if one is available at deployment time. Once - deployed, however, the extension will not upgrade minor versions unless - redeployed, even with this property set to true. - :type auto_upgrade_minor_version: bool - :param enable_automatic_upgrade: Indicates whether the extension should be - automatically upgraded by the platform if there is a newer version of the - extension available. - :type enable_automatic_upgrade: bool - :param settings: - :type settings: object - :param protected_settings: The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or no protected - settings at all. - :type protected_settings: object - :param provision_after_extensions: Collection of extension names after - which this extension needs to be provisioned. - :type provision_after_extensions: list[str] - """ - - _validation = { - 'name': {'required': True}, - 'publisher': {'required': True}, - 'type': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'publisher': {'key': 'publisher', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'type_handler_version': {'key': 'typeHandlerVersion', 'type': 'str'}, - 'auto_upgrade_minor_version': {'key': 'autoUpgradeMinorVersion', 'type': 'bool'}, - 'enable_automatic_upgrade': {'key': 'enableAutomaticUpgrade', 'type': 'bool'}, - 'settings': {'key': 'settings', 'type': 'object'}, - 'protected_settings': {'key': 'protectedSettings', 'type': 'object'}, - 'provision_after_extensions': {'key': 'provisionAfterExtensions', 'type': '[str]'}, - } - - def __init__(self, **kwargs): - super(VMExtension, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.publisher = kwargs.get('publisher', None) - self.type = kwargs.get('type', None) - self.type_handler_version = kwargs.get('type_handler_version', None) - self.auto_upgrade_minor_version = kwargs.get('auto_upgrade_minor_version', None) - self.enable_automatic_upgrade = kwargs.get('enable_automatic_upgrade', None) - self.settings = kwargs.get('settings', None) - self.protected_settings = kwargs.get('protected_settings', None) - self.provision_after_extensions = kwargs.get('provision_after_extensions', None) - - -class VMExtensionInstanceView(Model): - """The vm extension instance view. - :param name: - :type name: str - :param statuses: The resource status information. - :type statuses: list[~azure.batch.models.InstanceViewStatus] - :param sub_statuses: The resource status information. - :type sub_statuses: list[~azure.batch.models.InstanceViewStatus] - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'}, - 'sub_statuses': {'key': 'subStatuses', 'type': '[InstanceViewStatus]'}, - } +class VMExtension(_model_base.Model): + """The configuration for virtual machine extensions. - def __init__(self, **kwargs): - super(VMExtensionInstanceView, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.statuses = kwargs.get('statuses', None) - self.sub_statuses = kwargs.get('sub_statuses', None) + :ivar name: The name of the virtual machine extension. Required. + :vartype name: str + :ivar publisher: The name of the extension handler publisher. Required. + :vartype publisher: str + :ivar type: The type of the extension. Required. 
+ :vartype type: str + :ivar type_handler_version: The version of script handler. + :vartype type_handler_version: str + :ivar auto_upgrade_minor_version: Indicates whether the extension should use a newer minor + version if one is available at deployment time. Once deployed, however, the extension will not + upgrade minor versions unless redeployed, even with this property set to true. + :vartype auto_upgrade_minor_version: bool + :ivar enable_automatic_upgrade: Indicates whether the extension should be automatically + upgraded by the platform if there is a newer version of the extension available. + :vartype enable_automatic_upgrade: bool + :ivar settings: JSON formatted public settings for the extension. + :vartype settings: dict[str, str] + :ivar protected_settings: The extension can contain either protectedSettings or + protectedSettingsFromKeyVault or no protected settings at all. + :vartype protected_settings: dict[str, str] + :ivar provision_after_extensions: The collection of extension names. Collection of extension + names after which this extension needs to be provisioned. + :vartype provision_after_extensions: list[str] + """ + + name: str = rest_field() + """The name of the virtual machine extension. Required.""" + publisher: str = rest_field() + """The name of the extension handler publisher. Required.""" + type: str = rest_field() + """The type of the extension. Required.""" + type_handler_version: Optional[str] = rest_field(name="typeHandlerVersion") + """The version of script handler.""" + auto_upgrade_minor_version: Optional[bool] = rest_field(name="autoUpgradeMinorVersion") + """Indicates whether the extension should use a newer minor version if one is available at + deployment time. Once deployed, however, the extension will not upgrade minor versions unless + redeployed, even with this property set to true.""" + enable_automatic_upgrade: Optional[bool] = rest_field(name="enableAutomaticUpgrade") + """Indicates whether the extension should be automatically upgraded by the platform if there is a + newer version of the extension available.""" + settings: Optional[Dict[str, str]] = rest_field() + """JSON formatted public settings for the extension.""" + protected_settings: Optional[Dict[str, str]] = rest_field(name="protectedSettings") + """The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no + protected settings at all.""" + provision_after_extensions: Optional[List[str]] = rest_field(name="provisionAfterExtensions") + """The collection of extension names. Collection of extension names after which this extension + needs to be provisioned.""" + + @overload + def __init__( + self, + *, + name: str, + publisher: str, + type: str, + type_handler_version: Optional[str] = None, + auto_upgrade_minor_version: Optional[bool] = None, + enable_automatic_upgrade: Optional[bool] = None, + settings: Optional[Dict[str, str]] = None, + protected_settings: Optional[Dict[str, str]] = None, + provision_after_extensions: Optional[List[str]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class VMExtensionInstanceView(_model_base.Model): + """The vm extension instance view. -class WindowsConfiguration(Model): + :ivar name: The name of the vm extension instance view. 
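A VMExtension is what installs an agent onto every node in the pool; settings carries the JSON-formatted public configuration as a string dictionary. A sketch with illustrative (not default) extension values:

from azure.batch import models

monitoring_extension = models.VMExtension(
    name="AzureMonitorLinuxAgent",          # illustrative extension, not a default
    publisher="Microsoft.Azure.Monitor",
    type="AzureMonitorLinuxAgent",
    auto_upgrade_minor_version=True,
    settings={"GCS_AUTO_CONFIG": "true"},   # public settings as dict[str, str]
)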
+ :vartype name: str + :ivar statuses: The resource status information. + :vartype statuses: list[~azure.batch.models.InstanceViewStatus] + :ivar sub_statuses: The resource status information. + :vartype sub_statuses: list[~azure.batch.models.InstanceViewStatus] + """ + + name: Optional[str] = rest_field() + """The name of the vm extension instance view.""" + statuses: Optional[List["_models.InstanceViewStatus"]] = rest_field() + """The resource status information.""" + sub_statuses: Optional[List["_models.InstanceViewStatus"]] = rest_field(name="subStatuses") + """The resource status information.""" + + @overload + def __init__( + self, + *, + name: Optional[str] = None, + statuses: Optional[List["_models.InstanceViewStatus"]] = None, + sub_statuses: Optional[List["_models.InstanceViewStatus"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class WindowsConfiguration(_model_base.Model): """Windows operating system settings to apply to the virtual machine. - :param enable_automatic_updates: Whether automatic updates are enabled on - the virtual machine. If omitted, the default value is true. - :type enable_automatic_updates: bool + :ivar enable_automatic_updates: Whether automatic updates are enabled on the virtual machine. + If omitted, the default value is true. + :vartype enable_automatic_updates: bool """ - _attribute_map = { - 'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'}, - } + enable_automatic_updates: Optional[bool] = rest_field(name="enableAutomaticUpdates") + """Whether automatic updates are enabled on the virtual machine. If omitted, the default value is + true.""" + + @overload + def __init__( + self, + *, + enable_automatic_updates: Optional[bool] = None, + ): ... - def __init__(self, **kwargs): - super(WindowsConfiguration, self).__init__(**kwargs) - self.enable_automatic_updates = kwargs.get('enable_automatic_updates', None) + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) -class WindowsUserConfiguration(Model): + +class WindowsUserConfiguration(_model_base.Model): """Properties used to create a user Account on a Windows Compute Node. - :param login_mode: The default value for VirtualMachineConfiguration Pools - is 'batch' and for CloudServiceConfiguration Pools is 'interactive'. - Possible values include: 'batch', 'interactive' - :type login_mode: str or ~azure.batch.models.LoginMode + :ivar login_mode: The login mode for the user. The default value for + VirtualMachineConfiguration Pools is 'batch'. Known values are: "batch" and "interactive". + :vartype login_mode: str or ~azure.batch.models.LoginMode """ - _attribute_map = { - 'login_mode': {'key': 'loginMode', 'type': 'LoginMode'}, - } + login_mode: Optional[Union[str, "_models.LoginMode"]] = rest_field(name="loginMode") + """The login mode for the user. The default value for VirtualMachineConfiguration Pools is + 'batch'. 
Known values are: \"batch\" and \"interactive\".""" + + @overload + def __init__( + self, + *, + login_mode: Optional[Union[str, "_models.LoginMode"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ - def __init__(self, **kwargs): - super(WindowsUserConfiguration, self).__init__(**kwargs) - self.login_mode = kwargs.get('login_mode', None) + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) diff --git a/sdk/batch/azure-batch/azure/batch/models/_models_py3.py b/sdk/batch/azure-batch/azure/batch/models/_models_py3.py deleted file mode 100644 index 9691dd16ea08..000000000000 --- a/sdk/batch/azure-batch/azure/batch/models/_models_py3.py +++ /dev/null @@ -1,11507 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from msrest.exceptions import HttpOperationError - - -class AccountListPoolNodeCountsOptions(Model): - """Additional parameters for list_pool_node_counts operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch. - :type filter: str - :param max_results: The maximum number of items to return in the response. - Default value: 10 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
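Every regenerated model in this diff exposes the same pair of constructors: keyword arguments using the Python attribute names, or a single mapping of raw JSON keyed by the wire names given to rest_field. WindowsConfiguration above makes a compact illustration:

from azure.batch import models

# Keyword form: Python attribute name.
win_cfg = models.WindowsConfiguration(enable_automatic_updates=False)

# Mapping form: raw JSON keyed by the wire name declared via rest_field.
win_cfg_from_json = models.WindowsConfiguration({"enableAutomaticUpdates": False})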
- :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, filter: str=None, max_results: int=10, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(AccountListPoolNodeCountsOptions, self).__init__(**kwargs) - self.filter = filter - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class AccountListSupportedImagesOptions(Model): - """Additional parameters for list_supported_images operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. - :type filter: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 results will be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(AccountListSupportedImagesOptions, self).__init__(**kwargs) - self.filter = filter - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class AffinityInformation(Model): - """A locality hint that can be used by the Batch service to select a Compute - Node on which to start a Task. - - All required parameters must be populated in order to send to Azure. - - :param affinity_id: Required. You can pass the affinityId of a Node to - indicate that this Task needs to run on that Compute Node. Note that this - is just a soft affinity. If the target Compute Node is busy or unavailable - at the time the Task is scheduled, then the Task will be scheduled - elsewhere. 
- :type affinity_id: str - """ - - _validation = { - 'affinity_id': {'required': True}, - } - - _attribute_map = { - 'affinity_id': {'key': 'affinityId', 'type': 'str'}, - } - - def __init__(self, *, affinity_id: str, **kwargs) -> None: - super(AffinityInformation, self).__init__(**kwargs) - self.affinity_id = affinity_id - - -class ApplicationGetOptions(Model): - """Additional parameters for get operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ApplicationGetOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ApplicationListOptions(Model): - """Additional parameters for list operation. - - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 applications can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ApplicationListOptions, self).__init__(**kwargs) - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ApplicationPackageReference(Model): - """A reference to an Package to be deployed to Compute Nodes. - - All required parameters must be populated in order to send to Azure. - - :param application_id: Required. When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - :type application_id: str - :param version: If this is omitted on a Pool, and no default version is - specified for this application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - :type version: str - """ - - _validation = { - 'application_id': {'required': True}, - } - - _attribute_map = { - 'application_id': {'key': 'applicationId', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - } - - def __init__(self, *, application_id: str, version: str=None, **kwargs) -> None: - super(ApplicationPackageReference, self).__init__(**kwargs) - self.application_id = application_id - self.version = version - - -class ApplicationSummary(Model): - """Contains information about an application in an Azure Batch Account. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. - :type id: str - :param display_name: Required. - :type display_name: str - :param versions: Required. - :type versions: list[str] - """ - - _validation = { - 'id': {'required': True}, - 'display_name': {'required': True}, - 'versions': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'versions': {'key': 'versions', 'type': '[str]'}, - } - - def __init__(self, *, id: str, display_name: str, versions, **kwargs) -> None: - super(ApplicationSummary, self).__init__(**kwargs) - self.id = id - self.display_name = display_name - self.versions = versions - - -class AuthenticationTokenSettings(Model): - """The settings for an authentication token that the Task can use to perform - Batch service operations. - - :param access: The authentication token grants access to a limited set of - Batch service operations. Currently the only supported value for the - access property is 'job', which grants access to all operations related to - the Job which contains the Task. 
- :type access: list[str or ~azure.batch.models.AccessScope] - """ - - _attribute_map = { - 'access': {'key': 'access', 'type': '[AccessScope]'}, - } - - def __init__(self, *, access=None, **kwargs) -> None: - super(AuthenticationTokenSettings, self).__init__(**kwargs) - self.access = access - - -class AutomaticOSUpgradePolicy(Model): - """The configuration parameters used for performing automatic OS upgrade. - - :param disable_automatic_rollback: Whether OS image rollback feature - should be disabled. - :type disable_automatic_rollback: bool - :param enable_automatic_os_upgrade: Indicates whether OS upgrades should - automatically be applied to scale set instances in a rolling fashion when - a newer version of the OS image becomes available.
If this is - set to true for Windows based pools, - [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/en-us/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) - cannot be set to true. - :type enable_automatic_os_upgrade: bool - :param use_rolling_upgrade_policy: Indicates whether rolling upgrade - policy should be used during Auto OS Upgrade. Auto OS Upgrade will - fallback to the default policy if no policy is defined on the VMSS. - :type use_rolling_upgrade_policy: bool - :param os_rolling_upgrade_deferral: Defer OS upgrades on the TVMs if they - are running tasks. - :type os_rolling_upgrade_deferral: bool - """ - - _attribute_map = { - 'disable_automatic_rollback': {'key': 'disableAutomaticRollback', 'type': 'bool'}, - 'enable_automatic_os_upgrade': {'key': 'enableAutomaticOSUpgrade', 'type': 'bool'}, - 'use_rolling_upgrade_policy': {'key': 'useRollingUpgradePolicy', 'type': 'bool'}, - 'os_rolling_upgrade_deferral': {'key': 'osRollingUpgradeDeferral', 'type': 'bool'}, - } - - def __init__(self, *, disable_automatic_rollback: bool=None, enable_automatic_os_upgrade: bool=None, use_rolling_upgrade_policy: bool=None, os_rolling_upgrade_deferral: bool=None, **kwargs) -> None: - super(AutomaticOSUpgradePolicy, self).__init__(**kwargs) - self.disable_automatic_rollback = disable_automatic_rollback - self.enable_automatic_os_upgrade = enable_automatic_os_upgrade - self.use_rolling_upgrade_policy = use_rolling_upgrade_policy - self.os_rolling_upgrade_deferral = os_rolling_upgrade_deferral - - -class AutoPoolSpecification(Model): - """Specifies characteristics for a temporary 'auto pool'. The Batch service - will create this auto Pool when the Job is submitted. - - All required parameters must be populated in order to send to Azure. - - :param auto_pool_id_prefix: The Batch service assigns each auto Pool a - unique identifier on creation. To distinguish between Pools created for - different purposes, you can specify this element to add a prefix to the ID - that is assigned. The prefix can be up to 20 characters long. - :type auto_pool_id_prefix: str - :param pool_lifetime_option: Required. Possible values include: - 'jobSchedule', 'job' - :type pool_lifetime_option: str or ~azure.batch.models.PoolLifetimeOption - :param keep_alive: Whether to keep an auto Pool alive after its lifetime - expires. If false, the Batch service deletes the Pool once its lifetime - (as determined by the poolLifetimeOption setting) expires; that is, when - the Job or Job Schedule completes. If true, the Batch service does not - delete the Pool automatically. It is up to the user to delete auto Pools - created with this option. - :type keep_alive: bool - :param pool: The Pool specification for the auto Pool. 
- :type pool: ~azure.batch.models.PoolSpecification - """ - - _validation = { - 'pool_lifetime_option': {'required': True}, - } - - _attribute_map = { - 'auto_pool_id_prefix': {'key': 'autoPoolIdPrefix', 'type': 'str'}, - 'pool_lifetime_option': {'key': 'poolLifetimeOption', 'type': 'PoolLifetimeOption'}, - 'keep_alive': {'key': 'keepAlive', 'type': 'bool'}, - 'pool': {'key': 'pool', 'type': 'PoolSpecification'}, - } - - def __init__(self, *, pool_lifetime_option, auto_pool_id_prefix: str=None, keep_alive: bool=None, pool=None, **kwargs) -> None: - super(AutoPoolSpecification, self).__init__(**kwargs) - self.auto_pool_id_prefix = auto_pool_id_prefix - self.pool_lifetime_option = pool_lifetime_option - self.keep_alive = keep_alive - self.pool = pool - - -class AutoScaleRun(Model): - """The results and errors from an execution of a Pool autoscale formula. - - All required parameters must be populated in order to send to Azure. - - :param timestamp: Required. - :type timestamp: datetime - :param results: Each variable value is returned in the form - $variable=value, and variables are separated by semicolons. - :type results: str - :param error: Details of the error encountered evaluating the autoscale - formula on the Pool, if the evaluation was unsuccessful. - :type error: ~azure.batch.models.AutoScaleRunError - """ - - _validation = { - 'timestamp': {'required': True}, - } - - _attribute_map = { - 'timestamp': {'key': 'timestamp', 'type': 'iso-8601'}, - 'results': {'key': 'results', 'type': 'str'}, - 'error': {'key': 'error', 'type': 'AutoScaleRunError'}, - } - - def __init__(self, *, timestamp, results: str=None, error=None, **kwargs) -> None: - super(AutoScaleRun, self).__init__(**kwargs) - self.timestamp = timestamp - self.results = results - self.error = error - - -class AutoScaleRunError(Model): - """An error that occurred when executing or evaluating a Pool autoscale - formula. - - :param code: - :type code: str - :param message: - :type message: str - :param values: - :type values: list[~azure.batch.models.NameValuePair] - """ - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'values': {'key': 'values', 'type': '[NameValuePair]'}, - } - - def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None: - super(AutoScaleRunError, self).__init__(**kwargs) - self.code = code - self.message = message - self.values = values - - -class AutoUserSpecification(Model): - """Specifies the parameters for the auto user that runs a Task on the Batch - service. - - :param scope: The default value is pool. If the pool is running Windows a - value of Task should be specified if stricter isolation between tasks is - required. For example, if the task mutates the registry in a way which - could impact other tasks, or if certificates have been specified on the - pool which should not be accessible by normal tasks but should be - accessible by StartTasks. Possible values include: 'task', 'pool' - :type scope: str or ~azure.batch.models.AutoUserScope - :param elevation_level: The elevation level of the auto user. The default - value is nonAdmin. 
Possible values include: 'nonAdmin', 'admin' - :type elevation_level: str or ~azure.batch.models.ElevationLevel - """ - - _attribute_map = { - 'scope': {'key': 'scope', 'type': 'AutoUserScope'}, - 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, - } - - def __init__(self, *, scope=None, elevation_level=None, **kwargs) -> None: - super(AutoUserSpecification, self).__init__(**kwargs) - self.scope = scope - self.elevation_level = elevation_level - - -class AzureBlobFileSystemConfiguration(Model): - """Information used to connect to an Azure Storage Container using Blobfuse. - - All required parameters must be populated in order to send to Azure. - - :param account_name: Required. - :type account_name: str - :param container_name: Required. - :type container_name: str - :param account_key: This property is mutually exclusive with both sasKey - and identity; exactly one must be specified. - :type account_key: str - :param sas_key: This property is mutually exclusive with both accountKey - and identity; exactly one must be specified. - :type sas_key: str - :param blobfuse_options: These are 'net use' options in Windows and - 'mount' options in Linux. - :type blobfuse_options: str - :param relative_mount_path: Required. All file systems are mounted - relative to the Batch mounts directory, accessible via the - AZ_BATCH_NODE_MOUNTS_DIR environment variable. - :type relative_mount_path: str - :param identity_reference: The reference to the user assigned identity to - use to access containerName. This property is mutually exclusive with both - accountKey and sasKey; exactly one must be specified. - :type identity_reference: ~azure.batch.models.ComputeNodeIdentityReference - """ - - _validation = { - 'account_name': {'required': True}, - 'container_name': {'required': True}, - 'relative_mount_path': {'required': True}, - } - - _attribute_map = { - 'account_name': {'key': 'accountName', 'type': 'str'}, - 'container_name': {'key': 'containerName', 'type': 'str'}, - 'account_key': {'key': 'accountKey', 'type': 'str'}, - 'sas_key': {'key': 'sasKey', 'type': 'str'}, - 'blobfuse_options': {'key': 'blobfuseOptions', 'type': 'str'}, - 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, - 'identity_reference': {'key': 'identityReference', 'type': 'ComputeNodeIdentityReference'}, - } - - def __init__(self, *, account_name: str, container_name: str, relative_mount_path: str, account_key: str=None, sas_key: str=None, blobfuse_options: str=None, identity_reference=None, **kwargs) -> None: - super(AzureBlobFileSystemConfiguration, self).__init__(**kwargs) - self.account_name = account_name - self.container_name = container_name - self.account_key = account_key - self.sas_key = sas_key - self.blobfuse_options = blobfuse_options - self.relative_mount_path = relative_mount_path - self.identity_reference = identity_reference - - -class AzureFileShareConfiguration(Model): - """Information used to connect to an Azure Fileshare. - - All required parameters must be populated in order to send to Azure. - - :param account_name: Required. - :type account_name: str - :param azure_file_url: Required. This is of the form - 'https://{account}.file.core.windows.net/'. - :type azure_file_url: str - :param account_key: Required. - :type account_key: str - :param relative_mount_path: Required. All file systems are mounted - relative to the Batch mounts directory, accessible via the - AZ_BATCH_NODE_MOUNTS_DIR environment variable. 
- :type relative_mount_path: str - :param mount_options: These are 'net use' options in Windows and 'mount' - options in Linux. - :type mount_options: str - """ - - _validation = { - 'account_name': {'required': True}, - 'azure_file_url': {'required': True}, - 'account_key': {'required': True}, - 'relative_mount_path': {'required': True}, - } - - _attribute_map = { - 'account_name': {'key': 'accountName', 'type': 'str'}, - 'azure_file_url': {'key': 'azureFileUrl', 'type': 'str'}, - 'account_key': {'key': 'accountKey', 'type': 'str'}, - 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, - 'mount_options': {'key': 'mountOptions', 'type': 'str'}, - } - - def __init__(self, *, account_name: str, azure_file_url: str, account_key: str, relative_mount_path: str, mount_options: str=None, **kwargs) -> None: - super(AzureFileShareConfiguration, self).__init__(**kwargs) - self.account_name = account_name - self.azure_file_url = azure_file_url - self.account_key = account_key - self.relative_mount_path = relative_mount_path - self.mount_options = mount_options - - -class BatchError(Model): - """An error response received from the Azure Batch service. - - :param code: - :type code: str - :param message: A message describing the error, intended to be suitable - for display in a user interface. - :type message: ~azure.batch.models.ErrorMessage - :param values: - :type values: list[~azure.batch.models.BatchErrorDetail] - """ - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'ErrorMessage'}, - 'values': {'key': 'values', 'type': '[BatchErrorDetail]'}, - } - - def __init__(self, *, code: str=None, message=None, values=None, **kwargs) -> None: - super(BatchError, self).__init__(**kwargs) - self.code = code - self.message = message - self.values = values - - -class BatchErrorException(HttpOperationError): - """Server responsed with exception of type: 'BatchError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, deserialize, response, *args): - - super(BatchErrorException, self).__init__(deserialize, response, 'BatchError', *args) - - -class BatchErrorDetail(Model): - """An item of additional information included in an Azure Batch error - response. - - :param key: - :type key: str - :param value: - :type value: str - """ - - _attribute_map = { - 'key': {'key': 'key', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__(self, *, key: str=None, value: str=None, **kwargs) -> None: - super(BatchErrorDetail, self).__init__(**kwargs) - self.key = key - self.value = value - - -class BatchPoolIdentity(Model): - """The identity of the Batch pool, if configured. - - The identity of the Batch pool, if configured. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The list of user identities associated with the - Batch pool. The user identity dictionary key references will be ARM - resource ids in the form: - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. 
- Possible values include: 'UserAssigned', 'None' - :type type: str or ~azure.batch.models.PoolIdentityType - :param user_assigned_identities: The user identity dictionary key - references will be ARM resource ids in the form: - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - :type user_assigned_identities: - list[~azure.batch.models.UserAssignedIdentity] - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'type', 'type': 'PoolIdentityType'}, - 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '[UserAssignedIdentity]'}, - } - - def __init__(self, *, type, user_assigned_identities=None, **kwargs) -> None: - super(BatchPoolIdentity, self).__init__(**kwargs) - self.type = type - self.user_assigned_identities = user_assigned_identities - - -class Certificate(Model): - """A Certificate that can be installed on Compute Nodes and can be used to - authenticate operations on the machine. - - :param thumbprint: - :type thumbprint: str - :param thumbprint_algorithm: - :type thumbprint_algorithm: str - :param url: - :type url: str - :param state: The current state of the Certificate. Possible values - include: 'active', 'deleting', 'deleteFailed' - :type state: str or ~azure.batch.models.CertificateState - :param state_transition_time: - :type state_transition_time: datetime - :param previous_state: The previous state of the Certificate. This - property is not set if the Certificate is in its initial active state. - Possible values include: 'active', 'deleting', 'deleteFailed' - :type previous_state: str or ~azure.batch.models.CertificateState - :param previous_state_transition_time: This property is not set if the - Certificate is in its initial Active state. - :type previous_state_transition_time: datetime - :param public_data: - :type public_data: str - :param delete_certificate_error: The error that occurred on the last - attempt to delete this Certificate. This property is set only if the - Certificate is in the DeleteFailed state. 
- :type delete_certificate_error: ~azure.batch.models.DeleteCertificateError - """ - - _attribute_map = { - 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, - 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, - 'url': {'key': 'url', 'type': 'str'}, - 'state': {'key': 'state', 'type': 'CertificateState'}, - 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, - 'previous_state': {'key': 'previousState', 'type': 'CertificateState'}, - 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, - 'public_data': {'key': 'publicData', 'type': 'str'}, - 'delete_certificate_error': {'key': 'deleteCertificateError', 'type': 'DeleteCertificateError'}, - } - - def __init__(self, *, thumbprint: str=None, thumbprint_algorithm: str=None, url: str=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, public_data: str=None, delete_certificate_error=None, **kwargs) -> None: - super(Certificate, self).__init__(**kwargs) - self.thumbprint = thumbprint - self.thumbprint_algorithm = thumbprint_algorithm - self.url = url - self.state = state - self.state_transition_time = state_transition_time - self.previous_state = previous_state - self.previous_state_transition_time = previous_state_transition_time - self.public_data = public_data - self.delete_certificate_error = delete_certificate_error - - -class CertificateAddOptions(Model): - """Additional parameters for add operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(CertificateAddOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class CertificateAddParameter(Model): - """A Certificate that can be installed on Compute Nodes and can be used to - authenticate operations on the machine. - - All required parameters must be populated in order to send to Azure. - - :param thumbprint: Required. - :type thumbprint: str - :param thumbprint_algorithm: Required. - :type thumbprint_algorithm: str - :param data: Required. - :type data: str - :param certificate_format: Possible values include: 'pfx', 'cer' - :type certificate_format: str or ~azure.batch.models.CertificateFormat - :param password: This must be omitted if the Certificate format is cer. 
- :type password: str - """ - - _validation = { - 'thumbprint': {'required': True}, - 'thumbprint_algorithm': {'required': True}, - 'data': {'required': True}, - } - - _attribute_map = { - 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, - 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, - 'data': {'key': 'data', 'type': 'str'}, - 'certificate_format': {'key': 'certificateFormat', 'type': 'CertificateFormat'}, - 'password': {'key': 'password', 'type': 'str'}, - } - - def __init__(self, *, thumbprint: str, thumbprint_algorithm: str, data: str, certificate_format=None, password: str=None, **kwargs) -> None: - super(CertificateAddParameter, self).__init__(**kwargs) - self.thumbprint = thumbprint - self.thumbprint_algorithm = thumbprint_algorithm - self.data = data - self.certificate_format = certificate_format - self.password = password - - -class CertificateCancelDeletionOptions(Model): - """Additional parameters for cancel_deletion operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(CertificateCancelDeletionOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class CertificateDeleteOptions(Model): - """Additional parameters for delete operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(CertificateDeleteOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class CertificateGetOptions(Model): - """Additional parameters for get operation. - - :param select: An OData $select clause. - :type select: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(CertificateGetOptions, self).__init__(**kwargs) - self.select = select - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class CertificateListOptions(Model): - """Additional parameters for list operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Certificates can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. 
Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(CertificateListOptions, self).__init__(**kwargs) - self.filter = filter - self.select = select - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class CertificateReference(Model): - """A reference to a Certificate to be installed on Compute Nodes in a Pool. - - Warning: This object is deprecated and will be removed after February, - 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - - All required parameters must be populated in order to send to Azure. - - :param thumbprint: Required. - :type thumbprint: str - :param thumbprint_algorithm: Required. - :type thumbprint_algorithm: str - :param store_location: The default value is currentuser. This property is - applicable only for Pools configured with Windows Compute Nodes (that is, - created with cloudServiceConfiguration, or with - virtualMachineConfiguration using a Windows Image reference). For Linux - Compute Nodes, the Certificates are stored in a directory inside the Task - working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is - supplied to the Task to query for this location. For Certificates with - visibility of 'remoteUser', a 'certs' directory is created in the user's - home directory (e.g., /home/{user-name}/certs) and Certificates are placed - in that directory. Possible values include: 'currentUser', 'localMachine' - :type store_location: str or ~azure.batch.models.CertificateStoreLocation - :param store_name: This property is applicable only for Pools configured - with Windows Compute Nodes (that is, created with - cloudServiceConfiguration, or with virtualMachineConfiguration using a - Windows Image reference). Common store names include: My, Root, CA, Trust, - Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but - any custom store name can also be used. The default value is My. - :type store_name: str - :param visibility: You can specify more than one visibility in this - collection. The default is all Accounts. 
- :type visibility: list[str or ~azure.batch.models.CertificateVisibility] - """ - - _validation = { - 'thumbprint': {'required': True}, - 'thumbprint_algorithm': {'required': True}, - } - - _attribute_map = { - 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, - 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, - 'store_location': {'key': 'storeLocation', 'type': 'CertificateStoreLocation'}, - 'store_name': {'key': 'storeName', 'type': 'str'}, - 'visibility': {'key': 'visibility', 'type': '[CertificateVisibility]'}, - } - - def __init__(self, *, thumbprint: str, thumbprint_algorithm: str, store_location=None, store_name: str=None, visibility=None, **kwargs) -> None: - super(CertificateReference, self).__init__(**kwargs) - self.thumbprint = thumbprint - self.thumbprint_algorithm = thumbprint_algorithm - self.store_location = store_location - self.store_name = store_name - self.visibility = visibility - - -class CIFSMountConfiguration(Model): - """Information used to connect to a CIFS file system. - - All required parameters must be populated in order to send to Azure. - - :param username: Required. - :type username: str - :param source: Required. - :type source: str - :param relative_mount_path: Required. All file systems are mounted - relative to the Batch mounts directory, accessible via the - AZ_BATCH_NODE_MOUNTS_DIR environment variable. - :type relative_mount_path: str - :param mount_options: These are 'net use' options in Windows and 'mount' - options in Linux. - :type mount_options: str - :param password: Required. - :type password: str - """ - - _validation = { - 'username': {'required': True}, - 'source': {'required': True}, - 'relative_mount_path': {'required': True}, - 'password': {'required': True}, - } - - _attribute_map = { - 'username': {'key': 'username', 'type': 'str'}, - 'source': {'key': 'source', 'type': 'str'}, - 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, - 'mount_options': {'key': 'mountOptions', 'type': 'str'}, - 'password': {'key': 'password', 'type': 'str'}, - } - - def __init__(self, *, username: str, source: str, relative_mount_path: str, password: str, mount_options: str=None, **kwargs) -> None: - super(CIFSMountConfiguration, self).__init__(**kwargs) - self.username = username - self.source = source - self.relative_mount_path = relative_mount_path - self.mount_options = mount_options - self.password = password - - -class CloudError(Model): - """CloudError. - """ - - _attribute_map = { - } - - -class CloudJob(Model): - """An Azure Batch Job. - - :param id: The ID is case-preserving and case-insensitive (that is, you - may not have two IDs within an Account that differ only by case). - :type id: str - :param display_name: - :type display_name: str - :param uses_task_dependencies: Whether Tasks in the Job can define - dependencies on each other. The default is false. - :type uses_task_dependencies: bool - :param url: - :type url: str - :param e_tag: This is an opaque string. You can use it to detect whether - the Job has changed between requests. In particular, you can be pass the - ETag when updating a Job to specify that your changes should take effect - only if nobody else has modified the Job in the meantime. - :type e_tag: str - :param last_modified: This is the last time at which the Job level data, - such as the Job state or priority, changed. It does not factor in - task-level changes such as adding new Tasks or Tasks changing state. 
- :type last_modified: datetime - :param creation_time: - :type creation_time: datetime - :param state: The current state of the Job. Possible values include: - 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', - 'deleting' - :type state: str or ~azure.batch.models.JobState - :param state_transition_time: - :type state_transition_time: datetime - :param previous_state: The previous state of the Job. This property is not - set if the Job is in its initial Active state. Possible values include: - 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', - 'deleting' - :type previous_state: str or ~azure.batch.models.JobState - :param previous_state_transition_time: This property is not set if the Job - is in its initial Active state. - :type previous_state_transition_time: datetime - :param priority: The priority of the Job. Priority values can range from - -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. The default value is 0. - :type priority: int - :param allow_task_preemption: Whether Tasks in this job can be preempted - by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be - able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - :type allow_task_preemption: bool - :param max_parallel_tasks: The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 - or greater than 0 if specified. If not specified, the default value is -1, - which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created - using the update job API. Default value: -1 . - :type max_parallel_tasks: int - :param constraints: The execution constraints for the Job. - :type constraints: ~azure.batch.models.JobConstraints - :param job_manager_task: Details of a Job Manager Task to be launched when - the Job is started. - :type job_manager_task: ~azure.batch.models.JobManagerTask - :param job_preparation_task: The Job Preparation Task. The Job Preparation - Task is a special Task run on each Compute Node before any other Task of - the Job. - :type job_preparation_task: ~azure.batch.models.JobPreparationTask - :param job_release_task: The Job Release Task. The Job Release Task is a - special Task run at the end of the Job on each Compute Node that has run - any other Task of the Job. - :type job_release_task: ~azure.batch.models.JobReleaseTask - :param common_environment_settings: Individual Tasks can override an - environment setting specified here by specifying the same setting name - with a different value. - :type common_environment_settings: - list[~azure.batch.models.EnvironmentSetting] - :param pool_info: The Pool settings associated with the Job. - :type pool_info: ~azure.batch.models.PoolInformation - :param on_all_tasks_complete: The action the Batch service should take - when all Tasks in the Job are in the completed state. The default is - noaction. Possible values include: 'noAction', 'terminateJob' - :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete - :param on_task_failure: The action the Batch service should take when any - Task in the Job fails. A Task is considered to have failed if has a - failureInfo. 
A failureInfo is set if the Task completes with a non-zero - exit code after exhausting its retry count, or if there was an error - starting the Task, for example due to a resource file download error. The - default is noaction. Possible values include: 'noAction', - 'performExitOptionsJobAction' - :type on_task_failure: str or ~azure.batch.models.OnTaskFailure - :param network_configuration: The network configuration for the Job. - :type network_configuration: ~azure.batch.models.JobNetworkConfiguration - :param metadata: The Batch service does not assign any meaning to - metadata; it is solely for the use of user code. - :type metadata: list[~azure.batch.models.MetadataItem] - :param execution_info: The execution information for the Job. - :type execution_info: ~azure.batch.models.JobExecutionInformation - :param stats: Resource usage statistics for the entire lifetime of the - Job. This property is populated only if the CloudJob was retrieved with an - expand clause including the 'stats' attribute; otherwise it is null. The - statistics may not be immediately available. The Batch service performs - periodic roll-up of statistics. The typical delay is about 30 minutes. - :type stats: ~azure.batch.models.JobStatistics - """ - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, - 'url': {'key': 'url', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, - 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, - 'state': {'key': 'state', 'type': 'JobState'}, - 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, - 'previous_state': {'key': 'previousState', 'type': 'JobState'}, - 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, - 'priority': {'key': 'priority', 'type': 'int'}, - 'allow_task_preemption': {'key': 'allowTaskPreemption', 'type': 'bool'}, - 'max_parallel_tasks': {'key': 'maxParallelTasks', 'type': 'int'}, - 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, - 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, - 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, - 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, - 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, - 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, - 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, - 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, - 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'}, - 'stats': {'key': 'stats', 'type': 'JobStatistics'}, - } - - def __init__(self, *, id: str=None, display_name: str=None, uses_task_dependencies: bool=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, priority: int=None, allow_task_preemption: bool=None, max_parallel_tasks: int=-1, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, pool_info=None, 
on_all_tasks_complete=None, on_task_failure=None, network_configuration=None, metadata=None, execution_info=None, stats=None, **kwargs) -> None: - super(CloudJob, self).__init__(**kwargs) - self.id = id - self.display_name = display_name - self.uses_task_dependencies = uses_task_dependencies - self.url = url - self.e_tag = e_tag - self.last_modified = last_modified - self.creation_time = creation_time - self.state = state - self.state_transition_time = state_transition_time - self.previous_state = previous_state - self.previous_state_transition_time = previous_state_transition_time - self.priority = priority - self.allow_task_preemption = allow_task_preemption - self.max_parallel_tasks = max_parallel_tasks - self.constraints = constraints - self.job_manager_task = job_manager_task - self.job_preparation_task = job_preparation_task - self.job_release_task = job_release_task - self.common_environment_settings = common_environment_settings - self.pool_info = pool_info - self.on_all_tasks_complete = on_all_tasks_complete - self.on_task_failure = on_task_failure - self.network_configuration = network_configuration - self.metadata = metadata - self.execution_info = execution_info - self.stats = stats - - -class CloudJobSchedule(Model): - """A Job Schedule that allows recurring Jobs by specifying when to run Jobs - and a specification used to create each Job. - - :param id: - :type id: str - :param display_name: - :type display_name: str - :param url: - :type url: str - :param e_tag: This is an opaque string. You can use it to detect whether - the Job Schedule has changed between requests. In particular, you can be - pass the ETag with an Update Job Schedule request to specify that your - changes should take effect only if nobody else has modified the schedule - in the meantime. - :type e_tag: str - :param last_modified: This is the last time at which the schedule level - data, such as the Job specification or recurrence information, changed. It - does not factor in job-level changes such as new Jobs being created or - Jobs changing state. - :type last_modified: datetime - :param creation_time: - :type creation_time: datetime - :param state: The current state of the Job Schedule. Possible values - include: 'active', 'completed', 'disabled', 'terminating', 'deleting' - :type state: str or ~azure.batch.models.JobScheduleState - :param state_transition_time: - :type state_transition_time: datetime - :param previous_state: The previous state of the Job Schedule. This - property is not present if the Job Schedule is in its initial active - state. Possible values include: 'active', 'completed', 'disabled', - 'terminating', 'deleting' - :type previous_state: str or ~azure.batch.models.JobScheduleState - :param previous_state_transition_time: This property is not present if the - Job Schedule is in its initial active state. - :type previous_state_transition_time: datetime - :param schedule: The schedule according to which Jobs will be created. All - times are fixed respective to UTC and are not impacted by daylight saving - time. - :type schedule: ~azure.batch.models.Schedule - :param job_specification: The details of the Jobs to be created on this - schedule. - :type job_specification: ~azure.batch.models.JobSpecification - :param execution_info: Information about Jobs that have been and will be - run under this schedule. 
- :type execution_info: ~azure.batch.models.JobScheduleExecutionInformation - :param metadata: The Batch service does not assign any meaning to - metadata; it is solely for the use of user code. - :type metadata: list[~azure.batch.models.MetadataItem] - :param stats: The lifetime resource usage statistics for the Job Schedule. - The statistics may not be immediately available. The Batch service - performs periodic roll-up of statistics. The typical delay is about 30 - minutes. - :type stats: ~azure.batch.models.JobScheduleStatistics - """ - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'url': {'key': 'url', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, - 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, - 'state': {'key': 'state', 'type': 'JobScheduleState'}, - 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, - 'previous_state': {'key': 'previousState', 'type': 'JobScheduleState'}, - 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, - 'schedule': {'key': 'schedule', 'type': 'Schedule'}, - 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, - 'execution_info': {'key': 'executionInfo', 'type': 'JobScheduleExecutionInformation'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'stats': {'key': 'stats', 'type': 'JobScheduleStatistics'}, - } - - def __init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, schedule=None, job_specification=None, execution_info=None, metadata=None, stats=None, **kwargs) -> None: - super(CloudJobSchedule, self).__init__(**kwargs) - self.id = id - self.display_name = display_name - self.url = url - self.e_tag = e_tag - self.last_modified = last_modified - self.creation_time = creation_time - self.state = state - self.state_transition_time = state_transition_time - self.previous_state = previous_state - self.previous_state_transition_time = previous_state_transition_time - self.schedule = schedule - self.job_specification = job_specification - self.execution_info = execution_info - self.metadata = metadata - self.stats = stats - - -class CloudPool(Model): - """A Pool in the Azure Batch service. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :param id: The ID can contain any combination of alphanumeric characters - including hyphens and underscores, and cannot contain more than 64 - characters. The ID is case-preserving and case-insensitive (that is, you - may not have two IDs within an Account that differ only by case). - :type id: str - :param display_name: The display name need not be unique and can contain - any Unicode characters up to a maximum length of 1024. - :type display_name: str - :param url: - :type url: str - :param e_tag: This is an opaque string. You can use it to detect whether - the Pool has changed between requests. In particular, you can be pass the - ETag when updating a Pool to specify that your changes should take effect - only if nobody else has modified the Pool in the meantime. - :type e_tag: str - :param last_modified: This is the last time at which the Pool level data, - such as the targetDedicatedNodes or enableAutoscale settings, changed. 
It - does not factor in node-level changes such as a Compute Node changing - state. - :type last_modified: datetime - :param creation_time: - :type creation_time: datetime - :param state: Possible values include: 'active', 'deleting' - :type state: str or ~azure.batch.models.PoolState - :param state_transition_time: - :type state_transition_time: datetime - :param allocation_state: Possible values include: 'steady', 'resizing', - 'stopping' - :type allocation_state: str or ~azure.batch.models.AllocationState - :param allocation_state_transition_time: - :type allocation_state_transition_time: datetime - :param vm_size: For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - :type vm_size: str - :param cloud_service_configuration: The cloud service configuration for - the Pool. This property and virtualMachineConfiguration are mutually - exclusive and one of the properties must be specified. This property - cannot be specified if the Batch Account was created with its - poolAllocationMode property set to 'UserSubscription'. - :type cloud_service_configuration: - ~azure.batch.models.CloudServiceConfiguration - :param virtual_machine_configuration: The virtual machine configuration - for the Pool. This property and cloudServiceConfiguration are mutually - exclusive and one of the properties must be specified. - :type virtual_machine_configuration: - ~azure.batch.models.VirtualMachineConfiguration - :param resize_timeout: This is the timeout for the most recent resize - operation. (The initial sizing when the Pool is created counts as a - resize.) The default value is 15 minutes. - :type resize_timeout: timedelta - :param resize_errors: This property is set only if one or more errors - occurred during the last Pool resize, and only when the Pool - allocationState is Steady. - :type resize_errors: list[~azure.batch.models.ResizeError] - :param current_dedicated_nodes: The number of dedicated Compute Nodes - currently in the Pool. - :type current_dedicated_nodes: int - :param current_low_priority_nodes: The number of Spot/Low-priority Compute - Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have - been preempted are included in this count. - :type current_low_priority_nodes: int - :param target_dedicated_nodes: The desired number of dedicated Compute - Nodes in the Pool. - :type target_dedicated_nodes: int - :param target_low_priority_nodes: The desired number of Spot/Low-priority - Compute Nodes in the Pool. - :type target_low_priority_nodes: int - :param enable_auto_scale: Whether the Pool size should automatically - adjust over time. If false, at least one of targetDedicatedNodes and - targetLowPriorityNodes must be specified. If true, the autoScaleFormula - property is required and the Pool automatically resizes according to the - formula. The default value is false. - :type enable_auto_scale: bool - :param auto_scale_formula: This property is set only if the Pool - automatically scales, i.e. enableAutoScale is true. - :type auto_scale_formula: str - :param auto_scale_evaluation_interval: This property is set only if the - Pool automatically scales, i.e. enableAutoScale is true. - :type auto_scale_evaluation_interval: timedelta - :param auto_scale_run: The results and errors from the last execution of - the autoscale formula. This property is set only if the Pool automatically - scales, i.e. enableAutoScale is true. 
- :type auto_scale_run: ~azure.batch.models.AutoScaleRun - :param enable_inter_node_communication: Whether the Pool permits direct - communication between Compute Nodes. This imposes restrictions on which - Compute Nodes can be assigned to the Pool. Specifying this value can - reduce the chance of the requested number of Compute Nodes to be allocated - in the Pool. - :type enable_inter_node_communication: bool - :param network_configuration: The network configuration for the Pool. - :type network_configuration: ~azure.batch.models.NetworkConfiguration - :param start_task: A Task specified to run on each Compute Node as it - joins the Pool. - :type start_task: ~azure.batch.models.StartTask - :param certificate_references: For Windows Nodes, the Batch service - installs the Certificates to the specified Certificate store and location. - For Linux Compute Nodes, the Certificates are stored in a directory inside - the Task working directory and an environment variable - AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - location. For Certificates with visibility of 'remoteUser', a 'certs' - directory is created in the user's home directory (e.g., - /home/{user-name}/certs) and Certificates are placed in that directory. - Warning: This property is deprecated and will be removed after February, - 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - :type certificate_references: - list[~azure.batch.models.CertificateReference] - :param application_package_references: Changes to Package references - affect all new Nodes joining the Pool, but do not affect Compute Nodes - that are already in the Pool until they are rebooted or reimaged. There is - a maximum of 10 Package references on any given Pool. - :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param application_licenses: The list of application licenses must be a - subset of available Batch service application licenses. If a license is - requested which is not supported, Pool creation will fail. - :type application_licenses: list[str] - :param task_slots_per_node: The number of task slots that can be used to - run concurrent tasks on a single compute node in the pool. The default - value is 1. The maximum value is the smaller of 4 times the number of - cores of the vmSize of the pool or 256. - :type task_slots_per_node: int - :param task_scheduling_policy: How Tasks are distributed across Compute - Nodes in a Pool. If not specified, the default is spread. - :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy - :param user_accounts: - :type user_accounts: list[~azure.batch.models.UserAccount] - :param metadata: - :type metadata: list[~azure.batch.models.MetadataItem] - :param stats: Utilization and resource usage statistics for the entire - lifetime of the Pool. This property is populated only if the CloudPool was - retrieved with an expand clause including the 'stats' attribute; otherwise - it is null. The statistics may not be immediately available. The Batch - service performs periodic roll-up of statistics. The typical delay is - about 30 minutes. - :type stats: ~azure.batch.models.PoolStatistics - :param mount_configuration: This supports Azure Files, NFS, CIFS/SMB, and - Blobfuse. - :type mount_configuration: list[~azure.batch.models.MountConfiguration] - :param identity: The identity of the Batch pool, if configured. 
The list - of user identities associated with the Batch pool. The user identity - dictionary key references will be ARM resource ids in the form: - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - :type identity: ~azure.batch.models.BatchPoolIdentity - :param target_node_communication_mode: The desired node communication mode - for the pool. If omitted, the default value is Default. Possible values - include: 'default', 'classic', 'simplified' - :type target_node_communication_mode: str or - ~azure.batch.models.NodeCommunicationMode - :ivar current_node_communication_mode: The current state of the pool - communication mode. Possible values include: 'default', 'classic', - 'simplified' - :vartype current_node_communication_mode: str or - ~azure.batch.models.NodeCommunicationMode - :param upgrade_policy: The upgrade policy for the Pool. Describes an - upgrade policy - automatic, manual, or rolling. - :type upgrade_policy: ~azure.batch.models.UpgradePolicy - :param resource_tags: The user-defined tags to be associated with the - Azure Batch Pool. When specified, these tags are propagated to the backing - Azure resources associated with the pool. This property can only be - specified when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. - :type resource_tags: dict[str, str] - """ - - _validation = { - 'current_node_communication_mode': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'url': {'key': 'url', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, - 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, - 'state': {'key': 'state', 'type': 'PoolState'}, - 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, - 'allocation_state': {'key': 'allocationState', 'type': 'AllocationState'}, - 'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'}, - 'vm_size': {'key': 'vmSize', 'type': 'str'}, - 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, - 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, - 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, - 'resize_errors': {'key': 'resizeErrors', 'type': '[ResizeError]'}, - 'current_dedicated_nodes': {'key': 'currentDedicatedNodes', 'type': 'int'}, - 'current_low_priority_nodes': {'key': 'currentLowPriorityNodes', 'type': 'int'}, - 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, - 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, - 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, - 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, - 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, - 'auto_scale_run': {'key': 'autoScaleRun', 'type': 'AutoScaleRun'}, - 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, - 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, - 'start_task': {'key': 'startTask', 'type': 'StartTask'}, - 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, - 
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, - 'task_slots_per_node': {'key': 'taskSlotsPerNode', 'type': 'int'}, - 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, - 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'stats': {'key': 'stats', 'type': 'PoolStatistics'}, - 'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'}, - 'identity': {'key': 'identity', 'type': 'BatchPoolIdentity'}, - 'target_node_communication_mode': {'key': 'targetNodeCommunicationMode', 'type': 'NodeCommunicationMode'}, - 'current_node_communication_mode': {'key': 'currentNodeCommunicationMode', 'type': 'NodeCommunicationMode'}, - 'upgrade_policy': {'key': 'upgradePolicy', 'type': 'UpgradePolicy'}, - 'resource_tags': {'key': 'resourceTags', 'type': '{str}'}, - } - - def __init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, allocation_state=None, allocation_state_transition_time=None, vm_size: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, resize_errors=None, current_dedicated_nodes: int=None, current_low_priority_nodes: int=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, auto_scale_run=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, task_slots_per_node: int=None, task_scheduling_policy=None, user_accounts=None, metadata=None, stats=None, mount_configuration=None, identity=None, target_node_communication_mode=None, upgrade_policy=None, resource_tags=None, **kwargs) -> None: - super(CloudPool, self).__init__(**kwargs) - self.id = id - self.display_name = display_name - self.url = url - self.e_tag = e_tag - self.last_modified = last_modified - self.creation_time = creation_time - self.state = state - self.state_transition_time = state_transition_time - self.allocation_state = allocation_state - self.allocation_state_transition_time = allocation_state_transition_time - self.vm_size = vm_size - self.cloud_service_configuration = cloud_service_configuration - self.virtual_machine_configuration = virtual_machine_configuration - self.resize_timeout = resize_timeout - self.resize_errors = resize_errors - self.current_dedicated_nodes = current_dedicated_nodes - self.current_low_priority_nodes = current_low_priority_nodes - self.target_dedicated_nodes = target_dedicated_nodes - self.target_low_priority_nodes = target_low_priority_nodes - self.enable_auto_scale = enable_auto_scale - self.auto_scale_formula = auto_scale_formula - self.auto_scale_evaluation_interval = auto_scale_evaluation_interval - self.auto_scale_run = auto_scale_run - self.enable_inter_node_communication = enable_inter_node_communication - self.network_configuration = network_configuration - self.start_task = start_task - self.certificate_references = certificate_references - self.application_package_references = application_package_references - self.application_licenses = application_licenses - self.task_slots_per_node = 
task_slots_per_node - self.task_scheduling_policy = task_scheduling_policy - self.user_accounts = user_accounts - self.metadata = metadata - self.stats = stats - self.mount_configuration = mount_configuration - self.identity = identity - self.target_node_communication_mode = target_node_communication_mode - self.current_node_communication_mode = None - self.upgrade_policy = upgrade_policy - self.resource_tags = resource_tags - - -class CloudServiceConfiguration(Model): - """The configuration for Compute Nodes in a Pool based on the Azure Cloud - Services platform. - - All required parameters must be populated in order to send to Azure. - - :param os_family: Required. Possible values are: - 2 - OS Family 2, equivalent to Windows Server 2008 R2 SP1. - 3 - OS Family 3, equivalent to Windows Server 2012. - 4 - OS Family 4, equivalent to Windows Server 2012 R2. - 5 - OS Family 5, equivalent to Windows Server 2016. - 6 - OS Family 6, equivalent to Windows Server 2019. For more information, - see Azure Guest OS Releases - (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). - :type os_family: str - :param os_version: The default value is * which specifies the latest - operating system version for the specified OS family. - :type os_version: str - """ - - _validation = { - 'os_family': {'required': True}, - } - - _attribute_map = { - 'os_family': {'key': 'osFamily', 'type': 'str'}, - 'os_version': {'key': 'osVersion', 'type': 'str'}, - } - - def __init__(self, *, os_family: str, os_version: str=None, **kwargs) -> None: - super(CloudServiceConfiguration, self).__init__(**kwargs) - self.os_family = os_family - self.os_version = os_version - - -class CloudTask(Model): - """An Azure Batch Task. - - Batch will retry Tasks when a recovery operation is triggered on a Node. - Examples of recovery operations include (but are not limited to) when an - unhealthy Node is rebooted or a Compute Node disappeared due to host - failure. Retries due to recovery operations are independent of and are not - counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, - an internal retry due to a recovery operation may occur. Because of this, - all Tasks should be idempotent. This means Tasks need to tolerate being - interrupted and restarted without causing any corruption or duplicate data. - The best practice for long running Tasks is to use some form of - checkpointing. - - :param id: The ID can contain any combination of alphanumeric characters - including hyphens and underscores, and cannot contain more than 64 - characters. - :type id: str - :param display_name: The display name need not be unique and can contain - any Unicode characters up to a maximum length of 1024. - :type display_name: str - :param url: - :type url: str - :param e_tag: This is an opaque string. You can use it to detect whether - the Task has changed between requests. In particular, you can be pass the - ETag when updating a Task to specify that your changes should take effect - only if nobody else has modified the Task in the meantime. - :type e_tag: str - :param last_modified: - :type last_modified: datetime - :param creation_time: - :type creation_time: datetime - :param exit_conditions: How the Batch service should respond when the Task - completes. - :type exit_conditions: ~azure.batch.models.ExitConditions - :param state: The current state of the Task. 
Possible values include: - 'active', 'preparing', 'running', 'completed' - :type state: str or ~azure.batch.models.TaskState - :param state_transition_time: - :type state_transition_time: datetime - :param previous_state: The previous state of the Task. This property is - not set if the Task is in its initial Active state. Possible values - include: 'active', 'preparing', 'running', 'completed' - :type previous_state: str or ~azure.batch.models.TaskState - :param previous_state_transition_time: This property is not set if the - Task is in its initial Active state. - :type previous_state_transition_time: datetime - :param command_line: For multi-instance Tasks, the command line is - executed as the primary Task, after the primary Task and all subtasks have - finished executing the coordination command line. The command line does - not run under a shell, and therefore cannot take advantage of shell - features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command - line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should - use a relative path (relative to the Task working directory), or use the - Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - :type command_line: str - :param container_settings: The settings for the container under which the - Task runs. If the Pool that will run this Task has containerConfiguration - set, this must be set as well. If the Pool that will run this Task doesn't - have containerConfiguration set, this must not be set. When this is - specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR - (the root of Azure Batch directories on the node) are mapped into the - container, all Task environment variables are mapped into the container, - and the Task command line is executed in the container. Files produced in - the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to - the host disk, meaning that Batch file APIs will not be able to access - those files. - :type container_settings: ~azure.batch.models.TaskContainerSettings - :param resource_files: For multi-instance Tasks, the resource files will - only be downloaded to the Compute Node on which the primary Task is - executed. There is a maximum size for the list of resource files. When - the max size is exceeded, the request will fail and the response error - code will be RequestEntityTooLarge. If this occurs, the collection of - ResourceFiles must be reduced in size. This can be achieved using .zip - files, Application Packages, or Docker Containers. - :type resource_files: list[~azure.batch.models.ResourceFile] - :param output_files: For multi-instance Tasks, the files will only be - uploaded from the Compute Node on which the primary Task is executed. - :type output_files: list[~azure.batch.models.OutputFile] - :param environment_settings: - :type environment_settings: list[~azure.batch.models.EnvironmentSetting] - :param affinity_info: A locality hint that can be used by the Batch - service to select a Compute Node on which to start the new Task. - :type affinity_info: ~azure.batch.models.AffinityInformation - :param constraints: The execution constraints that apply to this Task. - :type constraints: ~azure.batch.models.TaskConstraints - :param required_slots: The number of scheduling slots that the Task - requires to run. 
The default is 1. A Task can only be scheduled to run on - a compute node if the node has enough free scheduling slots available. For - multi-instance Tasks, this must be 1. - :type required_slots: int - :param user_identity: The user identity under which the Task runs. If - omitted, the Task runs as a non-administrative user unique to the Task. - :type user_identity: ~azure.batch.models.UserIdentity - :param execution_info: Information about the execution of the Task. - :type execution_info: ~azure.batch.models.TaskExecutionInformation - :param node_info: Information about the Compute Node on which the Task - ran. - :type node_info: ~azure.batch.models.ComputeNodeInformation - :param multi_instance_settings: An object that indicates that the Task is - a multi-instance Task, and contains information about how to run the - multi-instance Task. - :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings - :param stats: Resource usage statistics for the Task. - :type stats: ~azure.batch.models.TaskStatistics - :param depends_on: The Tasks that this Task depends on. This Task will not - be scheduled until all Tasks that it depends on have completed - successfully. If any of those Tasks fail and exhaust their retry counts, - this Task will never be scheduled. - :type depends_on: ~azure.batch.models.TaskDependencies - :param application_package_references: Application packages are downloaded - and deployed to a shared directory, not the Task working directory. - Therefore, if a referenced package is already on the Node, and is up to - date, then it is not re-downloaded; the existing copy on the Compute Node - is used. If a referenced Package cannot be installed, for example because - the package has been deleted or because download failed, the Task fails. - :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param authentication_token_settings: The settings for an authentication - token that the Task can use to perform Batch service operations. If this - property is set, the Batch service provides the Task with an - authentication token which can be used to authenticate Batch service - operations without requiring an Account access key. The token is provided - via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations - that the Task can carry out using the token depend on the settings. For - example, a Task can request Job permissions in order to add other Tasks to - the Job, or check the status of the Job or of other Tasks under the Job. 
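# A minimal sketch, not part of the generated code above, of a Task built with
# the CloudTask model whose constructor appears below. The id, command line and
# retry count are illustrative, and TaskConstraints (referenced by the
# docstring but not defined in this hunk) is assumed to accept
# max_task_retry_count.
from azure.batch.models import CloudTask, TaskConstraints

task = CloudTask(
    id="sample-task",
    display_name="print environment",
    # The command line does not run under a shell, so the shell is invoked
    # explicitly, as the commandLine description above recommends.
    command_line='/bin/sh -c "printenv | sort"',
    required_slots=1,  # the default; must be 1 for multi-instance Tasks
    constraints=TaskConstraints(max_task_retry_count=3),
)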
- :type authentication_token_settings: - ~azure.batch.models.AuthenticationTokenSettings - """ - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'url': {'key': 'url', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, - 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, - 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, - 'state': {'key': 'state', 'type': 'TaskState'}, - 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, - 'previous_state': {'key': 'previousState', 'type': 'TaskState'}, - 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, - 'command_line': {'key': 'commandLine', 'type': 'str'}, - 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, - 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, - 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, - 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, - 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, - 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, - 'required_slots': {'key': 'requiredSlots', 'type': 'int'}, - 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, - 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, - 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, - 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, - 'stats': {'key': 'stats', 'type': 'TaskStatistics'}, - 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, - } - - def __init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, exit_conditions=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, command_line: str=None, container_settings=None, resource_files=None, output_files=None, environment_settings=None, affinity_info=None, constraints=None, required_slots: int=None, user_identity=None, execution_info=None, node_info=None, multi_instance_settings=None, stats=None, depends_on=None, application_package_references=None, authentication_token_settings=None, **kwargs) -> None: - super(CloudTask, self).__init__(**kwargs) - self.id = id - self.display_name = display_name - self.url = url - self.e_tag = e_tag - self.last_modified = last_modified - self.creation_time = creation_time - self.exit_conditions = exit_conditions - self.state = state - self.state_transition_time = state_transition_time - self.previous_state = previous_state - self.previous_state_transition_time = previous_state_transition_time - self.command_line = command_line - self.container_settings = container_settings - self.resource_files = resource_files - self.output_files = output_files - self.environment_settings = environment_settings - self.affinity_info = affinity_info - self.constraints = constraints - self.required_slots = required_slots - self.user_identity = user_identity - self.execution_info = execution_info - self.node_info = node_info - 
self.multi_instance_settings = multi_instance_settings - self.stats = stats - self.depends_on = depends_on - self.application_package_references = application_package_references - self.authentication_token_settings = authentication_token_settings - - -class CloudTaskListSubtasksResult(Model): - """The result of listing the subtasks of a Task. - - :param value: - :type value: list[~azure.batch.models.SubtaskInformation] - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[SubtaskInformation]'}, - } - - def __init__(self, *, value=None, **kwargs) -> None: - super(CloudTaskListSubtasksResult, self).__init__(**kwargs) - self.value = value - - -class ComputeNode(Model): - """A Compute Node in the Batch service. - - :param id: Every Compute Node that is added to a Pool is assigned a unique - ID. Whenever a Compute Node is removed from a Pool, all of its local files - are deleted, and the ID is reclaimed and could be reused for new Compute - Nodes. - :type id: str - :param url: - :type url: str - :param state: The Spot/Low-priority Compute Node has been preempted. Tasks - which were running on the Compute Node when it was preempted will be - rescheduled when another Compute Node becomes available. Possible values - include: 'idle', 'rebooting', 'reimaging', 'running', 'unusable', - 'creating', 'starting', 'waitingForStartTask', 'startTaskFailed', - 'unknown', 'leavingPool', 'offline', 'preempted', 'upgradingOS' - :type state: str or ~azure.batch.models.ComputeNodeState - :param scheduling_state: Possible values include: 'enabled', 'disabled' - :type scheduling_state: str or ~azure.batch.models.SchedulingState - :param state_transition_time: - :type state_transition_time: datetime - :param last_boot_time: This property may not be present if the Compute - Node state is unusable. - :type last_boot_time: datetime - :param allocation_time: This is the time when the Compute Node was - initially allocated and doesn't change once set. It is not updated when - the Compute Node is service healed or preempted. - :type allocation_time: datetime - :param ip_address: Every Compute Node that is added to a Pool is assigned - a unique IP address. Whenever a Compute Node is removed from a Pool, all - of its local files are deleted, and the IP address is reclaimed and could - be reused for new Compute Nodes. - :type ip_address: str - :param affinity_id: Note that this is just a soft affinity. If the target - Compute Node is busy or unavailable at the time the Task is scheduled, - then the Task will be scheduled elsewhere. - :type affinity_id: str - :param vm_size: For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - :type vm_size: str - :param total_tasks_run: The total number of Job Tasks completed on the - Compute Node. This includes Job Manager Tasks and normal Tasks, but not - Job Preparation, Job Release or Start Tasks. - :type total_tasks_run: int - :param running_tasks_count: The total number of currently running Job - Tasks on the Compute Node. This includes Job Manager Tasks and normal - Tasks, but not Job Preparation, Job Release or Start Tasks. - :type running_tasks_count: int - :param running_task_slots_count: The total number of scheduling slots used - by currently running Job Tasks on the Compute Node. This includes Job - Manager Tasks and normal Tasks, but not Job Preparation, Job Release or - Start Tasks. 
- :type running_task_slots_count: int - :param total_tasks_succeeded: The total number of Job Tasks which - completed successfully (with exitCode 0) on the Compute Node. This - includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job - Release or Start Tasks. - :type total_tasks_succeeded: int - :param recent_tasks: This property is present only if at least one Task - has run on this Compute Node since it was assigned to the Pool. - :type recent_tasks: list[~azure.batch.models.TaskInformation] - :param start_task: The Task specified to run on the Compute Node as it - joins the Pool. - :type start_task: ~azure.batch.models.StartTask - :param start_task_info: Runtime information about the execution of the - StartTask on the Compute Node. - :type start_task_info: ~azure.batch.models.StartTaskInformation - :param certificate_references: For Windows Nodes, the Batch service - installs the Certificates to the specified Certificate store and location. - For Linux Compute Nodes, the Certificates are stored in a directory inside - the Task working directory and an environment variable - AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - location. For Certificates with visibility of 'remoteUser', a 'certs' - directory is created in the user's home directory (e.g., - /home/{user-name}/certs) and Certificates are placed in that directory. - Warning: This property is deprecated and will be removed after February, - 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - :type certificate_references: - list[~azure.batch.models.CertificateReference] - :param errors: - :type errors: list[~azure.batch.models.ComputeNodeError] - :param is_dedicated: Whether this Compute Node is a dedicated Compute - Node. If false, the Compute Node is a Spot/Low-priority Compute Node. - :type is_dedicated: bool - :param endpoint_configuration: The endpoint configuration for the Compute - Node. - :type endpoint_configuration: - ~azure.batch.models.ComputeNodeEndpointConfiguration - :param node_agent_info: Information about the Compute Node agent version - and the time the Compute Node upgraded to a new version. - :type node_agent_info: ~azure.batch.models.NodeAgentInformation - :param virtual_machine_info: Info about the current state of the virtual - machine. 
- :type virtual_machine_info: ~azure.batch.models.VirtualMachineInfo - """ - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'url': {'key': 'url', 'type': 'str'}, - 'state': {'key': 'state', 'type': 'ComputeNodeState'}, - 'scheduling_state': {'key': 'schedulingState', 'type': 'SchedulingState'}, - 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, - 'last_boot_time': {'key': 'lastBootTime', 'type': 'iso-8601'}, - 'allocation_time': {'key': 'allocationTime', 'type': 'iso-8601'}, - 'ip_address': {'key': 'ipAddress', 'type': 'str'}, - 'affinity_id': {'key': 'affinityId', 'type': 'str'}, - 'vm_size': {'key': 'vmSize', 'type': 'str'}, - 'total_tasks_run': {'key': 'totalTasksRun', 'type': 'int'}, - 'running_tasks_count': {'key': 'runningTasksCount', 'type': 'int'}, - 'running_task_slots_count': {'key': 'runningTaskSlotsCount', 'type': 'int'}, - 'total_tasks_succeeded': {'key': 'totalTasksSucceeded', 'type': 'int'}, - 'recent_tasks': {'key': 'recentTasks', 'type': '[TaskInformation]'}, - 'start_task': {'key': 'startTask', 'type': 'StartTask'}, - 'start_task_info': {'key': 'startTaskInfo', 'type': 'StartTaskInformation'}, - 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, - 'errors': {'key': 'errors', 'type': '[ComputeNodeError]'}, - 'is_dedicated': {'key': 'isDedicated', 'type': 'bool'}, - 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'ComputeNodeEndpointConfiguration'}, - 'node_agent_info': {'key': 'nodeAgentInfo', 'type': 'NodeAgentInformation'}, - 'virtual_machine_info': {'key': 'virtualMachineInfo', 'type': 'VirtualMachineInfo'}, - } - - def __init__(self, *, id: str=None, url: str=None, state=None, scheduling_state=None, state_transition_time=None, last_boot_time=None, allocation_time=None, ip_address: str=None, affinity_id: str=None, vm_size: str=None, total_tasks_run: int=None, running_tasks_count: int=None, running_task_slots_count: int=None, total_tasks_succeeded: int=None, recent_tasks=None, start_task=None, start_task_info=None, certificate_references=None, errors=None, is_dedicated: bool=None, endpoint_configuration=None, node_agent_info=None, virtual_machine_info=None, **kwargs) -> None: - super(ComputeNode, self).__init__(**kwargs) - self.id = id - self.url = url - self.state = state - self.scheduling_state = scheduling_state - self.state_transition_time = state_transition_time - self.last_boot_time = last_boot_time - self.allocation_time = allocation_time - self.ip_address = ip_address - self.affinity_id = affinity_id - self.vm_size = vm_size - self.total_tasks_run = total_tasks_run - self.running_tasks_count = running_tasks_count - self.running_task_slots_count = running_task_slots_count - self.total_tasks_succeeded = total_tasks_succeeded - self.recent_tasks = recent_tasks - self.start_task = start_task - self.start_task_info = start_task_info - self.certificate_references = certificate_references - self.errors = errors - self.is_dedicated = is_dedicated - self.endpoint_configuration = endpoint_configuration - self.node_agent_info = node_agent_info - self.virtual_machine_info = virtual_machine_info - - -class ComputeNodeAddUserOptions(Model): - """Additional parameters for add_user operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . 
- :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ComputeNodeAddUserOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ComputeNodeDeleteUserOptions(Model): - """Additional parameters for delete_user operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ComputeNodeDeleteUserOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ComputeNodeDisableSchedulingOptions(Model): - """Additional parameters for disable_scheduling operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. 
Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ComputeNodeDisableSchedulingOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ComputeNodeEnableSchedulingOptions(Model): - """Additional parameters for enable_scheduling operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ComputeNodeEnableSchedulingOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ComputeNodeEndpointConfiguration(Model): - """The endpoint configuration for the Compute Node. - - All required parameters must be populated in order to send to Azure. - - :param inbound_endpoints: Required. - :type inbound_endpoints: list[~azure.batch.models.InboundEndpoint] - """ - - _validation = { - 'inbound_endpoints': {'required': True}, - } - - _attribute_map = { - 'inbound_endpoints': {'key': 'inboundEndpoints', 'type': '[InboundEndpoint]'}, - } - - def __init__(self, *, inbound_endpoints, **kwargs) -> None: - super(ComputeNodeEndpointConfiguration, self).__init__(**kwargs) - self.inbound_endpoints = inbound_endpoints - - -class ComputeNodeError(Model): - """An error encountered by a Compute Node. 
- - :param code: - :type code: str - :param message: - :type message: str - :param error_details: - :type error_details: list[~azure.batch.models.NameValuePair] - """ - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'error_details': {'key': 'errorDetails', 'type': '[NameValuePair]'}, - } - - def __init__(self, *, code: str=None, message: str=None, error_details=None, **kwargs) -> None: - super(ComputeNodeError, self).__init__(**kwargs) - self.code = code - self.message = message - self.error_details = error_details - - -class ComputeNodeExtensionGetOptions(Model): - """Additional parameters for get operation. - - :param select: An OData $select clause. - :type select: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ComputeNodeExtensionGetOptions, self).__init__(**kwargs) - self.select = select - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ComputeNodeExtensionListOptions(Model): - """Additional parameters for list operation. - - :param select: An OData $select clause. - :type select: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Compute Nodes can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ComputeNodeExtensionListOptions, self).__init__(**kwargs) - self.select = select - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ComputeNodeGetOptions(Model): - """Additional parameters for get operation. - - :param select: An OData $select clause. - :type select: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ComputeNodeGetOptions, self).__init__(**kwargs) - self.select = select - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ComputeNodeGetRemoteDesktopOptions(Model): - """Additional parameters for get_remote_desktop operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ComputeNodeGetRemoteDesktopOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ComputeNodeGetRemoteLoginSettingsOptions(Model): - """Additional parameters for get_remote_login_settings operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ComputeNodeGetRemoteLoginSettingsOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ComputeNodeGetRemoteLoginSettingsResult(Model): - """The remote login settings for a Compute Node. - - All required parameters must be populated in order to send to Azure. - - :param remote_login_ip_address: Required. - :type remote_login_ip_address: str - :param remote_login_port: Required. The port used for remote login to the - Compute Node. - :type remote_login_port: int - """ - - _validation = { - 'remote_login_ip_address': {'required': True}, - 'remote_login_port': {'required': True}, - } - - _attribute_map = { - 'remote_login_ip_address': {'key': 'remoteLoginIPAddress', 'type': 'str'}, - 'remote_login_port': {'key': 'remoteLoginPort', 'type': 'int'}, - } - - def __init__(self, *, remote_login_ip_address: str, remote_login_port: int, **kwargs) -> None: - super(ComputeNodeGetRemoteLoginSettingsResult, self).__init__(**kwargs) - self.remote_login_ip_address = remote_login_ip_address - self.remote_login_port = remote_login_port - - -class ComputeNodeIdentityReference(Model): - """The reference to a user assigned identity associated with the Batch pool - which a compute node will use. - - :param resource_id: The ARM resource id of the user assigned identity. 
- :type resource_id: str - """ - - _attribute_map = { - 'resource_id': {'key': 'resourceId', 'type': 'str'}, - } - - def __init__(self, *, resource_id: str=None, **kwargs) -> None: - super(ComputeNodeIdentityReference, self).__init__(**kwargs) - self.resource_id = resource_id - - -class ComputeNodeInformation(Model): - """Information about the Compute Node on which a Task ran. - - :param affinity_id: - :type affinity_id: str - :param node_url: - :type node_url: str - :param pool_id: - :type pool_id: str - :param node_id: - :type node_id: str - :param task_root_directory: - :type task_root_directory: str - :param task_root_directory_url: - :type task_root_directory_url: str - """ - - _attribute_map = { - 'affinity_id': {'key': 'affinityId', 'type': 'str'}, - 'node_url': {'key': 'nodeUrl', 'type': 'str'}, - 'pool_id': {'key': 'poolId', 'type': 'str'}, - 'node_id': {'key': 'nodeId', 'type': 'str'}, - 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, - 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, - } - - def __init__(self, *, affinity_id: str=None, node_url: str=None, pool_id: str=None, node_id: str=None, task_root_directory: str=None, task_root_directory_url: str=None, **kwargs) -> None: - super(ComputeNodeInformation, self).__init__(**kwargs) - self.affinity_id = affinity_id - self.node_url = node_url - self.pool_id = pool_id - self.node_id = node_id - self.task_root_directory = task_root_directory - self.task_root_directory_url = task_root_directory_url - - -class ComputeNodeListOptions(Model): - """Additional parameters for list operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Compute Nodes can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
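# A minimal sketch, not part of the generated code above, of the
# ComputeNodeIdentityReference model defined earlier in this hunk; the
# subscription, resource group and identity names are placeholders in the ARM
# resource id form described by the CloudPool identity docstring.
from azure.batch.models import ComputeNodeIdentityReference

identity_ref = ComputeNodeIdentityReference(
    resource_id=(
        "/subscriptions/00000000-0000-0000-0000-000000000000"
        "/resourceGroups/my-rg"
        "/providers/Microsoft.ManagedIdentity/userAssignedIdentities/my-identity"
    )
)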
- :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ComputeNodeListOptions, self).__init__(**kwargs) - self.filter = filter - self.select = select - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ComputeNodeRebootOptions(Model): - """Additional parameters for reboot operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ComputeNodeRebootOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ComputeNodeReimageOptions(Model): - """Additional parameters for reimage operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
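# A minimal sketch, not part of the generated code above, of the per-call
# options pattern these deleted classes share, shown with
# ComputeNodeListOptions (defined earlier in this hunk); the filter, select and
# paging values are illustrative assumptions.
import datetime
import uuid
from azure.batch.models import ComputeNodeListOptions

options = ComputeNodeListOptions(
    filter="state eq 'idle'",              # OData $filter clause
    select="id,state,vmSize",              # OData $select clause
    max_results=100,                       # page size; the service caps this at 1000
    timeout=30,                            # server-side processing limit in seconds
    client_request_id=str(uuid.uuid4()),   # caller-generated GUID, no braces
    return_client_request_id=True,
    ocp_date=datetime.datetime.utcnow(),   # time the request was issued
)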
- :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ComputeNodeReimageOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ComputeNodeUpdateUserOptions(Model): - """Additional parameters for update_user operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ComputeNodeUpdateUserOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ComputeNodeUploadBatchServiceLogsOptions(Model): - """Additional parameters for upload_batch_service_logs operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(ComputeNodeUploadBatchServiceLogsOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class ComputeNodeUser(Model): - """A user Account for RDP or SSH access on a Compute Node. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param is_admin: Whether the Account should be an administrator on the - Compute Node. The default value is false. - :type is_admin: bool - :param expiry_time: If omitted, the default is 1 day from the current - time. For Linux Compute Nodes, the expiryTime has a precision up to a day. - :type expiry_time: datetime - :param password: The password is required for Windows Compute Nodes (those - created with 'cloudServiceConfiguration', or created with - 'virtualMachineConfiguration' using a Windows Image reference). For Linux - Compute Nodes, the password can optionally be specified along with the - sshPublicKey property. - :type password: str - :param ssh_public_key: The public key should be compatible with OpenSSH - encoding and should be base 64 encoded. This property can be specified - only for Linux Compute Nodes. If this is specified for a Windows Compute - Node, then the Batch service rejects the request; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). - :type ssh_public_key: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'is_admin': {'key': 'isAdmin', 'type': 'bool'}, - 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, - 'password': {'key': 'password', 'type': 'str'}, - 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, - } - - def __init__(self, *, name: str, is_admin: bool=None, expiry_time=None, password: str=None, ssh_public_key: str=None, **kwargs) -> None: - super(ComputeNodeUser, self).__init__(**kwargs) - self.name = name - self.is_admin = is_admin - self.expiry_time = expiry_time - self.password = password - self.ssh_public_key = ssh_public_key - - -class ContainerConfiguration(Model): - """The configuration for container-enabled Pools. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. Possible values include: 'dockerCompatible', - 'criCompatible' - :type type: str or ~azure.batch.models.ContainerType - :param container_image_names: This is the full Image reference, as would - be specified to "docker pull". An Image will be sourced from the default - Docker registry unless the Image is fully qualified with an alternative - registry. - :type container_image_names: list[str] - :param container_registries: If any Images must be downloaded from a - private registry which requires credentials, then those credentials must - be provided here. 
- :type container_registries: list[~azure.batch.models.ContainerRegistry] - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'type', 'type': 'str'}, - 'container_image_names': {'key': 'containerImageNames', 'type': '[str]'}, - 'container_registries': {'key': 'containerRegistries', 'type': '[ContainerRegistry]'}, - } - - def __init__(self, *, type, container_image_names=None, container_registries=None, **kwargs) -> None: - super(ContainerConfiguration, self).__init__(**kwargs) - self.type = type - self.container_image_names = container_image_names - self.container_registries = container_registries - - -class ContainerRegistry(Model): - """A private container registry. - - :param user_name: - :type user_name: str - :param password: - :type password: str - :param registry_server: If omitted, the default is "docker.io". - :type registry_server: str - :param identity_reference: The reference to the user assigned identity to - use to access an Azure Container Registry instead of username and - password. - :type identity_reference: ~azure.batch.models.ComputeNodeIdentityReference - """ - - _attribute_map = { - 'user_name': {'key': 'username', 'type': 'str'}, - 'password': {'key': 'password', 'type': 'str'}, - 'registry_server': {'key': 'registryServer', 'type': 'str'}, - 'identity_reference': {'key': 'identityReference', 'type': 'ComputeNodeIdentityReference'}, - } - - def __init__(self, *, user_name: str=None, password: str=None, registry_server: str=None, identity_reference=None, **kwargs) -> None: - super(ContainerRegistry, self).__init__(**kwargs) - self.user_name = user_name - self.password = password - self.registry_server = registry_server - self.identity_reference = identity_reference - - -class DataDisk(Model): - """Settings which will be used by the data disks associated to Compute Nodes - in the Pool. When using attached data disks, you need to mount and format - the disks from within a VM to use them. - - All required parameters must be populated in order to send to Azure. - - :param lun: Required. The logical unit number. The lun is used to uniquely - identify each data disk. If attaching multiple disks, each should have a - distinct lun. The value must be between 0 and 63, inclusive. - :type lun: int - :param caching: The type of caching to be enabled for the data disks. The - default value for caching is readwrite. For information about the caching - options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Possible values include: 'none', 'readOnly', 'readWrite' - :type caching: str or ~azure.batch.models.CachingType - :param disk_size_gb: Required. The initial disk size in gigabytes. - :type disk_size_gb: int - :param storage_account_type: The storage Account type to be used for the - data disk. If omitted, the default is "standard_lrs". 
Possible values - include: 'StandardLRS', 'PremiumLRS', 'StandardSSDLRS' - :type storage_account_type: str or ~azure.batch.models.StorageAccountType - """ - - _validation = { - 'lun': {'required': True}, - 'disk_size_gb': {'required': True}, - } - - _attribute_map = { - 'lun': {'key': 'lun', 'type': 'int'}, - 'caching': {'key': 'caching', 'type': 'CachingType'}, - 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, - 'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'}, - } - - def __init__(self, *, lun: int, disk_size_gb: int, caching=None, storage_account_type=None, **kwargs) -> None: - super(DataDisk, self).__init__(**kwargs) - self.lun = lun - self.caching = caching - self.disk_size_gb = disk_size_gb - self.storage_account_type = storage_account_type - - -class DeleteCertificateError(Model): - """An error encountered by the Batch service when deleting a Certificate. - - :param code: - :type code: str - :param message: - :type message: str - :param values: This list includes details such as the active Pools and - Compute Nodes referencing this Certificate. However, if a large number of - resources reference the Certificate, the list contains only about the - first hundred. - :type values: list[~azure.batch.models.NameValuePair] - """ - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'values': {'key': 'values', 'type': '[NameValuePair]'}, - } - - def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None: - super(DeleteCertificateError, self).__init__(**kwargs) - self.code = code - self.message = message - self.values = values - - -class DiffDiskSettings(Model): - """Specifies the ephemeral Disk Settings for the operating system disk used by - the compute node (VM). - - :param placement: Specifies the ephemeral disk placement for operating - system disk for all VMs in the pool. This property can be used by user in - the request to choose the location e.g., cache disk space for Ephemeral OS - disk provisioning. For more information on Ephemeral OS disk size - requirements, please refer to Ephemeral OS disk size requirements for - Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - Possible values include: 'CacheDisk' - :type placement: str or ~azure.batch.models.DiffDiskPlacement - """ - - _attribute_map = { - 'placement': {'key': 'placement', 'type': 'DiffDiskPlacement'}, - } - - def __init__(self, *, placement=None, **kwargs) -> None: - super(DiffDiskSettings, self).__init__(**kwargs) - self.placement = placement - - -class DiskEncryptionConfiguration(Model): - """The disk encryption configuration applied on compute nodes in the pool. - Disk encryption configuration is not supported on Linux pool created with - Azure Compute Gallery Image. - - :param targets: If omitted, no disks on the compute nodes in the pool will - be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows - pool, "OsDisk" and "TemporaryDisk" must be specified. 
- :type targets: list[str or ~azure.batch.models.DiskEncryptionTarget] - """ - - _attribute_map = { - 'targets': {'key': 'targets', 'type': '[DiskEncryptionTarget]'}, - } - - def __init__(self, *, targets=None, **kwargs) -> None: - super(DiskEncryptionConfiguration, self).__init__(**kwargs) - self.targets = targets - - -class EnvironmentSetting(Model): - """An environment variable to be set on a Task process. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param value: - :type value: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__(self, *, name: str, value: str=None, **kwargs) -> None: - super(EnvironmentSetting, self).__init__(**kwargs) - self.name = name - self.value = value - - -class ErrorMessage(Model): - """An error message received in an Azure Batch error response. - - :param lang: - :type lang: str - :param value: - :type value: str - """ - - _attribute_map = { - 'lang': {'key': 'lang', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__(self, *, lang: str=None, value: str=None, **kwargs) -> None: - super(ErrorMessage, self).__init__(**kwargs) - self.lang = lang - self.value = value - - -class ExitCodeMapping(Model): - """How the Batch service should respond if a Task exits with a particular exit - code. - - All required parameters must be populated in order to send to Azure. - - :param code: Required. A process exit code. - :type code: int - :param exit_options: Required. How the Batch service should respond if the - Task exits with this exit code. - :type exit_options: ~azure.batch.models.ExitOptions - """ - - _validation = { - 'code': {'required': True}, - 'exit_options': {'required': True}, - } - - _attribute_map = { - 'code': {'key': 'code', 'type': 'int'}, - 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, - } - - def __init__(self, *, code: int, exit_options, **kwargs) -> None: - super(ExitCodeMapping, self).__init__(**kwargs) - self.code = code - self.exit_options = exit_options - - -class ExitCodeRangeMapping(Model): - """A range of exit codes and how the Batch service should respond to exit - codes within that range. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The first exit code in the range. - :type start: int - :param end: Required. The last exit code in the range. - :type end: int - :param exit_options: Required. How the Batch service should respond if the - Task exits with an exit code in the range start to end (inclusive). - :type exit_options: ~azure.batch.models.ExitOptions - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - 'exit_options': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'start', 'type': 'int'}, - 'end': {'key': 'end', 'type': 'int'}, - 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, - } - - def __init__(self, *, start: int, end: int, exit_options, **kwargs) -> None: - super(ExitCodeRangeMapping, self).__init__(**kwargs) - self.start = start - self.end = end - self.exit_options = exit_options - - -class ExitConditions(Model): - """Specifies how the Batch service should respond when the Task completes. 
- - :param exit_codes: - :type exit_codes: list[~azure.batch.models.ExitCodeMapping] - :param exit_code_ranges: - :type exit_code_ranges: list[~azure.batch.models.ExitCodeRangeMapping] - :param pre_processing_error: How the Batch service should respond if the - Task fails to start due to an error. - :type pre_processing_error: ~azure.batch.models.ExitOptions - :param file_upload_error: How the Batch service should respond if a file - upload error occurs. If the Task exited with an exit code that was - specified via exitCodes or exitCodeRanges, and then encountered a file - upload error, then the action specified by the exit code takes precedence. - :type file_upload_error: ~azure.batch.models.ExitOptions - :param default: How the Batch service should respond if the Task fails - with an exit condition not covered by any of the other properties. This - value is used if the Task exits with any nonzero exit code not listed in - the exitCodes or exitCodeRanges collection, with a pre-processing error if - the preProcessingError property is not present, or with a file upload - error if the fileUploadError property is not present. If you want - non-default behavior on exit code 0, you must list it explicitly using the - exitCodes or exitCodeRanges collection. - :type default: ~azure.batch.models.ExitOptions - """ - - _attribute_map = { - 'exit_codes': {'key': 'exitCodes', 'type': '[ExitCodeMapping]'}, - 'exit_code_ranges': {'key': 'exitCodeRanges', 'type': '[ExitCodeRangeMapping]'}, - 'pre_processing_error': {'key': 'preProcessingError', 'type': 'ExitOptions'}, - 'file_upload_error': {'key': 'fileUploadError', 'type': 'ExitOptions'}, - 'default': {'key': 'default', 'type': 'ExitOptions'}, - } - - def __init__(self, *, exit_codes=None, exit_code_ranges=None, pre_processing_error=None, file_upload_error=None, default=None, **kwargs) -> None: - super(ExitConditions, self).__init__(**kwargs) - self.exit_codes = exit_codes - self.exit_code_ranges = exit_code_ranges - self.pre_processing_error = pre_processing_error - self.file_upload_error = file_upload_error - self.default = default - - -class ExitOptions(Model): - """Specifies how the Batch service responds to a particular exit condition. - - :param job_action: The default is none for exit code 0 and terminate for - all other exit conditions. If the Job's onTaskFailed property is noaction, - then specifying this property returns an error and the add Task request - fails with an invalid property value error; if you are calling the REST - API directly, the HTTP status code is 400 (Bad Request). Possible values - include: 'none', 'disable', 'terminate' - :type job_action: str or ~azure.batch.models.JobAction - :param dependency_action: Possible values are 'satisfy' (allowing - dependent tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent tasks. - Possible values include: 'satisfy', 'block' - :type dependency_action: str or ~azure.batch.models.DependencyAction - """ - - _attribute_map = { - 'job_action': {'key': 'jobAction', 'type': 'JobAction'}, - 'dependency_action': {'key': 'dependencyAction', 'type': 'DependencyAction'}, - } - - def __init__(self, *, job_action=None, dependency_action=None, **kwargs) -> None: - super(ExitOptions, self).__init__(**kwargs) - self.job_action = job_action - self.dependency_action = dependency_action - - -class FileDeleteFromComputeNodeOptions(Model): - """Additional parameters for delete_from_compute_node operation. 
- - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(FileDeleteFromComputeNodeOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class FileDeleteFromTaskOptions(Model): - """Additional parameters for delete_from_task operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(FileDeleteFromTaskOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class FileGetFromComputeNodeOptions(Model): - """Additional parameters for get_from_compute_node operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
- :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param ocp_range: The byte range to be retrieved. The default is to - retrieve the entire file. The format is bytes=startRange-endRange. - :type ocp_range: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'ocp_range': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, ocp_range: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(FileGetFromComputeNodeOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.ocp_range = ocp_range - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class FileGetFromTaskOptions(Model): - """Additional parameters for get_from_task operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param ocp_range: The byte range to be retrieved. The default is to - retrieve the entire file. The format is bytes=startRange-endRange. - :type ocp_range: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. 
The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'ocp_range': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, ocp_range: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(FileGetFromTaskOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.ocp_range = ocp_range - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class FileGetPropertiesFromComputeNodeOptions(Model): - """Additional parameters for get_properties_from_compute_node operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(FileGetPropertiesFromComputeNodeOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class FileGetPropertiesFromTaskOptions(Model): - """Additional parameters for get_properties_from_task operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(FileGetPropertiesFromTaskOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class FileListFromComputeNodeOptions(Model): - """Additional parameters for list_from_compute_node operation. - - :param filter: An OData $filter clause. 
For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. - :type filter: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 files can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(FileListFromComputeNodeOptions, self).__init__(**kwargs) - self.filter = filter - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class FileListFromTaskOptions(Model): - """Additional parameters for list_from_task operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. - :type filter: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 files can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(FileListFromTaskOptions, self).__init__(**kwargs) - self.filter = filter - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class FileProperties(Model): - """The properties of a file on a Compute Node. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: The creation time is not returned for files on Linux - Compute Nodes. - :type creation_time: datetime - :param last_modified: Required. - :type last_modified: datetime - :param content_length: Required. The length of the file. - :type content_length: long - :param content_type: - :type content_type: str - :param file_mode: The file mode is returned only for files on Linux - Compute Nodes. - :type file_mode: str - """ - - _validation = { - 'last_modified': {'required': True}, - 'content_length': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, - 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, - 'content_length': {'key': 'contentLength', 'type': 'long'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'file_mode': {'key': 'fileMode', 'type': 'str'}, - } - - def __init__(self, *, last_modified, content_length: int, creation_time=None, content_type: str=None, file_mode: str=None, **kwargs) -> None: - super(FileProperties, self).__init__(**kwargs) - self.creation_time = creation_time - self.last_modified = last_modified - self.content_length = content_length - self.content_type = content_type - self.file_mode = file_mode - - -class HttpHeader(Model): - """An HTTP header name-value pair. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param value: - :type value: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__(self, *, name: str, value: str=None, **kwargs) -> None: - super(HttpHeader, self).__init__(**kwargs) - self.name = name - self.value = value - - -class ImageInformation(Model): - """A reference to the Azure Virtual Machines Marketplace Image and additional - information about the Image. - - All required parameters must be populated in order to send to Azure. - - :param node_agent_sku_id: Required. - :type node_agent_sku_id: str - :param image_reference: Required. The reference to the Azure Virtual - Machine's Marketplace Image. - :type image_reference: ~azure.batch.models.ImageReference - :param os_type: Required. Possible values include: 'linux', 'windows' - :type os_type: str or ~azure.batch.models.OSType - :param capabilities: Not every capability of the Image is listed. - Capabilities in this list are considered of special interest and are - generally related to integration with other features in the Azure Batch - service. 
- :type capabilities: list[str] - :param batch_support_end_of_life: - :type batch_support_end_of_life: datetime - :param verification_type: Required. Possible values include: 'verified', - 'unverified' - :type verification_type: str or ~azure.batch.models.VerificationType - """ - - _validation = { - 'node_agent_sku_id': {'required': True}, - 'image_reference': {'required': True}, - 'os_type': {'required': True}, - 'verification_type': {'required': True}, - } - - _attribute_map = { - 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, - 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, - 'os_type': {'key': 'osType', 'type': 'OSType'}, - 'capabilities': {'key': 'capabilities', 'type': '[str]'}, - 'batch_support_end_of_life': {'key': 'batchSupportEndOfLife', 'type': 'iso-8601'}, - 'verification_type': {'key': 'verificationType', 'type': 'VerificationType'}, - } - - def __init__(self, *, node_agent_sku_id: str, image_reference, os_type, verification_type, capabilities=None, batch_support_end_of_life=None, **kwargs) -> None: - super(ImageInformation, self).__init__(**kwargs) - self.node_agent_sku_id = node_agent_sku_id - self.image_reference = image_reference - self.os_type = os_type - self.capabilities = capabilities - self.batch_support_end_of_life = batch_support_end_of_life - self.verification_type = verification_type - - -class ImageReference(Model): - """A reference to an Azure Virtual Machines Marketplace Image or a Azure - Compute Gallery Image. To get the list of all Azure Marketplace Image - references verified by Azure Batch, see the 'List Supported Images' - operation. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :param publisher: For example, Canonical or MicrosoftWindowsServer. - :type publisher: str - :param offer: For example, UbuntuServer or WindowsServer. - :type offer: str - :param sku: For example, 18.04-LTS or 2019-Datacenter. - :type sku: str - :param version: A value of 'latest' can be specified to select the latest - version of an Image. If omitted, the default is 'latest'. - :type version: str - :param virtual_machine_image_id: This property is mutually exclusive with - other ImageReference properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same subscription as the - Azure Batch account. If the image version is not specified in the imageId, - the latest version will be used. For information about the firewall - settings for the Batch Compute Node agent to communicate with the Batch - service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - :type virtual_machine_image_id: str - :ivar exact_version: The specific version of the platform image or - marketplace image used to create the node. This read-only field differs - from 'version' only if the value specified for 'version' when the pool was - created was 'latest'. 
- :vartype exact_version: str - """ - - _validation = { - 'exact_version': {'readonly': True}, - } - - _attribute_map = { - 'publisher': {'key': 'publisher', 'type': 'str'}, - 'offer': {'key': 'offer', 'type': 'str'}, - 'sku': {'key': 'sku', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'virtual_machine_image_id': {'key': 'virtualMachineImageId', 'type': 'str'}, - 'exact_version': {'key': 'exactVersion', 'type': 'str'}, - } - - def __init__(self, *, publisher: str=None, offer: str=None, sku: str=None, version: str=None, virtual_machine_image_id: str=None, **kwargs) -> None: - super(ImageReference, self).__init__(**kwargs) - self.publisher = publisher - self.offer = offer - self.sku = sku - self.version = version - self.virtual_machine_image_id = virtual_machine_image_id - self.exact_version = None - - -class InboundEndpoint(Model): - """An inbound endpoint on a Compute Node. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param protocol: Required. The protocol of the endpoint. Possible values - include: 'tcp', 'udp' - :type protocol: str or ~azure.batch.models.InboundEndpointProtocol - :param public_ip_address: Required. - :type public_ip_address: str - :param public_fqdn: Required. - :type public_fqdn: str - :param frontend_port: Required. The public port number of the endpoint. - :type frontend_port: int - :param backend_port: Required. The backend port number of the endpoint. - :type backend_port: int - """ - - _validation = { - 'name': {'required': True}, - 'protocol': {'required': True}, - 'public_ip_address': {'required': True}, - 'public_fqdn': {'required': True}, - 'frontend_port': {'required': True}, - 'backend_port': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, - 'public_ip_address': {'key': 'publicIPAddress', 'type': 'str'}, - 'public_fqdn': {'key': 'publicFQDN', 'type': 'str'}, - 'frontend_port': {'key': 'frontendPort', 'type': 'int'}, - 'backend_port': {'key': 'backendPort', 'type': 'int'}, - } - - def __init__(self, *, name: str, protocol, public_ip_address: str, public_fqdn: str, frontend_port: int, backend_port: int, **kwargs) -> None: - super(InboundEndpoint, self).__init__(**kwargs) - self.name = name - self.protocol = protocol - self.public_ip_address = public_ip_address - self.public_fqdn = public_fqdn - self.frontend_port = frontend_port - self.backend_port = backend_port - - -class InboundNATPool(Model): - """A inbound NAT Pool that can be used to address specific ports on Compute - Nodes in a Batch Pool externally. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name must be unique within a Batch Pool, can - contain letters, numbers, underscores, periods, and hyphens. Names must - start with a letter or number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If any invalid values are - provided the request fails with HTTP status code 400. - :type name: str - :param protocol: Required. The protocol of the endpoint. Possible values - include: 'tcp', 'udp' - :type protocol: str or ~azure.batch.models.InboundEndpointProtocol - :param backend_port: Required. The port number on the Compute Node. This - must be unique within a Batch Pool. Acceptable values are between 1 and - 65535 except for 22, 3389, 29876 and 29877 as these are reserved. 
If any - reserved values are provided the request fails with HTTP status code 400. - :type backend_port: int - :param frontend_port_range_start: Required. The first port number in the - range of external ports that will be used to provide inbound access to the - backendPort on individual Compute Nodes. Acceptable values range between 1 - and 65534 except ports from 50000 to 55000 which are reserved. All ranges - within a Pool must be distinct and cannot overlap. Each range must contain - at least 40 ports. If any reserved or overlapping values are provided the - request fails with HTTP status code 400. - :type frontend_port_range_start: int - :param frontend_port_range_end: Required. The last port number in the - range of external ports that will be used to provide inbound access to the - backendPort on individual Compute Nodes. Acceptable values range between 1 - and 65534 except ports from 50000 to 55000 which are reserved by the Batch - service. All ranges within a Pool must be distinct and cannot overlap. - Each range must contain at least 40 ports. If any reserved or overlapping - values are provided the request fails with HTTP status code 400. - :type frontend_port_range_end: int - :param network_security_group_rules: The maximum number of rules that can - be specified across all the endpoints on a Batch Pool is 25. If no network - security group rules are specified, a default rule will be created to - allow inbound access to the specified backendPort. If the maximum number - of network security group rules is exceeded the request fails with HTTP - status code 400. - :type network_security_group_rules: - list[~azure.batch.models.NetworkSecurityGroupRule] - """ - - _validation = { - 'name': {'required': True}, - 'protocol': {'required': True}, - 'backend_port': {'required': True}, - 'frontend_port_range_start': {'required': True}, - 'frontend_port_range_end': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, - 'backend_port': {'key': 'backendPort', 'type': 'int'}, - 'frontend_port_range_start': {'key': 'frontendPortRangeStart', 'type': 'int'}, - 'frontend_port_range_end': {'key': 'frontendPortRangeEnd', 'type': 'int'}, - 'network_security_group_rules': {'key': 'networkSecurityGroupRules', 'type': '[NetworkSecurityGroupRule]'}, - } - - def __init__(self, *, name: str, protocol, backend_port: int, frontend_port_range_start: int, frontend_port_range_end: int, network_security_group_rules=None, **kwargs) -> None: - super(InboundNATPool, self).__init__(**kwargs) - self.name = name - self.protocol = protocol - self.backend_port = backend_port - self.frontend_port_range_start = frontend_port_range_start - self.frontend_port_range_end = frontend_port_range_end - self.network_security_group_rules = network_security_group_rules - - -class InstanceViewStatus(Model): - """The instance view status. - - :param code: - :type code: str - :param display_status: - :type display_status: str - :param level: Possible values include: 'Error', 'Info', 'Warning' - :type level: str or ~azure.batch.models.StatusLevelTypes - :param message: - :type message: str - :param time: The time of the status. 
- :type time: str - """ - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'display_status': {'key': 'displayStatus', 'type': 'str'}, - 'level': {'key': 'level', 'type': 'StatusLevelTypes'}, - 'message': {'key': 'message', 'type': 'str'}, - 'time': {'key': 'time', 'type': 'str'}, - } - - def __init__(self, *, code: str=None, display_status: str=None, level=None, message: str=None, time: str=None, **kwargs) -> None: - super(InstanceViewStatus, self).__init__(**kwargs) - self.code = code - self.display_status = display_status - self.level = level - self.message = message - self.time = time - - -class JobAddOptions(Model): - """Additional parameters for add operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(JobAddOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class JobAddParameter(Model): - """An Azure Batch Job to add. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. The ID can contain any combination of alphanumeric - characters including hyphens and underscores, and cannot contain more than - 64 characters. The ID is case-preserving and case-insensitive (that is, - you may not have two IDs within an Account that differ only by case). - :type id: str - :param display_name: The display name need not be unique and can contain - any Unicode characters up to a maximum length of 1024. - :type display_name: str - :param priority: The priority of the Job. Priority values can range from - -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. The default value is 0. - :type priority: int - :param max_parallel_tasks: The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 - or greater than 0 if specified. If not specified, the default value is -1, - which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created - using the update job API. Default value: -1 . - :type max_parallel_tasks: int - :param allow_task_preemption: Whether Tasks in this job can be preempted - by other high priority jobs. 
If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be - able to requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - :type allow_task_preemption: bool - :param constraints: The execution constraints for the Job. - :type constraints: ~azure.batch.models.JobConstraints - :param job_manager_task: Details of a Job Manager Task to be launched when - the Job is started. If the Job does not specify a Job Manager Task, the - user must explicitly add Tasks to the Job. If the Job does specify a Job - Manager Task, the Batch service creates the Job Manager Task when the Job - is created, and will try to schedule the Job Manager Task before - scheduling other Tasks in the Job. The Job Manager Task's typical purpose - is to control and/or monitor Job execution, for example by deciding what - additional Tasks to run, determining when the work is complete, etc. - (However, a Job Manager Task is not restricted to these activities - it is - a fully-fledged Task in the system and can perform whatever actions are - required for the Job.) For example, a Job Manager Task might download a - file specified as a parameter, analyze the contents of that file and - submit additional Tasks based on those contents. - :type job_manager_task: ~azure.batch.models.JobManagerTask - :param job_preparation_task: The Job Preparation Task. If a Job has a Job - Preparation Task, the Batch service will run the Job Preparation Task on a - Node before starting any Tasks of that Job on that Compute Node. - :type job_preparation_task: ~azure.batch.models.JobPreparationTask - :param job_release_task: The Job Release Task. A Job Release Task cannot - be specified without also specifying a Job Preparation Task for the Job. - The Batch service runs the Job Release Task on the Nodes that have run the - Job Preparation Task. The primary purpose of the Job Release Task is to - undo changes to Compute Nodes made by the Job Preparation Task. Example - activities include deleting local files, or shutting down services that - were started as part of Job preparation. - :type job_release_task: ~azure.batch.models.JobReleaseTask - :param common_environment_settings: Individual Tasks can override an - environment setting specified here by specifying the same setting name - with a different value. - :type common_environment_settings: - list[~azure.batch.models.EnvironmentSetting] - :param pool_info: Required. The Pool on which the Batch service runs the - Job's Tasks. - :type pool_info: ~azure.batch.models.PoolInformation - :param on_all_tasks_complete: The action the Batch service should take - when all Tasks in the Job are in the completed state. Note that if a Job - contains no Tasks, then all Tasks are considered complete. This option is - therefore most commonly used with a Job Manager task; if you want to use - automatic Job termination without a Job Manager, you should initially set - onAllTasksComplete to noaction and update the Job properties to set - onAllTasksComplete to terminatejob once you have finished adding Tasks. - The default is noaction. Possible values include: 'noAction', - 'terminateJob' - :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete - :param on_task_failure: The action the Batch service should take when any - Task in the Job fails. A Task is considered to have failed if it has a - failureInfo.
A failureInfo is set if the Task completes with a non-zero - exit code after exhausting its retry count, or if there was an error - starting the Task, for example due to a resource file download error. The - default is noaction. Possible values include: 'noAction', - 'performExitOptionsJobAction' - :type on_task_failure: str or ~azure.batch.models.OnTaskFailure - :param metadata: The Batch service does not assign any meaning to - metadata; it is solely for the use of user code. - :type metadata: list[~azure.batch.models.MetadataItem] - :param uses_task_dependencies: Whether Tasks in the Job can define - dependencies on each other. The default is false. - :type uses_task_dependencies: bool - :param network_configuration: The network configuration for the Job. - :type network_configuration: ~azure.batch.models.JobNetworkConfiguration - """ - - _validation = { - 'id': {'required': True}, - 'pool_info': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'priority': {'key': 'priority', 'type': 'int'}, - 'max_parallel_tasks': {'key': 'maxParallelTasks', 'type': 'int'}, - 'allow_task_preemption': {'key': 'allowTaskPreemption', 'type': 'bool'}, - 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, - 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, - 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, - 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, - 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, - 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, - 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, - 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, - 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, - } - - def __init__(self, *, id: str, pool_info, display_name: str=None, priority: int=None, max_parallel_tasks: int=-1, allow_task_preemption: bool=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, on_all_tasks_complete=None, on_task_failure=None, metadata=None, uses_task_dependencies: bool=None, network_configuration=None, **kwargs) -> None: - super(JobAddParameter, self).__init__(**kwargs) - self.id = id - self.display_name = display_name - self.priority = priority - self.max_parallel_tasks = max_parallel_tasks - self.allow_task_preemption = allow_task_preemption - self.constraints = constraints - self.job_manager_task = job_manager_task - self.job_preparation_task = job_preparation_task - self.job_release_task = job_release_task - self.common_environment_settings = common_environment_settings - self.pool_info = pool_info - self.on_all_tasks_complete = on_all_tasks_complete - self.on_task_failure = on_task_failure - self.metadata = metadata - self.uses_task_dependencies = uses_task_dependencies - self.network_configuration = network_configuration - - -class JobConstraints(Model): - """The execution constraints for a Job. - - :param max_wall_clock_time: If the Job does not complete within the time - limit, the Batch service terminates it and any Tasks that are still - running. 
In this case, the termination reason will be - MaxWallClockTimeExpiry. If this property is not specified, there is no - time limit on how long the Job may run. - :type max_wall_clock_time: timedelta - :param max_task_retry_count: The maximum number of times each Task may be - retried. The Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of retries. The - Batch service will try each Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries a Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is - 0, the Batch service does not retry Tasks. If the maximum retry count is - -1, the Batch service retries Tasks without limit. The default value is 0 - (no retries). - :type max_task_retry_count: int - """ - - _attribute_map = { - 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, - 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, - } - - def __init__(self, *, max_wall_clock_time=None, max_task_retry_count: int=None, **kwargs) -> None: - super(JobConstraints, self).__init__(**kwargs) - self.max_wall_clock_time = max_wall_clock_time - self.max_task_retry_count = max_task_retry_count - - -class JobDeleteOptions(Model): - """Additional parameters for delete operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobDeleteOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobDisableOptions(Model): - """Additional parameters for disable operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
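# --- Illustrative aside (editor's note, not part of the diff): the per-call "*Options" models
# above all share the same header/query pattern. A hedged sketch of a conditional delete; the
# job_delete_options keyword on the pre-migration JobOperations.delete is assumed here.
import uuid
from azure.batch import models

delete_options = models.JobDeleteOptions(
    timeout=30,                                # server-side processing limit, in seconds
    client_request_id=str(uuid.uuid4()),       # caller-generated GUID, useful for tracing
    return_client_request_id=True,
    if_match='"0x8D00000000000000"',           # placeholder ETag from a previous Get Job response;
                                               # the delete is rejected if the Job has changed since
)
# batch_client.job.delete("sample-job", job_delete_options=delete_options)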
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobDisableOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobDisableParameter(Model): - """Options when disabling a Job. - - All required parameters must be populated in order to send to Azure. - - :param disable_tasks: Required. Possible values include: 'requeue', - 'terminate', 'wait' - :type disable_tasks: str or ~azure.batch.models.DisableJobOption - """ - - _validation = { - 'disable_tasks': {'required': True}, - } - - _attribute_map = { - 'disable_tasks': {'key': 'disableTasks', 'type': 'DisableJobOption'}, - } - - def __init__(self, *, disable_tasks, **kwargs) -> None: - super(JobDisableParameter, self).__init__(**kwargs) - self.disable_tasks = disable_tasks - - -class JobEnableOptions(Model): - """Additional parameters for enable operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. 
The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobEnableOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobExecutionInformation(Model): - """Contains information about the execution of a Job in the Azure Batch - service. - - All required parameters must be populated in order to send to Azure. - - :param start_time: Required. This is the time at which the Job was - created. - :type start_time: datetime - :param end_time: This property is set only if the Job is in the completed - state. - :type end_time: datetime - :param pool_id: This element contains the actual Pool where the Job is - assigned. When you get Job details from the service, they also contain a - poolInfo element, which contains the Pool configuration data from when the - Job was added or updated. That poolInfo element may also contain a poolId - element. If it does, the two IDs are the same. If it does not, it means - the Job ran on an auto Pool, and this property contains the ID of that - auto Pool. - :type pool_id: str - :param scheduling_error: Details of any error encountered by the service - in starting the Job. This property is not set if there was no error - starting the Job. - :type scheduling_error: ~azure.batch.models.JobSchedulingError - :param terminate_reason: This property is set only if the Job is in the - completed state. If the Batch service terminates the Job, it sets the - reason as follows: JMComplete - the Job Manager Task completed, and - killJobOnCompletion was set to true. MaxWallClockTimeExpiry - the Job - reached its maxWallClockTime constraint. TerminateJobSchedule - the Job - ran as part of a schedule, and the schedule terminated. AllTasksComplete - - the Job's onAllTasksComplete attribute is set to terminatejob, and all - Tasks in the Job are complete. TaskFailed - the Job's onTaskFailure - attribute is set to performExitOptionsJobAction, and a Task in the Job - failed with an exit condition that specified a jobAction of terminatejob. - Any other string is a user-defined reason specified in a call to the - 'Terminate a Job' operation. 
- :type terminate_reason: str - """ - - _validation = { - 'start_time': {'required': True}, - } - - _attribute_map = { - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'pool_id': {'key': 'poolId', 'type': 'str'}, - 'scheduling_error': {'key': 'schedulingError', 'type': 'JobSchedulingError'}, - 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, - } - - def __init__(self, *, start_time, end_time=None, pool_id: str=None, scheduling_error=None, terminate_reason: str=None, **kwargs) -> None: - super(JobExecutionInformation, self).__init__(**kwargs) - self.start_time = start_time - self.end_time = end_time - self.pool_id = pool_id - self.scheduling_error = scheduling_error - self.terminate_reason = terminate_reason - - -class JobGetOptions(Model): - """Additional parameters for get operation. - - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
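# --- Illustrative aside (editor's note, not part of the diff): JobExecutionInformation is a
# response-only model read from a Job returned by the service. The execution_info attribute on
# the returned CloudJob is assumed from the pre-migration package; it is not shown in this hunk.
# cloud_job = batch_client.job.get("sample-job")
# info = cloud_job.execution_info
# if info.end_time is not None:                   # only set once the Job reaches the completed state
#     print(info.pool_id, info.terminate_reason)  # e.g. 'MaxWallClockTimeExpiry' or 'AllTasksComplete'
# if info.scheduling_error is not None:
#     print("Job failed to start:", info.scheduling_error)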
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobGetOptions, self).__init__(**kwargs) - self.select = select - self.expand = expand - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobGetTaskCountsOptions(Model): - """Additional parameters for get_task_counts operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(JobGetTaskCountsOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class JobListFromJobScheduleOptions(Model): - """Additional parameters for list_from_job_schedule operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Jobs can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. 
If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(JobListFromJobScheduleOptions, self).__init__(**kwargs) - self.filter = filter - self.select = select - self.expand = expand - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class JobListOptions(Model): - """Additional parameters for list operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Jobs can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
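# --- Illustrative aside (editor's note, not part of the diff): the list operations page through
# results, with max_results capping each page at no more than 1000 Jobs. The OData filter below
# is an example expression only; property names and the list keyword argument are assumptions,
# and the supported syntax is described at the odata-filters-in-batch link in the docstrings.
from azure.batch import models

list_options = models.JobListFromJobScheduleOptions(
    filter="state eq 'active'",   # example OData $filter
    select="id,state",            # trim the response payload to the listed properties
    max_results=100,              # page size; the service never returns more than 1000 per page
)
# for job in batch_client.job.list_from_job_schedule(
#         "sample-schedule", job_list_from_job_schedule_options=list_options):
#     print(job.id)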
- :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(JobListOptions, self).__init__(**kwargs) - self.filter = filter - self.select = select - self.expand = expand - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class JobListPreparationAndReleaseTaskStatusOptions(Model): - """Additional parameters for list_preparation_and_release_task_status - operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Tasks can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(JobListPreparationAndReleaseTaskStatusOptions, self).__init__(**kwargs) - self.filter = filter - self.select = select - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class JobManagerTask(Model): - """Specifies details of a Job Manager Task. - - The Job Manager Task is automatically started when the Job is created. The - Batch service tries to schedule the Job Manager Task before any other Tasks - in the Job. 
When shrinking a Pool, the Batch service tries to preserve - Nodes where Job Manager Tasks are running for as long as possible (that is, - Compute Nodes running 'normal' Tasks are removed before Compute Nodes - running Job Manager Tasks). When a Job Manager Task fails and needs to be - restarted, the system tries to schedule it at the highest priority. If - there are no idle Compute Nodes available, the system may terminate one of - the running Tasks in the Pool and return it to the queue in order to make - room for the Job Manager Task to restart. Note that a Job Manager Task in - one Job does not have priority over Tasks in other Jobs. Across Jobs, only - Job level priorities are observed. For example, if a Job Manager in a - priority 0 Job needs to be restarted, it will not displace Tasks of a - priority 1 Job. Batch will retry Tasks when a recovery operation is - triggered on a Node. Examples of recovery operations include (but are not - limited to) when an unhealthy Node is rebooted or a Compute Node - disappeared due to host failure. Retries due to recovery operations are - independent of and are not counted against the maxTaskRetryCount. Even if - the maxTaskRetryCount is 0, an internal retry due to a recovery operation - may occur. Because of this, all Tasks should be idempotent. This means - Tasks need to tolerate being interrupted and restarted without causing any - corruption or duplicate data. The best practice for long running Tasks is - to use some form of checkpointing. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. The ID can contain any combination of alphanumeric - characters including hyphens and underscores and cannot contain more than - 64 characters. - :type id: str - :param display_name: It need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - :type display_name: str - :param command_line: Required. The command line does not run under a - shell, and therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of such - features, you should invoke the shell in the command line, for example - using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If - the command line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - :type command_line: str - :param container_settings: The settings for the container under which the - Job Manager Task runs. If the Pool that will run this Task has - containerConfiguration set, this must be set as well. If the Pool that - will run this Task doesn't have containerConfiguration set, this must not - be set. When this is specified, all directories recursively below the - AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) - are mapped into the container, all Task environment variables are mapped - into the container, and the Task command line is executed in the - container. Files produced in the container outside of - AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning - that Batch file APIs will not be able to access those files. - :type container_settings: ~azure.batch.models.TaskContainerSettings - :param resource_files: Files listed under this element are located in the - Task's working directory. 
There is a maximum size for the list of resource - files. When the max size is exceeded, the request will fail and the - response error code will be RequestEntityTooLarge. If this occurs, the - collection of ResourceFiles must be reduced in size. This can be achieved - using .zip files, Application Packages, or Docker Containers. - :type resource_files: list[~azure.batch.models.ResourceFile] - :param output_files: For multi-instance Tasks, the files will only be - uploaded from the Compute Node on which the primary Task is executed. - :type output_files: list[~azure.batch.models.OutputFile] - :param environment_settings: - :type environment_settings: list[~azure.batch.models.EnvironmentSetting] - :param constraints: Constraints that apply to the Job Manager Task. - :type constraints: ~azure.batch.models.TaskConstraints - :param required_slots: The number of scheduling slots that the Task - requires to run. The default is 1. A Task can only be scheduled to run on - a compute node if the node has enough free scheduling slots available. For - multi-instance Tasks, this property is not supported and must not be - specified. - :type required_slots: int - :param kill_job_on_completion: Whether completion of the Job Manager Task - signifies completion of the entire Job. If true, when the Job Manager Task - completes, the Batch service marks the Job as complete. If any Tasks are - still running at this time (other than Job Release), those Tasks are - terminated. If false, the completion of the Job Manager Task does not - affect the Job status. In this case, you should either use the - onAllTasksComplete attribute to terminate the Job, or have a client or - user terminate the Job explicitly. An example of this is if the Job - Manager creates a set of Tasks but then takes no further role in their - execution. The default value is true. If you are using the - onAllTasksComplete and onTaskFailure attributes to control Job lifetime, - and using the Job Manager Task only to create the Tasks for the Job (not - to monitor progress), then it is important to set killJobOnCompletion to - false. - :type kill_job_on_completion: bool - :param user_identity: The user identity under which the Job Manager Task - runs. If omitted, the Task runs as a non-administrative user unique to the - Task. - :type user_identity: ~azure.batch.models.UserIdentity - :param run_exclusive: Whether the Job Manager Task requires exclusive use - of the Compute Node where it runs. If true, no other Tasks will run on the - same Node for as long as the Job Manager is running. If false, other Tasks - can run simultaneously with the Job Manager on a Compute Node. The Job - Manager Task counts normally against the Compute Node's concurrent Task - limit, so this is only relevant if the Compute Node allows multiple - concurrent Tasks. The default value is true. - :type run_exclusive: bool - :param application_package_references: Application Packages are downloaded - and deployed to a shared directory, not the Task working directory. - Therefore, if a referenced Application Package is already on the Compute - Node, and is up to date, then it is not re-downloaded; the existing copy - on the Compute Node is used. If a referenced Application Package cannot be - installed, for example because the package has been deleted or because - download failed, the Task fails. 
- :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param authentication_token_settings: The settings for an authentication - token that the Task can use to perform Batch service operations. If this - property is set, the Batch service provides the Task with an - authentication token which can be used to authenticate Batch service - operations without requiring an Account access key. The token is provided - via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations - that the Task can carry out using the token depend on the settings. For - example, a Task can request Job permissions in order to add other Tasks to - the Job, or check the status of the Job or of other Tasks under the Job. - :type authentication_token_settings: - ~azure.batch.models.AuthenticationTokenSettings - :param allow_low_priority_node: Whether the Job Manager Task may run on a - Spot/Low-priority Compute Node. The default value is true. - :type allow_low_priority_node: bool - """ - - _validation = { - 'id': {'required': True}, - 'command_line': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'command_line': {'key': 'commandLine', 'type': 'str'}, - 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, - 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, - 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, - 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, - 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, - 'required_slots': {'key': 'requiredSlots', 'type': 'int'}, - 'kill_job_on_completion': {'key': 'killJobOnCompletion', 'type': 'bool'}, - 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, - 'run_exclusive': {'key': 'runExclusive', 'type': 'bool'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, - 'allow_low_priority_node': {'key': 'allowLowPriorityNode', 'type': 'bool'}, - } - - def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None, resource_files=None, output_files=None, environment_settings=None, constraints=None, required_slots: int=None, kill_job_on_completion: bool=None, user_identity=None, run_exclusive: bool=None, application_package_references=None, authentication_token_settings=None, allow_low_priority_node: bool=None, **kwargs) -> None: - super(JobManagerTask, self).__init__(**kwargs) - self.id = id - self.display_name = display_name - self.command_line = command_line - self.container_settings = container_settings - self.resource_files = resource_files - self.output_files = output_files - self.environment_settings = environment_settings - self.constraints = constraints - self.required_slots = required_slots - self.kill_job_on_completion = kill_job_on_completion - self.user_identity = user_identity - self.run_exclusive = run_exclusive - self.application_package_references = application_package_references - self.authentication_token_settings = authentication_token_settings - self.allow_low_priority_node = allow_low_priority_node - - -class JobNetworkConfiguration(Model): - """The network configuration for the Job. - - All required parameters must be populated in order to send to Azure. 
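# --- Illustrative aside (editor's note, not part of the diff): a minimal JobManagerTask sketch.
# The command line is not run under a shell, so the shell is invoked explicitly; the script name
# is a placeholder. kill_job_on_completion is turned off here on the assumption that
# onAllTasksComplete drives Job termination instead, as the docstring above recommends.
from azure.batch import models

job_manager = models.JobManagerTask(
    id="jobmanager",
    command_line="/bin/sh -c 'python3 submit_tasks.py'",  # explicit shell for variable expansion etc.
    kill_job_on_completion=False,   # let onAllTasksComplete='terminateJob' finish the Job instead
    required_slots=1,
    allow_low_priority_node=True,
)
# job = models.JobAddParameter(id="sample-job", pool_info=pool_info,
#                              job_manager_task=job_manager,
#                              on_all_tasks_complete="terminateJob")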
- - :param subnet_id: Required. The virtual network must be in the same region - and subscription as the Azure Batch Account. The specified subnet should - have enough free IP addresses to accommodate the number of Compute Nodes - which will run Tasks from the Job. This can be up to the number of Compute - Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have - the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) - role for the specified VNet so that Azure Batch service can schedule Tasks - on the Nodes. This can be verified by checking if the specified VNet has - any associated Network Security Groups (NSG). If communication to the - Nodes in the specified subnet is denied by an NSG, then the Batch service - will set the state of the Compute Nodes to unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound communication - from the Azure Batch service. For Pools created with a Virtual Machine - configuration, enable ports 29876 and 29877, as well as port 22 for Linux - and port 3389 for Windows. Port 443 is also required to be open for - outbound connections for communications to Azure Storage. For more details - see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration - :type subnet_id: str - """ - - _validation = { - 'subnet_id': {'required': True}, - } - - _attribute_map = { - 'subnet_id': {'key': 'subnetId', 'type': 'str'}, - } - - def __init__(self, *, subnet_id: str, **kwargs) -> None: - super(JobNetworkConfiguration, self).__init__(**kwargs) - self.subnet_id = subnet_id - - -class JobPatchOptions(Model): - """Additional parameters for patch operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. 
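# --- Illustrative aside (editor's note, not part of the diff): the subnet_id for
# JobNetworkConfiguration is a full ARM resource ID of the form described in the docstring
# above. Subscription, resource group, VNet, and subnet names below are placeholders.
from azure.batch import models

network_config = models.JobNetworkConfiguration(
    subnet_id=(
        "/subscriptions/00000000-0000-0000-0000-000000000000"
        "/resourceGroups/my-rg/providers/Microsoft.Network"
        "/virtualNetworks/my-vnet/subnets/batch-subnet"
    )
)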
- :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobPatchOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobPatchParameter(Model): - """The set of changes to be made to a Job. - - :param priority: The priority of the Job. Priority values can range from - -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. If omitted, the priority of the Job is left unchanged. - :type priority: int - :param max_parallel_tasks: The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 - or greater than 0 if specified. If not specified, the default value is -1, - which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created - using the update job API. - :type max_parallel_tasks: int - :param allow_task_preemption: Whether Tasks in this job can be preempted - by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be - able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - :type allow_task_preemption: bool - :param on_all_tasks_complete: The action the Batch service should take - when all Tasks in the Job are in the completed state. If omitted, the - completion behavior is left unchanged. You may not change the value from - terminatejob to noaction - that is, once you have engaged automatic Job - termination, you cannot turn it off again. If you try to do this, the - request fails with an 'invalid property value' error response; if you are - calling the REST API directly, the HTTP status code is 400 (Bad Request). - Possible values include: 'noAction', 'terminateJob' - :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete - :param constraints: The execution constraints for the Job. If omitted, the - existing execution constraints are left unchanged. - :type constraints: ~azure.batch.models.JobConstraints - :param pool_info: The Pool on which the Batch service runs the Job's - Tasks. You may change the Pool for a Job only when the Job is disabled. 
- The Patch Job call will fail if you include the poolInfo element and the - Job is not disabled. If you specify an autoPoolSpecification in the - poolInfo, only the keepAlive property of the autoPoolSpecification can be - updated, and then only if the autoPoolSpecification has a - poolLifetimeOption of Job (other job properties can be updated as normal). - If omitted, the Job continues to run on its current Pool. - :type pool_info: ~azure.batch.models.PoolInformation - :param metadata: If omitted, the existing Job metadata is left unchanged. - :type metadata: list[~azure.batch.models.MetadataItem] - """ - - _attribute_map = { - 'priority': {'key': 'priority', 'type': 'int'}, - 'max_parallel_tasks': {'key': 'maxParallelTasks', 'type': 'int'}, - 'allow_task_preemption': {'key': 'allowTaskPreemption', 'type': 'bool'}, - 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, - 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, - 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - } - - def __init__(self, *, priority: int=None, max_parallel_tasks: int=None, allow_task_preemption: bool=None, on_all_tasks_complete=None, constraints=None, pool_info=None, metadata=None, **kwargs) -> None: - super(JobPatchParameter, self).__init__(**kwargs) - self.priority = priority - self.max_parallel_tasks = max_parallel_tasks - self.allow_task_preemption = allow_task_preemption - self.on_all_tasks_complete = on_all_tasks_complete - self.constraints = constraints - self.pool_info = pool_info - self.metadata = metadata - - -class JobPreparationAndReleaseTaskExecutionInformation(Model): - """The status of the Job Preparation and Job Release Tasks on a Compute Node. - - :param pool_id: - :type pool_id: str - :param node_id: - :type node_id: str - :param node_url: - :type node_url: str - :param job_preparation_task_execution_info: Information about the - execution status of the Job Preparation Task on this Compute Node. - :type job_preparation_task_execution_info: - ~azure.batch.models.JobPreparationTaskExecutionInformation - :param job_release_task_execution_info: Information about the execution - status of the Job Release Task on this Compute Node. This property is set - only if the Job Release Task has run on the Compute Node. - :type job_release_task_execution_info: - ~azure.batch.models.JobReleaseTaskExecutionInformation - """ - - _attribute_map = { - 'pool_id': {'key': 'poolId', 'type': 'str'}, - 'node_id': {'key': 'nodeId', 'type': 'str'}, - 'node_url': {'key': 'nodeUrl', 'type': 'str'}, - 'job_preparation_task_execution_info': {'key': 'jobPreparationTaskExecutionInfo', 'type': 'JobPreparationTaskExecutionInformation'}, - 'job_release_task_execution_info': {'key': 'jobReleaseTaskExecutionInfo', 'type': 'JobReleaseTaskExecutionInformation'}, - } - - def __init__(self, *, pool_id: str=None, node_id: str=None, node_url: str=None, job_preparation_task_execution_info=None, job_release_task_execution_info=None, **kwargs) -> None: - super(JobPreparationAndReleaseTaskExecutionInformation, self).__init__(**kwargs) - self.pool_id = pool_id - self.node_id = node_id - self.node_url = node_url - self.job_preparation_task_execution_info = job_preparation_task_execution_info - self.job_release_task_execution_info = job_release_task_execution_info - - -class JobPreparationTask(Model): - """A Job Preparation Task to run before any Tasks of the Job on any given - Compute Node. 
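# --- Illustrative aside (editor's note, not part of the diff): a Patch Job request only sends
# the properties you set; everything omitted is left unchanged. Note the one-way switch
# described above: once on_all_tasks_complete is 'terminateJob' it cannot be patched back to
# 'noAction'. The client call in the comment assumes the pre-migration patch signature.
from azure.batch import models

patch = models.JobPatchParameter(
    priority=500,                          # -1000 (lowest) .. 1000 (highest)
    max_parallel_tasks=8,                  # must be -1 (unlimited) or greater than 0
    on_all_tasks_complete="terminateJob",  # irreversible once applied
)
# batch_client.job.patch("sample-job", patch)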
- - You can use Job Preparation to prepare a Node to run Tasks for the Job. - Activities commonly performed in Job Preparation include: Downloading - common resource files used by all the Tasks in the Job. The Job Preparation - Task can download these common resource files to the shared location on the - Node. (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service on the - Node so that all Tasks of that Job can communicate with it. If the Job - Preparation Task fails (that is, exhausts its retry count before exiting - with exit code 0), Batch will not run Tasks of this Job on the Node. The - Compute Node remains ineligible to run Tasks of this Job until it is - reimaged. The Compute Node remains active and can be used for other Jobs. - The Job Preparation Task can run multiple times on the same Node. - Therefore, you should write the Job Preparation Task to handle - re-execution. If the Node is rebooted, the Job Preparation Task is run - again on the Compute Node before scheduling any other Task of the Job, if - rerunOnNodeRebootAfterSuccess is true or if the Job Preparation Task did - not previously complete. If the Node is reimaged, the Job Preparation Task - is run again before scheduling any Task of the Job. Batch will retry Tasks - when a recovery operation is triggered on a Node. Examples of recovery - operations include (but are not limited to) when an unhealthy Node is - rebooted or a Compute Node disappeared due to host failure. Retries due to - recovery operations are independent of and are not counted against the - maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry - due to a recovery operation may occur. Because of this, all Tasks should be - idempotent. This means Tasks need to tolerate being interrupted and - restarted without causing any corruption or duplicate data. The best - practice for long running Tasks is to use some form of checkpointing. - - All required parameters must be populated in order to send to Azure. - - :param id: The ID can contain any combination of alphanumeric characters - including hyphens and underscores and cannot contain more than 64 - characters. If you do not specify this property, the Batch service assigns - a default value of 'jobpreparation'. No other Task in the Job can have the - same ID as the Job Preparation Task. If you try to submit a Task with the - same id, the Batch service rejects the request with error code - TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, - the HTTP status code is 409 (Conflict). - :type id: str - :param command_line: Required. The command line does not run under a - shell, and therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of such - features, you should invoke the shell in the command line, for example - using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If - the command line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - :type command_line: str - :param container_settings: The settings for the container under which the - Job Preparation Task runs. 
When this is specified, all directories - recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch - directories on the node) are mapped into the container, all Task - environment variables are mapped into the container, and the Task command - line is executed in the container. Files produced in the container outside - of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning - that Batch file APIs will not be able to access those files. - :type container_settings: ~azure.batch.models.TaskContainerSettings - :param resource_files: Files listed under this element are located in the - Task's working directory. There is a maximum size for the list of - resource files. When the max size is exceeded, the request will fail and - the response error code will be RequestEntityTooLarge. If this occurs, the - collection of ResourceFiles must be reduced in size. This can be achieved - using .zip files, Application Packages, or Docker Containers. - :type resource_files: list[~azure.batch.models.ResourceFile] - :param environment_settings: - :type environment_settings: list[~azure.batch.models.EnvironmentSetting] - :param constraints: Constraints that apply to the Job Preparation Task. - :type constraints: ~azure.batch.models.TaskConstraints - :param wait_for_success: Whether the Batch service should wait for the Job - Preparation Task to complete successfully before scheduling any other - Tasks of the Job on the Compute Node. A Job Preparation Task has completed - successfully if it exits with exit code 0. If true and the Job Preparation - Task fails on a Node, the Batch service retries the Job Preparation Task - up to its maximum retry count (as specified in the constraints element). - If the Task has still not completed successfully after all retries, then - the Batch service will not schedule Tasks of the Job to the Node. The Node - remains active and eligible to run Tasks of other Jobs. If false, the - Batch service will not wait for the Job Preparation Task to complete. In - this case, other Tasks of the Job can start executing on the Compute Node - while the Job Preparation Task is still running; and even if the Job - Preparation Task fails, new Tasks will continue to be scheduled on the - Compute Node. The default value is true. - :type wait_for_success: bool - :param user_identity: The user identity under which the Job Preparation - Task runs. If omitted, the Task runs as a non-administrative user unique - to the Task on Windows Compute Nodes, or a non-administrative user unique - to the Pool on Linux Compute Nodes. - :type user_identity: ~azure.batch.models.UserIdentity - :param rerun_on_node_reboot_after_success: Whether the Batch service - should rerun the Job Preparation Task after a Compute Node reboots. The - Job Preparation Task is always rerun if a Compute Node is reimaged, or if - the Job Preparation Task did not complete (e.g. because the reboot - occurred while the Task was running). Therefore, you should always write a - Job Preparation Task to be idempotent and to behave correctly if run - multiple times. The default value is true. 
- :type rerun_on_node_reboot_after_success: bool - """ - - _validation = { - 'command_line': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'command_line': {'key': 'commandLine', 'type': 'str'}, - 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, - 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, - 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, - 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, - 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, - 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, - 'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'}, - } - - def __init__(self, *, command_line: str, id: str=None, container_settings=None, resource_files=None, environment_settings=None, constraints=None, wait_for_success: bool=None, user_identity=None, rerun_on_node_reboot_after_success: bool=None, **kwargs) -> None: - super(JobPreparationTask, self).__init__(**kwargs) - self.id = id - self.command_line = command_line - self.container_settings = container_settings - self.resource_files = resource_files - self.environment_settings = environment_settings - self.constraints = constraints - self.wait_for_success = wait_for_success - self.user_identity = user_identity - self.rerun_on_node_reboot_after_success = rerun_on_node_reboot_after_success - - -class JobPreparationTaskExecutionInformation(Model): - """Contains information about the execution of a Job Preparation Task on a - Compute Node. - - All required parameters must be populated in order to send to Azure. - - :param start_time: Required. If the Task has been restarted or retried, - this is the most recent time at which the Task started running. - :type start_time: datetime - :param end_time: This property is set only if the Task is in the Completed - state. - :type end_time: datetime - :param state: Required. Possible values include: 'running', 'completed' - :type state: str or ~azure.batch.models.JobPreparationTaskState - :param task_root_directory: - :type task_root_directory: str - :param task_root_directory_url: - :type task_root_directory_url: str - :param exit_code: The exit code of the program specified on the Task - command line. This parameter is returned only if the Task is in the - completed state. The exit code for a process reflects the specific - convention implemented by the application developer for that process. If - you use the exit code value to make decisions in your code, be sure that - you know the exit code convention used by the application process. Note - that the exit code may also be generated by the Compute Node operating - system, such as when a process is forcibly terminated. - :type exit_code: int - :param container_info: Information about the container under which the - Task is executing. This property is set only if the Task runs in a - container context. - :type container_info: - ~azure.batch.models.TaskContainerExecutionInformation - :param failure_info: Information describing the Task failure, if any. This - property is set only if the Task is in the completed state and encountered - a failure. - :type failure_info: ~azure.batch.models.TaskFailureInformation - :param retry_count: Required. The number of times the Task has been - retried by the Batch service. 
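# --- Illustrative aside (editor's note, not part of the diff): a Job Preparation Task sketch.
# The command is written to be safe to re-run, since the Task can execute more than once on the
# same Node; fetch_inputs.sh is a placeholder script name.
from azure.batch import models

job_prep = models.JobPreparationTask(
    command_line="/bin/sh -c 'mkdir -p $AZ_BATCH_NODE_SHARED_DIR/data && ./fetch_inputs.sh'",
    wait_for_success=True,                    # hold other Tasks of the Job until this exits with 0
    rerun_on_node_reboot_after_success=True,  # runs again after a reboot, so keep it idempotent
)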
Task application failures (non-zero exit - code) are retried, pre-processing errors (the Task could not be run) and - file upload errors are not retried. The Batch service will retry the Task - up to the limit specified by the constraints. Task application failures - (non-zero exit code) are retried, pre-processing errors (the Task could - not be run) and file upload errors are not retried. The Batch service will - retry the Task up to the limit specified by the constraints. - :type retry_count: int - :param last_retry_time: This property is set only if the Task was retried - (i.e. retryCount is nonzero). If present, this is typically the same as - startTime, but may be different if the Task has been restarted for reasons - other than retry; for example, if the Compute Node was rebooted during a - retry, then the startTime is updated but the lastRetryTime is not. - :type last_retry_time: datetime - :param result: The result of the Task execution. If the value is 'failed', - then the details of the failure can be found in the failureInfo property. - Possible values include: 'success', 'failure' - :type result: str or ~azure.batch.models.TaskExecutionResult - """ - - _validation = { - 'start_time': {'required': True}, - 'state': {'required': True}, - 'retry_count': {'required': True}, - } - - _attribute_map = { - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'state': {'key': 'state', 'type': 'JobPreparationTaskState'}, - 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, - 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, - 'exit_code': {'key': 'exitCode', 'type': 'int'}, - 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, - 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, - 'retry_count': {'key': 'retryCount', 'type': 'int'}, - 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, - 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, - } - - def __init__(self, *, start_time, state, retry_count: int, end_time=None, task_root_directory: str=None, task_root_directory_url: str=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, result=None, **kwargs) -> None: - super(JobPreparationTaskExecutionInformation, self).__init__(**kwargs) - self.start_time = start_time - self.end_time = end_time - self.state = state - self.task_root_directory = task_root_directory - self.task_root_directory_url = task_root_directory_url - self.exit_code = exit_code - self.container_info = container_info - self.failure_info = failure_info - self.retry_count = retry_count - self.last_retry_time = last_retry_time - self.result = result - - -class JobReleaseTask(Model): - """A Job Release Task to run on Job completion on any Compute Node where the - Job has run. - - The Job Release Task runs when the Job ends, because of one of the - following: The user calls the Terminate Job API, or the Delete Job API - while the Job is still active, the Job's maximum wall clock time constraint - is reached, and the Job is still active, or the Job's Job Manager Task - completed, and the Job is configured to terminate when the Job Manager - completes. The Job Release Task runs on each Node where Tasks of the Job - have run and the Job Preparation Task ran and completed. 
If you reimage a - Node after it has run the Job Preparation Task, and the Job ends without - any further Tasks of the Job running on that Node (and hence the Job - Preparation Task does not re-run), then the Job Release Task does not run - on that Compute Node. If a Node reboots while the Job Release Task is still - running, the Job Release Task runs again when the Compute Node starts up. - The Job is not marked as complete until all Job Release Tasks have - completed. The Job Release Task runs in the background. It does not occupy - a scheduling slot; that is, it does not count towards the taskSlotsPerNode - limit specified on the Pool. - - All required parameters must be populated in order to send to Azure. - - :param id: The ID can contain any combination of alphanumeric characters - including hyphens and underscores and cannot contain more than 64 - characters. If you do not specify this property, the Batch service assigns - a default value of 'jobrelease'. No other Task in the Job can have the - same ID as the Job Release Task. If you try to submit a Task with the same - id, the Batch service rejects the request with error code - TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - :type id: str - :param command_line: Required. The command line does not run under a - shell, and therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of such - features, you should invoke the shell in the command line, for example - using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If - the command line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - :type command_line: str - :param container_settings: The settings for the container under which the - Job Release Task runs. When this is specified, all directories recursively - below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on - the node) are mapped into the container, all Task environment variables - are mapped into the container, and the Task command line is executed in - the container. Files produced in the container outside of - AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning - that Batch file APIs will not be able to access those files. - :type container_settings: ~azure.batch.models.TaskContainerSettings - :param resource_files: Files listed under this element are located in the - Task's working directory. - :type resource_files: list[~azure.batch.models.ResourceFile] - :param environment_settings: - :type environment_settings: list[~azure.batch.models.EnvironmentSetting] - :param max_wall_clock_time: - :type max_wall_clock_time: timedelta - :param retention_time: The default is 7 days, i.e. the Task directory will - be retained for 7 days unless the Compute Node is removed or the Job is - deleted. - :type retention_time: timedelta - :param user_identity: The user identity under which the Job Release Task - runs. If omitted, the Task runs as a non-administrative user unique to the - Task. 
- :type user_identity: ~azure.batch.models.UserIdentity - """ - - _validation = { - 'command_line': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'command_line': {'key': 'commandLine', 'type': 'str'}, - 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, - 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, - 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, - 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, - 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, - 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, - } - - def __init__(self, *, command_line: str, id: str=None, container_settings=None, resource_files=None, environment_settings=None, max_wall_clock_time=None, retention_time=None, user_identity=None, **kwargs) -> None: - super(JobReleaseTask, self).__init__(**kwargs) - self.id = id - self.command_line = command_line - self.container_settings = container_settings - self.resource_files = resource_files - self.environment_settings = environment_settings - self.max_wall_clock_time = max_wall_clock_time - self.retention_time = retention_time - self.user_identity = user_identity - - -class JobReleaseTaskExecutionInformation(Model): - """Contains information about the execution of a Job Release Task on a Compute - Node. - - All required parameters must be populated in order to send to Azure. - - :param start_time: Required. If the Task has been restarted or retried, - this is the most recent time at which the Task started running. - :type start_time: datetime - :param end_time: This property is set only if the Task is in the Completed - state. - :type end_time: datetime - :param state: Required. Possible values include: 'running', 'completed' - :type state: str or ~azure.batch.models.JobReleaseTaskState - :param task_root_directory: - :type task_root_directory: str - :param task_root_directory_url: - :type task_root_directory_url: str - :param exit_code: The exit code of the program specified on the Task - command line. This parameter is returned only if the Task is in the - completed state. The exit code for a process reflects the specific - convention implemented by the application developer for that process. If - you use the exit code value to make decisions in your code, be sure that - you know the exit code convention used by the application process. Note - that the exit code may also be generated by the Compute Node operating - system, such as when a process is forcibly terminated. - :type exit_code: int - :param container_info: Information about the container under which the - Task is executing. This property is set only if the Task runs in a - container context. - :type container_info: - ~azure.batch.models.TaskContainerExecutionInformation - :param failure_info: Information describing the Task failure, if any. This - property is set only if the Task is in the completed state and encountered - a failure. - :type failure_info: ~azure.batch.models.TaskFailureInformation - :param result: The result of the Task execution. If the value is 'failed', - then the details of the failure can be found in the failureInfo property. 
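# A minimal sketch (assumed usage) of how the legacy msrest-based JobReleaseTask
# defined above was typically populated. Only command_line is required, and
# retention_time defaults to 7 days when omitted; the command and setting values
# below are hypothetical.
import datetime
import azure.batch.models as batchmodels

job_release_task = batchmodels.JobReleaseTask(
    # The command line is not run under a shell, so invoke one explicitly.
    command_line="/bin/sh -c 'rm -rf $AZ_BATCH_NODE_SHARED_DIR/myjob-scratch'",
    retention_time=datetime.timedelta(days=1),
    environment_settings=[
        batchmodels.EnvironmentSetting(name="CLEANUP_MODE", value="full"),
    ],
)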
- Possible values include: 'success', 'failure' - :type result: str or ~azure.batch.models.TaskExecutionResult - """ - - _validation = { - 'start_time': {'required': True}, - 'state': {'required': True}, - } - - _attribute_map = { - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'state': {'key': 'state', 'type': 'JobReleaseTaskState'}, - 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, - 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, - 'exit_code': {'key': 'exitCode', 'type': 'int'}, - 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, - 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, - 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, - } - - def __init__(self, *, start_time, state, end_time=None, task_root_directory: str=None, task_root_directory_url: str=None, exit_code: int=None, container_info=None, failure_info=None, result=None, **kwargs) -> None: - super(JobReleaseTaskExecutionInformation, self).__init__(**kwargs) - self.start_time = start_time - self.end_time = end_time - self.state = state - self.task_root_directory = task_root_directory - self.task_root_directory_url = task_root_directory_url - self.exit_code = exit_code - self.container_info = container_info - self.failure_info = failure_info - self.result = result - - -class JobScheduleAddOptions(Model): - """Additional parameters for add operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(JobScheduleAddOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class JobScheduleAddParameter(Model): - """A Job Schedule that allows recurring Jobs by specifying when to run Jobs - and a specification used to create each Job. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. The ID can contain any combination of alphanumeric - characters including hyphens and underscores, and cannot contain more than - 64 characters. The ID is case-preserving and case-insensitive (that is, - you may not have two IDs within an Account that differ only by case). 
- :type id: str - :param display_name: The display name need not be unique and can contain - any Unicode characters up to a maximum length of 1024. - :type display_name: str - :param schedule: Required. The schedule according to which Jobs will be - created. All times are fixed respective to UTC and are not impacted by - daylight saving time. - :type schedule: ~azure.batch.models.Schedule - :param job_specification: Required. The details of the Jobs to be created - on this schedule. - :type job_specification: ~azure.batch.models.JobSpecification - :param metadata: The Batch service does not assign any meaning to - metadata; it is solely for the use of user code. - :type metadata: list[~azure.batch.models.MetadataItem] - """ - - _validation = { - 'id': {'required': True}, - 'schedule': {'required': True}, - 'job_specification': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'schedule': {'key': 'schedule', 'type': 'Schedule'}, - 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - } - - def __init__(self, *, id: str, schedule, job_specification, display_name: str=None, metadata=None, **kwargs) -> None: - super(JobScheduleAddParameter, self).__init__(**kwargs) - self.id = id - self.display_name = display_name - self.schedule = schedule - self.job_specification = job_specification - self.metadata = metadata - - -class JobScheduleDeleteOptions(Model): - """Additional parameters for delete operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
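# A minimal sketch (assumed usage) of building the JobScheduleAddParameter
# described above: id, schedule and job_specification are required.
# Schedule.recurrence_interval and PoolInformation.pool_id are legacy-model
# fields assumed here, and the IDs are hypothetical.
import datetime
import azure.batch.models as batchmodels

schedule_to_add = batchmodels.JobScheduleAddParameter(
    id="nightly-reports",
    schedule=batchmodels.Schedule(recurrence_interval=datetime.timedelta(days=1)),
    job_specification=batchmodels.JobSpecification(
        pool_info=batchmodels.PoolInformation(pool_id="report-pool"),
    ),
)
# With the legacy BatchServiceClient this was typically submitted via
# batch_client.job_schedule.add(schedule_to_add).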
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobScheduleDeleteOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobScheduleDisableOptions(Model): - """Additional parameters for disable operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobScheduleDisableOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobScheduleEnableOptions(Model): - """Additional parameters for enable operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
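# A minimal sketch (assumed usage) of the ETag pattern these *Options models
# describe: perform the operation only if the resource is unchanged since it was
# last read. The e_tag attribute and the job_schedule_delete_options keyword are
# legacy-SDK assumptions; batch_client stands for an authenticated legacy
# BatchServiceClient.
import azure.batch.models as batchmodels

def delete_schedule_if_unchanged(batch_client, job_schedule_id):
    current = batch_client.job_schedule.get(job_schedule_id)
    options = batchmodels.JobScheduleDeleteOptions(if_match=current.e_tag)
    batch_client.job_schedule.delete(
        job_schedule_id, job_schedule_delete_options=options
    )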
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobScheduleEnableOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobScheduleExecutionInformation(Model): - """Contains information about Jobs that have been and will be run under a Job - Schedule. - - :param next_run_time: This property is meaningful only if the schedule is - in the active state when the time comes around. For example, if the - schedule is disabled, no Job will be created at nextRunTime unless the Job - is enabled before then. - :type next_run_time: datetime - :param recent_job: Information about the most recent Job under the Job - Schedule. This property is present only if the at least one Job has run - under the schedule. - :type recent_job: ~azure.batch.models.RecentJob - :param end_time: This property is set only if the Job Schedule is in the - completed state. - :type end_time: datetime - """ - - _attribute_map = { - 'next_run_time': {'key': 'nextRunTime', 'type': 'iso-8601'}, - 'recent_job': {'key': 'recentJob', 'type': 'RecentJob'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - } - - def __init__(self, *, next_run_time=None, recent_job=None, end_time=None, **kwargs) -> None: - super(JobScheduleExecutionInformation, self).__init__(**kwargs) - self.next_run_time = next_run_time - self.recent_job = recent_job - self.end_time = end_time - - -class JobScheduleExistsOptions(Model): - """Additional parameters for exists operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. 
- :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobScheduleExistsOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobScheduleGetOptions(Model): - """Additional parameters for get operation. - - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. 
The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobScheduleGetOptions, self).__init__(**kwargs) - self.select = select - self.expand = expand - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobScheduleListOptions(Model): - """Additional parameters for list operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Job Schedules can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(JobScheduleListOptions, self).__init__(**kwargs) - self.filter = filter - self.select = select - self.expand = expand - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class JobSchedulePatchOptions(Model): - """Additional parameters for patch operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobSchedulePatchOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobSchedulePatchParameter(Model): - """The set of changes to be made to a Job Schedule. - - :param schedule: The schedule according to which Jobs will be created. All - times are fixed respective to UTC and are not impacted by daylight saving - time. If you do not specify this element, the existing schedule is left - unchanged. - :type schedule: ~azure.batch.models.Schedule - :param job_specification: The details of the Jobs to be created on this - schedule. Updates affect only Jobs that are started after the update has - taken place. Any currently active Job continues with the older - specification. - :type job_specification: ~azure.batch.models.JobSpecification - :param metadata: If you do not specify this element, existing metadata is - left unchanged. - :type metadata: list[~azure.batch.models.MetadataItem] - """ - - _attribute_map = { - 'schedule': {'key': 'schedule', 'type': 'Schedule'}, - 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - } - - def __init__(self, *, schedule=None, job_specification=None, metadata=None, **kwargs) -> None: - super(JobSchedulePatchParameter, self).__init__(**kwargs) - self.schedule = schedule - self.job_specification = job_specification - self.metadata = metadata - - -class JobScheduleStatistics(Model): - """Resource usage statistics for a Job Schedule. - - All required parameters must be populated in order to send to Azure. - - :param url: Required. - :type url: str - :param start_time: Required. - :type start_time: datetime - :param last_update_time: Required. - :type last_update_time: datetime - :param user_cpu_time: Required. - :type user_cpu_time: timedelta - :param kernel_cpu_time: Required. - :type kernel_cpu_time: timedelta - :param wall_clock_time: Required. The wall clock time is the elapsed time - from when the Task started running on a Compute Node to when it finished - (or to the last time the statistics were updated, if the Task had not - finished by then). If a Task was retried, this includes the wall clock - time of all the Task retries. - :type wall_clock_time: timedelta - :param read_iops: Required. The total number of disk read operations made - by all Tasks in all Jobs created under the schedule. - :type read_iops: long - :param write_iops: Required. The total number of disk write operations - made by all Tasks in all Jobs created under the schedule. 
- :type write_iops: long - :param read_io_gi_b: Required. The total gibibytes read from disk by all - Tasks in all Jobs created under the schedule. - :type read_io_gi_b: float - :param write_io_gi_b: Required. The total gibibytes written to disk by all - Tasks in all Jobs created under the schedule. - :type write_io_gi_b: float - :param num_succeeded_tasks: Required. The total number of Tasks - successfully completed during the given time range in Jobs created under - the schedule. A Task completes successfully if it returns exit code 0. - :type num_succeeded_tasks: long - :param num_failed_tasks: Required. The total number of Tasks that failed - during the given time range in Jobs created under the schedule. A Task - fails if it exhausts its maximum retry count without returning exit code - 0. - :type num_failed_tasks: long - :param num_task_retries: Required. The total number of retries during the - given time range on all Tasks in all Jobs created under the schedule. - :type num_task_retries: long - :param wait_time: Required. This value is only reported in the Account - lifetime statistics; it is not included in the Job statistics. - :type wait_time: timedelta - """ - - _validation = { - 'url': {'required': True}, - 'start_time': {'required': True}, - 'last_update_time': {'required': True}, - 'user_cpu_time': {'required': True}, - 'kernel_cpu_time': {'required': True}, - 'wall_clock_time': {'required': True}, - 'read_iops': {'required': True}, - 'write_iops': {'required': True}, - 'read_io_gi_b': {'required': True}, - 'write_io_gi_b': {'required': True}, - 'num_succeeded_tasks': {'required': True}, - 'num_failed_tasks': {'required': True}, - 'num_task_retries': {'required': True}, - 'wait_time': {'required': True}, - } - - _attribute_map = { - 'url': {'key': 'url', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, - 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, - 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, - 'read_iops': {'key': 'readIOps', 'type': 'long'}, - 'write_iops': {'key': 'writeIOps', 'type': 'long'}, - 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, - 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, - 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, - 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, - 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, - 'wait_time': {'key': 'waitTime', 'type': 'duration'}, - } - - def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, write_iops: int, read_io_gi_b: float, write_io_gi_b: float, num_succeeded_tasks: int, num_failed_tasks: int, num_task_retries: int, wait_time, **kwargs) -> None: - super(JobScheduleStatistics, self).__init__(**kwargs) - self.url = url - self.start_time = start_time - self.last_update_time = last_update_time - self.user_cpu_time = user_cpu_time - self.kernel_cpu_time = kernel_cpu_time - self.wall_clock_time = wall_clock_time - self.read_iops = read_iops - self.write_iops = write_iops - self.read_io_gi_b = read_io_gi_b - self.write_io_gi_b = write_io_gi_b - self.num_succeeded_tasks = num_succeeded_tasks - self.num_failed_tasks = num_failed_tasks - self.num_task_retries = num_task_retries - self.wait_time = wait_time - - -class JobScheduleTerminateOptions(Model): - """Additional parameters 
for terminate operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobScheduleTerminateOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobScheduleUpdateOptions(Model): - """Additional parameters for update operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
- :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobScheduleUpdateOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobScheduleUpdateParameter(Model): - """The set of changes to be made to a Job Schedule. - - All required parameters must be populated in order to send to Azure. - - :param schedule: Required. The schedule according to which Jobs will be - created. All times are fixed respective to UTC and are not impacted by - daylight saving time. If you do not specify this element, it is equivalent - to passing the default schedule: that is, a single Job scheduled to run - immediately. - :type schedule: ~azure.batch.models.Schedule - :param job_specification: Required. Details of the Jobs to be created on - this schedule. Updates affect only Jobs that are started after the update - has taken place. Any currently active Job continues with the older - specification. - :type job_specification: ~azure.batch.models.JobSpecification - :param metadata: If you do not specify this element, it takes the default - value of an empty list; in effect, any existing metadata is deleted. 
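# A minimal sketch (assumed usage) contrasting the patch and update parameter
# models described here: patch leaves unspecified elements unchanged on the
# service, while update replaces them (omitted metadata is cleared). Pool and
# schedule values are hypothetical.
import azure.batch.models as batchmodels

new_spec = batchmodels.JobSpecification(
    pool_info=batchmodels.PoolInformation(pool_id="report-pool-v2"),
)

# Partial change: the existing schedule and metadata are left as they are.
patch_parameter = batchmodels.JobSchedulePatchParameter(job_specification=new_spec)

# Full replacement: schedule and job_specification must both be supplied.
update_parameter = batchmodels.JobScheduleUpdateParameter(
    schedule=batchmodels.Schedule(),
    job_specification=new_spec,
)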
- :type metadata: list[~azure.batch.models.MetadataItem]
- """
-
- _validation = {
- 'schedule': {'required': True},
- 'job_specification': {'required': True},
- }
-
- _attribute_map = {
- 'schedule': {'key': 'schedule', 'type': 'Schedule'},
- 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
- 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
- }
-
- def __init__(self, *, schedule, job_specification, metadata=None, **kwargs) -> None:
- super(JobScheduleUpdateParameter, self).__init__(**kwargs)
- self.schedule = schedule
- self.job_specification = job_specification
- self.metadata = metadata
-
-
- class JobSchedulingError(Model):
- """An error encountered by the Batch service when scheduling a Job.
-
- All required parameters must be populated in order to send to Azure.
-
- :param category: Required. The category of the Job scheduling error.
- Possible values include: 'userError', 'serverError'
- :type category: str or ~azure.batch.models.ErrorCategory
- :param code:
- :type code: str
- :param message:
- :type message: str
- :param details:
- :type details: list[~azure.batch.models.NameValuePair]
- """
-
- _validation = {
- 'category': {'required': True},
- }
-
- _attribute_map = {
- 'category': {'key': 'category', 'type': 'ErrorCategory'},
- 'code': {'key': 'code', 'type': 'str'},
- 'message': {'key': 'message', 'type': 'str'},
- 'details': {'key': 'details', 'type': '[NameValuePair]'},
- }
-
- def __init__(self, *, category, code: str=None, message: str=None, details=None, **kwargs) -> None:
- super(JobSchedulingError, self).__init__(**kwargs)
- self.category = category
- self.code = code
- self.message = message
- self.details = details
-
-
- class JobSpecification(Model):
- """Specifies details of the Jobs to be created on a schedule.
-
- All required parameters must be populated in order to send to Azure.
-
- :param priority: The priority of Jobs created under this schedule.
- Priority values can range from -1000 to 1000, with -1000 being the lowest
- priority and 1000 being the highest priority. The default value is 0. This
- priority is used as the default for all Jobs under the Job Schedule. You
- can update a Job's priority after it has been created by using the
- update Job API.
- :type priority: int
- :param allow_task_preemption: Whether Tasks in this job can be preempted
- by other high priority jobs. If the value is set to True, other high
- priority jobs submitted to the system will take precedence and will be
- able to requeue tasks from this job. You can update a job's
- allowTaskPreemption after it has been created using the update job API.
- :type allow_task_preemption: bool
- :param max_parallel_tasks: The maximum number of tasks that can be
- executed in parallel for the job. The value of maxParallelTasks must be -1
- or greater than 0 if specified. If not specified, the default value is -1,
- which means there's no limit to the number of tasks that can be run at
- once. You can update a job's maxParallelTasks after it has been created
- using the update job API. Default value: -1 .
- :type max_parallel_tasks: int
- :param display_name: The name need not be unique and can contain any
- Unicode characters up to a maximum length of 1024.
- :type display_name: str
- :param uses_task_dependencies: Whether Tasks in the Job can define
- dependencies on each other. The default is false.
- :type uses_task_dependencies: bool
- :param on_all_tasks_complete: The action the Batch service should take
- when all Tasks in a Job created under this schedule are in the completed
- state. Note that if a Job contains no Tasks, then all Tasks are considered
- complete. This option is therefore most commonly used with a Job Manager
- task; if you want to use automatic Job termination without a Job Manager,
- you should initially set onAllTasksComplete to noaction and update the Job
- properties to set onAllTasksComplete to terminatejob once you have
- finished adding Tasks. The default is noaction. Possible values include:
- 'noAction', 'terminateJob'
- :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete
- :param on_task_failure: The action the Batch service should take when any
- Task fails in a Job created under this schedule. A Task is considered to
- have failed if it has a failureInfo. A failureInfo is set
- if the Task completes with a non-zero exit code after exhausting its retry
- count, or if there was an error starting the Task, for example due to a
- resource file download error. The default is noaction. Possible values
- include: 'noAction', 'performExitOptionsJobAction'
- :type on_task_failure: str or ~azure.batch.models.OnTaskFailure
- :param network_configuration: The network configuration for the Job.
- :type network_configuration: ~azure.batch.models.JobNetworkConfiguration
- :param constraints: The execution constraints for Jobs created under this
- schedule.
- :type constraints: ~azure.batch.models.JobConstraints
- :param job_manager_task: The details of a Job Manager Task to be launched
- when a Job is started under this schedule. If the Job does not specify a
- Job Manager Task, the user must explicitly add Tasks to the Job using the
- Task API. If the Job does specify a Job Manager Task, the Batch service
- creates the Job Manager Task when the Job is created, and will try to
- schedule the Job Manager Task before scheduling other Tasks in the Job.
- :type job_manager_task: ~azure.batch.models.JobManagerTask
- :param job_preparation_task: The Job Preparation Task for Jobs created
- under this schedule. If a Job has a Job Preparation Task, the Batch
- service will run the Job Preparation Task on a Node before starting any
- Tasks of that Job on that Compute Node.
- :type job_preparation_task: ~azure.batch.models.JobPreparationTask
- :param job_release_task: The Job Release Task for Jobs created under this
- schedule. The primary purpose of the Job Release Task is to undo changes
- to Nodes made by the Job Preparation Task. Example activities include
- deleting local files, or shutting down services that were started as part
- of Job preparation. A Job Release Task cannot be specified without also
- specifying a Job Preparation Task for the Job. The Batch service runs the
- Job Release Task on the Compute Nodes that have run the Job Preparation
- Task.
- :type job_release_task: ~azure.batch.models.JobReleaseTask
- :param common_environment_settings: Individual Tasks can override an
- environment setting specified here by specifying the same setting name
- with a different value.
- :type common_environment_settings:
- list[~azure.batch.models.EnvironmentSetting]
- :param pool_info: Required. The Pool on which the Batch service runs the
- Tasks of Jobs created under this schedule.
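# A minimal sketch (assumed usage) of the onAllTasksComplete workflow described
# above: keep the Job at noAction while Tasks are being added, then switch it to
# terminateJob. JobPatchParameter and the job.patch / task.add calls are
# legacy-SDK assumptions; batch_client stands for an authenticated legacy
# BatchServiceClient.
import azure.batch.models as batchmodels

def add_tasks_then_auto_terminate(batch_client, job_id, tasks):
    # Do not let the Job complete while Tasks are still being submitted.
    batch_client.job.patch(
        job_id, batchmodels.JobPatchParameter(on_all_tasks_complete="noAction")
    )
    for task in tasks:
        batch_client.task.add(job_id, task)
    # Everything is submitted; terminate the Job once all Tasks finish.
    batch_client.job.patch(
        job_id, batchmodels.JobPatchParameter(on_all_tasks_complete="terminateJob")
    )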
- :type pool_info: ~azure.batch.models.PoolInformation - :param metadata: The Batch service does not assign any meaning to - metadata; it is solely for the use of user code. - :type metadata: list[~azure.batch.models.MetadataItem] - """ - - _validation = { - 'pool_info': {'required': True}, - } - - _attribute_map = { - 'priority': {'key': 'priority', 'type': 'int'}, - 'allow_task_preemption': {'key': 'allowTaskPreemption', 'type': 'bool'}, - 'max_parallel_tasks': {'key': 'maxParallelTasks', 'type': 'int'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, - 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, - 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, - 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, - 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, - 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, - 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, - 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, - 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, - 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - } - - def __init__(self, *, pool_info, priority: int=None, allow_task_preemption: bool=None, max_parallel_tasks: int=-1, display_name: str=None, uses_task_dependencies: bool=None, on_all_tasks_complete=None, on_task_failure=None, network_configuration=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, metadata=None, **kwargs) -> None: - super(JobSpecification, self).__init__(**kwargs) - self.priority = priority - self.allow_task_preemption = allow_task_preemption - self.max_parallel_tasks = max_parallel_tasks - self.display_name = display_name - self.uses_task_dependencies = uses_task_dependencies - self.on_all_tasks_complete = on_all_tasks_complete - self.on_task_failure = on_task_failure - self.network_configuration = network_configuration - self.constraints = constraints - self.job_manager_task = job_manager_task - self.job_preparation_task = job_preparation_task - self.job_release_task = job_release_task - self.common_environment_settings = common_environment_settings - self.pool_info = pool_info - self.metadata = metadata - - -class JobStatistics(Model): - """Resource usage statistics for a Job. - - All required parameters must be populated in order to send to Azure. - - :param url: Required. - :type url: str - :param start_time: Required. - :type start_time: datetime - :param last_update_time: Required. - :type last_update_time: datetime - :param user_cpu_time: Required. - :type user_cpu_time: timedelta - :param kernel_cpu_time: Required. - :type kernel_cpu_time: timedelta - :param wall_clock_time: Required. The wall clock time is the elapsed time - from when the Task started running on a Compute Node to when it finished - (or to the last time the statistics were updated, if the Task had not - finished by then). If a Task was retried, this includes the wall clock - time of all the Task retries. - :type wall_clock_time: timedelta - :param read_iops: Required. The total number of disk read operations made - by all Tasks in the Job. - :type read_iops: long - :param write_iops: Required. 
The total number of disk write operations - made by all Tasks in the Job. - :type write_iops: long - :param read_io_gi_b: Required. The total amount of data in GiB read from - disk by all Tasks in the Job. - :type read_io_gi_b: float - :param write_io_gi_b: Required. The total amount of data in GiB written to - disk by all Tasks in the Job. - :type write_io_gi_b: float - :param num_succeeded_tasks: Required. The total number of Tasks - successfully completed in the Job during the given time range. A Task - completes successfully if it returns exit code 0. - :type num_succeeded_tasks: long - :param num_failed_tasks: Required. The total number of Tasks in the Job - that failed during the given time range. A Task fails if it exhausts its - maximum retry count without returning exit code 0. - :type num_failed_tasks: long - :param num_task_retries: Required. The total number of retries on all the - Tasks in the Job during the given time range. - :type num_task_retries: long - :param wait_time: Required. The wait time for a Task is defined as the - elapsed time between the creation of the Task and the start of Task - execution. (If the Task is retried due to failures, the wait time is the - time to the most recent Task execution.) This value is only reported in - the Account lifetime statistics; it is not included in the Job statistics. - :type wait_time: timedelta - """ - - _validation = { - 'url': {'required': True}, - 'start_time': {'required': True}, - 'last_update_time': {'required': True}, - 'user_cpu_time': {'required': True}, - 'kernel_cpu_time': {'required': True}, - 'wall_clock_time': {'required': True}, - 'read_iops': {'required': True}, - 'write_iops': {'required': True}, - 'read_io_gi_b': {'required': True}, - 'write_io_gi_b': {'required': True}, - 'num_succeeded_tasks': {'required': True}, - 'num_failed_tasks': {'required': True}, - 'num_task_retries': {'required': True}, - 'wait_time': {'required': True}, - } - - _attribute_map = { - 'url': {'key': 'url', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, - 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, - 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, - 'read_iops': {'key': 'readIOps', 'type': 'long'}, - 'write_iops': {'key': 'writeIOps', 'type': 'long'}, - 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, - 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, - 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, - 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, - 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, - 'wait_time': {'key': 'waitTime', 'type': 'duration'}, - } - - def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, write_iops: int, read_io_gi_b: float, write_io_gi_b: float, num_succeeded_tasks: int, num_failed_tasks: int, num_task_retries: int, wait_time, **kwargs) -> None: - super(JobStatistics, self).__init__(**kwargs) - self.url = url - self.start_time = start_time - self.last_update_time = last_update_time - self.user_cpu_time = user_cpu_time - self.kernel_cpu_time = kernel_cpu_time - self.wall_clock_time = wall_clock_time - self.read_iops = read_iops - self.write_iops = write_iops - self.read_io_gi_b = read_io_gi_b - self.write_io_gi_b = write_io_gi_b - self.num_succeeded_tasks = num_succeeded_tasks 
- self.num_failed_tasks = num_failed_tasks - self.num_task_retries = num_task_retries - self.wait_time = wait_time - - -class JobTerminateOptions(Model): - """Additional parameters for terminate operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobTerminateOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobTerminateParameter(Model): - """Options when terminating a Job. 
- - :param terminate_reason: - :type terminate_reason: str - """ - - _attribute_map = { - 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, - } - - def __init__(self, *, terminate_reason: str=None, **kwargs) -> None: - super(JobTerminateParameter, self).__init__(**kwargs) - self.terminate_reason = terminate_reason - - -class JobUpdateOptions(Model): - """Additional parameters for update operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(JobUpdateOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class JobUpdateParameter(Model): - """The set of changes to be made to a Job. - - All required parameters must be populated in order to send to Azure. - - :param priority: The priority of the Job. 
Priority values can range from - -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. If omitted, it is set to the default value 0. - :type priority: int - :param max_parallel_tasks: The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 - or greater than 0 if specified. If not specified, the default value is -1, - which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created - using the update job API. Default value: -1 . - :type max_parallel_tasks: int - :param allow_task_preemption: Whether Tasks in this job can be preempted - by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be - able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - :type allow_task_preemption: bool - :param constraints: The execution constraints for the Job. If omitted, the - constraints are cleared. - :type constraints: ~azure.batch.models.JobConstraints - :param pool_info: Required. The Pool on which the Batch service runs the - Job's Tasks. You may change the Pool for a Job only when the Job is - disabled. The Update Job call will fail if you include the poolInfo - element and the Job is not disabled. If you specify an - autoPoolSpecification in the poolInfo, only the keepAlive property of the - autoPoolSpecification can be updated, and then only if the - autoPoolSpecification has a poolLifetimeOption of Job (other job - properties can be updated as normal). - :type pool_info: ~azure.batch.models.PoolInformation - :param metadata: If omitted, it takes the default value of an empty list; - in effect, any existing metadata is deleted. - :type metadata: list[~azure.batch.models.MetadataItem] - :param on_all_tasks_complete: The action the Batch service should take - when all Tasks in the Job are in the completed state. If omitted, the - completion behavior is set to noaction. If the current value is - terminatejob, this is an error because a Job's completion behavior may not - be changed from terminatejob to noaction. You may not change the value - from terminatejob to noaction - that is, once you have engaged automatic - Job termination, you cannot turn it off again. If you try to do this, the - request fails and Batch returns status code 400 (Bad Request) and an - 'invalid property value' error response. If you do not specify this - element in a PUT request, it is equivalent to passing noaction. This is an - error if the current value is terminatejob. 
Possible values include: - 'noAction', 'terminateJob' - :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete - """ - - _validation = { - 'pool_info': {'required': True}, - } - - _attribute_map = { - 'priority': {'key': 'priority', 'type': 'int'}, - 'max_parallel_tasks': {'key': 'maxParallelTasks', 'type': 'int'}, - 'allow_task_preemption': {'key': 'allowTaskPreemption', 'type': 'bool'}, - 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, - 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, - } - - def __init__(self, *, pool_info, priority: int=None, max_parallel_tasks: int=-1, allow_task_preemption: bool=None, constraints=None, metadata=None, on_all_tasks_complete=None, **kwargs) -> None: - super(JobUpdateParameter, self).__init__(**kwargs) - self.priority = priority - self.max_parallel_tasks = max_parallel_tasks - self.allow_task_preemption = allow_task_preemption - self.constraints = constraints - self.pool_info = pool_info - self.metadata = metadata - self.on_all_tasks_complete = on_all_tasks_complete - - -class LinuxUserConfiguration(Model): - """Properties used to create a user Account on a Linux Compute Node. - - :param uid: The user ID of the user Account. The uid and gid properties - must be specified together or not at all. If not specified the underlying - operating system picks the uid. - :type uid: int - :param gid: The group ID for the user Account. The uid and gid properties - must be specified together or not at all. If not specified the underlying - operating system picks the gid. - :type gid: int - :param ssh_private_key: The private key must not be password protected. - The private key is used to automatically configure asymmetric-key based - authentication for SSH between Compute Nodes in a Linux Pool when the - Pool's enableInterNodeCommunication property is true (it is ignored if - enableInterNodeCommunication is false). It does this by placing the key - pair into the user's .ssh directory. If not specified, password-less SSH - is not configured between Compute Nodes (no modification of the user's - .ssh directory is done). - :type ssh_private_key: str - """ - - _attribute_map = { - 'uid': {'key': 'uid', 'type': 'int'}, - 'gid': {'key': 'gid', 'type': 'int'}, - 'ssh_private_key': {'key': 'sshPrivateKey', 'type': 'str'}, - } - - def __init__(self, *, uid: int=None, gid: int=None, ssh_private_key: str=None, **kwargs) -> None: - super(LinuxUserConfiguration, self).__init__(**kwargs) - self.uid = uid - self.gid = gid - self.ssh_private_key = ssh_private_key - - -class ManagedDisk(Model): - """ManagedDisk. - - :param storage_account_type: The storage account type for managed disk. - Possible values include: 'StandardLRS', 'PremiumLRS', 'StandardSSDLRS' - :type storage_account_type: str or ~azure.batch.models.StorageAccountType - """ - - _attribute_map = { - 'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'}, - } - - def __init__(self, *, storage_account_type=None, **kwargs) -> None: - super(ManagedDisk, self).__init__(**kwargs) - self.storage_account_type = storage_account_type - - -class MetadataItem(Model): - """A name-value pair associated with a Batch service resource. - - The Batch service does not assign any meaning to this metadata; it is - solely for the use of user code. 
- - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param value: Required. - :type value: str - """ - - _validation = { - 'name': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__(self, *, name: str, value: str, **kwargs) -> None: - super(MetadataItem, self).__init__(**kwargs) - self.name = name - self.value = value - - -class MountConfiguration(Model): - """The file system to mount on each node. - - :param azure_blob_file_system_configuration: The Azure Storage Container - to mount using blob FUSE on each node. This property is mutually exclusive - with all other properties. - :type azure_blob_file_system_configuration: - ~azure.batch.models.AzureBlobFileSystemConfiguration - :param nfs_mount_configuration: The NFS file system to mount on each node. - This property is mutually exclusive with all other properties. - :type nfs_mount_configuration: ~azure.batch.models.NFSMountConfiguration - :param cifs_mount_configuration: The CIFS/SMB file system to mount on each - node. This property is mutually exclusive with all other properties. - :type cifs_mount_configuration: ~azure.batch.models.CIFSMountConfiguration - :param azure_file_share_configuration: The Azure File Share to mount on - each node. This property is mutually exclusive with all other properties. - :type azure_file_share_configuration: - ~azure.batch.models.AzureFileShareConfiguration - """ - - _attribute_map = { - 'azure_blob_file_system_configuration': {'key': 'azureBlobFileSystemConfiguration', 'type': 'AzureBlobFileSystemConfiguration'}, - 'nfs_mount_configuration': {'key': 'nfsMountConfiguration', 'type': 'NFSMountConfiguration'}, - 'cifs_mount_configuration': {'key': 'cifsMountConfiguration', 'type': 'CIFSMountConfiguration'}, - 'azure_file_share_configuration': {'key': 'azureFileShareConfiguration', 'type': 'AzureFileShareConfiguration'}, - } - - def __init__(self, *, azure_blob_file_system_configuration=None, nfs_mount_configuration=None, cifs_mount_configuration=None, azure_file_share_configuration=None, **kwargs) -> None: - super(MountConfiguration, self).__init__(**kwargs) - self.azure_blob_file_system_configuration = azure_blob_file_system_configuration - self.nfs_mount_configuration = nfs_mount_configuration - self.cifs_mount_configuration = cifs_mount_configuration - self.azure_file_share_configuration = azure_file_share_configuration - - -class MultiInstanceSettings(Model): - """Settings which specify how to run a multi-instance Task. - - Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI - case, if any of the subtasks fail (for example due to exiting with a - non-zero exit code) the entire multi-instance Task fails. The - multi-instance Task is then terminated and retried, up to its retry limit. - - All required parameters must be populated in order to send to Azure. - - :param number_of_instances: The number of Compute Nodes required by the - Task. If omitted, the default is 1. - :type number_of_instances: int - :param coordination_command_line: Required. A typical coordination command - line launches a background service and verifies that the service is ready - to process inter-node messages. 
- :type coordination_command_line: str - :param common_resource_files: The difference between common resource files - and Task resource files is that common resource files are downloaded for - all subtasks including the primary, whereas Task resource files are - downloaded only for the primary. Also note that these resource files are - not downloaded to the Task working directory, but instead are downloaded - to the Task root directory (one directory above the working directory). - There is a maximum size for the list of resource files. When the max size - is exceeded, the request will fail and the response error code will be - RequestEntityTooLarge. If this occurs, the collection of ResourceFiles - must be reduced in size. This can be achieved using .zip files, - Application Packages, or Docker Containers. - :type common_resource_files: list[~azure.batch.models.ResourceFile] - """ - - _validation = { - 'coordination_command_line': {'required': True}, - } - - _attribute_map = { - 'number_of_instances': {'key': 'numberOfInstances', 'type': 'int'}, - 'coordination_command_line': {'key': 'coordinationCommandLine', 'type': 'str'}, - 'common_resource_files': {'key': 'commonResourceFiles', 'type': '[ResourceFile]'}, - } - - def __init__(self, *, coordination_command_line: str, number_of_instances: int=None, common_resource_files=None, **kwargs) -> None: - super(MultiInstanceSettings, self).__init__(**kwargs) - self.number_of_instances = number_of_instances - self.coordination_command_line = coordination_command_line - self.common_resource_files = common_resource_files - - -class NameValuePair(Model): - """Represents a name-value pair. - - :param name: - :type name: str - :param value: - :type value: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__(self, *, name: str=None, value: str=None, **kwargs) -> None: - super(NameValuePair, self).__init__(**kwargs) - self.name = name - self.value = value - - -class NetworkConfiguration(Model): - """The network configuration for a Pool. - - :param subnet_id: The virtual network must be in the same region and - subscription as the Azure Batch Account. The specified subnet should have - enough free IP addresses to accommodate the number of Compute Nodes in the - Pool. If the subnet doesn't have enough free IP addresses, the Pool will - partially allocate Nodes and a resize error will occur. The - 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual - Machine Contributor' Role-Based Access Control (RBAC) role for the - specified VNet. The specified subnet must allow communication from the - Azure Batch service to be able to schedule Tasks on the Nodes. This can be - verified by checking if the specified VNet has any associated Network - Security Groups (NSG). If communication to the Nodes in the specified - subnet is denied by an NSG, then the Batch service will set the state of - the Compute Nodes to unusable. For Pools created with - virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported, but for Pools created - with cloudServiceConfiguration both ARM and classic virtual networks are - supported. If the specified VNet has any associated Network Security - Groups (NSG), then a few reserved system ports must be enabled for inbound - communication. For Pools created with a virtual machine configuration, - enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 - for Windows. 
For Pools created with a cloud service configuration, enable - ports 10100, 20100, and 30100. Also enable outbound connections to Azure - Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration - :type subnet_id: str - :param dynamic_vnet_assignment_scope: Possible values include: 'none', - 'job' - :type dynamic_vnet_assignment_scope: str or - ~azure.batch.models.DynamicVNetAssignmentScope - :param endpoint_configuration: The configuration for endpoints on Compute - Nodes in the Batch Pool. Pool endpoint configuration is only supported on - Pools with the virtualMachineConfiguration property. - :type endpoint_configuration: - ~azure.batch.models.PoolEndpointConfiguration - :param public_ip_address_configuration: The Public IPAddress configuration - for Compute Nodes in the Batch Pool. Public IP configuration property is - only supported on Pools with the virtualMachineConfiguration property. - :type public_ip_address_configuration: - ~azure.batch.models.PublicIPAddressConfiguration - :param enable_accelerated_networking: Whether this pool should enable - accelerated networking. Accelerated networking enables single root I/O - virtualization (SR-IOV) to a VM, which may lead to improved networking - performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - :type enable_accelerated_networking: bool - """ - - _attribute_map = { - 'subnet_id': {'key': 'subnetId', 'type': 'str'}, - 'dynamic_vnet_assignment_scope': {'key': 'dynamicVNetAssignmentScope', 'type': 'DynamicVNetAssignmentScope'}, - 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'}, - 'public_ip_address_configuration': {'key': 'publicIPAddressConfiguration', 'type': 'PublicIPAddressConfiguration'}, - 'enable_accelerated_networking': {'key': 'enableAcceleratedNetworking', 'type': 'bool'}, - } - - def __init__(self, *, subnet_id: str=None, dynamic_vnet_assignment_scope=None, endpoint_configuration=None, public_ip_address_configuration=None, enable_accelerated_networking: bool=None, **kwargs) -> None: - super(NetworkConfiguration, self).__init__(**kwargs) - self.subnet_id = subnet_id - self.dynamic_vnet_assignment_scope = dynamic_vnet_assignment_scope - self.endpoint_configuration = endpoint_configuration - self.public_ip_address_configuration = public_ip_address_configuration - self.enable_accelerated_networking = enable_accelerated_networking - - -class NetworkSecurityGroupRule(Model): - """A network security group rule to apply to an inbound endpoint. - - All required parameters must be populated in order to send to Azure. - - :param priority: Required. The priority for this rule. Priorities within a - Pool must be unique and are evaluated in order of priority. The lower the - number the higher the priority. For example, rules could be specified with - order numbers of 150, 250, and 350. The rule with the order number of 150 - takes precedence over the rule that has an order of 250. Allowed - priorities are 150 to 4096. If any reserved or duplicate values are - provided the request fails with HTTP status code 400. - :type priority: int - :param access: Required. Possible values include: 'allow', 'deny' - :type access: str or ~azure.batch.models.NetworkSecurityGroupRuleAccess - :param source_address_prefix: Required. Valid values are a single IP - address (i.e. 10.10.10.10), IP subnet (i.e. 
192.168.1.0/24), default tag, - or * (for all addresses). If any other values are provided the request - fails with HTTP status code 400. - :type source_address_prefix: str - :param source_port_ranges: Valid values are '*' (for all ports 0 - 65535), - a specific port (i.e. 22), or a port range (i.e. 100-200). The ports must - be in the range of 0 to 65535. Each entry in this collection must not - overlap any other entry (either a range or an individual port). If any - other values are provided the request fails with HTTP status code 400. The - default value is '*'. - :type source_port_ranges: list[str] - """ - - _validation = { - 'priority': {'required': True}, - 'access': {'required': True}, - 'source_address_prefix': {'required': True}, - } - - _attribute_map = { - 'priority': {'key': 'priority', 'type': 'int'}, - 'access': {'key': 'access', 'type': 'NetworkSecurityGroupRuleAccess'}, - 'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'}, - 'source_port_ranges': {'key': 'sourcePortRanges', 'type': '[str]'}, - } - - def __init__(self, *, priority: int, access, source_address_prefix: str, source_port_ranges=None, **kwargs) -> None: - super(NetworkSecurityGroupRule, self).__init__(**kwargs) - self.priority = priority - self.access = access - self.source_address_prefix = source_address_prefix - self.source_port_ranges = source_port_ranges - - -class NFSMountConfiguration(Model): - """Information used to connect to an NFS file system. - - All required parameters must be populated in order to send to Azure. - - :param source: Required. - :type source: str - :param relative_mount_path: Required. All file systems are mounted - relative to the Batch mounts directory, accessible via the - AZ_BATCH_NODE_MOUNTS_DIR environment variable. - :type relative_mount_path: str - :param mount_options: These are 'net use' options in Windows and 'mount' - options in Linux. - :type mount_options: str - """ - - _validation = { - 'source': {'required': True}, - 'relative_mount_path': {'required': True}, - } - - _attribute_map = { - 'source': {'key': 'source', 'type': 'str'}, - 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, - 'mount_options': {'key': 'mountOptions', 'type': 'str'}, - } - - def __init__(self, *, source: str, relative_mount_path: str, mount_options: str=None, **kwargs) -> None: - super(NFSMountConfiguration, self).__init__(**kwargs) - self.source = source - self.relative_mount_path = relative_mount_path - self.mount_options = mount_options - - -class NodeAgentInformation(Model): - """Information about the Compute Node agent. - - The Batch Compute Node agent is a program that runs on each Compute Node in - the Pool and provides Batch capability on the Compute Node. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. This version number can be checked against the - Compute Node agent release notes located at - https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. - :type version: str - :param last_update_time: Required. This is the most recent time that the - Compute Node agent was updated to a new version. 
- :type last_update_time: datetime - """ - - _validation = { - 'version': {'required': True}, - 'last_update_time': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'version', 'type': 'str'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - } - - def __init__(self, *, version: str, last_update_time, **kwargs) -> None: - super(NodeAgentInformation, self).__init__(**kwargs) - self.version = version - self.last_update_time = last_update_time - - -class NodeCounts(Model): - """The number of Compute Nodes in each Compute Node state. - - All required parameters must be populated in order to send to Azure. - - :param creating: Required. The number of Compute Nodes in the creating - state. - :type creating: int - :param idle: Required. The number of Compute Nodes in the idle state. - :type idle: int - :param offline: Required. The number of Compute Nodes in the offline - state. - :type offline: int - :param preempted: Required. The number of Compute Nodes in the preempted - state. - :type preempted: int - :param rebooting: Required. The count of Compute Nodes in the rebooting - state. - :type rebooting: int - :param reimaging: Required. The number of Compute Nodes in the reimaging - state. - :type reimaging: int - :param running: Required. The number of Compute Nodes in the running - state. - :type running: int - :param starting: Required. The number of Compute Nodes in the starting - state. - :type starting: int - :param start_task_failed: Required. The number of Compute Nodes in the - startTaskFailed state. - :type start_task_failed: int - :param leaving_pool: Required. The number of Compute Nodes in the - leavingPool state. - :type leaving_pool: int - :param unknown: Required. The number of Compute Nodes in the unknown - state. - :type unknown: int - :param unusable: Required. The number of Compute Nodes in the unusable - state. - :type unusable: int - :param waiting_for_start_task: Required. The number of Compute Nodes in - the waitingForStartTask state. - :type waiting_for_start_task: int - :param upgrading_os: Required. The number of Compute Nodes in the - upgradingOS state. - :type upgrading_os: int - :param total: Required. The total number of Compute Nodes. 
- :type total: int - """ - - _validation = { - 'creating': {'required': True}, - 'idle': {'required': True}, - 'offline': {'required': True}, - 'preempted': {'required': True}, - 'rebooting': {'required': True}, - 'reimaging': {'required': True}, - 'running': {'required': True}, - 'starting': {'required': True}, - 'start_task_failed': {'required': True}, - 'leaving_pool': {'required': True}, - 'unknown': {'required': True}, - 'unusable': {'required': True}, - 'waiting_for_start_task': {'required': True}, - 'upgrading_os': {'required': True}, - 'total': {'required': True}, - } - - _attribute_map = { - 'creating': {'key': 'creating', 'type': 'int'}, - 'idle': {'key': 'idle', 'type': 'int'}, - 'offline': {'key': 'offline', 'type': 'int'}, - 'preempted': {'key': 'preempted', 'type': 'int'}, - 'rebooting': {'key': 'rebooting', 'type': 'int'}, - 'reimaging': {'key': 'reimaging', 'type': 'int'}, - 'running': {'key': 'running', 'type': 'int'}, - 'starting': {'key': 'starting', 'type': 'int'}, - 'start_task_failed': {'key': 'startTaskFailed', 'type': 'int'}, - 'leaving_pool': {'key': 'leavingPool', 'type': 'int'}, - 'unknown': {'key': 'unknown', 'type': 'int'}, - 'unusable': {'key': 'unusable', 'type': 'int'}, - 'waiting_for_start_task': {'key': 'waitingForStartTask', 'type': 'int'}, - 'upgrading_os': {'key': 'upgradingOS', 'type': 'int'}, - 'total': {'key': 'total', 'type': 'int'}, - } - - def __init__(self, *, creating: int, idle: int, offline: int, preempted: int, rebooting: int, reimaging: int, running: int, starting: int, start_task_failed: int, leaving_pool: int, unknown: int, unusable: int, waiting_for_start_task: int, upgrading_os: int, total: int, **kwargs) -> None: - super(NodeCounts, self).__init__(**kwargs) - self.creating = creating - self.idle = idle - self.offline = offline - self.preempted = preempted - self.rebooting = rebooting - self.reimaging = reimaging - self.running = running - self.starting = starting - self.start_task_failed = start_task_failed - self.leaving_pool = leaving_pool - self.unknown = unknown - self.unusable = unusable - self.waiting_for_start_task = waiting_for_start_task - self.upgrading_os = upgrading_os - self.total = total - - -class NodeDisableSchedulingParameter(Model): - """Options for disabling scheduling on a Compute Node. - - :param node_disable_scheduling_option: The default value is requeue. - Possible values include: 'requeue', 'terminate', 'taskCompletion' - :type node_disable_scheduling_option: str or - ~azure.batch.models.DisableComputeNodeSchedulingOption - """ - - _attribute_map = { - 'node_disable_scheduling_option': {'key': 'nodeDisableSchedulingOption', 'type': 'DisableComputeNodeSchedulingOption'}, - } - - def __init__(self, *, node_disable_scheduling_option=None, **kwargs) -> None: - super(NodeDisableSchedulingParameter, self).__init__(**kwargs) - self.node_disable_scheduling_option = node_disable_scheduling_option - - -class NodeFile(Model): - """Information about a file or directory on a Compute Node. - - :param name: - :type name: str - :param url: - :type url: str - :param is_directory: Whether the object represents a directory. - :type is_directory: bool - :param properties: The file properties. 
- :type properties: ~azure.batch.models.FileProperties - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'url': {'key': 'url', 'type': 'str'}, - 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, - 'properties': {'key': 'properties', 'type': 'FileProperties'}, - } - - def __init__(self, *, name: str=None, url: str=None, is_directory: bool=None, properties=None, **kwargs) -> None: - super(NodeFile, self).__init__(**kwargs) - self.name = name - self.url = url - self.is_directory = is_directory - self.properties = properties - - -class NodePlacementConfiguration(Model): - """Node placement configuration for a pool. - - For regional placement, nodes in the pool will be allocated in the same - region. For zonal placement, nodes in the pool will be spread across - different zones with best effort balancing. - - :param policy: Node placement Policy type on Batch Pools. Allocation - policy used by Batch Service to provision the nodes. If not specified, - Batch will use the regional policy. Possible values include: 'regional', - 'zonal' - :type policy: str or ~azure.batch.models.NodePlacementPolicyType - """ - - _attribute_map = { - 'policy': {'key': 'policy', 'type': 'NodePlacementPolicyType'}, - } - - def __init__(self, *, policy=None, **kwargs) -> None: - super(NodePlacementConfiguration, self).__init__(**kwargs) - self.policy = policy - - -class NodeRebootParameter(Model): - """Options for rebooting a Compute Node. - - :param node_reboot_option: The default value is requeue. Possible values - include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' - :type node_reboot_option: str or - ~azure.batch.models.ComputeNodeRebootOption - """ - - _attribute_map = { - 'node_reboot_option': {'key': 'nodeRebootOption', 'type': 'ComputeNodeRebootOption'}, - } - - def __init__(self, *, node_reboot_option=None, **kwargs) -> None: - super(NodeRebootParameter, self).__init__(**kwargs) - self.node_reboot_option = node_reboot_option - - -class NodeReimageParameter(Model): - """Options for reimaging a Compute Node. - - :param node_reimage_option: The default value is requeue. Possible values - include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' - :type node_reimage_option: str or - ~azure.batch.models.ComputeNodeReimageOption - """ - - _attribute_map = { - 'node_reimage_option': {'key': 'nodeReimageOption', 'type': 'ComputeNodeReimageOption'}, - } - - def __init__(self, *, node_reimage_option=None, **kwargs) -> None: - super(NodeReimageParameter, self).__init__(**kwargs) - self.node_reimage_option = node_reimage_option - - -class NodeRemoveParameter(Model): - """Options for removing Compute Nodes from a Pool. - - All required parameters must be populated in order to send to Azure. - - :param node_list: Required. A maximum of 100 nodes may be removed per - request. - :type node_list: list[str] - :param resize_timeout: The default value is 15 minutes. The minimum value - is 5 minutes. If you specify a value less than 5 minutes, the Batch - service returns an error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - :type resize_timeout: timedelta - :param node_deallocation_option: Determines what to do with a Compute Node - and its running task(s) after it has been selected for deallocation. The - default value is requeue. 
Possible values include: 'requeue', 'terminate', - 'taskCompletion', 'retainedData' - :type node_deallocation_option: str or - ~azure.batch.models.ComputeNodeDeallocationOption - """ - - _validation = { - 'node_list': {'required': True}, - } - - _attribute_map = { - 'node_list': {'key': 'nodeList', 'type': '[str]'}, - 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, - 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, - } - - def __init__(self, *, node_list, resize_timeout=None, node_deallocation_option=None, **kwargs) -> None: - super(NodeRemoveParameter, self).__init__(**kwargs) - self.node_list = node_list - self.resize_timeout = resize_timeout - self.node_deallocation_option = node_deallocation_option - - -class NodeUpdateUserParameter(Model): - """The set of changes to be made to a user Account on a Compute Node. - - :param password: The password is required for Windows Compute Nodes (those - created with 'cloudServiceConfiguration', or created with - 'virtualMachineConfiguration' using a Windows Image reference). For Linux - Compute Nodes, the password can optionally be specified along with the - sshPublicKey property. If omitted, any existing password is removed. - :type password: str - :param expiry_time: If omitted, the default is 1 day from the current - time. For Linux Compute Nodes, the expiryTime has a precision up to a day. - :type expiry_time: datetime - :param ssh_public_key: The public key should be compatible with OpenSSH - encoding and should be base 64 encoded. This property can be specified - only for Linux Compute Nodes. If this is specified for a Windows Compute - Node, then the Batch service rejects the request; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). If omitted, - any existing SSH public key is removed. - :type ssh_public_key: str - """ - - _attribute_map = { - 'password': {'key': 'password', 'type': 'str'}, - 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, - 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, - } - - def __init__(self, *, password: str=None, expiry_time=None, ssh_public_key: str=None, **kwargs) -> None: - super(NodeUpdateUserParameter, self).__init__(**kwargs) - self.password = password - self.expiry_time = expiry_time - self.ssh_public_key = ssh_public_key - - -class NodeVMExtension(Model): - """The configuration for virtual machine extension instance view. - - :param provisioning_state: - :type provisioning_state: str - :param vm_extension: The virtual machine extension. - :type vm_extension: ~azure.batch.models.VMExtension - :param instance_view: The vm extension instance view. - :type instance_view: ~azure.batch.models.VMExtensionInstanceView - """ - - _attribute_map = { - 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, - 'vm_extension': {'key': 'vmExtension', 'type': 'VMExtension'}, - 'instance_view': {'key': 'instanceView', 'type': 'VMExtensionInstanceView'}, - } - - def __init__(self, *, provisioning_state: str=None, vm_extension=None, instance_view=None, **kwargs) -> None: - super(NodeVMExtension, self).__init__(**kwargs) - self.provisioning_state = provisioning_state - self.vm_extension = vm_extension - self.instance_view = instance_view - - -class OSDisk(Model): - """Settings for the operating system disk of the compute node (VM). - - :param ephemeral_os_disk_settings: Specifies the ephemeral Disk Settings - for the operating system disk used by the compute node (VM). 
- :type ephemeral_os_disk_settings: ~azure.batch.models.DiffDiskSettings - :param caching: Specifies the caching requirements. Possible values are: - None, ReadOnly, ReadWrite. The default values are: None for Standard - storage. ReadOnly for Premium storage. Possible values include: 'none', - 'readOnly', 'readWrite' - :type caching: str or ~azure.batch.models.CachingType - :param managed_disk: The managed disk parameters. - :type managed_disk: ~azure.batch.models.ManagedDisk - :param disk_size_gb: The initial disk size in GB when creating new OS - disk. - :type disk_size_gb: int - :param write_accelerator_enabled: Specifies whether writeAccelerator - should be enabled or disabled on the disk. - :type write_accelerator_enabled: bool - """ - - _attribute_map = { - 'ephemeral_os_disk_settings': {'key': 'ephemeralOSDiskSettings', 'type': 'DiffDiskSettings'}, - 'caching': {'key': 'caching', 'type': 'CachingType'}, - 'managed_disk': {'key': 'managedDisk', 'type': 'ManagedDisk'}, - 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, - 'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'}, - } - - def __init__(self, *, ephemeral_os_disk_settings=None, caching=None, managed_disk=None, disk_size_gb: int=None, write_accelerator_enabled: bool=None, **kwargs) -> None: - super(OSDisk, self).__init__(**kwargs) - self.ephemeral_os_disk_settings = ephemeral_os_disk_settings - self.caching = caching - self.managed_disk = managed_disk - self.disk_size_gb = disk_size_gb - self.write_accelerator_enabled = write_accelerator_enabled - - -class OutputFile(Model): - """A specification for uploading files from an Azure Batch Compute Node to - another location after the Batch service has finished executing the Task - process. - - On every file uploads, Batch service writes two log files to the compute - node, 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used - to learn more about a specific failure. - - All required parameters must be populated in order to send to Azure. - - :param file_pattern: Required. Both relative and absolute paths are - supported. Relative paths are relative to the Task working directory. The - following wildcards are supported: * matches 0 or more characters (for - example pattern abc* would match abc or abcdef), ** matches any directory, - ? matches any single character, [abc] matches one character in the - brackets, and [a-c] matches one character in the range. Brackets can - include a negation to match any character not specified (for example - [!abc] matches any character but a, b, or c). If a file name starts with - "." it is ignored by default but may be matched by specifying it - explicitly (for example *.gif will not match .a.gif, but .*.gif will). A - simple example: **\\*.txt matches any file that does not start in '.' and - ends with .txt in the Task working directory or any subdirectory. If the - filename contains a wildcard character it can be escaped using brackets - (for example abc[*] would match a file named abc*). Note that both \\ and - / are treated as directory separators on Windows, but only / is on Linux. - Environment variables (%var% on Windows or $var on Linux) are expanded - prior to the pattern being applied. - :type file_pattern: str - :param destination: Required. The destination for the output file(s). - :type destination: ~azure.batch.models.OutputFileDestination - :param upload_options: Required. Additional options for the upload - operation, including under what conditions to perform the upload. 
- :type upload_options: ~azure.batch.models.OutputFileUploadOptions - """ - - _validation = { - 'file_pattern': {'required': True}, - 'destination': {'required': True}, - 'upload_options': {'required': True}, - } - - _attribute_map = { - 'file_pattern': {'key': 'filePattern', 'type': 'str'}, - 'destination': {'key': 'destination', 'type': 'OutputFileDestination'}, - 'upload_options': {'key': 'uploadOptions', 'type': 'OutputFileUploadOptions'}, - } - - def __init__(self, *, file_pattern: str, destination, upload_options, **kwargs) -> None: - super(OutputFile, self).__init__(**kwargs) - self.file_pattern = file_pattern - self.destination = destination - self.upload_options = upload_options - - -class OutputFileBlobContainerDestination(Model): - """Specifies a file upload destination within an Azure blob storage container. - - All required parameters must be populated in order to send to Azure. - - :param path: If filePattern refers to a specific file (i.e. contains no - wildcards), then path is the name of the blob to which to upload that - file. If filePattern contains one or more wildcards (and therefore may - match multiple files), then path is the name of the blob virtual directory - (which is prepended to each blob name) to which to upload the file(s). If - omitted, file(s) are uploaded to the root of the container with a blob - name matching their file name. - :type path: str - :param container_url: Required. If not using a managed identity, the URL - must include a Shared Access Signature (SAS) granting write permissions to - the container. - :type container_url: str - :param identity_reference: The reference to the user assigned identity to - use to access Azure Blob Storage specified by containerUrl. The identity - must have write access to the Azure Blob Storage container - :type identity_reference: ~azure.batch.models.ComputeNodeIdentityReference - :param upload_headers: These headers will be specified when uploading - files to Azure Storage. Official document on allowed headers when - uploading blobs: - https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types - :type upload_headers: list[~azure.batch.models.HttpHeader] - """ - - _validation = { - 'container_url': {'required': True}, - } - - _attribute_map = { - 'path': {'key': 'path', 'type': 'str'}, - 'container_url': {'key': 'containerUrl', 'type': 'str'}, - 'identity_reference': {'key': 'identityReference', 'type': 'ComputeNodeIdentityReference'}, - 'upload_headers': {'key': 'uploadHeaders', 'type': '[HttpHeader]'}, - } - - def __init__(self, *, container_url: str, path: str=None, identity_reference=None, upload_headers=None, **kwargs) -> None: - super(OutputFileBlobContainerDestination, self).__init__(**kwargs) - self.path = path - self.container_url = container_url - self.identity_reference = identity_reference - self.upload_headers = upload_headers - - -class OutputFileDestination(Model): - """The destination to which a file should be uploaded. - - :param container: A location in Azure blob storage to which files are - uploaded. 
- :type container: ~azure.batch.models.OutputFileBlobContainerDestination - """ - - _attribute_map = { - 'container': {'key': 'container', 'type': 'OutputFileBlobContainerDestination'}, - } - - def __init__(self, *, container=None, **kwargs) -> None: - super(OutputFileDestination, self).__init__(**kwargs) - self.container = container - - -class OutputFileUploadOptions(Model): - """Details about an output file upload operation, including under what - conditions to perform the upload. - - All required parameters must be populated in order to send to Azure. - - :param upload_condition: Required. The conditions under which the Task - output file or set of files should be uploaded. The default is - taskcompletion. Possible values include: 'taskSuccess', 'taskFailure', - 'taskCompletion' - :type upload_condition: str or - ~azure.batch.models.OutputFileUploadCondition - """ - - _validation = { - 'upload_condition': {'required': True}, - } - - _attribute_map = { - 'upload_condition': {'key': 'uploadCondition', 'type': 'OutputFileUploadCondition'}, - } - - def __init__(self, *, upload_condition, **kwargs) -> None: - super(OutputFileUploadOptions, self).__init__(**kwargs) - self.upload_condition = upload_condition - - -class PoolAddOptions(Model): - """Additional parameters for add operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(PoolAddOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class PoolAddParameter(Model): - """A Pool in the Azure Batch service to add. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. The ID can contain any combination of alphanumeric - characters including hyphens and underscores, and cannot contain more than - 64 characters. The ID is case-preserving and case-insensitive (that is, - you may not have two Pool IDs within an Account that differ only by case). - :type id: str - :param display_name: The display name need not be unique and can contain - any Unicode characters up to a maximum length of 1024. - :type display_name: str - :param vm_size: Required. 
For information about available sizes of virtual - machines for Cloud Services Pools (pools created with - cloudServiceConfiguration), see Sizes for Cloud Services - (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). - Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and - A2V2. For information about available VM sizes for Pools using Images from - the Virtual Machines Marketplace (pools created with - virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) - (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) - or Sizes for Virtual Machines (Windows) - (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). - Batch supports all Azure VM sizes except STANDARD_A0 and those with - premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). - :type vm_size: str - :param cloud_service_configuration: The cloud service configuration for - the Pool. This property and virtualMachineConfiguration are mutually - exclusive and one of the properties must be specified. This property - cannot be specified if the Batch Account was created with its - poolAllocationMode property set to 'UserSubscription'. - :type cloud_service_configuration: - ~azure.batch.models.CloudServiceConfiguration - :param virtual_machine_configuration: The virtual machine configuration - for the Pool. This property and cloudServiceConfiguration are mutually - exclusive and one of the properties must be specified. - :type virtual_machine_configuration: - ~azure.batch.models.VirtualMachineConfiguration - :param resize_timeout: This timeout applies only to manual scaling; it has - no effect when enableAutoScale is set to true. The default value is 15 - minutes. The minimum value is 5 minutes. If you specify a value less than - 5 minutes, the Batch service returns an error; if you are calling the REST - API directly, the HTTP status code is 400 (Bad Request). - :type resize_timeout: timedelta - :param target_dedicated_nodes: The desired number of dedicated Compute - Nodes in the Pool. This property must not be specified if enableAutoScale - is set to true. If enableAutoScale is set to false, then you must set - either targetDedicatedNodes, targetLowPriorityNodes, or both. - :type target_dedicated_nodes: int - :param target_low_priority_nodes: The desired number of Spot/Low-priority - Compute Nodes in the Pool. This property must not be specified if - enableAutoScale is set to true. If enableAutoScale is set to false, then - you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. - :type target_low_priority_nodes: int - :param enable_auto_scale: Whether the Pool size should automatically - adjust over time. If false, at least one of targetDedicatedNodes and - targetLowPriorityNodes must be specified. If true, the autoScaleFormula - property is required and the Pool automatically resizes according to the - formula. The default value is false. - :type enable_auto_scale: bool - :param auto_scale_formula: This property must not be specified if - enableAutoScale is set to false. It is required if enableAutoScale is set - to true. The formula is checked for validity before the Pool is created. - If the formula is not valid, the Batch service rejects the request with - detailed error information. For more information about specifying this - formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' - (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). 
- :type auto_scale_formula: str - :param auto_scale_evaluation_interval: The default value is 15 minutes. - The minimum and maximum value are 5 minutes and 168 hours respectively. If - you specify a value less than 5 minutes or greater than 168 hours, the - Batch service returns an error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). - :type auto_scale_evaluation_interval: timedelta - :param enable_inter_node_communication: Whether the Pool permits direct - communication between Compute Nodes. Enabling inter-node communication - limits the maximum size of the Pool due to deployment restrictions on the - Compute Nodes of the Pool. This may result in the Pool not reaching its - desired size. The default value is false. - :type enable_inter_node_communication: bool - :param network_configuration: The network configuration for the Pool. - :type network_configuration: ~azure.batch.models.NetworkConfiguration - :param start_task: A Task specified to run on each Compute Node as it - joins the Pool. The Task runs when the Compute Node is added to the Pool - or when the Compute Node is restarted. - :type start_task: ~azure.batch.models.StartTask - :param certificate_references: For Windows Nodes, the Batch service - installs the Certificates to the specified Certificate store and location. - For Linux Compute Nodes, the Certificates are stored in a directory inside - the Task working directory and an environment variable - AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - location. For Certificates with visibility of 'remoteUser', a 'certs' - directory is created in the user's home directory (e.g., - /home/{user-name}/certs) and Certificates are placed in that directory. - Warning: This property is deprecated and will be removed after February, - 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - :type certificate_references: - list[~azure.batch.models.CertificateReference] - :param application_package_references: When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Changes to Package references affect all new Nodes joining the Pool, but - do not affect Compute Nodes that are already in the Pool until they are - rebooted or reimaged. There is a maximum of 10 Package references on any - given Pool. - :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param application_licenses: The list of application licenses must be a - subset of available Batch service application licenses. If a license is - requested which is not supported, Pool creation will fail. - :type application_licenses: list[str] - :param task_slots_per_node: The number of task slots that can be used to - run concurrent tasks on a single compute node in the pool. The default - value is 1. The maximum value is the smaller of 4 times the number of - cores of the vmSize of the pool or 256. - :type task_slots_per_node: int - :param task_scheduling_policy: How Tasks are distributed across Compute - Nodes in a Pool. If not specified, the default is spread. 
- :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy - :param user_accounts: - :type user_accounts: list[~azure.batch.models.UserAccount] - :param metadata: The Batch service does not assign any meaning to - metadata; it is solely for the use of user code. - :type metadata: list[~azure.batch.models.MetadataItem] - :param mount_configuration: Mount the storage using Azure fileshare, NFS, - CIFS or Blobfuse based file system. - :type mount_configuration: list[~azure.batch.models.MountConfiguration] - :param target_node_communication_mode: The desired node communication mode - for the pool. If omitted, the default value is Default. Possible values - include: 'default', 'classic', 'simplified' - :type target_node_communication_mode: str or - ~azure.batch.models.NodeCommunicationMode - :param upgrade_policy: The upgrade policy for the Pool. Describes an - upgrade policy - automatic, manual, or rolling. - :type upgrade_policy: ~azure.batch.models.UpgradePolicy - :param resource_tags: The user-defined tags to be associated with the - Azure Batch Pool. When specified, these tags are propagated to the backing - Azure resources associated with the pool. This property can only be - specified when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. - :type resource_tags: dict[str, str] - """ - - _validation = { - 'id': {'required': True}, - 'vm_size': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'vm_size': {'key': 'vmSize', 'type': 'str'}, - 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, - 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, - 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, - 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, - 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, - 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, - 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, - 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, - 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, - 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, - 'start_task': {'key': 'startTask', 'type': 'StartTask'}, - 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, - 'task_slots_per_node': {'key': 'taskSlotsPerNode', 'type': 'int'}, - 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, - 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'}, - 'target_node_communication_mode': {'key': 'targetNodeCommunicationMode', 'type': 'NodeCommunicationMode'}, - 'upgrade_policy': {'key': 'upgradePolicy', 'type': 'UpgradePolicy'}, - 'resource_tags': {'key': 'resourceTags', 'type': '{str}'}, - } - - def __init__(self, *, id: str, vm_size: str, display_name: str=None, 
cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, task_slots_per_node: int=None, task_scheduling_policy=None, user_accounts=None, metadata=None, mount_configuration=None, target_node_communication_mode=None, upgrade_policy=None, resource_tags=None, **kwargs) -> None: - super(PoolAddParameter, self).__init__(**kwargs) - self.id = id - self.display_name = display_name - self.vm_size = vm_size - self.cloud_service_configuration = cloud_service_configuration - self.virtual_machine_configuration = virtual_machine_configuration - self.resize_timeout = resize_timeout - self.target_dedicated_nodes = target_dedicated_nodes - self.target_low_priority_nodes = target_low_priority_nodes - self.enable_auto_scale = enable_auto_scale - self.auto_scale_formula = auto_scale_formula - self.auto_scale_evaluation_interval = auto_scale_evaluation_interval - self.enable_inter_node_communication = enable_inter_node_communication - self.network_configuration = network_configuration - self.start_task = start_task - self.certificate_references = certificate_references - self.application_package_references = application_package_references - self.application_licenses = application_licenses - self.task_slots_per_node = task_slots_per_node - self.task_scheduling_policy = task_scheduling_policy - self.user_accounts = user_accounts - self.metadata = metadata - self.mount_configuration = mount_configuration - self.target_node_communication_mode = target_node_communication_mode - self.upgrade_policy = upgrade_policy - self.resource_tags = resource_tags - - -class PoolDeleteOptions(Model): - """Additional parameters for delete operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. 
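
# --- Illustrative sketch (Python): creating a pool with the PoolAddParameter model
# --- removed above, assuming the legacy msrest-based BatchServiceClient and its
# --- flattened `pool` operations group. The account URL, key, VM size and image
# --- values below are placeholders, not values taken from this change.
import azure.batch.models as batchmodels
from azure.batch import BatchServiceClient, batch_auth

credentials = batch_auth.SharedKeyCredentials("<account-name>", "<account-key>")
client = BatchServiceClient(
    credentials, batch_url="https://<account>.<region>.batch.azure.com")

new_pool = batchmodels.PoolAddParameter(
    id="my-pool",
    vm_size="standard_d2s_v3",
    virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
        image_reference=batchmodels.ImageReference(
            publisher="canonical",
            offer="0001-com-ubuntu-server-focal",
            sku="20_04-lts",
            version="latest"),
        node_agent_sku_id="batch.node.ubuntu 20.04"),
    target_dedicated_nodes=2,
    task_slots_per_node=1)
client.pool.add(new_pool)
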
The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(PoolDeleteOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class PoolDisableAutoScaleOptions(Model): - """Additional parameters for disable_auto_scale operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(PoolDisableAutoScaleOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class PoolEnableAutoScaleOptions(Model): - """Additional parameters for enable_auto_scale operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
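
# --- Illustrative sketch (Python): the *Options models in this file map to request
# --- headers (timeout, client-request-id, ocp-date) and the ETag/timestamp access
# --- conditions described above. `client` and `batchmodels` are the objects set up
# --- in the pool-creation sketch earlier; the pool id is a placeholder, and the
# --- `e_tag` attribute on the returned pool is an assumption of the legacy model.
import uuid

existing = client.pool.get("my-pool")
client.pool.delete(
    "my-pool",
    pool_delete_options=batchmodels.PoolDeleteOptions(
        client_request_id=str(uuid.uuid4()),
        # Only delete if the pool has not changed on the service since we read it.
        if_match=existing.e_tag))
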
- :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(PoolEnableAutoScaleOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class PoolEnableAutoScaleParameter(Model): - """Options for enabling automatic scaling on a Pool. - - :param auto_scale_formula: The formula is checked for validity before it - is applied to the Pool. If the formula is not valid, the Batch service - rejects the request with detailed error information. For more information - about specifying this formula, see Automatically scale Compute Nodes in an - Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - :type auto_scale_formula: str - :param auto_scale_evaluation_interval: The default value is 15 minutes. - The minimum and maximum value are 5 minutes and 168 hours respectively. If - you specify a value less than 5 minutes or greater than 168 hours, the - Batch service rejects the request with an invalid property value error; if - you are calling the REST API directly, the HTTP status code is 400 (Bad - Request). 
If you specify a new interval, then the existing autoscale - evaluation schedule will be stopped and a new autoscale evaluation - schedule will be started, with its starting time being the time when this - request was issued. - :type auto_scale_evaluation_interval: timedelta - """ - - _attribute_map = { - 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, - 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, - } - - def __init__(self, *, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, **kwargs) -> None: - super(PoolEnableAutoScaleParameter, self).__init__(**kwargs) - self.auto_scale_formula = auto_scale_formula - self.auto_scale_evaluation_interval = auto_scale_evaluation_interval - - -class PoolEndpointConfiguration(Model): - """The endpoint configuration for a Pool. - - All required parameters must be populated in order to send to Azure. - - :param inbound_nat_pools: Required. The maximum number of inbound NAT - Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is - exceeded the request fails with HTTP status code 400. This cannot be - specified if the IPAddressProvisioningType is NoPublicIPAddresses. - :type inbound_nat_pools: list[~azure.batch.models.InboundNATPool] - """ - - _validation = { - 'inbound_nat_pools': {'required': True}, - } - - _attribute_map = { - 'inbound_nat_pools': {'key': 'inboundNATPools', 'type': '[InboundNATPool]'}, - } - - def __init__(self, *, inbound_nat_pools, **kwargs) -> None: - super(PoolEndpointConfiguration, self).__init__(**kwargs) - self.inbound_nat_pools = inbound_nat_pools - - -class PoolEvaluateAutoScaleOptions(Model): - """Additional parameters for evaluate_auto_scale operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(PoolEvaluateAutoScaleOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class PoolEvaluateAutoScaleParameter(Model): - """Options for evaluating an automatic scaling formula on a Pool. - - All required parameters must be populated in order to send to Azure. - - :param auto_scale_formula: Required. The formula is validated and its - results calculated, but it is not applied to the Pool. 
To apply the - formula to the Pool, 'Enable automatic scaling on a Pool'. For more - information about specifying this formula, see Automatically scale Compute - Nodes in an Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - :type auto_scale_formula: str - """ - - _validation = { - 'auto_scale_formula': {'required': True}, - } - - _attribute_map = { - 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, - } - - def __init__(self, *, auto_scale_formula: str, **kwargs) -> None: - super(PoolEvaluateAutoScaleParameter, self).__init__(**kwargs) - self.auto_scale_formula = auto_scale_formula - - -class PoolExistsOptions(Model): - """Additional parameters for exists operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
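
# --- Illustrative sketch (Python): dry-running and then applying an autoscale formula
# --- with the PoolEvaluateAutoScaleParameter / PoolEnableAutoScaleParameter shapes
# --- above, assuming the legacy flattened pool operations. The formula is a toy
# --- example; `client` is the object from the earlier pool-creation sketch.
import datetime

formula = "$TargetDedicatedNodes = 2;"

# Validate the formula and inspect the computed values without applying it.
run = client.pool.evaluate_auto_scale("my-pool", auto_scale_formula=formula)
print(run.results)

# Apply it, re-evaluating every 15 minutes (the interval must be 5 minutes-168 hours).
client.pool.enable_auto_scale(
    "my-pool",
    auto_scale_formula=formula,
    auto_scale_evaluation_interval=datetime.timedelta(minutes=15))
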
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(PoolExistsOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class PoolGetOptions(Model): - """Additional parameters for get operation. - - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(PoolGetOptions, self).__init__(**kwargs) - self.select = select - self.expand = expand - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class PoolInformation(Model): - """Specifies how a Job should be assigned to a Pool. - - :param pool_id: You must ensure that the Pool referenced by this property - exists. If the Pool does not exist at the time the Batch service tries to - schedule a Job, no Tasks for the Job will run until you create a Pool with - that id. Note that the Batch service will not reject the Job request; it - will simply not run Tasks until the Pool exists. You must specify either - the Pool ID or the auto Pool specification, but not both. - :type pool_id: str - :param auto_pool_specification: Characteristics for a temporary 'auto - pool'. The Batch service will create this auto Pool when the Job is - submitted. If auto Pool creation fails, the Batch service moves the Job to - a completed state, and the Pool creation error is set in the Job's - scheduling error property. The Batch service manages the lifetime (both - creation and, unless keepAlive is specified, deletion) of the auto Pool. - Any user actions that affect the lifetime of the auto Pool while the Job - is active will result in unexpected behavior. You must specify either the - Pool ID or the auto Pool specification, but not both. - :type auto_pool_specification: ~azure.batch.models.AutoPoolSpecification - """ - - _attribute_map = { - 'pool_id': {'key': 'poolId', 'type': 'str'}, - 'auto_pool_specification': {'key': 'autoPoolSpecification', 'type': 'AutoPoolSpecification'}, - } - - def __init__(self, *, pool_id: str=None, auto_pool_specification=None, **kwargs) -> None: - super(PoolInformation, self).__init__(**kwargs) - self.pool_id = pool_id - self.auto_pool_specification = auto_pool_specification - - -class PoolListOptions(Model): - """Additional parameters for list operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Pools can be returned. Default value: 1000 . 
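
# --- Illustrative sketch (Python): PoolExistsOptions/PoolGetOptions above back the
# --- exists/get operations; an OData $select trims the returned pool to the listed
# --- properties. `client`/`batchmodels` come from the earlier sketch; the pool id
# --- and property list are placeholders.
if client.pool.exists("my-pool"):
    pool = client.pool.get(
        "my-pool",
        pool_get_options=batchmodels.PoolGetOptions(
            select="id,state,allocationState,currentDedicatedNodes"))
    print(pool.id, pool.state, pool.current_dedicated_nodes)
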
- :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(PoolListOptions, self).__init__(**kwargs) - self.filter = filter - self.select = select - self.expand = expand - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class PoolListUsageMetricsOptions(Model): - """Additional parameters for list_usage_metrics operation. - - :param start_time: The earliest time from which to include metrics. This - must be at least two and a half hours before the current time. If not - specified this defaults to the start time of the last aggregation interval - currently available. - :type start_time: datetime - :param end_time: The latest time from which to include metrics. This must - be at least two hours before the current time. If not specified this - defaults to the end time of the last aggregation interval currently - available. - :type end_time: datetime - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. - :type filter: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 results will be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. 
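
# --- Illustrative sketch (Python): listing pools with the PoolListOptions model above.
# --- The call returns a paged iterator and max_results caps the page size, not the
# --- total; the OData filter is an example. `client`/`batchmodels` as set up earlier.
options = batchmodels.PoolListOptions(
    filter="startswith(id,'my-')",
    select="id,vmSize,currentDedicatedNodes",
    max_results=100)
for pool in client.pool.list(pool_list_options=options):
    print(pool.id, pool.vm_size, pool.current_dedicated_nodes)
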
Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'start_time': {'key': '', 'type': 'iso-8601'}, - 'end_time': {'key': '', 'type': 'iso-8601'}, - 'filter': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, start_time=None, end_time=None, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(PoolListUsageMetricsOptions, self).__init__(**kwargs) - self.start_time = start_time - self.end_time = end_time - self.filter = filter - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class PoolNodeCounts(Model): - """The number of Compute Nodes in each state for a Pool. - - All required parameters must be populated in order to send to Azure. - - :param pool_id: Required. - :type pool_id: str - :param dedicated: The number of dedicated Compute Nodes in each state. - :type dedicated: ~azure.batch.models.NodeCounts - :param low_priority: The number of Spot/Low-priority Compute Nodes in each - state. - :type low_priority: ~azure.batch.models.NodeCounts - """ - - _validation = { - 'pool_id': {'required': True}, - } - - _attribute_map = { - 'pool_id': {'key': 'poolId', 'type': 'str'}, - 'dedicated': {'key': 'dedicated', 'type': 'NodeCounts'}, - 'low_priority': {'key': 'lowPriority', 'type': 'NodeCounts'}, - } - - def __init__(self, *, pool_id: str, dedicated=None, low_priority=None, **kwargs) -> None: - super(PoolNodeCounts, self).__init__(**kwargs) - self.pool_id = pool_id - self.dedicated = dedicated - self.low_priority = low_priority - - -class PoolPatchOptions(Model): - """Additional parameters for patch operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. 
- :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(PoolPatchOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class PoolPatchParameter(Model): - """The set of changes to be made to a Pool. - - :param start_task: A Task to run on each Compute Node as it joins the - Pool. The Task runs when the Compute Node is added to the Pool or when the - Compute Node is restarted. If this element is present, it overwrites any - existing StartTask. If omitted, any existing StartTask is left unchanged. - :type start_task: ~azure.batch.models.StartTask - :param certificate_references: If this element is present, it replaces any - existing Certificate references configured on the Pool. If omitted, any - existing Certificate references are left unchanged. For Windows Nodes, the - Batch service installs the Certificates to the specified Certificate store - and location. For Linux Compute Nodes, the Certificates are stored in a - directory inside the Task working directory and an environment variable - AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - location. For Certificates with visibility of 'remoteUser', a 'certs' - directory is created in the user's home directory (e.g., - /home/{user-name}/certs) and Certificates are placed in that directory. - Warning: This property is deprecated and will be removed after February, - 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - :type certificate_references: - list[~azure.batch.models.CertificateReference] - :param application_package_references: Changes to Package references - affect all new Nodes joining the Pool, but do not affect Compute Nodes - that are already in the Pool until they are rebooted or reimaged. If this - element is present, it replaces any existing Package references. If you - specify an empty collection, then all Package references are removed from - the Pool. If omitted, any existing Package references are left unchanged. 
- :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param metadata: If this element is present, it replaces any existing - metadata configured on the Pool. If you specify an empty collection, any - metadata is removed from the Pool. If omitted, any existing metadata is - left unchanged. - :type metadata: list[~azure.batch.models.MetadataItem] - :param target_node_communication_mode: The desired node communication mode - for the pool. If this element is present, it replaces the existing - targetNodeCommunicationMode configured on the Pool. If omitted, any - existing metadata is left unchanged. Possible values include: 'default', - 'classic', 'simplified' - :type target_node_communication_mode: str or - ~azure.batch.models.NodeCommunicationMode - """ - - _attribute_map = { - 'start_task': {'key': 'startTask', 'type': 'StartTask'}, - 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'target_node_communication_mode': {'key': 'targetNodeCommunicationMode', 'type': 'NodeCommunicationMode'}, - } - - def __init__(self, *, start_task=None, certificate_references=None, application_package_references=None, metadata=None, target_node_communication_mode=None, **kwargs) -> None: - super(PoolPatchParameter, self).__init__(**kwargs) - self.start_task = start_task - self.certificate_references = certificate_references - self.application_package_references = application_package_references - self.metadata = metadata - self.target_node_communication_mode = target_node_communication_mode - - -class PoolRemoveNodesOptions(Model): - """Additional parameters for remove_nodes operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. 
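
# --- Illustrative sketch (Python): PoolPatchParameter above only replaces the elements
# --- that are present and leaves omitted ones unchanged. `client`/`batchmodels` as
# --- set up earlier; the metadata values are placeholders, and the communication mode
# --- is passed as its string value, which the legacy msrest models accept.
patch = batchmodels.PoolPatchParameter(
    metadata=[batchmodels.MetadataItem(name="team", value="render")],
    target_node_communication_mode="simplified")
client.pool.patch("my-pool", patch)
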
- :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(PoolRemoveNodesOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class PoolResizeOptions(Model): - """Additional parameters for resize operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(PoolResizeOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class PoolResizeParameter(Model): - """Options for changing the size of a Pool. - - :param target_dedicated_nodes: The desired number of dedicated Compute - Nodes in the Pool. - :type target_dedicated_nodes: int - :param target_low_priority_nodes: The desired number of Spot/Low-priority - Compute Nodes in the Pool. - :type target_low_priority_nodes: int - :param resize_timeout: The default value is 15 minutes. The minimum value - is 5 minutes. If you specify a value less than 5 minutes, the Batch - service returns an error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - :type resize_timeout: timedelta - :param node_deallocation_option: Determines what to do with a Compute Node - and its running task(s) if the Pool size is decreasing. The default value - is requeue. Possible values include: 'requeue', 'terminate', - 'taskCompletion', 'retainedData' - :type node_deallocation_option: str or - ~azure.batch.models.ComputeNodeDeallocationOption - """ - - _attribute_map = { - 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, - 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, - 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, - 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, - } - - def __init__(self, *, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, resize_timeout=None, node_deallocation_option=None, **kwargs) -> None: - super(PoolResizeParameter, self).__init__(**kwargs) - self.target_dedicated_nodes = target_dedicated_nodes - self.target_low_priority_nodes = target_low_priority_nodes - self.resize_timeout = resize_timeout - self.node_deallocation_option = node_deallocation_option - - -class PoolSpecification(Model): - """Specification for creating a new Pool. - - All required parameters must be populated in order to send to Azure. - - :param display_name: The display name need not be unique and can contain - any Unicode characters up to a maximum length of 1024. - :type display_name: str - :param vm_size: Required. For information about available sizes of virtual - machines in Pools, see Choose a VM size for Compute Nodes in an Azure - Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - :type vm_size: str - :param cloud_service_configuration: The cloud service configuration for - the Pool. 
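
# --- Illustrative sketch (Python): manual scaling with the PoolResizeParameter model
# --- above; stop_resize abandons an in-flight resize. `client`/`batchmodels` as set
# --- up earlier; the node counts and timeout are placeholders.
import datetime

client.pool.resize(
    "my-pool",
    batchmodels.PoolResizeParameter(
        target_dedicated_nodes=5,
        target_low_priority_nodes=0,
        resize_timeout=datetime.timedelta(minutes=15),
        node_deallocation_option="requeue"))

# To cancel the operation before it reaches the new target sizes:
client.pool.stop_resize("my-pool")
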
This property must be specified if the Pool needs to be created - with Azure PaaS VMs. This property and virtualMachineConfiguration are - mutually exclusive and one of the properties must be specified. If neither - is specified then the Batch service returns an error; if you are calling - the REST API directly, the HTTP status code is 400 (Bad Request). This - property cannot be specified if the Batch Account was created with its - poolAllocationMode property set to 'UserSubscription'. - :type cloud_service_configuration: - ~azure.batch.models.CloudServiceConfiguration - :param virtual_machine_configuration: The virtual machine configuration - for the Pool. This property must be specified if the Pool needs to be - created with Azure IaaS VMs. This property and cloudServiceConfiguration - are mutually exclusive and one of the properties must be specified. If - neither is specified then the Batch service returns an error; if you are - calling the REST API directly, the HTTP status code is 400 (Bad Request). - :type virtual_machine_configuration: - ~azure.batch.models.VirtualMachineConfiguration - :param task_slots_per_node: The number of task slots that can be used to - run concurrent tasks on a single compute node in the pool. The default - value is 1. The maximum value is the smaller of 4 times the number of - cores of the vmSize of the pool or 256. - :type task_slots_per_node: int - :param task_scheduling_policy: How Tasks are distributed across Compute - Nodes in a Pool. If not specified, the default is spread. - :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy - :param resize_timeout: This timeout applies only to manual scaling; it has - no effect when enableAutoScale is set to true. The default value is 15 - minutes. The minimum value is 5 minutes. If you specify a value less than - 5 minutes, the Batch service rejects the request with an error; if you are - calling the REST API directly, the HTTP status code is 400 (Bad Request). - :type resize_timeout: timedelta - :param target_dedicated_nodes: The desired number of dedicated Compute - Nodes in the Pool. This property must not be specified if enableAutoScale - is set to true. If enableAutoScale is set to false, then you must set - either targetDedicatedNodes, targetLowPriorityNodes, or both. - :type target_dedicated_nodes: int - :param target_low_priority_nodes: The desired number of Spot/Low-priority - Compute Nodes in the Pool. This property must not be specified if - enableAutoScale is set to true. If enableAutoScale is set to false, then - you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. - :type target_low_priority_nodes: int - :param enable_auto_scale: Whether the Pool size should automatically - adjust over time. If false, at least one of targetDedicatedNodes and - targetLowPriorityNodes must be specified. If true, the autoScaleFormula - element is required. The Pool automatically resizes according to the - formula. The default value is false. - :type enable_auto_scale: bool - :param auto_scale_formula: This property must not be specified if - enableAutoScale is set to false. It is required if enableAutoScale is set - to true. The formula is checked for validity before the Pool is created. - If the formula is not valid, the Batch service rejects the request with - detailed error information. - :type auto_scale_formula: str - :param auto_scale_evaluation_interval: The default value is 15 minutes. - The minimum and maximum value are 5 minutes and 168 hours respectively. 
If - you specify a value less than 5 minutes or greater than 168 hours, the - Batch service rejects the request with an invalid property value error; if - you are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - :type auto_scale_evaluation_interval: timedelta - :param enable_inter_node_communication: Whether the Pool permits direct - communication between Compute Nodes. Enabling inter-node communication - limits the maximum size of the Pool due to deployment restrictions on the - Compute Nodes of the Pool. This may result in the Pool not reaching its - desired size. The default value is false. - :type enable_inter_node_communication: bool - :param network_configuration: The network configuration for the Pool. - :type network_configuration: ~azure.batch.models.NetworkConfiguration - :param start_task: A Task to run on each Compute Node as it joins the - Pool. The Task runs when the Compute Node is added to the Pool or when the - Compute Node is restarted. - :type start_task: ~azure.batch.models.StartTask - :param certificate_references: For Windows Nodes, the Batch service - installs the Certificates to the specified Certificate store and location. - For Linux Compute Nodes, the Certificates are stored in a directory inside - the Task working directory and an environment variable - AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - location. For Certificates with visibility of 'remoteUser', a 'certs' - directory is created in the user's home directory (e.g., - /home/{user-name}/certs) and Certificates are placed in that directory. - Warning: This property is deprecated and will be removed after February, - 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - :type certificate_references: - list[~azure.batch.models.CertificateReference] - :param application_package_references: When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Changes to Package references affect all new Nodes joining the Pool, but - do not affect Compute Nodes that are already in the Pool until they are - rebooted or reimaged. There is a maximum of 10 Package references on any - given Pool. - :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param application_licenses: The list of application licenses must be a - subset of available Batch service application licenses. If a license is - requested which is not supported, Pool creation will fail. The permitted - licenses available on the Pool are 'maya', 'vray', '3dsmax', 'arnold'. An - additional charge applies for each application license added to the Pool. - :type application_licenses: list[str] - :param user_accounts: - :type user_accounts: list[~azure.batch.models.UserAccount] - :param metadata: The Batch service does not assign any meaning to - metadata; it is solely for the use of user code. - :type metadata: list[~azure.batch.models.MetadataItem] - :param mount_configuration: This supports Azure Files, NFS, CIFS/SMB, and - Blobfuse. - :type mount_configuration: list[~azure.batch.models.MountConfiguration] - :param target_node_communication_mode: The desired node communication mode - for the pool. If omitted, the default value is Default. 
Possible values - include: 'default', 'classic', 'simplified' - :type target_node_communication_mode: str or - ~azure.batch.models.NodeCommunicationMode - :param upgrade_policy: The upgrade policy for the pool. - :type upgrade_policy: ~azure.batch.models.UpgradePolicy - :param resource_tags: The user-defined tags to be associated with the - Azure Batch Pool. When specified, these tags are propagated to the backing - Azure resources associated with the pool. This property can only be - specified when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. - :type resource_tags: dict[str, str] - """ - - _validation = { - 'vm_size': {'required': True}, - } - - _attribute_map = { - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'vm_size': {'key': 'vmSize', 'type': 'str'}, - 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, - 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, - 'task_slots_per_node': {'key': 'taskSlotsPerNode', 'type': 'int'}, - 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, - 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, - 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, - 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, - 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, - 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, - 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, - 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, - 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, - 'start_task': {'key': 'startTask', 'type': 'StartTask'}, - 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, - 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'}, - 'target_node_communication_mode': {'key': 'targetNodeCommunicationMode', 'type': 'NodeCommunicationMode'}, - 'upgrade_policy': {'key': 'upgradePolicy', 'type': 'UpgradePolicy'}, - 'resource_tags': {'key': 'resourceTags', 'type': '{str}'}, - } - - def __init__(self, *, vm_size: str, display_name: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, task_slots_per_node: int=None, task_scheduling_policy=None, resize_timeout=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, user_accounts=None, metadata=None, mount_configuration=None, target_node_communication_mode=None, upgrade_policy=None, resource_tags=None, **kwargs) -> None: - super(PoolSpecification, self).__init__(**kwargs) - self.display_name = display_name - self.vm_size = vm_size - self.cloud_service_configuration = 
cloud_service_configuration - self.virtual_machine_configuration = virtual_machine_configuration - self.task_slots_per_node = task_slots_per_node - self.task_scheduling_policy = task_scheduling_policy - self.resize_timeout = resize_timeout - self.target_dedicated_nodes = target_dedicated_nodes - self.target_low_priority_nodes = target_low_priority_nodes - self.enable_auto_scale = enable_auto_scale - self.auto_scale_formula = auto_scale_formula - self.auto_scale_evaluation_interval = auto_scale_evaluation_interval - self.enable_inter_node_communication = enable_inter_node_communication - self.network_configuration = network_configuration - self.start_task = start_task - self.certificate_references = certificate_references - self.application_package_references = application_package_references - self.application_licenses = application_licenses - self.user_accounts = user_accounts - self.metadata = metadata - self.mount_configuration = mount_configuration - self.target_node_communication_mode = target_node_communication_mode - self.upgrade_policy = upgrade_policy - self.resource_tags = resource_tags - - -class PoolStatistics(Model): - """Contains utilization and resource usage statistics for the lifetime of a - Pool. - - All required parameters must be populated in order to send to Azure. - - :param url: Required. - :type url: str - :param start_time: Required. - :type start_time: datetime - :param last_update_time: Required. - :type last_update_time: datetime - :param usage_stats: Statistics related to Pool usage, such as the amount - of core-time used. - :type usage_stats: ~azure.batch.models.UsageStatistics - :param resource_stats: Statistics related to resource consumption by - Compute Nodes in the Pool. - :type resource_stats: ~azure.batch.models.ResourceStatistics - """ - - _validation = { - 'url': {'required': True}, - 'start_time': {'required': True}, - 'last_update_time': {'required': True}, - } - - _attribute_map = { - 'url': {'key': 'url', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - 'usage_stats': {'key': 'usageStats', 'type': 'UsageStatistics'}, - 'resource_stats': {'key': 'resourceStats', 'type': 'ResourceStatistics'}, - } - - def __init__(self, *, url: str, start_time, last_update_time, usage_stats=None, resource_stats=None, **kwargs) -> None: - super(PoolStatistics, self).__init__(**kwargs) - self.url = url - self.start_time = start_time - self.last_update_time = last_update_time - self.usage_stats = usage_stats - self.resource_stats = resource_stats - - -class PoolStopResizeOptions(Model): - """Additional parameters for stop_resize operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
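
# --- Illustrative sketch (Python): PoolSpecification above is the shape carried by an
# --- auto pool; a job references it through PoolInformation/AutoPoolSpecification.
# --- `client`/`batchmodels` as set up earlier; AutoPoolSpecification, JobAddParameter
# --- and all literal values here are assumptions about the legacy model surface.
pool_info = batchmodels.PoolInformation(
    auto_pool_specification=batchmodels.AutoPoolSpecification(
        auto_pool_id_prefix="render",
        pool_lifetime_option="job",
        keep_alive=False,
        pool=batchmodels.PoolSpecification(
            vm_size="standard_d2s_v3",
            target_dedicated_nodes=3,
            virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
                image_reference=batchmodels.ImageReference(
                    publisher="canonical",
                    offer="0001-com-ubuntu-server-focal",
                    sku="20_04-lts",
                    version="latest"),
                node_agent_sku_id="batch.node.ubuntu 20.04"))))
client.job.add(batchmodels.JobAddParameter(id="render-job", pool_info=pool_info))
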
- :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(PoolStopResizeOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class PoolUpdatePropertiesOptions(Model): - """Additional parameters for update_properties operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(PoolUpdatePropertiesOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class PoolUpdatePropertiesParameter(Model): - """The set of changes to be made to a Pool. - - All required parameters must be populated in order to send to Azure. - - :param start_task: A Task to run on each Compute Node as it joins the - Pool. The Task runs when the Compute Node is added to the Pool or when the - Compute Node is restarted. If this element is present, it overwrites any - existing StartTask. If omitted, any existing StartTask is removed from the - Pool. - :type start_task: ~azure.batch.models.StartTask - :param certificate_references: Required. This list replaces any existing - Certificate references configured on the Pool. If you specify an empty - collection, any existing Certificate references are removed from the Pool. - For Windows Nodes, the Batch service installs the Certificates to the - specified Certificate store and location. For Linux Compute Nodes, the - Certificates are stored in a directory inside the Task working directory - and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the - Task to query for this location. For Certificates with visibility of - 'remoteUser', a 'certs' directory is created in the user's home directory - (e.g., /home/{user-name}/certs) and Certificates are placed in that - directory. - Warning: This property is deprecated and will be removed after February, - 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - :type certificate_references: - list[~azure.batch.models.CertificateReference] - :param application_package_references: Required. The list replaces any - existing Application Package references on the Pool. Changes to - Application Package references affect all new Compute Nodes joining the - Pool, but do not affect Compute Nodes that are already in the Pool until - they are rebooted or reimaged. There is a maximum of 10 Application - Package references on any given Pool. If omitted, or if you specify an - empty collection, any existing Application Packages references are removed - from the Pool. A maximum of 10 references may be specified on a given - Pool. - :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param metadata: Required. This list replaces any existing metadata - configured on the Pool. If omitted, or if you specify an empty collection, - any existing metadata is removed from the Pool. - :type metadata: list[~azure.batch.models.MetadataItem] - :param target_node_communication_mode: The desired node communication mode - for the pool. This setting replaces any existing targetNodeCommunication - setting on the Pool. If omitted, the existing setting is default. 
Possible - values include: 'default', 'classic', 'simplified' - :type target_node_communication_mode: str or - ~azure.batch.models.NodeCommunicationMode - """ - - _validation = { - 'certificate_references': {'required': True}, - 'application_package_references': {'required': True}, - 'metadata': {'required': True}, - } - - _attribute_map = { - 'start_task': {'key': 'startTask', 'type': 'StartTask'}, - 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'target_node_communication_mode': {'key': 'targetNodeCommunicationMode', 'type': 'NodeCommunicationMode'}, - } - - def __init__(self, *, certificate_references, application_package_references, metadata, start_task=None, target_node_communication_mode=None, **kwargs) -> None: - super(PoolUpdatePropertiesParameter, self).__init__(**kwargs) - self.start_task = start_task - self.certificate_references = certificate_references - self.application_package_references = application_package_references - self.metadata = metadata - self.target_node_communication_mode = target_node_communication_mode - - -class PoolUsageMetrics(Model): - """Usage metrics for a Pool across an aggregation interval. - - All required parameters must be populated in order to send to Azure. - - :param pool_id: Required. - :type pool_id: str - :param start_time: Required. - :type start_time: datetime - :param end_time: Required. - :type end_time: datetime - :param vm_size: Required. For information about available sizes of virtual - machines in Pools, see Choose a VM size for Compute Nodes in an Azure - Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - :type vm_size: str - :param total_core_hours: Required. The total core hours used in the Pool - during this aggregation interval. - :type total_core_hours: float - """ - - _validation = { - 'pool_id': {'required': True}, - 'start_time': {'required': True}, - 'end_time': {'required': True}, - 'vm_size': {'required': True}, - 'total_core_hours': {'required': True}, - } - - _attribute_map = { - 'pool_id': {'key': 'poolId', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'vm_size': {'key': 'vmSize', 'type': 'str'}, - 'total_core_hours': {'key': 'totalCoreHours', 'type': 'float'}, - } - - def __init__(self, *, pool_id: str, start_time, end_time, vm_size: str, total_core_hours: float, **kwargs) -> None: - super(PoolUsageMetrics, self).__init__(**kwargs) - self.pool_id = pool_id - self.start_time = start_time - self.end_time = end_time - self.vm_size = vm_size - self.total_core_hours = total_core_hours - - -class PublicIPAddressConfiguration(Model): - """The public IP Address configuration of the networking configuration of a - Pool. - - :param provision: The provisioning type for Public IP Addresses for the - Pool. The default value is BatchManaged. Possible values include: - 'batchManaged', 'userManaged', 'noPublicIPAddresses' - :type provision: str or ~azure.batch.models.IPAddressProvisioningType - :param ip_address_ids: The number of IPs specified here limits the maximum - size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can - be allocated for each public IP. For example, a pool needing 250 dedicated - VMs would need at least 3 public IPs specified. 
Each element of this - collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - :type ip_address_ids: list[str] - """ - - _attribute_map = { - 'provision': {'key': 'provision', 'type': 'IPAddressProvisioningType'}, - 'ip_address_ids': {'key': 'ipAddressIds', 'type': '[str]'}, - } - - def __init__(self, *, provision=None, ip_address_ids=None, **kwargs) -> None: - super(PublicIPAddressConfiguration, self).__init__(**kwargs) - self.provision = provision - self.ip_address_ids = ip_address_ids - - -class RecentJob(Model): - """Information about the most recent Job to run under the Job Schedule. - - :param id: - :type id: str - :param url: - :type url: str - """ - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'url': {'key': 'url', 'type': 'str'}, - } - - def __init__(self, *, id: str=None, url: str=None, **kwargs) -> None: - super(RecentJob, self).__init__(**kwargs) - self.id = id - self.url = url - - -class ResizeError(Model): - """An error that occurred when resizing a Pool. - - :param code: - :type code: str - :param message: - :type message: str - :param values: - :type values: list[~azure.batch.models.NameValuePair] - """ - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'values': {'key': 'values', 'type': '[NameValuePair]'}, - } - - def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None: - super(ResizeError, self).__init__(**kwargs) - self.code = code - self.message = message - self.values = values - - -class ResourceFile(Model): - """A single file or multiple files to be downloaded to a Compute Node. - - :param auto_storage_container_name: The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. - :type auto_storage_container_name: str - :param storage_container_url: The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. This URL must be readable and listable from - compute nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting read and - list permissions on the container, use a managed identity with read and - list permissions, or set the ACL for the container to allow public access. - :type storage_container_url: str - :param http_url: The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be readable - from compute nodes. There are three ways to get such a URL for a blob in - Azure storage: include a Shared Access Signature (SAS) granting read - permissions on the blob, use a managed identity with read permission, or - set the ACL for the blob or its container to allow public access. - :type http_url: str - :param blob_prefix: The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This prefix can - be a partial filename or a subdirectory. If a prefix is not specified, all - the files in the container will be downloaded. - :type blob_prefix: str - :param file_path: If the httpUrl property is specified, the filePath is - required and describes the path which the file will be downloaded to, - including the filename. 
Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is optional and is the - directory to download the files to. In the case where filePath is used as - a directory, any directory structure already associated with the input - data will be retained in full and appended to the specified filePath - directory. The specified relative path cannot break out of the Task's - working directory (for example by using '..'). - :type file_path: str - :param file_mode: This property applies only to files being downloaded to - Linux Compute Nodes. It will be ignored if it is specified for a - resourceFile which will be downloaded to a Windows Compute Node. If this - property is not specified for a Linux Compute Node, then a default value - of 0770 is applied to the file. - :type file_mode: str - :param identity_reference: The reference to the user assigned identity to - use to access Azure Blob Storage specified by storageContainerUrl or - httpUrl. - :type identity_reference: ~azure.batch.models.ComputeNodeIdentityReference - """ - - _attribute_map = { - 'auto_storage_container_name': {'key': 'autoStorageContainerName', 'type': 'str'}, - 'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'}, - 'http_url': {'key': 'httpUrl', 'type': 'str'}, - 'blob_prefix': {'key': 'blobPrefix', 'type': 'str'}, - 'file_path': {'key': 'filePath', 'type': 'str'}, - 'file_mode': {'key': 'fileMode', 'type': 'str'}, - 'identity_reference': {'key': 'identityReference', 'type': 'ComputeNodeIdentityReference'}, - } - - def __init__(self, *, auto_storage_container_name: str=None, storage_container_url: str=None, http_url: str=None, blob_prefix: str=None, file_path: str=None, file_mode: str=None, identity_reference=None, **kwargs) -> None: - super(ResourceFile, self).__init__(**kwargs) - self.auto_storage_container_name = auto_storage_container_name - self.storage_container_url = storage_container_url - self.http_url = http_url - self.blob_prefix = blob_prefix - self.file_path = file_path - self.file_mode = file_mode - self.identity_reference = identity_reference - - -class ResourceStatistics(Model): - """Statistics related to resource consumption by Compute Nodes in a Pool. - - All required parameters must be populated in order to send to Azure. - - :param start_time: Required. - :type start_time: datetime - :param last_update_time: Required. - :type last_update_time: datetime - :param avg_cpu_percentage: Required. The average CPU usage across all - Compute Nodes in the Pool (percentage per node). - :type avg_cpu_percentage: float - :param avg_memory_gi_b: Required. The average memory usage in GiB across - all Compute Nodes in the Pool. - :type avg_memory_gi_b: float - :param peak_memory_gi_b: Required. The peak memory usage in GiB across all - Compute Nodes in the Pool. - :type peak_memory_gi_b: float - :param avg_disk_gi_b: Required. The average used disk space in GiB across - all Compute Nodes in the Pool. - :type avg_disk_gi_b: float - :param peak_disk_gi_b: Required. The peak used disk space in GiB across - all Compute Nodes in the Pool. - :type peak_disk_gi_b: float - :param disk_read_iops: Required. The total number of disk read operations - across all Compute Nodes in the Pool. - :type disk_read_iops: long - :param disk_write_iops: Required. The total number of disk write - operations across all Compute Nodes in the Pool. - :type disk_write_iops: long - :param disk_read_gi_b: Required. 
The total amount of data in GiB of disk - reads across all Compute Nodes in the Pool. - :type disk_read_gi_b: float - :param disk_write_gi_b: Required. The total amount of data in GiB of disk - writes across all Compute Nodes in the Pool. - :type disk_write_gi_b: float - :param network_read_gi_b: Required. The total amount of data in GiB of - network reads across all Compute Nodes in the Pool. - :type network_read_gi_b: float - :param network_write_gi_b: Required. The total amount of data in GiB of - network writes across all Compute Nodes in the Pool. - :type network_write_gi_b: float - """ - - _validation = { - 'start_time': {'required': True}, - 'last_update_time': {'required': True}, - 'avg_cpu_percentage': {'required': True}, - 'avg_memory_gi_b': {'required': True}, - 'peak_memory_gi_b': {'required': True}, - 'avg_disk_gi_b': {'required': True}, - 'peak_disk_gi_b': {'required': True}, - 'disk_read_iops': {'required': True}, - 'disk_write_iops': {'required': True}, - 'disk_read_gi_b': {'required': True}, - 'disk_write_gi_b': {'required': True}, - 'network_read_gi_b': {'required': True}, - 'network_write_gi_b': {'required': True}, - } - - _attribute_map = { - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - 'avg_cpu_percentage': {'key': 'avgCPUPercentage', 'type': 'float'}, - 'avg_memory_gi_b': {'key': 'avgMemoryGiB', 'type': 'float'}, - 'peak_memory_gi_b': {'key': 'peakMemoryGiB', 'type': 'float'}, - 'avg_disk_gi_b': {'key': 'avgDiskGiB', 'type': 'float'}, - 'peak_disk_gi_b': {'key': 'peakDiskGiB', 'type': 'float'}, - 'disk_read_iops': {'key': 'diskReadIOps', 'type': 'long'}, - 'disk_write_iops': {'key': 'diskWriteIOps', 'type': 'long'}, - 'disk_read_gi_b': {'key': 'diskReadGiB', 'type': 'float'}, - 'disk_write_gi_b': {'key': 'diskWriteGiB', 'type': 'float'}, - 'network_read_gi_b': {'key': 'networkReadGiB', 'type': 'float'}, - 'network_write_gi_b': {'key': 'networkWriteGiB', 'type': 'float'}, - } - - def __init__(self, *, start_time, last_update_time, avg_cpu_percentage: float, avg_memory_gi_b: float, peak_memory_gi_b: float, avg_disk_gi_b: float, peak_disk_gi_b: float, disk_read_iops: int, disk_write_iops: int, disk_read_gi_b: float, disk_write_gi_b: float, network_read_gi_b: float, network_write_gi_b: float, **kwargs) -> None: - super(ResourceStatistics, self).__init__(**kwargs) - self.start_time = start_time - self.last_update_time = last_update_time - self.avg_cpu_percentage = avg_cpu_percentage - self.avg_memory_gi_b = avg_memory_gi_b - self.peak_memory_gi_b = peak_memory_gi_b - self.avg_disk_gi_b = avg_disk_gi_b - self.peak_disk_gi_b = peak_disk_gi_b - self.disk_read_iops = disk_read_iops - self.disk_write_iops = disk_write_iops - self.disk_read_gi_b = disk_read_gi_b - self.disk_write_gi_b = disk_write_gi_b - self.network_read_gi_b = network_read_gi_b - self.network_write_gi_b = network_write_gi_b - - -class RollingUpgradePolicy(Model): - """The configuration parameters used while performing a rolling upgrade. - - :param enable_cross_zone_upgrade: Allow VMSS to ignore AZ boundaries when - constructing upgrade batches. Take into consideration the Update Domain - and maxBatchInstancePercent to determine the batch size. This field is - able to be set to true or false only when using NodePlacementConfiguration - as Zonal. 
- :type enable_cross_zone_upgrade: bool - :param max_batch_instance_percent: The maximum percent of total virtual - machine instances that will be upgraded simultaneously by the rolling - upgrade in one batch. As this is a maximum, unhealthy instances in - previous or future batches can cause the percentage of instances in a - batch to decrease to ensure higher reliability. The value of this field - should be between 5 and 100, inclusive. If both maxBatchInstancePercent - and maxUnhealthyInstancePercent are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - :type max_batch_instance_percent: int - :param max_unhealthy_instance_percent: The maximum percentage of the total - virtual machine instances in the scale set that can be simultaneously - unhealthy, either as a result of being upgraded, or by being found in an - unhealthy state by the virtual machine health checks before the rolling - upgrade aborts. This constraint will be checked prior to starting any - batch. The value of this field should be between 5 and 100, inclusive. If - both maxBatchInstancePercent and maxUnhealthyInstancePercent are assigned - with value, the value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - :type max_unhealthy_instance_percent: int - :param max_unhealthy_upgraded_instance_percent: The maximum percentage of - upgraded virtual machine instances that can be found to be in an unhealthy - state. This check will happen after each batch is upgraded. If this - percentage is ever exceeded, the rolling update aborts. The value of this - field should be between 0 and 100, inclusive. - :type max_unhealthy_upgraded_instance_percent: int - :param pause_time_between_batches: The wait time between completing the - update for all virtual machines in one batch and starting the next batch. - The time duration should be specified in ISO 8601 format. - :type pause_time_between_batches: timedelta - :param prioritize_unhealthy_instances: Upgrade all unhealthy instances in - a scale set before any healthy instances. - :type prioritize_unhealthy_instances: bool - :param rollback_failed_instances_on_policy_breach: Rollback failed - instances to previous model if the Rolling Upgrade policy is violated. 
- :type rollback_failed_instances_on_policy_breach: bool - """ - - _validation = { - 'max_batch_instance_percent': {'maximum': 100, 'minimum': 5}, - 'max_unhealthy_instance_percent': {'maximum': 100, 'minimum': 5}, - 'max_unhealthy_upgraded_instance_percent': {'maximum': 100, 'minimum': 0}, - } - - _attribute_map = { - 'enable_cross_zone_upgrade': {'key': 'enableCrossZoneUpgrade', 'type': 'bool'}, - 'max_batch_instance_percent': {'key': 'maxBatchInstancePercent', 'type': 'int'}, - 'max_unhealthy_instance_percent': {'key': 'maxUnhealthyInstancePercent', 'type': 'int'}, - 'max_unhealthy_upgraded_instance_percent': {'key': 'maxUnhealthyUpgradedInstancePercent', 'type': 'int'}, - 'pause_time_between_batches': {'key': 'pauseTimeBetweenBatches', 'type': 'duration'}, - 'prioritize_unhealthy_instances': {'key': 'prioritizeUnhealthyInstances', 'type': 'bool'}, - 'rollback_failed_instances_on_policy_breach': {'key': 'rollbackFailedInstancesOnPolicyBreach', 'type': 'bool'}, - } - - def __init__(self, *, enable_cross_zone_upgrade: bool=None, max_batch_instance_percent: int=None, max_unhealthy_instance_percent: int=None, max_unhealthy_upgraded_instance_percent: int=None, pause_time_between_batches=None, prioritize_unhealthy_instances: bool=None, rollback_failed_instances_on_policy_breach: bool=None, **kwargs) -> None: - super(RollingUpgradePolicy, self).__init__(**kwargs) - self.enable_cross_zone_upgrade = enable_cross_zone_upgrade - self.max_batch_instance_percent = max_batch_instance_percent - self.max_unhealthy_instance_percent = max_unhealthy_instance_percent - self.max_unhealthy_upgraded_instance_percent = max_unhealthy_upgraded_instance_percent - self.pause_time_between_batches = pause_time_between_batches - self.prioritize_unhealthy_instances = prioritize_unhealthy_instances - self.rollback_failed_instances_on_policy_breach = rollback_failed_instances_on_policy_breach - - -class Schedule(Model): - """The schedule according to which Jobs will be created. All times are fixed - respective to UTC and are not impacted by daylight saving time. - - :param do_not_run_until: If you do not specify a doNotRunUntil time, the - schedule becomes ready to create Jobs immediately. - :type do_not_run_until: datetime - :param do_not_run_after: If you do not specify a doNotRunAfter time, and - you are creating a recurring Job Schedule, the Job Schedule will remain - active until you explicitly terminate it. - :type do_not_run_after: datetime - :param start_window: If a Job is not created within the startWindow - interval, then the 'opportunity' is lost; no Job will be created until the - next recurrence of the schedule. If the schedule is recurring, and the - startWindow is longer than the recurrence interval, then this is - equivalent to an infinite startWindow, because the Job that is 'due' in - one recurrenceInterval is not carried forward into the next recurrence - interval. The default is infinite. The minimum value is 1 minute. If you - specify a lower value, the Batch service rejects the schedule with an - error; if you are calling the REST API directly, the HTTP status code is - 400 (Bad Request). - :type start_window: timedelta - :param recurrence_interval: Because a Job Schedule can have at most one - active Job under it at any given time, if it is time to create a new Job - under a Job Schedule, but the previous Job is still running, the Batch - service will not create the new Job until the previous Job finishes. 
If - the previous Job does not finish within the startWindow period of the new - recurrenceInterval, then no new Job will be scheduled for that interval. - For recurring Jobs, you should normally specify a jobManagerTask in the - jobSpecification. If you do not use jobManagerTask, you will need an - external process to monitor when Jobs are created, add Tasks to the Jobs - and terminate the Jobs ready for the next recurrence. The default is that - the schedule does not recur: one Job is created, within the startWindow - after the doNotRunUntil time, and the schedule is complete as soon as that - Job finishes. The minimum value is 1 minute. If you specify a lower value, - the Batch service rejects the schedule with an error; if you are calling - the REST API directly, the HTTP status code is 400 (Bad Request). - :type recurrence_interval: timedelta - """ - - _attribute_map = { - 'do_not_run_until': {'key': 'doNotRunUntil', 'type': 'iso-8601'}, - 'do_not_run_after': {'key': 'doNotRunAfter', 'type': 'iso-8601'}, - 'start_window': {'key': 'startWindow', 'type': 'duration'}, - 'recurrence_interval': {'key': 'recurrenceInterval', 'type': 'duration'}, - } - - def __init__(self, *, do_not_run_until=None, do_not_run_after=None, start_window=None, recurrence_interval=None, **kwargs) -> None: - super(Schedule, self).__init__(**kwargs) - self.do_not_run_until = do_not_run_until - self.do_not_run_after = do_not_run_after - self.start_window = start_window - self.recurrence_interval = recurrence_interval - - -class SecurityProfile(Model): - """Specifies the security profile settings for the virtual machine or virtual - machine scale set. - - :param security_type: Possible values include: 'trustedLaunch' - :type security_type: str or ~azure.batch.models.SecurityTypes - :param encryption_at_host: This property can be used by user in the - request to enable or disable the Host Encryption for the virtual machine - or virtual machine scale set. This will enable the encryption for all the - disks including Resource/Temp disk at host itself. - :type encryption_at_host: bool - :param uefi_settings: Specifies the security settings like secure boot and - vTPM used while creating the virtual machine. Specifies the security - settings like secure boot and vTPM used while creating the virtual - machine. - :type uefi_settings: ~azure.batch.models.UefiSettings - """ - - _attribute_map = { - 'security_type': {'key': 'securityType', 'type': 'SecurityTypes'}, - 'encryption_at_host': {'key': 'encryptionAtHost', 'type': 'bool'}, - 'uefi_settings': {'key': 'uefiSettings', 'type': 'UefiSettings'}, - } - - def __init__(self, *, security_type=None, encryption_at_host: bool=None, uefi_settings=None, **kwargs) -> None: - super(SecurityProfile, self).__init__(**kwargs) - self.security_type = security_type - self.encryption_at_host = encryption_at_host - self.uefi_settings = uefi_settings - - -class ServiceArtifactReference(Model): - """Specifies the service artifact reference id used to set same image version - for all virtual machines in the scale set when using 'latest' image - version. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. 
The service artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName} - :type id: str - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - } - - def __init__(self, *, id: str, **kwargs) -> None: - super(ServiceArtifactReference, self).__init__(**kwargs) - self.id = id - - -class StartTask(Model): - """A Task which is run when a Node joins a Pool in the Azure Batch service, or - when the Compute Node is rebooted or reimaged. - - Batch will retry Tasks when a recovery operation is triggered on a Node. - Examples of recovery operations include (but are not limited to) when an - unhealthy Node is rebooted or a Compute Node disappeared due to host - failure. Retries due to recovery operations are independent of and are not - counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, - an internal retry due to a recovery operation may occur. Because of this, - all Tasks should be idempotent. This means Tasks need to tolerate being - interrupted and restarted without causing any corruption or duplicate data. - The best practice for long running Tasks is to use some form of - checkpointing. In some cases the StartTask may be re-run even though the - Compute Node was not rebooted. Special care should be taken to avoid - StartTasks which create breakaway process or install/launch services from - the StartTask working directory, as this will block Batch from being able - to re-run the StartTask. - - All required parameters must be populated in order to send to Azure. - - :param command_line: Required. The command line does not run under a - shell, and therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of such - features, you should invoke the shell in the command line, for example - using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If - the command line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - :type command_line: str - :param container_settings: The settings for the container under which the - StartTask runs. When this is specified, all directories recursively below - the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the - node) are mapped into the container, all Task environment variables are - mapped into the container, and the Task command line is executed in the - container. Files produced in the container outside of - AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning - that Batch file APIs will not be able to access those files. - :type container_settings: ~azure.batch.models.TaskContainerSettings - :param resource_files: Files listed under this element are located in the - Task's working directory. - :type resource_files: list[~azure.batch.models.ResourceFile] - :param environment_settings: - :type environment_settings: list[~azure.batch.models.EnvironmentSetting] - :param user_identity: The user identity under which the StartTask runs. If - omitted, the Task runs as a non-administrative user unique to the Task. 
- :type user_identity: ~azure.batch.models.UserIdentity - :param max_task_retry_count: The maximum number of times the Task may be - retried. The Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of retries. The - Batch service will try the Task once, and may then retry up to this limit. - For example, if the maximum retry count is 3, Batch tries the Task up to 4 - times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). - :type max_task_retry_count: int - :param wait_for_success: Whether the Batch service should wait for the - StartTask to complete successfully (that is, to exit with exit code 0) - before scheduling any Tasks on the Compute Node. If true and the StartTask - fails on a Node, the Batch service retries the StartTask up to its maximum - retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be - detected via the Compute Node state and failure info details. If false, - the Batch service will not wait for the StartTask to complete. In this - case, other Tasks can start executing on the Compute Node while the - StartTask is still running; and even if the StartTask fails, new Tasks - will continue to be scheduled on the Compute Node. The default is true. - :type wait_for_success: bool - """ - - _validation = { - 'command_line': {'required': True}, - } - - _attribute_map = { - 'command_line': {'key': 'commandLine', 'type': 'str'}, - 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, - 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, - 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, - 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, - 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, - 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, - } - - def __init__(self, *, command_line: str, container_settings=None, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count: int=None, wait_for_success: bool=None, **kwargs) -> None: - super(StartTask, self).__init__(**kwargs) - self.command_line = command_line - self.container_settings = container_settings - self.resource_files = resource_files - self.environment_settings = environment_settings - self.user_identity = user_identity - self.max_task_retry_count = max_task_retry_count - self.wait_for_success = wait_for_success - - -class StartTaskInformation(Model): - """Information about a StartTask running on a Compute Node. - - All required parameters must be populated in order to send to Azure. - - :param state: Required. Possible values include: 'running', 'completed' - :type state: str or ~azure.batch.models.StartTaskState - :param start_time: Required. This value is reset every time the Task is - restarted or retried (that is, this is the most recent time at which the - StartTask started running). - :type start_time: datetime - :param end_time: This is the end time of the most recent run of the - StartTask, if that run has completed (even if that run failed and a retry - is pending). 
This element is not present if the StartTask is currently - running. - :type end_time: datetime - :param exit_code: The exit code of the program specified on the StartTask - command line. This property is set only if the StartTask is in the - completed state. In general, the exit code for a process reflects the - specific convention implemented by the application developer for that - process. If you use the exit code value to make decisions in your code, be - sure that you know the exit code convention used by the application - process. However, if the Batch service terminates the StartTask (due to - timeout, or user termination via the API) you may see an operating - system-defined exit code. - :type exit_code: int - :param container_info: Information about the container under which the - Task is executing. This property is set only if the Task runs in a - container context. - :type container_info: - ~azure.batch.models.TaskContainerExecutionInformation - :param failure_info: Information describing the Task failure, if any. This - property is set only if the Task is in the completed state and encountered - a failure. - :type failure_info: ~azure.batch.models.TaskFailureInformation - :param retry_count: Required. The number of times the Task has been - retried by the Batch service. Task application failures (non-zero exit - code) are retried, pre-processing errors (the Task could not be run) and - file upload errors are not retried. The Batch service will retry the Task - up to the limit specified by the constraints. - :type retry_count: int - :param last_retry_time: This element is present only if the Task was - retried (i.e. retryCount is nonzero). If present, this is typically the - same as startTime, but may be different if the Task has been restarted for - reasons other than retry; for example, if the Compute Node was rebooted - during a retry, then the startTime is updated but the lastRetryTime is - not. - :type last_retry_time: datetime - :param result: The result of the Task execution. If the value is 'failed', - then the details of the failure can be found in the failureInfo property. 
- Possible values include: 'success', 'failure' - :type result: str or ~azure.batch.models.TaskExecutionResult - """ - - _validation = { - 'state': {'required': True}, - 'start_time': {'required': True}, - 'retry_count': {'required': True}, - } - - _attribute_map = { - 'state': {'key': 'state', 'type': 'StartTaskState'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'exit_code': {'key': 'exitCode', 'type': 'int'}, - 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, - 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, - 'retry_count': {'key': 'retryCount', 'type': 'int'}, - 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, - 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, - } - - def __init__(self, *, state, start_time, retry_count: int, end_time=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, result=None, **kwargs) -> None: - super(StartTaskInformation, self).__init__(**kwargs) - self.state = state - self.start_time = start_time - self.end_time = end_time - self.exit_code = exit_code - self.container_info = container_info - self.failure_info = failure_info - self.retry_count = retry_count - self.last_retry_time = last_retry_time - self.result = result - - -class SubtaskInformation(Model): - """Information about an Azure Batch subtask. - - :param id: The ID of the subtask. - :type id: int - :param node_info: Information about the Compute Node on which the subtask - ran. - :type node_info: ~azure.batch.models.ComputeNodeInformation - :param start_time: - :type start_time: datetime - :param end_time: This property is set only if the subtask is in the - Completed state. - :type end_time: datetime - :param exit_code: The exit code of the program specified on the subtask - command line. This property is set only if the subtask is in the completed - state. In general, the exit code for a process reflects the specific - convention implemented by the application developer for that process. If - you use the exit code value to make decisions in your code, be sure that - you know the exit code convention used by the application process. - However, if the Batch service terminates the subtask (due to timeout, or - user termination via the API) you may see an operating system-defined exit - code. - :type exit_code: int - :param container_info: Information about the container under which the - Task is executing. This property is set only if the Task runs in a - container context. - :type container_info: - ~azure.batch.models.TaskContainerExecutionInformation - :param failure_info: Information describing the Task failure, if any. This - property is set only if the Task is in the completed state and encountered - a failure. - :type failure_info: ~azure.batch.models.TaskFailureInformation - :param state: The current state of the subtask. Possible values include: - 'preparing', 'running', 'completed' - :type state: str or ~azure.batch.models.SubtaskState - :param state_transition_time: - :type state_transition_time: datetime - :param previous_state: The previous state of the subtask. This property is - not set if the subtask is in its initial running state. Possible values - include: 'preparing', 'running', 'completed' - :type previous_state: str or ~azure.batch.models.SubtaskState - :param previous_state_transition_time: This property is not set if the - subtask is in its initial running state. 
- :type previous_state_transition_time: datetime - :param result: The result of the Task execution. If the value is 'failed', - then the details of the failure can be found in the failureInfo property. - Possible values include: 'success', 'failure' - :type result: str or ~azure.batch.models.TaskExecutionResult - """ - - _attribute_map = { - 'id': {'key': 'id', 'type': 'int'}, - 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'exit_code': {'key': 'exitCode', 'type': 'int'}, - 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, - 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, - 'state': {'key': 'state', 'type': 'SubtaskState'}, - 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, - 'previous_state': {'key': 'previousState', 'type': 'SubtaskState'}, - 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, - 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, - } - - def __init__(self, *, id: int=None, node_info=None, start_time=None, end_time=None, exit_code: int=None, container_info=None, failure_info=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, result=None, **kwargs) -> None: - super(SubtaskInformation, self).__init__(**kwargs) - self.id = id - self.node_info = node_info - self.start_time = start_time - self.end_time = end_time - self.exit_code = exit_code - self.container_info = container_info - self.failure_info = failure_info - self.state = state - self.state_transition_time = state_transition_time - self.previous_state = previous_state - self.previous_state_transition_time = previous_state_transition_time - self.result = result - - -class TaskAddCollectionOptions(Model): - """Additional parameters for add_collection operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 2 minutes. If the value is larger than - 120, the default will be used instead. Default value: 120 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=120, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(TaskAddCollectionOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class TaskAddCollectionParameter(Model): - """A collection of Azure Batch Tasks to add. 
- - All required parameters must be populated in order to send to Azure. - - :param value: Required. The total serialized size of this collection must - be less than 1MB. If it is greater than 1MB (for example if each Task has - 100's of resource files or environment variables), the request will fail - with code 'RequestBodyTooLarge' and should be retried again with fewer - Tasks. - :type value: list[~azure.batch.models.TaskAddParameter] - """ - - _validation = { - 'value': {'required': True}, - } - - _attribute_map = { - 'value': {'key': 'value', 'type': '[TaskAddParameter]'}, - } - - def __init__(self, *, value, **kwargs) -> None: - super(TaskAddCollectionParameter, self).__init__(**kwargs) - self.value = value - - -class TaskAddCollectionResult(Model): - """The result of adding a collection of Tasks to a Job. - - :param value: - :type value: list[~azure.batch.models.TaskAddResult] - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[TaskAddResult]'}, - } - - def __init__(self, *, value=None, **kwargs) -> None: - super(TaskAddCollectionResult, self).__init__(**kwargs) - self.value = value - - -class TaskAddOptions(Model): - """Additional parameters for add operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(TaskAddOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class TaskAddParameter(Model): - """An Azure Batch Task to add. - - Batch will retry Tasks when a recovery operation is triggered on a Node. - Examples of recovery operations include (but are not limited to) when an - unhealthy Node is rebooted or a Compute Node disappeared due to host - failure. Retries due to recovery operations are independent of and are not - counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, - an internal retry due to a recovery operation may occur. Because of this, - all Tasks should be idempotent. This means Tasks need to tolerate being - interrupted and restarted without causing any corruption or duplicate data. - The best practice for long running Tasks is to use some form of - checkpointing. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. 
The ID can contain any combination of alphanumeric - characters including hyphens and underscores, and cannot contain more than - 64 characters. The ID is case-preserving and case-insensitive (that is, - you may not have two IDs within a Job that differ only by case). - :type id: str - :param display_name: The display name need not be unique and can contain - any Unicode characters up to a maximum length of 1024. - :type display_name: str - :param command_line: Required. For multi-instance Tasks, the command line - is executed as the primary Task, after the primary Task and all subtasks - have finished executing the coordination command line. The command line - does not run under a shell, and therefore cannot take advantage of shell - features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command - line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should - use a relative path (relative to the Task working directory), or use the - Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - :type command_line: str - :param container_settings: The settings for the container under which the - Task runs. If the Pool that will run this Task has containerConfiguration - set, this must be set as well. If the Pool that will run this Task doesn't - have containerConfiguration set, this must not be set. When this is - specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR - (the root of Azure Batch directories on the node) are mapped into the - container, all Task environment variables are mapped into the container, - and the Task command line is executed in the container. Files produced in - the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to - the host disk, meaning that Batch file APIs will not be able to access - those files. - :type container_settings: ~azure.batch.models.TaskContainerSettings - :param exit_conditions: How the Batch service should respond when the Task - completes. - :type exit_conditions: ~azure.batch.models.ExitConditions - :param resource_files: For multi-instance Tasks, the resource files will - only be downloaded to the Compute Node on which the primary Task is - executed. There is a maximum size for the list of resource files. When - the max size is exceeded, the request will fail and the response error - code will be RequestEntityTooLarge. If this occurs, the collection of - ResourceFiles must be reduced in size. This can be achieved using .zip - files, Application Packages, or Docker Containers. - :type resource_files: list[~azure.batch.models.ResourceFile] - :param output_files: For multi-instance Tasks, the files will only be - uploaded from the Compute Node on which the primary Task is executed. - :type output_files: list[~azure.batch.models.OutputFile] - :param environment_settings: - :type environment_settings: list[~azure.batch.models.EnvironmentSetting] - :param affinity_info: A locality hint that can be used by the Batch - service to select a Compute Node on which to start the new Task. - :type affinity_info: ~azure.batch.models.AffinityInformation - :param constraints: The execution constraints that apply to this Task. If - you do not specify constraints, the maxTaskRetryCount is the - maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, - and the retentionTime is 7 days. 
- :type constraints: ~azure.batch.models.TaskConstraints - :param required_slots: The number of scheduling slots that the Task - required to run. The default is 1. A Task can only be scheduled to run on - a compute node if the node has enough free scheduling slots available. For - multi-instance Tasks, this must be 1. - :type required_slots: int - :param user_identity: The user identity under which the Task runs. If - omitted, the Task runs as a non-administrative user unique to the Task. - :type user_identity: ~azure.batch.models.UserIdentity - :param multi_instance_settings: An object that indicates that the Task is - a multi-instance Task, and contains information about how to run the - multi-instance Task. - :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings - :param depends_on: The Tasks that this Task depends on. This Task will not - be scheduled until all Tasks that it depends on have completed - successfully. If any of those Tasks fail and exhaust their retry counts, - this Task will never be scheduled. If the Job does not have - usesTaskDependencies set to true, and this element is present, the request - fails with error code TaskDependenciesNotSpecifiedOnJob. - :type depends_on: ~azure.batch.models.TaskDependencies - :param application_package_references: Application packages are downloaded - and deployed to a shared directory, not the Task working directory. - Therefore, if a referenced package is already on the Node, and is up to - date, then it is not re-downloaded; the existing copy on the Compute Node - is used. If a referenced Package cannot be installed, for example because - the package has been deleted or because download failed, the Task fails. - :type application_package_references: - list[~azure.batch.models.ApplicationPackageReference] - :param authentication_token_settings: The settings for an authentication - token that the Task can use to perform Batch service operations. If this - property is set, the Batch service provides the Task with an - authentication token which can be used to authenticate Batch service - operations without requiring an Account access key. The token is provided - via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations - that the Task can carry out using the token depend on the settings. For - example, a Task can request Job permissions in order to add other Tasks to - the Job, or check the status of the Job or of other Tasks under the Job. 
- :type authentication_token_settings: - ~azure.batch.models.AuthenticationTokenSettings - """ - - _validation = { - 'id': {'required': True}, - 'command_line': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, - 'command_line': {'key': 'commandLine', 'type': 'str'}, - 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, - 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, - 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, - 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, - 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, - 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, - 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, - 'required_slots': {'key': 'requiredSlots', 'type': 'int'}, - 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, - 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, - 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, - 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, - 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, - } - - def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None, exit_conditions=None, resource_files=None, output_files=None, environment_settings=None, affinity_info=None, constraints=None, required_slots: int=None, user_identity=None, multi_instance_settings=None, depends_on=None, application_package_references=None, authentication_token_settings=None, **kwargs) -> None: - super(TaskAddParameter, self).__init__(**kwargs) - self.id = id - self.display_name = display_name - self.command_line = command_line - self.container_settings = container_settings - self.exit_conditions = exit_conditions - self.resource_files = resource_files - self.output_files = output_files - self.environment_settings = environment_settings - self.affinity_info = affinity_info - self.constraints = constraints - self.required_slots = required_slots - self.user_identity = user_identity - self.multi_instance_settings = multi_instance_settings - self.depends_on = depends_on - self.application_package_references = application_package_references - self.authentication_token_settings = authentication_token_settings - - -class TaskAddResult(Model): - """Result for a single Task added as part of an add Task collection operation. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. Possible values include: 'success', - 'clientError', 'serverError' - :type status: str or ~azure.batch.models.TaskAddStatus - :param task_id: Required. - :type task_id: str - :param e_tag: You can use this to detect whether the Task has changed - between requests. In particular, you can be pass the ETag with an Update - Task request to specify that your changes should take effect only if - nobody else has modified the Job in the meantime. - :type e_tag: str - :param last_modified: - :type last_modified: datetime - :param location: - :type location: str - :param error: The error encountered while attempting to add the Task. 
- :type error: ~azure.batch.models.BatchError - """ - - _validation = { - 'status': {'required': True}, - 'task_id': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'status', 'type': 'TaskAddStatus'}, - 'task_id': {'key': 'taskId', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, - 'location': {'key': 'location', 'type': 'str'}, - 'error': {'key': 'error', 'type': 'BatchError'}, - } - - def __init__(self, *, status, task_id: str, e_tag: str=None, last_modified=None, location: str=None, error=None, **kwargs) -> None: - super(TaskAddResult, self).__init__(**kwargs) - self.status = status - self.task_id = task_id - self.e_tag = e_tag - self.last_modified = last_modified - self.location = location - self.error = error - - -class TaskConstraints(Model): - """Execution constraints to apply to a Task. - - :param max_wall_clock_time: If this is not specified, there is no time - limit on how long the Task may run. - :type max_wall_clock_time: timedelta - :param retention_time: The default is 7 days, i.e. the Task directory will - be retained for 7 days unless the Compute Node is removed or the Job is - deleted. - :type retention_time: timedelta - :param max_task_retry_count: The maximum number of times the Task may be - retried. The Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of retries for the - Task executable due to a nonzero exit code. The Batch service will try the - Task once, and may then retry up to this limit. For example, if the - maximum retry count is 3, Batch tries the Task up to 4 times (one initial - try and 3 retries). If the maximum retry count is 0, the Batch service - does not retry the Task after the first attempt. If the maximum retry - count is -1, the Batch service retries the Task without limit, however - this is not recommended for a start task or any task. The default value is - 0 (no retries). - :type max_task_retry_count: int - """ - - _attribute_map = { - 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, - 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, - 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, - } - - def __init__(self, *, max_wall_clock_time=None, retention_time=None, max_task_retry_count: int=None, **kwargs) -> None: - super(TaskConstraints, self).__init__(**kwargs) - self.max_wall_clock_time = max_wall_clock_time - self.retention_time = retention_time - self.max_task_retry_count = max_task_retry_count - - -class TaskContainerExecutionInformation(Model): - """Contains information about the container which a Task is executing. - - :param container_id: - :type container_id: str - :param state: This is the state of the container according to the Docker - service. It is equivalent to the status field returned by "docker - inspect". - :type state: str - :param error: This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". 
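# A minimal sketch of the TaskConstraints semantics described above: with
# max_task_retry_count=3 the service makes one initial attempt plus up to three
# retries (four attempts total); -1 retries without limit and 0 disables retries.
from datetime import timedelta
from azure.batch import models

constraints = models.TaskConstraints(
    max_wall_clock_time=timedelta(hours=2),  # omit for no runtime limit
    retention_time=timedelta(days=7),        # documented default is 7 days
    max_task_retry_count=3,
)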
- :type error: str - """ - - _attribute_map = { - 'container_id': {'key': 'containerId', 'type': 'str'}, - 'state': {'key': 'state', 'type': 'str'}, - 'error': {'key': 'error', 'type': 'str'}, - } - - def __init__(self, *, container_id: str=None, state: str=None, error: str=None, **kwargs) -> None: - super(TaskContainerExecutionInformation, self).__init__(**kwargs) - self.container_id = container_id - self.state = state - self.error = error - - -class TaskContainerSettings(Model): - """The container settings for a Task. - - All required parameters must be populated in order to send to Azure. - - :param container_run_options: These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - :type container_run_options: str - :param image_name: Required. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part of the Image - name, the tag ":latest" is used as a default. - :type image_name: str - :param registry: The private registry which contains the container Image. - This setting can be omitted if was already provided at Pool creation. - :type registry: ~azure.batch.models.ContainerRegistry - :param working_directory: The default is 'taskWorkingDirectory'. Possible - values include: 'taskWorkingDirectory', 'containerImageDefault' - :type working_directory: str or - ~azure.batch.models.ContainerWorkingDirectory - """ - - _validation = { - 'image_name': {'required': True}, - } - - _attribute_map = { - 'container_run_options': {'key': 'containerRunOptions', 'type': 'str'}, - 'image_name': {'key': 'imageName', 'type': 'str'}, - 'registry': {'key': 'registry', 'type': 'ContainerRegistry'}, - 'working_directory': {'key': 'workingDirectory', 'type': 'ContainerWorkingDirectory'}, - } - - def __init__(self, *, image_name: str, container_run_options: str=None, registry=None, working_directory=None, **kwargs) -> None: - super(TaskContainerSettings, self).__init__(**kwargs) - self.container_run_options = container_run_options - self.image_name = image_name - self.registry = registry - self.working_directory = working_directory - - -class TaskCounts(Model): - """The Task counts for a Job. - - All required parameters must be populated in order to send to Azure. - - :param active: Required. The number of Tasks in the active state. - :type active: int - :param running: Required. The number of Tasks in the running or preparing - state. - :type running: int - :param completed: Required. The number of Tasks in the completed state. - :type completed: int - :param succeeded: Required. The number of Tasks which succeeded. A Task - succeeds if its result (found in the executionInfo property) is 'success'. - :type succeeded: int - :param failed: Required. The number of Tasks which failed. A Task fails if - its result (found in the executionInfo property) is 'failure'. 
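# Sketch of the TaskContainerSettings model documented above: image_name is the
# full "docker pull" reference (":latest" is assumed when no tag is given), and
# registry may be omitted when it was already supplied at Pool creation.
from azure.batch import models

container = models.TaskContainerSettings(
    image_name="myregistry.azurecr.io/worker:1.4",
    container_run_options="--rm --workdir /app",  # passed to "docker create"
    working_directory="taskWorkingDirectory",     # documented default
)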
- :type failed: int - """ - - _validation = { - 'active': {'required': True}, - 'running': {'required': True}, - 'completed': {'required': True}, - 'succeeded': {'required': True}, - 'failed': {'required': True}, - } - - _attribute_map = { - 'active': {'key': 'active', 'type': 'int'}, - 'running': {'key': 'running', 'type': 'int'}, - 'completed': {'key': 'completed', 'type': 'int'}, - 'succeeded': {'key': 'succeeded', 'type': 'int'}, - 'failed': {'key': 'failed', 'type': 'int'}, - } - - def __init__(self, *, active: int, running: int, completed: int, succeeded: int, failed: int, **kwargs) -> None: - super(TaskCounts, self).__init__(**kwargs) - self.active = active - self.running = running - self.completed = completed - self.succeeded = succeeded - self.failed = failed - - -class TaskCountsResult(Model): - """The Task and TaskSlot counts for a Job. - - All required parameters must be populated in order to send to Azure. - - :param task_counts: Required. The number of Tasks per state. - :type task_counts: ~azure.batch.models.TaskCounts - :param task_slot_counts: Required. The number of TaskSlots required by - Tasks per state. - :type task_slot_counts: ~azure.batch.models.TaskSlotCounts - """ - - _validation = { - 'task_counts': {'required': True}, - 'task_slot_counts': {'required': True}, - } - - _attribute_map = { - 'task_counts': {'key': 'taskCounts', 'type': 'TaskCounts'}, - 'task_slot_counts': {'key': 'taskSlotCounts', 'type': 'TaskSlotCounts'}, - } - - def __init__(self, *, task_counts, task_slot_counts, **kwargs) -> None: - super(TaskCountsResult, self).__init__(**kwargs) - self.task_counts = task_counts - self.task_slot_counts = task_slot_counts - - -class TaskDeleteOptions(Model): - """Additional parameters for delete operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. 
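# Sketch of the conditional-request headers documented above for TaskDeleteOptions:
# if_match ties the delete to a previously observed ETag, so it only succeeds if
# the Task has not changed since it was last read. The ETag value is a stand-in.
from azure.batch import models

delete_options = models.TaskDeleteOptions(if_match="<etag-from-a-previous-get>")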
The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(TaskDeleteOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class TaskDependencies(Model): - """Specifies any dependencies of a Task. Any Task that is explicitly specified - or within a dependency range must complete before the dependant Task will - be scheduled. - - :param task_ids: The taskIds collection is limited to 64000 characters - total (i.e. the combined length of all Task IDs). If the taskIds - collection exceeds the maximum length, the Add Task request fails with - error code TaskDependencyListTooLong. In this case consider using Task ID - ranges instead. - :type task_ids: list[str] - :param task_id_ranges: - :type task_id_ranges: list[~azure.batch.models.TaskIdRange] - """ - - _attribute_map = { - 'task_ids': {'key': 'taskIds', 'type': '[str]'}, - 'task_id_ranges': {'key': 'taskIdRanges', 'type': '[TaskIdRange]'}, - } - - def __init__(self, *, task_ids=None, task_id_ranges=None, **kwargs) -> None: - super(TaskDependencies, self).__init__(**kwargs) - self.task_ids = task_ids - self.task_id_ranges = task_id_ranges - - -class TaskExecutionInformation(Model): - """Information about the execution of a Task. - - All required parameters must be populated in order to send to Azure. - - :param start_time: 'Running' corresponds to the running state, so if the - Task specifies resource files or Packages, then the start time reflects - the time at which the Task started downloading or deploying these. If the - Task has been restarted or retried, this is the most recent time at which - the Task started running. This property is present only for Tasks that are - in the running or completed state. - :type start_time: datetime - :param end_time: This property is set only if the Task is in the Completed - state. - :type end_time: datetime - :param exit_code: The exit code of the program specified on the Task - command line. This property is set only if the Task is in the completed - state. In general, the exit code for a process reflects the specific - convention implemented by the application developer for that process. If - you use the exit code value to make decisions in your code, be sure that - you know the exit code convention used by the application process. - However, if the Batch service terminates the Task (due to timeout, or user - termination via the API) you may see an operating system-defined exit - code. 
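# Sketch of the TaskDependencies model documented above: the combined length of
# explicit task IDs is capped at 64000 characters, so large fan-ins are better
# expressed as TaskIdRange values (inclusive, e.g. 9..12 covers '9' through '12').
from azure.batch import models

depends_on = models.TaskDependencies(
    task_ids=["merge-metadata"],
    task_id_ranges=[models.TaskIdRange(start=9, end=12)],
)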
- :type exit_code: int - :param container_info: Information about the container under which the - Task is executing. This property is set only if the Task runs in a - container context. - :type container_info: - ~azure.batch.models.TaskContainerExecutionInformation - :param failure_info: Information describing the Task failure, if any. This - property is set only if the Task is in the completed state and encountered - a failure. - :type failure_info: ~azure.batch.models.TaskFailureInformation - :param retry_count: Required. The number of times the Task has been - retried by the Batch service. Task application failures (non-zero exit - code) are retried, pre-processing errors (the Task could not be run) and - file upload errors are not retried. The Batch service will retry the Task - up to the limit specified by the constraints. - :type retry_count: int - :param last_retry_time: This element is present only if the Task was - retried (i.e. retryCount is nonzero). If present, this is typically the - same as startTime, but may be different if the Task has been restarted for - reasons other than retry; for example, if the Compute Node was rebooted - during a retry, then the startTime is updated but the lastRetryTime is - not. - :type last_retry_time: datetime - :param requeue_count: Required. The number of times the Task has been - requeued by the Batch service as the result of a user request. When the - user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or - when the Job is being disabled, the user can specify that running Tasks on - the Compute Nodes be requeued for execution. This count tracks how many - times the Task has been requeued for these reasons. - :type requeue_count: int - :param last_requeue_time: This property is set only if the requeueCount is - nonzero. - :type last_requeue_time: datetime - :param result: The result of the Task execution. If the value is 'failed', - then the details of the failure can be found in the failureInfo property. 
- Possible values include: 'success', 'failure' - :type result: str or ~azure.batch.models.TaskExecutionResult - """ - - _validation = { - 'retry_count': {'required': True}, - 'requeue_count': {'required': True}, - } - - _attribute_map = { - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'exit_code': {'key': 'exitCode', 'type': 'int'}, - 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, - 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, - 'retry_count': {'key': 'retryCount', 'type': 'int'}, - 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, - 'requeue_count': {'key': 'requeueCount', 'type': 'int'}, - 'last_requeue_time': {'key': 'lastRequeueTime', 'type': 'iso-8601'}, - 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, - } - - def __init__(self, *, retry_count: int, requeue_count: int, start_time=None, end_time=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, last_requeue_time=None, result=None, **kwargs) -> None: - super(TaskExecutionInformation, self).__init__(**kwargs) - self.start_time = start_time - self.end_time = end_time - self.exit_code = exit_code - self.container_info = container_info - self.failure_info = failure_info - self.retry_count = retry_count - self.last_retry_time = last_retry_time - self.requeue_count = requeue_count - self.last_requeue_time = last_requeue_time - self.result = result - - -class TaskFailureInformation(Model): - """Information about a Task failure. - - All required parameters must be populated in order to send to Azure. - - :param category: Required. The category of the Task error. Possible values - include: 'userError', 'serverError' - :type category: str or ~azure.batch.models.ErrorCategory - :param code: - :type code: str - :param message: - :type message: str - :param details: - :type details: list[~azure.batch.models.NameValuePair] - """ - - _validation = { - 'category': {'required': True}, - } - - _attribute_map = { - 'category': {'key': 'category', 'type': 'ErrorCategory'}, - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'details': {'key': 'details', 'type': '[NameValuePair]'}, - } - - def __init__(self, *, category, code: str=None, message: str=None, details=None, **kwargs) -> None: - super(TaskFailureInformation, self).__init__(**kwargs) - self.category = category - self.code = code - self.message = message - self.details = details - - -class TaskGetOptions(Model): - """Additional parameters for get operation. - - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. 
Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(TaskGetOptions, self).__init__(**kwargs) - self.select = select - self.expand = expand - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class TaskIdRange(Model): - """A range of Task IDs that a Task can depend on. All Tasks with IDs in the - range must complete successfully before the dependent Task can be - scheduled. - - The start and end of the range are inclusive. For example, if a range has - start 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The first Task ID in the range. - :type start: int - :param end: Required. The last Task ID in the range. - :type end: int - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'start', 'type': 'int'}, - 'end': {'key': 'end', 'type': 'int'}, - } - - def __init__(self, *, start: int, end: int, **kwargs) -> None: - super(TaskIdRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class TaskInformation(Model): - """Information about a Task running on a Compute Node. - - All required parameters must be populated in order to send to Azure. 
- - :param task_url: - :type task_url: str - :param job_id: - :type job_id: str - :param task_id: - :type task_id: str - :param subtask_id: The ID of the subtask if the Task is a multi-instance - Task. - :type subtask_id: int - :param task_state: Required. The current state of the Task. Possible - values include: 'active', 'preparing', 'running', 'completed' - :type task_state: str or ~azure.batch.models.TaskState - :param execution_info: Information about the execution of the Task. - :type execution_info: ~azure.batch.models.TaskExecutionInformation - """ - - _validation = { - 'task_state': {'required': True}, - } - - _attribute_map = { - 'task_url': {'key': 'taskUrl', 'type': 'str'}, - 'job_id': {'key': 'jobId', 'type': 'str'}, - 'task_id': {'key': 'taskId', 'type': 'str'}, - 'subtask_id': {'key': 'subtaskId', 'type': 'int'}, - 'task_state': {'key': 'taskState', 'type': 'TaskState'}, - 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, - } - - def __init__(self, *, task_state, task_url: str=None, job_id: str=None, task_id: str=None, subtask_id: int=None, execution_info=None, **kwargs) -> None: - super(TaskInformation, self).__init__(**kwargs) - self.task_url = task_url - self.job_id = job_id - self.task_id = task_id - self.subtask_id = subtask_id - self.task_state = task_state - self.execution_info = execution_info - - -class TaskListOptions(Model): - """Additional parameters for list operation. - - :param filter: An OData $filter clause. For more information on - constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. - :type filter: str - :param select: An OData $select clause. - :type select: str - :param expand: An OData $expand clause. - :type expand: str - :param max_results: The maximum number of items to return in the response. - A maximum of 1000 Tasks can be returned. Default value: 1000 . - :type max_results: int - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
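# Sketch of the TaskListOptions model documented above, combining an OData
# $filter with $select; the filter expression is illustrative and follows the
# Batch OData documentation linked in the docstring.
from azure.batch import models

list_options = models.TaskListOptions(
    filter="state eq 'completed'",
    select="id,state,executionInfo",
    max_results=100,  # the service returns at most 1000 Tasks per page
)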
- :type ocp_date: datetime - """ - - _attribute_map = { - 'filter': {'key': '', 'type': 'str'}, - 'select': {'key': '', 'type': 'str'}, - 'expand': {'key': '', 'type': 'str'}, - 'max_results': {'key': '', 'type': 'int'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(TaskListOptions, self).__init__(**kwargs) - self.filter = filter - self.select = select - self.expand = expand - self.max_results = max_results - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class TaskListSubtasksOptions(Model): - """Additional parameters for list_subtasks operation. - - :param select: An OData $select clause. - :type select: str - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - """ - - _attribute_map = { - 'select': {'key': '', 'type': 'str'}, - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: - super(TaskListSubtasksOptions, self).__init__(**kwargs) - self.select = select - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - - -class TaskReactivateOptions(Model): - """Additional parameters for reactivate operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(TaskReactivateOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class TaskSchedulingPolicy(Model): - """Specifies how Tasks should be distributed across Compute Nodes. - - All required parameters must be populated in order to send to Azure. - - :param node_fill_type: Required. If not specified, the default is spread. - Possible values include: 'spread', 'pack' - :type node_fill_type: str or ~azure.batch.models.ComputeNodeFillType - """ - - _validation = { - 'node_fill_type': {'required': True}, - } - - _attribute_map = { - 'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'}, - } - - def __init__(self, *, node_fill_type, **kwargs) -> None: - super(TaskSchedulingPolicy, self).__init__(**kwargs) - self.node_fill_type = node_fill_type - - -class TaskSlotCounts(Model): - """The TaskSlot counts for a Job. - - All required parameters must be populated in order to send to Azure. - - :param active: Required. The number of TaskSlots for active Tasks. - :type active: int - :param running: Required. The number of TaskSlots for running Tasks. - :type running: int - :param completed: Required. The number of TaskSlots for completed Tasks. - :type completed: int - :param succeeded: Required. The number of TaskSlots for succeeded Tasks. - :type succeeded: int - :param failed: Required. The number of TaskSlots for failed Tasks. 
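# Sketch of the TaskSchedulingPolicy model documented above: 'pack' fills each
# Compute Node before moving on to the next, while the default 'spread'
# distributes Tasks evenly across Nodes.
from azure.batch import models

scheduling_policy = models.TaskSchedulingPolicy(node_fill_type="pack")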
- :type failed: int - """ - - _validation = { - 'active': {'required': True}, - 'running': {'required': True}, - 'completed': {'required': True}, - 'succeeded': {'required': True}, - 'failed': {'required': True}, - } - - _attribute_map = { - 'active': {'key': 'active', 'type': 'int'}, - 'running': {'key': 'running', 'type': 'int'}, - 'completed': {'key': 'completed', 'type': 'int'}, - 'succeeded': {'key': 'succeeded', 'type': 'int'}, - 'failed': {'key': 'failed', 'type': 'int'}, - } - - def __init__(self, *, active: int, running: int, completed: int, succeeded: int, failed: int, **kwargs) -> None: - super(TaskSlotCounts, self).__init__(**kwargs) - self.active = active - self.running = running - self.completed = completed - self.succeeded = succeeded - self.failed = failed - - -class TaskStatistics(Model): - """Resource usage statistics for a Task. - - All required parameters must be populated in order to send to Azure. - - :param url: Required. - :type url: str - :param start_time: Required. - :type start_time: datetime - :param last_update_time: Required. - :type last_update_time: datetime - :param user_cpu_time: Required. - :type user_cpu_time: timedelta - :param kernel_cpu_time: Required. - :type kernel_cpu_time: timedelta - :param wall_clock_time: Required. The wall clock time is the elapsed time - from when the Task started running on a Compute Node to when it finished - (or to the last time the statistics were updated, if the Task had not - finished by then). If the Task was retried, this includes the wall clock - time of all the Task retries. - :type wall_clock_time: timedelta - :param read_iops: Required. The total number of disk read operations made - by the Task. - :type read_iops: long - :param write_iops: Required. The total number of disk write operations - made by the Task. - :type write_iops: long - :param read_io_gi_b: Required. The total gibibytes read from disk by the - Task. - :type read_io_gi_b: float - :param write_io_gi_b: Required. The total gibibytes written to disk by the - Task. - :type write_io_gi_b: float - :param wait_time: Required. 
- :type wait_time: timedelta - """ - - _validation = { - 'url': {'required': True}, - 'start_time': {'required': True}, - 'last_update_time': {'required': True}, - 'user_cpu_time': {'required': True}, - 'kernel_cpu_time': {'required': True}, - 'wall_clock_time': {'required': True}, - 'read_iops': {'required': True}, - 'write_iops': {'required': True}, - 'read_io_gi_b': {'required': True}, - 'write_io_gi_b': {'required': True}, - 'wait_time': {'required': True}, - } - - _attribute_map = { - 'url': {'key': 'url', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, - 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, - 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, - 'read_iops': {'key': 'readIOps', 'type': 'long'}, - 'write_iops': {'key': 'writeIOps', 'type': 'long'}, - 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, - 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, - 'wait_time': {'key': 'waitTime', 'type': 'duration'}, - } - - def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, write_iops: int, read_io_gi_b: float, write_io_gi_b: float, wait_time, **kwargs) -> None: - super(TaskStatistics, self).__init__(**kwargs) - self.url = url - self.start_time = start_time - self.last_update_time = last_update_time - self.user_cpu_time = user_cpu_time - self.kernel_cpu_time = kernel_cpu_time - self.wall_clock_time = wall_clock_time - self.read_iops = read_iops - self.write_iops = write_iops - self.read_io_gi_b = read_io_gi_b - self.write_io_gi_b = write_io_gi_b - self.wait_time = wait_time - - -class TaskTerminateOptions(Model): - """Additional parameters for terminate operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. 
- :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. - :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(TaskTerminateOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class TaskUpdateOptions(Model): - """Additional parameters for update operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger - than 30, the default will be used instead. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(TaskUpdateOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - - -class TaskUpdateParameter(Model): - """The set of changes to be made to a Task. - - :param constraints: Constraints that apply to this Task. If omitted, the - Task is given the default constraints. For multi-instance Tasks, updating - the retention time applies only to the primary Task and not subtasks. - :type constraints: ~azure.batch.models.TaskConstraints - """ - - _attribute_map = { - 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, - } - - def __init__(self, *, constraints=None, **kwargs) -> None: - super(TaskUpdateParameter, self).__init__(**kwargs) - self.constraints = constraints - - -class UefiSettings(Model): - """Specifies the security settings like secure boot and vTPM used while - creating the virtual machine. - - :param secure_boot_enabled: Specifies whether secure boot should be - enabled on the virtual machine. - :type secure_boot_enabled: bool - :param v_tpm_enabled: Specifies whether vTPM should be enabled on the - virtual machine. - :type v_tpm_enabled: bool - """ - - _attribute_map = { - 'secure_boot_enabled': {'key': 'secureBootEnabled', 'type': 'bool'}, - 'v_tpm_enabled': {'key': 'vTpmEnabled', 'type': 'bool'}, - } - - def __init__(self, *, secure_boot_enabled: bool=None, v_tpm_enabled: bool=None, **kwargs) -> None: - super(UefiSettings, self).__init__(**kwargs) - self.secure_boot_enabled = secure_boot_enabled - self.v_tpm_enabled = v_tpm_enabled - - -class UpgradePolicy(Model): - """Describes an upgrade policy - automatic, manual, or rolling. - - All required parameters must be populated in order to send to Azure. - - :param mode: Required. Possible values include: 'automatic', 'manual', - 'rolling' - :type mode: str or ~azure.batch.models.UpgradeMode - :param automatic_os_upgrade_policy: Configuration parameters used for - performing automatic OS Upgrade. The configuration parameters used for - performing automatic OS upgrade. - :type automatic_os_upgrade_policy: - ~azure.batch.models.AutomaticOSUpgradePolicy - :param rolling_upgrade_policy: The configuration parameters used while - performing a rolling upgrade. This property is only supported on Pools - with the virtualMachineConfiguration property. 
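# Sketch of the TaskUpdateParameter model documented above: constraints are the
# only updatable field, and for multi-instance Tasks a new retention time applies
# only to the primary Task.
from azure.batch import models

update = models.TaskUpdateParameter(
    constraints=models.TaskConstraints(max_task_retry_count=1),
)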
- :type rolling_upgrade_policy: ~azure.batch.models.RollingUpgradePolicy - """ - - _validation = { - 'mode': {'required': True}, - } - - _attribute_map = { - 'mode': {'key': 'mode', 'type': 'UpgradeMode'}, - 'automatic_os_upgrade_policy': {'key': 'automaticOSUpgradePolicy', 'type': 'AutomaticOSUpgradePolicy'}, - 'rolling_upgrade_policy': {'key': 'rollingUpgradePolicy', 'type': 'RollingUpgradePolicy'}, - } - - def __init__(self, *, mode, automatic_os_upgrade_policy=None, rolling_upgrade_policy=None, **kwargs) -> None: - super(UpgradePolicy, self).__init__(**kwargs) - self.mode = mode - self.automatic_os_upgrade_policy = automatic_os_upgrade_policy - self.rolling_upgrade_policy = rolling_upgrade_policy - - -class UploadBatchServiceLogsConfiguration(Model): - """The Azure Batch service log files upload configuration for a Compute Node. - - All required parameters must be populated in order to send to Azure. - - :param container_url: Required. If a user assigned managed identity is not - being used, the URL must include a Shared Access Signature (SAS) granting - write permissions to the container. The SAS duration must allow enough - time for the upload to finish. The start time for SAS is optional and - recommended to not be specified. - :type container_url: str - :param start_time: Required. Any log file containing a log message in the - time range will be uploaded. This means that the operation might retrieve - more logs than have been requested since the entire log file is always - uploaded, but the operation should not retrieve fewer logs than have been - requested. - :type start_time: datetime - :param end_time: Any log file containing a log message in the time range - will be uploaded. This means that the operation might retrieve more logs - than have been requested since the entire log file is always uploaded, but - the operation should not retrieve fewer logs than have been requested. If - omitted, the default is to upload all logs available after the startTime. - :type end_time: datetime - :param identity_reference: The reference to the user assigned identity to - use to access Azure Blob Storage specified by containerUrl. The identity - must have write access to the Azure Blob Storage container. - :type identity_reference: ~azure.batch.models.ComputeNodeIdentityReference - """ - - _validation = { - 'container_url': {'required': True}, - 'start_time': {'required': True}, - } - - _attribute_map = { - 'container_url': {'key': 'containerUrl', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'identity_reference': {'key': 'identityReference', 'type': 'ComputeNodeIdentityReference'}, - } - - def __init__(self, *, container_url: str, start_time, end_time=None, identity_reference=None, **kwargs) -> None: - super(UploadBatchServiceLogsConfiguration, self).__init__(**kwargs) - self.container_url = container_url - self.start_time = start_time - self.end_time = end_time - self.identity_reference = identity_reference - - -class UploadBatchServiceLogsResult(Model): - """The result of uploading Batch service log files from a specific Compute - Node. - - All required parameters must be populated in order to send to Azure. - - :param virtual_directory_name: Required. The virtual directory name is - part of the blob name for each log file uploaded, and it is built based - poolId, nodeId and a unique identifier. - :type virtual_directory_name: str - :param number_of_files_uploaded: Required. 
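# Sketch of the UploadBatchServiceLogsConfiguration model documented above:
# without a user assigned identity the container URL must carry a write-capable
# SAS, and any log file containing a message in the requested window is uploaded
# in full. The URL below is a placeholder.
from datetime import datetime, timedelta
from azure.batch import models

logs_config = models.UploadBatchServiceLogsConfiguration(
    container_url="https://account.blob.core.windows.net/batch-logs?<write-sas>",
    start_time=datetime.utcnow() - timedelta(hours=6),
    # end_time omitted: everything after start_time is uploaded
)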
The number of log files which - will be uploaded. - :type number_of_files_uploaded: int - """ - - _validation = { - 'virtual_directory_name': {'required': True}, - 'number_of_files_uploaded': {'required': True}, - } - - _attribute_map = { - 'virtual_directory_name': {'key': 'virtualDirectoryName', 'type': 'str'}, - 'number_of_files_uploaded': {'key': 'numberOfFilesUploaded', 'type': 'int'}, - } - - def __init__(self, *, virtual_directory_name: str, number_of_files_uploaded: int, **kwargs) -> None: - super(UploadBatchServiceLogsResult, self).__init__(**kwargs) - self.virtual_directory_name = virtual_directory_name - self.number_of_files_uploaded = number_of_files_uploaded - - -class UsageStatistics(Model): - """Statistics related to Pool usage information. - - All required parameters must be populated in order to send to Azure. - - :param start_time: Required. - :type start_time: datetime - :param last_update_time: Required. - :type last_update_time: datetime - :param dedicated_core_time: Required. - :type dedicated_core_time: timedelta - """ - - _validation = { - 'start_time': {'required': True}, - 'last_update_time': {'required': True}, - 'dedicated_core_time': {'required': True}, - } - - _attribute_map = { - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - 'dedicated_core_time': {'key': 'dedicatedCoreTime', 'type': 'duration'}, - } - - def __init__(self, *, start_time, last_update_time, dedicated_core_time, **kwargs) -> None: - super(UsageStatistics, self).__init__(**kwargs) - self.start_time = start_time - self.last_update_time = last_update_time - self.dedicated_core_time = dedicated_core_time - - -class UserAccount(Model): - """Properties used to create a user used to execute Tasks on an Azure Batch - Compute Node. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param password: Required. - :type password: str - :param elevation_level: The elevation level of the user Account. The - default value is nonAdmin. Possible values include: 'nonAdmin', 'admin' - :type elevation_level: str or ~azure.batch.models.ElevationLevel - :param linux_user_configuration: The Linux-specific user configuration for - the user Account. This property is ignored if specified on a Windows Pool. - If not specified, the user is created with the default options. - :type linux_user_configuration: ~azure.batch.models.LinuxUserConfiguration - :param windows_user_configuration: The Windows-specific user configuration - for the user Account. This property can only be specified if the user is - on a Windows Pool. If not specified and on a Windows Pool, the user is - created with the default options. 
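# Sketch of the UserAccount model documented above: the default elevation level
# is nonAdmin, and linux_user_configuration is ignored on Windows Pools. The
# password is a placeholder; real credentials should come from a secret store.
from azure.batch import models

admin_user = models.UserAccount(
    name="batch-admin",
    password="<from-key-vault>",
    elevation_level="admin",
)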
- :type windows_user_configuration: - ~azure.batch.models.WindowsUserConfiguration - """ - - _validation = { - 'name': {'required': True}, - 'password': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'password': {'key': 'password', 'type': 'str'}, - 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, - 'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'}, - 'windows_user_configuration': {'key': 'windowsUserConfiguration', 'type': 'WindowsUserConfiguration'}, - } - - def __init__(self, *, name: str, password: str, elevation_level=None, linux_user_configuration=None, windows_user_configuration=None, **kwargs) -> None: - super(UserAccount, self).__init__(**kwargs) - self.name = name - self.password = password - self.elevation_level = elevation_level - self.linux_user_configuration = linux_user_configuration - self.windows_user_configuration = windows_user_configuration - - -class UserAssignedIdentity(Model): - """The user assigned Identity. - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :param resource_id: Required. The ARM resource id of the user assigned - identity - :type resource_id: str - :ivar client_id: The client id of the user assigned identity. - :vartype client_id: str - :ivar principal_id: The principal id of the user assigned identity. - :vartype principal_id: str - """ - - _validation = { - 'resource_id': {'required': True}, - 'client_id': {'readonly': True}, - 'principal_id': {'readonly': True}, - } - - _attribute_map = { - 'resource_id': {'key': 'resourceId', 'type': 'str'}, - 'client_id': {'key': 'clientId', 'type': 'str'}, - 'principal_id': {'key': 'principalId', 'type': 'str'}, - } - - def __init__(self, *, resource_id: str, **kwargs) -> None: - super(UserAssignedIdentity, self).__init__(**kwargs) - self.resource_id = resource_id - self.client_id = None - self.principal_id = None - - -class UserIdentity(Model): - """The definition of the user identity under which the Task is run. - - Specify either the userName or autoUser property, but not both. - - :param user_name: The userName and autoUser properties are mutually - exclusive; you must specify one but not both. - :type user_name: str - :param auto_user: The auto user under which the Task is run. The userName - and autoUser properties are mutually exclusive; you must specify one but - not both. - :type auto_user: ~azure.batch.models.AutoUserSpecification - """ - - _attribute_map = { - 'user_name': {'key': 'username', 'type': 'str'}, - 'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'}, - } - - def __init__(self, *, user_name: str=None, auto_user=None, **kwargs) -> None: - super(UserIdentity, self).__init__(**kwargs) - self.user_name = user_name - self.auto_user = auto_user - - -class VirtualMachineConfiguration(Model): - """The configuration for Compute Nodes in a Pool based on the Azure Virtual - Machines infrastructure. - - All required parameters must be populated in order to send to Azure. - - :param image_reference: Required. A reference to the Azure Virtual - Machines Marketplace Image or the custom Virtual Machine Image to use. - :type image_reference: ~azure.batch.models.ImageReference - :param node_agent_sku_id: Required. 
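# Sketch of the UserIdentity model documented above: user_name and auto_user are
# mutually exclusive, so exactly one of the two forms below would be used. The
# AutoUserSpecification arguments are assumptions drawn from the wider model set,
# not from this diff.
from azure.batch import models

named_identity = models.UserIdentity(user_name="batch-admin")
auto_identity = models.UserIdentity(
    auto_user=models.AutoUserSpecification(scope="pool", elevation_level="nonAdmin"),
)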
The Batch Compute Node agent is a - program that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the Batch - service. There are different implementations of the Compute Node agent, - known as SKUs, for different operating systems. You must specify a Compute - Node agent SKU which matches the selected Image reference. To get the list - of supported Compute Node agent SKUs along with their list of verified - Image references, see the 'List supported Compute Node agent SKUs' - operation. - :type node_agent_sku_id: str - :param windows_configuration: Windows operating system settings on the - virtual machine. This property must not be specified if the imageReference - property specifies a Linux OS Image. - :type windows_configuration: ~azure.batch.models.WindowsConfiguration - :param data_disks: This property must be specified if the Compute Nodes in - the Pool need to have empty data disks attached to them. This cannot be - updated. Each Compute Node gets its own disk (the disk is not a file - share). Existing disks cannot be attached, each attached disk is empty. - When the Compute Node is removed from the Pool, the disk and all data - associated with it is also deleted. The disk is not formatted after being - attached, it must be formatted before use - for more information see - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux - and - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. - :type data_disks: list[~azure.batch.models.DataDisk] - :param license_type: This only applies to Images that contain the Windows - operating system, and should only be used when you hold valid on-premises - licenses for the Compute Nodes which will be deployed. If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - :type license_type: str - :param container_configuration: The container configuration for the Pool. - If specified, setup is performed on each Compute Node in the Pool to allow - Tasks to run in containers. All regular Tasks and Job manager Tasks run on - this Pool must specify the containerSettings property, and all other Tasks - may specify it. - :type container_configuration: ~azure.batch.models.ContainerConfiguration - :param disk_encryption_configuration: The disk encryption configuration - for the pool. If specified, encryption is performed on each node in the - pool during node provisioning. - :type disk_encryption_configuration: - ~azure.batch.models.DiskEncryptionConfiguration - :param node_placement_configuration: The node placement configuration for - the pool. This configuration will specify rules on how nodes in the pool - will be physically allocated. - :type node_placement_configuration: - ~azure.batch.models.NodePlacementConfiguration - :param extensions: If specified, the extensions mentioned in this - configuration will be installed on each node. - :type extensions: list[~azure.batch.models.VMExtension] - :param os_disk: Settings for the operating system disk of the Virtual - Machine. - :type os_disk: ~azure.batch.models.OSDisk - :param security_profile: Specifies the security profile settings for the - virtual machine or virtual machine scale set. 
- :type security_profile: ~azure.batch.models.SecurityProfile - :param service_artifact_reference: Specifies the service artifact - reference id used to set same image version for all virtual machines in - the scale set when using 'latest' image version. The service artifact - reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName} - :type service_artifact_reference: - ~azure.batch.models.ServiceArtifactReference - """ - - _validation = { - 'image_reference': {'required': True}, - 'node_agent_sku_id': {'required': True}, - } - - _attribute_map = { - 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, - 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, - 'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'}, - 'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'}, - 'license_type': {'key': 'licenseType', 'type': 'str'}, - 'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'}, - 'disk_encryption_configuration': {'key': 'diskEncryptionConfiguration', 'type': 'DiskEncryptionConfiguration'}, - 'node_placement_configuration': {'key': 'nodePlacementConfiguration', 'type': 'NodePlacementConfiguration'}, - 'extensions': {'key': 'extensions', 'type': '[VMExtension]'}, - 'os_disk': {'key': 'osDisk', 'type': 'OSDisk'}, - 'security_profile': {'key': 'securityProfile', 'type': 'SecurityProfile'}, - 'service_artifact_reference': {'key': 'serviceArtifactReference', 'type': 'ServiceArtifactReference'}, - } - - def __init__(self, *, image_reference, node_agent_sku_id: str, windows_configuration=None, data_disks=None, license_type: str=None, container_configuration=None, disk_encryption_configuration=None, node_placement_configuration=None, extensions=None, os_disk=None, security_profile=None, service_artifact_reference=None, **kwargs) -> None: - super(VirtualMachineConfiguration, self).__init__(**kwargs) - self.image_reference = image_reference - self.node_agent_sku_id = node_agent_sku_id - self.windows_configuration = windows_configuration - self.data_disks = data_disks - self.license_type = license_type - self.container_configuration = container_configuration - self.disk_encryption_configuration = disk_encryption_configuration - self.node_placement_configuration = node_placement_configuration - self.extensions = extensions - self.os_disk = os_disk - self.security_profile = security_profile - self.service_artifact_reference = service_artifact_reference - - -class VirtualMachineInfo(Model): - """Info about the current state of the virtual machine. - - :param image_reference: The reference to the Azure Virtual Machine's - Marketplace Image. - :type image_reference: ~azure.batch.models.ImageReference - :param scale_set_vm_resource_id: - :type scale_set_vm_resource_id: str - """ - - _attribute_map = { - 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, - 'scale_set_vm_resource_id': {'key': 'scaleSetVmResourceId', 'type': 'str'}, - } - - def __init__(self, *, image_reference=None, scale_set_vm_resource_id: str=None, **kwargs) -> None: - super(VirtualMachineInfo, self).__init__(**kwargs) - self.image_reference = image_reference - self.scale_set_vm_resource_id = scale_set_vm_resource_id - - -class VMExtension(Model): - """The configuration for virtual machine extensions. 
- - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param publisher: Required. - :type publisher: str - :param type: Required. - :type type: str - :param type_handler_version: - :type type_handler_version: str - :param auto_upgrade_minor_version: Indicates whether the extension should - use a newer minor version if one is available at deployment time. Once - deployed, however, the extension will not upgrade minor versions unless - redeployed, even with this property set to true. - :type auto_upgrade_minor_version: bool - :param enable_automatic_upgrade: Indicates whether the extension should be - automatically upgraded by the platform if there is a newer version of the - extension available. - :type enable_automatic_upgrade: bool - :param settings: - :type settings: object - :param protected_settings: The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or no protected - settings at all. - :type protected_settings: object - :param provision_after_extensions: Collection of extension names after - which this extension needs to be provisioned. - :type provision_after_extensions: list[str] - """ - - _validation = { - 'name': {'required': True}, - 'publisher': {'required': True}, - 'type': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'publisher': {'key': 'publisher', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'type_handler_version': {'key': 'typeHandlerVersion', 'type': 'str'}, - 'auto_upgrade_minor_version': {'key': 'autoUpgradeMinorVersion', 'type': 'bool'}, - 'enable_automatic_upgrade': {'key': 'enableAutomaticUpgrade', 'type': 'bool'}, - 'settings': {'key': 'settings', 'type': 'object'}, - 'protected_settings': {'key': 'protectedSettings', 'type': 'object'}, - 'provision_after_extensions': {'key': 'provisionAfterExtensions', 'type': '[str]'}, - } - - def __init__(self, *, name: str, publisher: str, type: str, type_handler_version: str=None, auto_upgrade_minor_version: bool=None, enable_automatic_upgrade: bool=None, settings=None, protected_settings=None, provision_after_extensions=None, **kwargs) -> None: - super(VMExtension, self).__init__(**kwargs) - self.name = name - self.publisher = publisher - self.type = type - self.type_handler_version = type_handler_version - self.auto_upgrade_minor_version = auto_upgrade_minor_version - self.enable_automatic_upgrade = enable_automatic_upgrade - self.settings = settings - self.protected_settings = protected_settings - self.provision_after_extensions = provision_after_extensions - - -class VMExtensionInstanceView(Model): - """The vm extension instance view. - - :param name: - :type name: str - :param statuses: The resource status information. - :type statuses: list[~azure.batch.models.InstanceViewStatus] - :param sub_statuses: The resource status information. - :type sub_statuses: list[~azure.batch.models.InstanceViewStatus] - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'}, - 'sub_statuses': {'key': 'subStatuses', 'type': '[InstanceViewStatus]'}, - } - - def __init__(self, *, name: str=None, statuses=None, sub_statuses=None, **kwargs) -> None: - super(VMExtensionInstanceView, self).__init__(**kwargs) - self.name = name - self.statuses = statuses - self.sub_statuses = sub_statuses - - -class WindowsConfiguration(Model): - """Windows operating system settings to apply to the virtual machine. 
- - :param enable_automatic_updates: Whether automatic updates are enabled on - the virtual machine. If omitted, the default value is true. - :type enable_automatic_updates: bool - """ - - _attribute_map = { - 'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'}, - } - - def __init__(self, *, enable_automatic_updates: bool=None, **kwargs) -> None: - super(WindowsConfiguration, self).__init__(**kwargs) - self.enable_automatic_updates = enable_automatic_updates - - -class WindowsUserConfiguration(Model): - """Properties used to create a user Account on a Windows Compute Node. - - :param login_mode: The default value for VirtualMachineConfiguration Pools - is 'batch' and for CloudServiceConfiguration Pools is 'interactive'. - Possible values include: 'batch', 'interactive' - :type login_mode: str or ~azure.batch.models.LoginMode - """ - - _attribute_map = { - 'login_mode': {'key': 'loginMode', 'type': 'LoginMode'}, - } - - def __init__(self, *, login_mode=None, **kwargs) -> None: - super(WindowsUserConfiguration, self).__init__(**kwargs) - self.login_mode = login_mode diff --git a/sdk/batch/azure-batch/azure/batch/models/_paged_models.py b/sdk/batch/azure-batch/azure/batch/models/_paged_models.py deleted file mode 100644 index 6e8edecd133c..000000000000 --- a/sdk/batch/azure-batch/azure/batch/models/_paged_models.py +++ /dev/null @@ -1,183 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
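# Illustrative usage sketch, not part of this diff: how the legacy msrest-based
# models deleted above (VirtualMachineConfiguration, ImageReference, etc.) were
# typically constructed with keyword-only arguments in the pre-regeneration
# azure-batch package. The image reference values and node agent SKU below are
# placeholders, not values taken from this change.
from azure.batch import models as batch_models

vm_config = batch_models.VirtualMachineConfiguration(
    image_reference=batch_models.ImageReference(
        publisher="canonical",
        offer="0001-com-ubuntu-server-jammy",
        sku="22_04-lts",
        version="latest",
    ),
    # The node agent SKU must match the selected image, per the docstring above.
    node_agent_sku_id="batch.node.ubuntu 22.04",
)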
-# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class ApplicationSummaryPaged(Paged): - """ - A paging container for iterating over a list of :class:`ApplicationSummary ` object - """ - - _attribute_map = { - 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[ApplicationSummary]'} - } - - def __init__(self, *args, **kwargs): - - super(ApplicationSummaryPaged, self).__init__(*args, **kwargs) -class PoolUsageMetricsPaged(Paged): - """ - A paging container for iterating over a list of :class:`PoolUsageMetrics ` object - """ - - _attribute_map = { - 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[PoolUsageMetrics]'} - } - - def __init__(self, *args, **kwargs): - - super(PoolUsageMetricsPaged, self).__init__(*args, **kwargs) -class CloudPoolPaged(Paged): - """ - A paging container for iterating over a list of :class:`CloudPool ` object - """ - - _attribute_map = { - 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[CloudPool]'} - } - - def __init__(self, *args, **kwargs): - - super(CloudPoolPaged, self).__init__(*args, **kwargs) -class ImageInformationPaged(Paged): - """ - A paging container for iterating over a list of :class:`ImageInformation ` object - """ - - _attribute_map = { - 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[ImageInformation]'} - } - - def __init__(self, *args, **kwargs): - - super(ImageInformationPaged, self).__init__(*args, **kwargs) -class PoolNodeCountsPaged(Paged): - """ - A paging container for iterating over a list of :class:`PoolNodeCounts ` object - """ - - _attribute_map = { - 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[PoolNodeCounts]'} - } - - def __init__(self, *args, **kwargs): - - super(PoolNodeCountsPaged, self).__init__(*args, **kwargs) -class CertificatePaged(Paged): - """ - A paging container for iterating over a list of :class:`Certificate ` object - """ - - _attribute_map = { - 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[Certificate]'} - } - - def __init__(self, *args, **kwargs): - - super(CertificatePaged, self).__init__(*args, **kwargs) -class NodeFilePaged(Paged): - """ - A paging container for iterating over a list of :class:`NodeFile ` object - """ - - _attribute_map = { - 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[NodeFile]'} - } - - def __init__(self, *args, **kwargs): - - super(NodeFilePaged, self).__init__(*args, **kwargs) -class CloudJobSchedulePaged(Paged): - """ - A paging container for iterating over a list of :class:`CloudJobSchedule ` object - """ - - _attribute_map = { - 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[CloudJobSchedule]'} - } - - def __init__(self, *args, **kwargs): - - super(CloudJobSchedulePaged, self).__init__(*args, **kwargs) -class CloudJobPaged(Paged): - """ - A paging container for iterating over a list of :class:`CloudJob ` object - """ - - _attribute_map = { - 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[CloudJob]'} - } - - def __init__(self, *args, **kwargs): - - super(CloudJobPaged, self).__init__(*args, **kwargs) -class 
JobPreparationAndReleaseTaskExecutionInformationPaged(Paged): - """ - A paging container for iterating over a list of :class:`JobPreparationAndReleaseTaskExecutionInformation ` object - """ - - _attribute_map = { - 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[JobPreparationAndReleaseTaskExecutionInformation]'} - } - - def __init__(self, *args, **kwargs): - - super(JobPreparationAndReleaseTaskExecutionInformationPaged, self).__init__(*args, **kwargs) -class CloudTaskPaged(Paged): - """ - A paging container for iterating over a list of :class:`CloudTask ` object - """ - - _attribute_map = { - 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[CloudTask]'} - } - - def __init__(self, *args, **kwargs): - - super(CloudTaskPaged, self).__init__(*args, **kwargs) -class ComputeNodePaged(Paged): - """ - A paging container for iterating over a list of :class:`ComputeNode ` object - """ - - _attribute_map = { - 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[ComputeNode]'} - } - - def __init__(self, *args, **kwargs): - - super(ComputeNodePaged, self).__init__(*args, **kwargs) -class NodeVMExtensionPaged(Paged): - """ - A paging container for iterating over a list of :class:`NodeVMExtension ` object - """ - - _attribute_map = { - 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[NodeVMExtension]'} - } - - def __init__(self, *args, **kwargs): - - super(NodeVMExtensionPaged, self).__init__(*args, **kwargs) diff --git a/sdk/batch/azure-batch/azure/batch/models/_patch.py b/sdk/batch/azure-batch/azure/batch/models/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/batch/azure-batch/azure/batch/models/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/batch/azure-batch/azure/batch/operations/__init__.py b/sdk/batch/azure-batch/azure/batch/operations/__init__.py deleted file mode 100644 index 0ccd4e88fd5d..000000000000 --- a/sdk/batch/azure-batch/azure/batch/operations/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
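# Illustrative usage sketch, not part of this diff: the msrest Paged containers
# deleted above were plain iterables, so callers consumed list operations simply
# by looping over the returned object. `client` is assumed to be an already
# configured legacy BatchServiceClient instance; the pool id is a placeholder.
for pool in client.pool.list():
    print(pool.id, pool.state)

for node in client.compute_node.list(pool_id="my-pool"):
    print(node.id, node.state)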
-# -------------------------------------------------------------------------- - -from ._application_operations import ApplicationOperations -from ._pool_operations import PoolOperations -from ._account_operations import AccountOperations -from ._certificate_operations import CertificateOperations -from ._file_operations import FileOperations -from ._job_schedule_operations import JobScheduleOperations -from ._job_operations import JobOperations -from ._task_operations import TaskOperations -from ._compute_node_operations import ComputeNodeOperations -from ._compute_node_extension_operations import ComputeNodeExtensionOperations - -__all__ = [ - 'ApplicationOperations', - 'PoolOperations', - 'AccountOperations', - 'CertificateOperations', - 'FileOperations', - 'JobScheduleOperations', - 'JobOperations', - 'TaskOperations', - 'ComputeNodeOperations', - 'ComputeNodeExtensionOperations', -] diff --git a/sdk/batch/azure-batch/azure/batch/operations/_account_operations.py b/sdk/batch/azure-batch/azure/batch/operations/_account_operations.py deleted file mode 100644 index fbc5ad3a3eae..000000000000 --- a/sdk/batch/azure-batch/azure/batch/operations/_account_operations.py +++ /dev/null @@ -1,241 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -import uuid -from msrest.pipeline import ClientRawResponse - -from .. import models - - -class AccountOperations(object): - """AccountOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar api_version: The API version to use for the request. Constant value: "2024-02-01.19.0". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self.api_version = "2024-02-01.19.0" - - self.config = config - - def list_supported_images( - self, account_list_supported_images_options=None, custom_headers=None, raw=False, **operation_config): - """Lists all Virtual Machine Images supported by the Azure Batch service. - - :param account_list_supported_images_options: Additional parameters - for the operation - :type account_list_supported_images_options: - ~azure.batch.models.AccountListSupportedImagesOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: An iterator like instance of ImageInformation - :rtype: - ~azure.batch.models.ImageInformationPaged[~azure.batch.models.ImageInformation] - :raises: - :class:`BatchErrorException` - """ - filter = None - if account_list_supported_images_options is not None: - filter = account_list_supported_images_options.filter - max_results = None - if account_list_supported_images_options is not None: - max_results = account_list_supported_images_options.max_results - timeout = None - if account_list_supported_images_options is not None: - timeout = account_list_supported_images_options.timeout - client_request_id = None - if account_list_supported_images_options is not None: - client_request_id = account_list_supported_images_options.client_request_id - return_client_request_id = None - if account_list_supported_images_options is not None: - return_client_request_id = account_list_supported_images_options.return_client_request_id - ocp_date = None - if account_list_supported_images_options is not None: - ocp_date = account_list_supported_images_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list_supported_images.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if filter is not None: - query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.ImageInformationPaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list_supported_images.metadata = {'url': '/supportedimages'} - - def 
list_pool_node_counts( - self, account_list_pool_node_counts_options=None, custom_headers=None, raw=False, **operation_config): - """Gets the number of Compute Nodes in each state, grouped by Pool. Note - that the numbers returned may not always be up to date. If you need - exact node counts, use a list query. - - :param account_list_pool_node_counts_options: Additional parameters - for the operation - :type account_list_pool_node_counts_options: - ~azure.batch.models.AccountListPoolNodeCountsOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: An iterator like instance of PoolNodeCounts - :rtype: - ~azure.batch.models.PoolNodeCountsPaged[~azure.batch.models.PoolNodeCounts] - :raises: - :class:`BatchErrorException` - """ - filter = None - if account_list_pool_node_counts_options is not None: - filter = account_list_pool_node_counts_options.filter - max_results = None - if account_list_pool_node_counts_options is not None: - max_results = account_list_pool_node_counts_options.max_results - timeout = None - if account_list_pool_node_counts_options is not None: - timeout = account_list_pool_node_counts_options.timeout - client_request_id = None - if account_list_pool_node_counts_options is not None: - client_request_id = account_list_pool_node_counts_options.client_request_id - return_client_request_id = None - if account_list_pool_node_counts_options is not None: - return_client_request_id = account_list_pool_node_counts_options.return_client_request_id - ocp_date = None - if account_list_pool_node_counts_options is not None: - ocp_date = account_list_pool_node_counts_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list_pool_node_counts.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if filter is not None: - query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=10, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = 
self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.PoolNodeCountsPaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list_pool_node_counts.metadata = {'url': '/nodecounts'} diff --git a/sdk/batch/azure-batch/azure/batch/operations/_application_operations.py b/sdk/batch/azure-batch/azure/batch/operations/_application_operations.py deleted file mode 100644 index 86b40f1b2f3e..000000000000 --- a/sdk/batch/azure-batch/azure/batch/operations/_application_operations.py +++ /dev/null @@ -1,236 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -import uuid -from msrest.pipeline import ClientRawResponse - -from .. import models - - -class ApplicationOperations(object): - """ApplicationOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar api_version: The API version to use for the request. Constant value: "2024-02-01.19.0". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self.api_version = "2024-02-01.19.0" - - self.config = config - - def list( - self, application_list_options=None, custom_headers=None, raw=False, **operation_config): - """Lists all of the applications available in the specified Account. - - This operation returns only Applications and versions that are - available for use on Compute Nodes; that is, that can be used in an - Package reference. For administrator information about applications and - versions that are not yet available to Compute Nodes, use the Azure - portal or the Azure Resource Manager API. - - :param application_list_options: Additional parameters for the - operation - :type application_list_options: - ~azure.batch.models.ApplicationListOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
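# Illustrative usage sketch, not part of this diff: the per-operation "options
# object" pattern read by the deleted AccountOperations code above (filter,
# max_results, timeout, client-request headers). `client` is assumed to be a
# configured legacy BatchServiceClient, and the filter expression is only a
# placeholder OData string.
from azure.batch import models as batch_models

options = batch_models.AccountListSupportedImagesOptions(
    filter="osType eq 'linux'",
    max_results=100,
)
for image in client.account.list_supported_images(
    account_list_supported_images_options=options
):
    print(image.node_agent_sku_id, image.os_type)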
- :return: An iterator like instance of ApplicationSummary - :rtype: - ~azure.batch.models.ApplicationSummaryPaged[~azure.batch.models.ApplicationSummary] - :raises: - :class:`BatchErrorException` - """ - max_results = None - if application_list_options is not None: - max_results = application_list_options.max_results - timeout = None - if application_list_options is not None: - timeout = application_list_options.timeout - client_request_id = None - if application_list_options is not None: - client_request_id = application_list_options.client_request_id - return_client_request_id = None - if application_list_options is not None: - return_client_request_id = application_list_options.return_client_request_id - ocp_date = None - if application_list_options is not None: - ocp_date = application_list_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.ApplicationSummaryPaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list.metadata = {'url': '/applications'} - - def get( - self, application_id, application_get_options=None, custom_headers=None, raw=False, **operation_config): - """Gets information about the specified Application. - - This operation returns only Applications and versions that are - available for use on Compute Nodes; that is, that can be used in an - Package reference. 
For administrator information about Applications and - versions that are not yet available to Compute Nodes, use the Azure - portal or the Azure Resource Manager API. - - :param application_id: The ID of the Application. - :type application_id: str - :param application_get_options: Additional parameters for the - operation - :type application_get_options: - ~azure.batch.models.ApplicationGetOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ApplicationSummary or ClientRawResponse if raw=true - :rtype: ~azure.batch.models.ApplicationSummary or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if application_get_options is not None: - timeout = application_get_options.timeout - client_request_id = None - if application_get_options is not None: - client_request_id = application_get_options.client_request_id - return_client_request_id = None - if application_get_options is not None: - return_client_request_id = application_get_options.return_client_request_id - ocp_date = None - if application_get_options is not None: - ocp_date = application_get_options.ocp_date - - # Construct URL - url = self.get.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'applicationId': self._serialize.url("application_id", application_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationSummary', response) - header_dict = { - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - } - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - 
get.metadata = {'url': '/applications/{applicationId}'} diff --git a/sdk/batch/azure-batch/azure/batch/operations/_certificate_operations.py b/sdk/batch/azure-batch/azure/batch/operations/_certificate_operations.py deleted file mode 100644 index 6ef35ca2e978..000000000000 --- a/sdk/batch/azure-batch/azure/batch/operations/_certificate_operations.py +++ /dev/null @@ -1,540 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -import uuid -from msrest.pipeline import ClientRawResponse - -from .. import models - - -class CertificateOperations(object): - """CertificateOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar api_version: The API version to use for the request. Constant value: "2024-02-01.19.0". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self.api_version = "2024-02-01.19.0" - - self.config = config - - def add( - self, certificate, certificate_add_options=None, custom_headers=None, raw=False, **operation_config): - """Adds a Certificate to the specified Account. - - Warning: This operation is deprecated and will be removed after - February, 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - - :param certificate: The Certificate to be added. - :type certificate: ~azure.batch.models.CertificateAddParameter - :param certificate_add_options: Additional parameters for the - operation - :type certificate_add_options: - ~azure.batch.models.CertificateAddOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if certificate_add_options is not None: - timeout = certificate_add_options.timeout - client_request_id = None - if certificate_add_options is not None: - client_request_id = certificate_add_options.client_request_id - return_client_request_id = None - if certificate_add_options is not None: - return_client_request_id = certificate_add_options.return_client_request_id - ocp_date = None - if certificate_add_options is not None: - ocp_date = certificate_add_options.ocp_date - - # Construct URL - url = self.add.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(certificate, 'CertificateAddParameter') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [201]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - add.metadata = {'url': '/certificates'} - - def list( - self, certificate_list_options=None, custom_headers=None, raw=False, **operation_config): - """Lists all of the Certificates that have been added to the specified - Account. - - Warning: This operation is deprecated and will be removed after - February, 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. 
- - :param certificate_list_options: Additional parameters for the - operation - :type certificate_list_options: - ~azure.batch.models.CertificateListOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: An iterator like instance of Certificate - :rtype: - ~azure.batch.models.CertificatePaged[~azure.batch.models.Certificate] - :raises: - :class:`BatchErrorException` - """ - filter = None - if certificate_list_options is not None: - filter = certificate_list_options.filter - select = None - if certificate_list_options is not None: - select = certificate_list_options.select - max_results = None - if certificate_list_options is not None: - max_results = certificate_list_options.max_results - timeout = None - if certificate_list_options is not None: - timeout = certificate_list_options.timeout - client_request_id = None - if certificate_list_options is not None: - client_request_id = certificate_list_options.client_request_id - return_client_request_id = None - if certificate_list_options is not None: - return_client_request_id = certificate_list_options.return_client_request_id - ocp_date = None - if certificate_list_options is not None: - ocp_date = certificate_list_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if filter is not None: - query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if 
response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.CertificatePaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list.metadata = {'url': '/certificates'} - - def cancel_deletion( - self, thumbprint_algorithm, thumbprint, certificate_cancel_deletion_options=None, custom_headers=None, raw=False, **operation_config): - """Cancels a failed deletion of a Certificate from the specified Account. - - If you try to delete a Certificate that is being used by a Pool or - Compute Node, the status of the Certificate changes to deleteFailed. If - you decide that you want to continue using the Certificate, you can use - this operation to set the status of the Certificate back to active. If - you intend to delete the Certificate, you do not need to run this - operation after the deletion failed. You must make sure that the - Certificate is not being used by any resources, and then you can try - again to delete the Certificate. - Warning: This operation is deprecated and will be removed after - February, 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - - :param thumbprint_algorithm: The algorithm used to derive the - thumbprint parameter. This must be sha1. - :type thumbprint_algorithm: str - :param thumbprint: The thumbprint of the Certificate being deleted. - :type thumbprint: str - :param certificate_cancel_deletion_options: Additional parameters for - the operation - :type certificate_cancel_deletion_options: - ~azure.batch.models.CertificateCancelDeletionOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if certificate_cancel_deletion_options is not None: - timeout = certificate_cancel_deletion_options.timeout - client_request_id = None - if certificate_cancel_deletion_options is not None: - client_request_id = certificate_cancel_deletion_options.client_request_id - return_client_request_id = None - if certificate_cancel_deletion_options is not None: - return_client_request_id = certificate_cancel_deletion_options.return_client_request_id - ocp_date = None - if certificate_cancel_deletion_options is not None: - ocp_date = certificate_cancel_deletion_options.ocp_date - - # Construct URL - url = self.cancel_deletion.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), - 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [204]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - cancel_deletion.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})/canceldelete'} - - def delete( - self, thumbprint_algorithm, thumbprint, certificate_delete_options=None, custom_headers=None, raw=False, **operation_config): - """Deletes a Certificate from the specified Account. - - You cannot delete a Certificate if a resource (Pool or Compute Node) is - using it. 
Before you can delete a Certificate, you must therefore make - sure that the Certificate is not associated with any existing Pools, - the Certificate is not installed on any Nodes (even if you remove a - Certificate from a Pool, it is not removed from existing Compute Nodes - in that Pool until they restart), and no running Tasks depend on the - Certificate. If you try to delete a Certificate that is in use, the - deletion fails. The Certificate status changes to deleteFailed. You can - use Cancel Delete Certificate to set the status back to active if you - decide that you want to continue using the Certificate. - Warning: This operation is deprecated and will be removed after - February, 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - - :param thumbprint_algorithm: The algorithm used to derive the - thumbprint parameter. This must be sha1. - :type thumbprint_algorithm: str - :param thumbprint: The thumbprint of the Certificate to be deleted. - :type thumbprint: str - :param certificate_delete_options: Additional parameters for the - operation - :type certificate_delete_options: - ~azure.batch.models.CertificateDeleteOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if certificate_delete_options is not None: - timeout = certificate_delete_options.timeout - client_request_id = None - if certificate_delete_options is not None: - client_request_id = certificate_delete_options.client_request_id - return_client_request_id = None - if certificate_delete_options is not None: - return_client_request_id = certificate_delete_options.return_client_request_id - ocp_date = None - if certificate_delete_options is not None: - ocp_date = certificate_delete_options.ocp_date - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), - 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date 
is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - }) - return client_raw_response - delete.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})'} - - def get( - self, thumbprint_algorithm, thumbprint, certificate_get_options=None, custom_headers=None, raw=False, **operation_config): - """Gets information about the specified Certificate. - - Warning: This operation is deprecated and will be removed after - February, 2024. Please use the [Azure KeyVault - Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) - instead. - - :param thumbprint_algorithm: The algorithm used to derive the - thumbprint parameter. This must be sha1. - :type thumbprint_algorithm: str - :param thumbprint: The thumbprint of the Certificate to get. - :type thumbprint: str - :param certificate_get_options: Additional parameters for the - operation - :type certificate_get_options: - ~azure.batch.models.CertificateGetOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: Certificate or ClientRawResponse if raw=true - :rtype: ~azure.batch.models.Certificate or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - select = None - if certificate_get_options is not None: - select = certificate_get_options.select - timeout = None - if certificate_get_options is not None: - timeout = certificate_get_options.timeout - client_request_id = None - if certificate_get_options is not None: - client_request_id = certificate_get_options.client_request_id - return_client_request_id = None - if certificate_get_options is not None: - return_client_request_id = certificate_get_options.return_client_request_id - ocp_date = None - if certificate_get_options is not None: - ocp_date = certificate_get_options.ocp_date - - # Construct URL - url = self.get.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), - 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if 
custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('Certificate', response) - header_dict = { - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - } - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - get.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})'} diff --git a/sdk/batch/azure-batch/azure/batch/operations/_compute_node_extension_operations.py b/sdk/batch/azure-batch/azure/batch/operations/_compute_node_extension_operations.py deleted file mode 100644 index 8851ef552c1c..000000000000 --- a/sdk/batch/azure-batch/azure/batch/operations/_compute_node_extension_operations.py +++ /dev/null @@ -1,249 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -import uuid -from msrest.pipeline import ClientRawResponse - -from .. import models - - -class ComputeNodeExtensionOperations(object): - """ComputeNodeExtensionOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar api_version: The API version to use for the request. Constant value: "2024-02-01.19.0". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self.api_version = "2024-02-01.19.0" - - self.config = config - - def get( - self, pool_id, node_id, extension_name, compute_node_extension_get_options=None, custom_headers=None, raw=False, **operation_config): - """Gets information about the specified Compute Node Extension. - - :param pool_id: The ID of the Pool that contains the Compute Node. 
- :type pool_id: str - :param node_id: The ID of the Compute Node that contains the - extensions. - :type node_id: str - :param extension_name: The name of the of the Compute Node Extension - that you want to get information about. - :type extension_name: str - :param compute_node_extension_get_options: Additional parameters for - the operation - :type compute_node_extension_get_options: - ~azure.batch.models.ComputeNodeExtensionGetOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: NodeVMExtension or ClientRawResponse if raw=true - :rtype: ~azure.batch.models.NodeVMExtension or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - select = None - if compute_node_extension_get_options is not None: - select = compute_node_extension_get_options.select - timeout = None - if compute_node_extension_get_options is not None: - timeout = compute_node_extension_get_options.timeout - client_request_id = None - if compute_node_extension_get_options is not None: - client_request_id = compute_node_extension_get_options.client_request_id - return_client_request_id = None - if compute_node_extension_get_options is not None: - return_client_request_id = compute_node_extension_get_options.return_client_request_id - ocp_date = None - if compute_node_extension_get_options is not None: - ocp_date = compute_node_extension_get_options.ocp_date - - # Construct URL - url = self.get.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str'), - 'extensionName': self._serialize.url("extension_name", extension_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise 
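The Compute Node extension lookup above maps to a single call on the legacy client. The operation group and parameter names come from the deleted code; the identifier values are placeholders.

    # Returns a NodeVMExtension instance describing the named extension on the node.
    extension = client.compute_node_extension.get(
        pool_id="<pool-id>",
        node_id="<node-id>",
        extension_name="<extension-name>",
    )
    print(extension)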
models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('NodeVMExtension', response) - header_dict = { - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - } - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - get.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/extensions/{extensionName}'} - - def list( - self, pool_id, node_id, compute_node_extension_list_options=None, custom_headers=None, raw=False, **operation_config): - """Lists the Compute Nodes Extensions in the specified Pool. - - :param pool_id: The ID of the Pool that contains Compute Node. - :type pool_id: str - :param node_id: The ID of the Compute Node that you want to list - extensions. - :type node_id: str - :param compute_node_extension_list_options: Additional parameters for - the operation - :type compute_node_extension_list_options: - ~azure.batch.models.ComputeNodeExtensionListOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: An iterator like instance of NodeVMExtension - :rtype: - ~azure.batch.models.NodeVMExtensionPaged[~azure.batch.models.NodeVMExtension] - :raises: - :class:`BatchErrorException` - """ - select = None - if compute_node_extension_list_options is not None: - select = compute_node_extension_list_options.select - max_results = None - if compute_node_extension_list_options is not None: - max_results = compute_node_extension_list_options.max_results - timeout = None - if compute_node_extension_list_options is not None: - timeout = compute_node_extension_list_options.timeout - client_request_id = None - if compute_node_extension_list_options is not None: - client_request_id = compute_node_extension_list_options.client_request_id - return_client_request_id = None - if compute_node_extension_list_options is not None: - return_client_request_id = compute_node_extension_list_options.return_client_request_id - ocp_date = None - if compute_node_extension_list_options is not None: - ocp_date = compute_node_extension_list_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if 
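The list operation above returns a paged iterator (`NodeVMExtensionPaged`); iterating it transparently follows the continuation links handled by the `internal_paging` helper in the deleted code. A sketch, with `max_results` taken from the options object read above and the concrete values assumed.

    from azure.batch import models as batch_models

    # Cap each page at 10 results; the iterator keeps requesting further pages as needed.
    options = batch_models.ComputeNodeExtensionListOptions(max_results=10)
    for ext in client.compute_node_extension.list(
        pool_id="<pool-id>",
        node_id="<node-id>",
        compute_node_extension_list_options=options,
    ):
        print(ext)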
self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.NodeVMExtensionPaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/extensions'} diff --git a/sdk/batch/azure-batch/azure/batch/operations/_compute_node_operations.py b/sdk/batch/azure-batch/azure/batch/operations/_compute_node_operations.py deleted file mode 100644 index 9d4a93f616f1..000000000000 --- a/sdk/batch/azure-batch/azure/batch/operations/_compute_node_operations.py +++ /dev/null @@ -1,1230 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -import uuid -from msrest.pipeline import ClientRawResponse - -from .. import models - - -class ComputeNodeOperations(object): - """ComputeNodeOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar api_version: The API version to use for the request. Constant value: "2024-02-01.19.0". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self.api_version = "2024-02-01.19.0" - - self.config = config - - def add_user( - self, pool_id, node_id, user, compute_node_add_user_options=None, custom_headers=None, raw=False, **operation_config): - """Adds a user Account to the specified Compute Node. - - You can add a user Account to a Compute Node only when it is in the - idle or running state. - - :param pool_id: The ID of the Pool that contains the Compute Node. 
- :type pool_id: str - :param node_id: The ID of the machine on which you want to create a - user Account. - :type node_id: str - :param user: The user Account to be created. - :type user: ~azure.batch.models.ComputeNodeUser - :param compute_node_add_user_options: Additional parameters for the - operation - :type compute_node_add_user_options: - ~azure.batch.models.ComputeNodeAddUserOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if compute_node_add_user_options is not None: - timeout = compute_node_add_user_options.timeout - client_request_id = None - if compute_node_add_user_options is not None: - client_request_id = compute_node_add_user_options.client_request_id - return_client_request_id = None - if compute_node_add_user_options is not None: - return_client_request_id = compute_node_add_user_options.return_client_request_id - ocp_date = None - if compute_node_add_user_options is not None: - ocp_date = compute_node_add_user_options.ocp_date - - # Construct URL - url = self.add_user.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(user, 'ComputeNodeUser') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [201]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - 
add_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users'} - - def delete_user( - self, pool_id, node_id, user_name, compute_node_delete_user_options=None, custom_headers=None, raw=False, **operation_config): - """Deletes a user Account from the specified Compute Node. - - You can delete a user Account to a Compute Node only when it is in the - idle or running state. - - :param pool_id: The ID of the Pool that contains the Compute Node. - :type pool_id: str - :param node_id: The ID of the machine on which you want to delete a - user Account. - :type node_id: str - :param user_name: The name of the user Account to delete. - :type user_name: str - :param compute_node_delete_user_options: Additional parameters for the - operation - :type compute_node_delete_user_options: - ~azure.batch.models.ComputeNodeDeleteUserOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if compute_node_delete_user_options is not None: - timeout = compute_node_delete_user_options.timeout - client_request_id = None - if compute_node_delete_user_options is not None: - client_request_id = compute_node_delete_user_options.client_request_id - return_client_request_id = None - if compute_node_delete_user_options is not None: - return_client_request_id = compute_node_delete_user_options.return_client_request_id - ocp_date = None - if compute_node_delete_user_options is not None: - ocp_date = compute_node_delete_user_options.ocp_date - - # Construct URL - url = self.delete_user.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str'), - 'userName': self._serialize.url("user_name", user_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in 
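The two user-account operations above (add and delete) only succeed while the Compute Node is in the idle or running state, per their docstrings. A sketch; the `ComputeNodeUser` field names (`name`, `password`, `is_admin`) are assumptions about the legacy models module, while the method signatures match the deleted code.

    from azure.batch import models as batch_models

    user = batch_models.ComputeNodeUser(  # field names assumed for the legacy model
        name="remoteuser",
        password="<password>",
        is_admin=True,
    )
    client.compute_node.add_user(pool_id="<pool-id>", node_id="<node-id>", user=user)

    # ...and later, remove the same account again.
    client.compute_node.delete_user(pool_id="<pool-id>", node_id="<node-id>", user_name="remoteuser")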
[200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - }) - return client_raw_response - delete_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users/{userName}'} - - def update_user( - self, pool_id, node_id, user_name, node_update_user_parameter, compute_node_update_user_options=None, custom_headers=None, raw=False, **operation_config): - """Updates the password and expiration time of a user Account on the - specified Compute Node. - - This operation replaces of all the updatable properties of the Account. - For example, if the expiryTime element is not specified, the current - value is replaced with the default value, not left unmodified. You can - update a user Account on a Compute Node only when it is in the idle or - running state. - - :param pool_id: The ID of the Pool that contains the Compute Node. - :type pool_id: str - :param node_id: The ID of the machine on which you want to update a - user Account. - :type node_id: str - :param user_name: The name of the user Account to update. - :type user_name: str - :param node_update_user_parameter: The parameters for the request. - :type node_update_user_parameter: - ~azure.batch.models.NodeUpdateUserParameter - :param compute_node_update_user_options: Additional parameters for the - operation - :type compute_node_update_user_options: - ~azure.batch.models.ComputeNodeUpdateUserOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if compute_node_update_user_options is not None: - timeout = compute_node_update_user_options.timeout - client_request_id = None - if compute_node_update_user_options is not None: - client_request_id = compute_node_update_user_options.client_request_id - return_client_request_id = None - if compute_node_update_user_options is not None: - return_client_request_id = compute_node_update_user_options.return_client_request_id - ocp_date = None - if compute_node_update_user_options is not None: - ocp_date = compute_node_update_user_options.ocp_date - - # Construct URL - url = self.update_user.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str'), - 'userName': self._serialize.url("user_name", user_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not 
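Because the update above replaces every updatable property (an omitted expiryTime reverts to its default rather than being left unchanged), callers had to send the full desired state on each call. A sketch; the `NodeUpdateUserParameter` field names are assumptions about the legacy model.

    import datetime
    from azure.batch import models as batch_models

    update = batch_models.NodeUpdateUserParameter(  # field names assumed for the legacy model
        password="<new-password>",
        expiry_time=datetime.datetime.utcnow() + datetime.timedelta(days=1),
    )
    client.compute_node.update_user(
        pool_id="<pool-id>",
        node_id="<node-id>",
        user_name="remoteuser",
        node_update_user_parameter=update,
    )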
None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(node_update_user_parameter, 'NodeUpdateUserParameter') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - update_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users/{userName}'} - - def get( - self, pool_id, node_id, compute_node_get_options=None, custom_headers=None, raw=False, **operation_config): - """Gets information about the specified Compute Node. - - :param pool_id: The ID of the Pool that contains the Compute Node. - :type pool_id: str - :param node_id: The ID of the Compute Node that you want to get - information about. - :type node_id: str - :param compute_node_get_options: Additional parameters for the - operation - :type compute_node_get_options: - ~azure.batch.models.ComputeNodeGetOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ComputeNode or ClientRawResponse if raw=true - :rtype: ~azure.batch.models.ComputeNode or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - select = None - if compute_node_get_options is not None: - select = compute_node_get_options.select - timeout = None - if compute_node_get_options is not None: - timeout = compute_node_get_options.timeout - client_request_id = None - if compute_node_get_options is not None: - client_request_id = compute_node_get_options.client_request_id - return_client_request_id = None - if compute_node_get_options is not None: - return_client_request_id = compute_node_get_options.return_client_request_id - ocp_date = None - if compute_node_get_options is not None: - ocp_date = compute_node_get_options.ocp_date - - # Construct URL - url = self.get.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ComputeNode', response) - header_dict = { - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - } - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - get.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}'} - - def reboot( - self, pool_id, node_id, node_reboot_option=None, compute_node_reboot_options=None, custom_headers=None, raw=False, **operation_config): - """Restarts the specified Compute Node. - - You can restart a Compute Node only if it is in an idle or running - state. - - :param pool_id: The ID of the Pool that contains the Compute Node. 
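The per-call options pattern visible throughout these operations (a `timeout` that becomes a query parameter, `client-request-id` and `ocp-date` that become headers) looked like this from the caller's side. The option field names are read directly in the deleted code; the concrete values are assumptions.

    import uuid
    from azure.batch import models as batch_models

    options = batch_models.ComputeNodeGetOptions(
        timeout=30,                            # sent as the 'timeout' query parameter
        client_request_id=str(uuid.uuid4()),   # sent as the 'client-request-id' header
        return_client_request_id=True,         # ask the service to echo the id back
    )
    node = client.compute_node.get(
        pool_id="<pool-id>",
        node_id="<node-id>",
        compute_node_get_options=options,
    )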
- :type pool_id: str - :param node_id: The ID of the Compute Node that you want to restart. - :type node_id: str - :param node_reboot_option: The default value is requeue. Possible - values include: 'requeue', 'terminate', 'taskCompletion', - 'retainedData' - :type node_reboot_option: str or - ~azure.batch.models.ComputeNodeRebootOption - :param compute_node_reboot_options: Additional parameters for the - operation - :type compute_node_reboot_options: - ~azure.batch.models.ComputeNodeRebootOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if compute_node_reboot_options is not None: - timeout = compute_node_reboot_options.timeout - client_request_id = None - if compute_node_reboot_options is not None: - client_request_id = compute_node_reboot_options.client_request_id - return_client_request_id = None - if compute_node_reboot_options is not None: - return_client_request_id = compute_node_reboot_options.return_client_request_id - ocp_date = None - if compute_node_reboot_options is not None: - ocp_date = compute_node_reboot_options.ocp_date - node_reboot_parameter = None - if node_reboot_option is not None: - node_reboot_parameter = models.NodeRebootParameter(node_reboot_option=node_reboot_option) - - # Construct URL - url = self.reboot.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct body - if node_reboot_parameter is not None: - body_content = self._serialize.body(node_reboot_parameter, 'NodeRebootParameter') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - 
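Restarting a node took an optional disposition for its running Tasks; the accepted values ('requeue', 'terminate', 'taskCompletion', 'retainedData') are listed in the docstring above, and the call below simply picks one.

    # Let currently running Tasks finish before the node restarts.
    client.compute_node.reboot(
        pool_id="<pool-id>",
        node_id="<node-id>",
        node_reboot_option="taskCompletion",
    )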
raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - reboot.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/reboot'} - - def reimage( - self, pool_id, node_id, node_reimage_option=None, compute_node_reimage_options=None, custom_headers=None, raw=False, **operation_config): - """Reinstalls the operating system on the specified Compute Node. - - You can reinstall the operating system on a Compute Node only if it is - in an idle or running state. This API can be invoked only on Pools - created with the cloud service configuration property. - - :param pool_id: The ID of the Pool that contains the Compute Node. - :type pool_id: str - :param node_id: The ID of the Compute Node that you want to restart. - :type node_id: str - :param node_reimage_option: The default value is requeue. Possible - values include: 'requeue', 'terminate', 'taskCompletion', - 'retainedData' - :type node_reimage_option: str or - ~azure.batch.models.ComputeNodeReimageOption - :param compute_node_reimage_options: Additional parameters for the - operation - :type compute_node_reimage_options: - ~azure.batch.models.ComputeNodeReimageOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if compute_node_reimage_options is not None: - timeout = compute_node_reimage_options.timeout - client_request_id = None - if compute_node_reimage_options is not None: - client_request_id = compute_node_reimage_options.client_request_id - return_client_request_id = None - if compute_node_reimage_options is not None: - return_client_request_id = compute_node_reimage_options.return_client_request_id - ocp_date = None - if compute_node_reimage_options is not None: - ocp_date = compute_node_reimage_options.ocp_date - node_reimage_parameter = None - if node_reimage_option is not None: - node_reimage_parameter = models.NodeReimageParameter(node_reimage_option=node_reimage_option) - - # Construct URL - url = self.reimage.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct body - if node_reimage_parameter is not None: - body_content = self._serialize.body(node_reimage_parameter, 'NodeReimageParameter') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - reimage.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/reimage'} - - def disable_scheduling( - self, pool_id, node_id, node_disable_scheduling_option=None, compute_node_disable_scheduling_options=None, custom_headers=None, raw=False, **operation_config): - """Disables Task scheduling on the specified Compute Node. - - You can disable Task scheduling on a Compute Node only if its current - scheduling state is enabled. - - :param pool_id: The ID of the Pool that contains the Compute Node. - :type pool_id: str - :param node_id: The ID of the Compute Node on which you want to - disable Task scheduling. - :type node_id: str - :param node_disable_scheduling_option: The default value is requeue. - Possible values include: 'requeue', 'terminate', 'taskCompletion' - :type node_disable_scheduling_option: str or - ~azure.batch.models.DisableComputeNodeSchedulingOption - :param compute_node_disable_scheduling_options: Additional parameters - for the operation - :type compute_node_disable_scheduling_options: - ~azure.batch.models.ComputeNodeDisableSchedulingOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if compute_node_disable_scheduling_options is not None: - timeout = compute_node_disable_scheduling_options.timeout - client_request_id = None - if compute_node_disable_scheduling_options is not None: - client_request_id = compute_node_disable_scheduling_options.client_request_id - return_client_request_id = None - if compute_node_disable_scheduling_options is not None: - return_client_request_id = compute_node_disable_scheduling_options.return_client_request_id - ocp_date = None - if compute_node_disable_scheduling_options is not None: - ocp_date = compute_node_disable_scheduling_options.ocp_date - node_disable_scheduling_parameter = None - if node_disable_scheduling_option is not None: - node_disable_scheduling_parameter = models.NodeDisableSchedulingParameter(node_disable_scheduling_option=node_disable_scheduling_option) - - # Construct URL - url = self.disable_scheduling.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct body - if node_disable_scheduling_parameter is not None: - body_content = self._serialize.body(node_disable_scheduling_parameter, 'NodeDisableSchedulingParameter') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - disable_scheduling.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/disablescheduling'} - - def enable_scheduling( - self, pool_id, node_id, 
compute_node_enable_scheduling_options=None, custom_headers=None, raw=False, **operation_config): - """Enables Task scheduling on the specified Compute Node. - - You can enable Task scheduling on a Compute Node only if its current - scheduling state is disabled. - - :param pool_id: The ID of the Pool that contains the Compute Node. - :type pool_id: str - :param node_id: The ID of the Compute Node on which you want to enable - Task scheduling. - :type node_id: str - :param compute_node_enable_scheduling_options: Additional parameters - for the operation - :type compute_node_enable_scheduling_options: - ~azure.batch.models.ComputeNodeEnableSchedulingOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if compute_node_enable_scheduling_options is not None: - timeout = compute_node_enable_scheduling_options.timeout - client_request_id = None - if compute_node_enable_scheduling_options is not None: - client_request_id = compute_node_enable_scheduling_options.client_request_id - return_client_request_id = None - if compute_node_enable_scheduling_options is not None: - return_client_request_id = compute_node_enable_scheduling_options.return_client_request_id - ocp_date = None - if compute_node_enable_scheduling_options is not None: - ocp_date = compute_node_enable_scheduling_options.ocp_date - - # Construct URL - url = self.enable_scheduling.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 
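The two scheduling toggles above are symmetric: disabling accepts a disposition for the Tasks currently on the node, while enabling sends no body at all. A sketch using a value listed in the disable docstring.

    # Stop new Tasks from being scheduled; requeue whatever is currently running.
    client.compute_node.disable_scheduling(
        pool_id="<pool-id>",
        node_id="<node-id>",
        node_disable_scheduling_option="requeue",
    )

    # Later, allow the node to pick up Tasks again.
    client.compute_node.enable_scheduling(pool_id="<pool-id>", node_id="<node-id>")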
'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - enable_scheduling.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/enablescheduling'} - - def get_remote_login_settings( - self, pool_id, node_id, compute_node_get_remote_login_settings_options=None, custom_headers=None, raw=False, **operation_config): - """Gets the settings required for remote login to a Compute Node. - - Before you can remotely login to a Compute Node using the remote login - settings, you must create a user Account on the Compute Node. This API - can be invoked only on Pools created with the virtual machine - configuration property. For Pools created with a cloud service - configuration, see the GetRemoteDesktop API. - - :param pool_id: The ID of the Pool that contains the Compute Node. - :type pool_id: str - :param node_id: The ID of the Compute Node for which to obtain the - remote login settings. - :type node_id: str - :param compute_node_get_remote_login_settings_options: Additional - parameters for the operation - :type compute_node_get_remote_login_settings_options: - ~azure.batch.models.ComputeNodeGetRemoteLoginSettingsOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ComputeNodeGetRemoteLoginSettingsResult or ClientRawResponse - if raw=true - :rtype: ~azure.batch.models.ComputeNodeGetRemoteLoginSettingsResult or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if compute_node_get_remote_login_settings_options is not None: - timeout = compute_node_get_remote_login_settings_options.timeout - client_request_id = None - if compute_node_get_remote_login_settings_options is not None: - client_request_id = compute_node_get_remote_login_settings_options.client_request_id - return_client_request_id = None - if compute_node_get_remote_login_settings_options is not None: - return_client_request_id = compute_node_get_remote_login_settings_options.return_client_request_id - ocp_date = None - if compute_node_get_remote_login_settings_options is not None: - ocp_date = compute_node_get_remote_login_settings_options.ocp_date - - # Construct URL - url = self.get_remote_login_settings.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - 
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ComputeNodeGetRemoteLoginSettingsResult', response) - header_dict = { - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - } - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - get_remote_login_settings.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/remoteloginsettings'} - - def get_remote_desktop( - self, pool_id, node_id, compute_node_get_remote_desktop_options=None, custom_headers=None, raw=False, callback=None, **operation_config): - """Gets the Remote Desktop Protocol file for the specified Compute Node. - - Before you can access a Compute Node by using the RDP file, you must - create a user Account on the Compute Node. This API can only be invoked - on Pools created with a cloud service configuration. For Pools created - with a virtual machine configuration, see the GetRemoteLoginSettings - API. - - :param pool_id: The ID of the Pool that contains the Compute Node. - :type pool_id: str - :param node_id: The ID of the Compute Node for which you want to get - the Remote Desktop Protocol file. - :type node_id: str - :param compute_node_get_remote_desktop_options: Additional parameters - for the operation - :type compute_node_get_remote_desktop_options: - ~azure.batch.models.ComputeNodeGetRemoteDesktopOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param callback: When specified, will be called with each chunk of - data that is streamed. The callback should take two arguments, the - bytes of the current chunk of data and the response object. If the - data is uploading, response will be None. - :type callback: Callable[Bytes, response=None] - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: object or ClientRawResponse if raw=true - :rtype: Generator or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if compute_node_get_remote_desktop_options is not None: - timeout = compute_node_get_remote_desktop_options.timeout - client_request_id = None - if compute_node_get_remote_desktop_options is not None: - client_request_id = compute_node_get_remote_desktop_options.client_request_id - return_client_request_id = None - if compute_node_get_remote_desktop_options is not None: - return_client_request_id = compute_node_get_remote_desktop_options.return_client_request_id - ocp_date = None - if compute_node_get_remote_desktop_options is not None: - ocp_date = compute_node_get_remote_desktop_options.ocp_date - - # Construct URL - url = self.get_remote_desktop.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=True, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = self._client.stream_download(response, callback) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - get_remote_desktop.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/rdp'} - - def upload_batch_service_logs( - self, pool_id, node_id, upload_batch_service_logs_configuration, compute_node_upload_batch_service_logs_options=None, custom_headers=None, raw=False, **operation_config): - """Upload Azure Batch service log files from the specified Compute Node to - Azure Blob Storage. - - This is for gathering Azure Batch service log files in an automated - fashion from Compute Nodes if you are experiencing an error and wish to - escalate to Azure support. The Azure Batch service log files should be - shared with Azure support to aid in debugging issues with the Batch - service. 
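The two remote-access operations above target different pool types: `get_remote_login_settings` applies to virtual machine configuration pools, while `get_remote_desktop` streams back an RDP file for cloud service configuration pools. A sketch; the attribute names on the settings result are assumptions, and the chunked iteration follows from the streamed download in the deleted code.

    # Virtual machine configuration pools: fetch the address and port for remote login.
    settings = client.compute_node.get_remote_login_settings(
        pool_id="<pool-id>", node_id="<node-id>"
    )
    print(settings.remote_login_ip_address, settings.remote_login_port)  # attribute names assumed

    # Cloud service configuration pools: the RDP file is streamed back in chunks.
    with open("node.rdp", "wb") as rdp_file:
        for chunk in client.compute_node.get_remote_desktop(
            pool_id="<pool-id>", node_id="<node-id>"
        ):
            rdp_file.write(chunk)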
- - :param pool_id: The ID of the Pool that contains the Compute Node. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to - upload the Azure Batch service log files. - :type node_id: str - :param upload_batch_service_logs_configuration: The Azure Batch - service log files upload configuration. - :type upload_batch_service_logs_configuration: - ~azure.batch.models.UploadBatchServiceLogsConfiguration - :param compute_node_upload_batch_service_logs_options: Additional - parameters for the operation - :type compute_node_upload_batch_service_logs_options: - ~azure.batch.models.ComputeNodeUploadBatchServiceLogsOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: UploadBatchServiceLogsResult or ClientRawResponse if raw=true - :rtype: ~azure.batch.models.UploadBatchServiceLogsResult or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if compute_node_upload_batch_service_logs_options is not None: - timeout = compute_node_upload_batch_service_logs_options.timeout - client_request_id = None - if compute_node_upload_batch_service_logs_options is not None: - client_request_id = compute_node_upload_batch_service_logs_options.client_request_id - return_client_request_id = None - if compute_node_upload_batch_service_logs_options is not None: - return_client_request_id = compute_node_upload_batch_service_logs_options.return_client_request_id - ocp_date = None - if compute_node_upload_batch_service_logs_options is not None: - ocp_date = compute_node_upload_batch_service_logs_options.ocp_date - - # Construct URL - url = self.upload_batch_service_logs.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(upload_batch_service_logs_configuration, 
'UploadBatchServiceLogsConfiguration') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('UploadBatchServiceLogsResult', response) - header_dict = { - 'client-request-id': 'str', - 'request-id': 'str', - } - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - upload_batch_service_logs.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs'} - - def list( - self, pool_id, compute_node_list_options=None, custom_headers=None, raw=False, **operation_config): - """Lists the Compute Nodes in the specified Pool. - - :param pool_id: The ID of the Pool from which you want to list Compute - Nodes. - :type pool_id: str - :param compute_node_list_options: Additional parameters for the - operation - :type compute_node_list_options: - ~azure.batch.models.ComputeNodeListOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: An iterator like instance of ComputeNode - :rtype: - ~azure.batch.models.ComputeNodePaged[~azure.batch.models.ComputeNode] - :raises: - :class:`BatchErrorException` - """ - filter = None - if compute_node_list_options is not None: - filter = compute_node_list_options.filter - select = None - if compute_node_list_options is not None: - select = compute_node_list_options.select - max_results = None - if compute_node_list_options is not None: - max_results = compute_node_list_options.max_results - timeout = None - if compute_node_list_options is not None: - timeout = compute_node_list_options.timeout - client_request_id = None - if compute_node_list_options is not None: - client_request_id = compute_node_list_options.client_request_id - return_client_request_id = None - if compute_node_list_options is not None: - return_client_request_id = compute_node_list_options.return_client_request_id - ocp_date = None - if compute_node_list_options is not None: - ocp_date = compute_node_list_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if filter is not None: - query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - 
query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.ComputeNodePaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list.metadata = {'url': '/pools/{poolId}/nodes'} diff --git a/sdk/batch/azure-batch/azure/batch/operations/_file_operations.py b/sdk/batch/azure-batch/azure/batch/operations/_file_operations.py deleted file mode 100644 index 6a60bc95f2b2..000000000000 --- a/sdk/batch/azure-batch/azure/batch/operations/_file_operations.py +++ /dev/null @@ -1,874 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -import uuid -from msrest.pipeline import ClientRawResponse - -from .. import models - - -class FileOperations(object): - """FileOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar api_version: The API version to use for the request. Constant value: "2024-02-01.19.0". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self.api_version = "2024-02-01.19.0" - - self.config = config - - def delete_from_task( - self, job_id, task_id, file_path, recursive=None, file_delete_from_task_options=None, custom_headers=None, raw=False, **operation_config): - """Deletes the specified Task file from the Compute Node where the Task - ran. - - :param job_id: The ID of the Job that contains the Task. 
- :type job_id: str - :param task_id: The ID of the Task whose file you want to delete. - :type task_id: str - :param file_path: The path to the Task file or directory that you want - to delete. - :type file_path: str - :param recursive: Whether to delete children of a directory. If the - filePath parameter represents a directory instead of a file, you can - set recursive to true to delete the directory and all of the files and - subdirectories in it. If recursive is false then the directory must be - empty or deletion will fail. - :type recursive: bool - :param file_delete_from_task_options: Additional parameters for the - operation - :type file_delete_from_task_options: - ~azure.batch.models.FileDeleteFromTaskOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if file_delete_from_task_options is not None: - timeout = file_delete_from_task_options.timeout - client_request_id = None - if file_delete_from_task_options is not None: - client_request_id = file_delete_from_task_options.client_request_id - return_client_request_id = None - if file_delete_from_task_options is not None: - return_client_request_id = file_delete_from_task_options.return_client_request_id - ocp_date = None - if file_delete_from_task_options is not None: - ocp_date = file_delete_from_task_options.ocp_date - - # Construct URL - url = self.delete_from_task.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str'), - 'taskId': self._serialize.url("task_id", task_id, 'str'), - 'filePath': self._serialize.url("file_path", file_path, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if recursive is not None: - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise 
models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - }) - return client_raw_response - delete_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} - - def get_from_task( - self, job_id, task_id, file_path, file_get_from_task_options=None, custom_headers=None, raw=False, callback=None, **operation_config): - """Returns the content of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. - :type task_id: str - :param file_path: The path to the Task file that you want to get the - content of. - :type file_path: str - :param file_get_from_task_options: Additional parameters for the - operation - :type file_get_from_task_options: - ~azure.batch.models.FileGetFromTaskOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param callback: When specified, will be called with each chunk of - data that is streamed. The callback should take two arguments, the - bytes of the current chunk of data and the response object. If the - data is uploading, response will be None. - :type callback: Callable[Bytes, response=None] - :param operation_config: :ref:`Operation configuration - overrides`. - :return: object or ClientRawResponse if raw=true - :rtype: Generator or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if file_get_from_task_options is not None: - timeout = file_get_from_task_options.timeout - client_request_id = None - if file_get_from_task_options is not None: - client_request_id = file_get_from_task_options.client_request_id - return_client_request_id = None - if file_get_from_task_options is not None: - return_client_request_id = file_get_from_task_options.return_client_request_id - ocp_date = None - if file_get_from_task_options is not None: - ocp_date = file_get_from_task_options.ocp_date - ocp_range = None - if file_get_from_task_options is not None: - ocp_range = file_get_from_task_options.ocp_range - if_modified_since = None - if file_get_from_task_options is not None: - if_modified_since = file_get_from_task_options.if_modified_since - if_unmodified_since = None - if file_get_from_task_options is not None: - if_unmodified_since = file_get_from_task_options.if_unmodified_since - - # Construct URL - url = self.get_from_task.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str'), - 'taskId': self._serialize.url("task_id", task_id, 'str'), - 'filePath': self._serialize.url("file_path", file_path, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - 
header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if ocp_range is not None: - header_parameters['ocp-range'] = self._serialize.header("ocp_range", ocp_range, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=True, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = self._client.stream_download(response, callback) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - get_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} - - def get_properties_from_task( - self, job_id, task_id, file_path, file_get_properties_from_task_options=None, custom_headers=None, raw=False, **operation_config): - """Gets the properties of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. - :type job_id: str - :param task_id: The ID of the Task whose file you want to get the - properties of. - :type task_id: str - :param file_path: The path to the Task file that you want to get the - properties of. - :type file_path: str - :param file_get_properties_from_task_options: Additional parameters - for the operation - :type file_get_properties_from_task_options: - ~azure.batch.models.FileGetPropertiesFromTaskOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if file_get_properties_from_task_options is not None: - timeout = file_get_properties_from_task_options.timeout - client_request_id = None - if file_get_properties_from_task_options is not None: - client_request_id = file_get_properties_from_task_options.client_request_id - return_client_request_id = None - if file_get_properties_from_task_options is not None: - return_client_request_id = file_get_properties_from_task_options.return_client_request_id - ocp_date = None - if file_get_properties_from_task_options is not None: - ocp_date = file_get_properties_from_task_options.ocp_date - if_modified_since = None - if file_get_properties_from_task_options is not None: - if_modified_since = file_get_properties_from_task_options.if_modified_since - if_unmodified_since = None - if file_get_properties_from_task_options is not None: - if_unmodified_since = file_get_properties_from_task_options.if_unmodified_since - - # Construct URL - url = self.get_properties_from_task.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str'), - 'taskId': self._serialize.url("task_id", task_id, 'str'), - 'filePath': self._serialize.url("file_path", file_path, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'ocp-creation-time': 'rfc-1123', - 'ocp-batch-file-isdirectory': 'bool', - 'ocp-batch-file-url': 
'str', - 'ocp-batch-file-mode': 'str', - 'Content-Type': 'str', - 'Content-Length': 'long', - }) - return client_raw_response - get_properties_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} - - def delete_from_compute_node( - self, pool_id, node_id, file_path, recursive=None, file_delete_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): - """Deletes the specified file from the Compute Node. - - :param pool_id: The ID of the Pool that contains the Compute Node. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to - delete the file. - :type node_id: str - :param file_path: The path to the file or directory that you want to - delete. - :type file_path: str - :param recursive: Whether to delete children of a directory. If the - filePath parameter represents a directory instead of a file, you can - set recursive to true to delete the directory and all of the files and - subdirectories in it. If recursive is false then the directory must be - empty or deletion will fail. - :type recursive: bool - :param file_delete_from_compute_node_options: Additional parameters - for the operation - :type file_delete_from_compute_node_options: - ~azure.batch.models.FileDeleteFromComputeNodeOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if file_delete_from_compute_node_options is not None: - timeout = file_delete_from_compute_node_options.timeout - client_request_id = None - if file_delete_from_compute_node_options is not None: - client_request_id = file_delete_from_compute_node_options.client_request_id - return_client_request_id = None - if file_delete_from_compute_node_options is not None: - return_client_request_id = file_delete_from_compute_node_options.return_client_request_id - ocp_date = None - if file_delete_from_compute_node_options is not None: - ocp_date = file_delete_from_compute_node_options.ocp_date - - # Construct URL - url = self.delete_from_compute_node.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str'), - 'filePath': self._serialize.url("file_path", file_path, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if recursive is not None: - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - 
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - }) - return client_raw_response - delete_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} - - def get_from_compute_node( - self, pool_id, node_id, file_path, file_get_from_compute_node_options=None, custom_headers=None, raw=False, callback=None, **operation_config): - """Returns the content of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. - :type pool_id: str - :param node_id: The ID of the Compute Node that contains the file. - :type node_id: str - :param file_path: The path to the Compute Node file that you want to - get the content of. - :type file_path: str - :param file_get_from_compute_node_options: Additional parameters for - the operation - :type file_get_from_compute_node_options: - ~azure.batch.models.FileGetFromComputeNodeOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param callback: When specified, will be called with each chunk of - data that is streamed. The callback should take two arguments, the - bytes of the current chunk of data and the response object. If the - data is uploading, response will be None. - :type callback: Callable[Bytes, response=None] - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: object or ClientRawResponse if raw=true - :rtype: Generator or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if file_get_from_compute_node_options is not None: - timeout = file_get_from_compute_node_options.timeout - client_request_id = None - if file_get_from_compute_node_options is not None: - client_request_id = file_get_from_compute_node_options.client_request_id - return_client_request_id = None - if file_get_from_compute_node_options is not None: - return_client_request_id = file_get_from_compute_node_options.return_client_request_id - ocp_date = None - if file_get_from_compute_node_options is not None: - ocp_date = file_get_from_compute_node_options.ocp_date - ocp_range = None - if file_get_from_compute_node_options is not None: - ocp_range = file_get_from_compute_node_options.ocp_range - if_modified_since = None - if file_get_from_compute_node_options is not None: - if_modified_since = file_get_from_compute_node_options.if_modified_since - if_unmodified_since = None - if file_get_from_compute_node_options is not None: - if_unmodified_since = file_get_from_compute_node_options.if_unmodified_since - - # Construct URL - url = self.get_from_compute_node.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str'), - 'filePath': self._serialize.url("file_path", file_path, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if ocp_range is not None: - header_parameters['ocp-range'] = self._serialize.header("ocp_range", ocp_range, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=True, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = 
self._client.stream_download(response, callback) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - get_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} - - def get_properties_from_compute_node( - self, pool_id, node_id, file_path, file_get_properties_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): - """Gets the properties of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. - :type pool_id: str - :param node_id: The ID of the Compute Node that contains the file. - :type node_id: str - :param file_path: The path to the Compute Node file that you want to - get the properties of. - :type file_path: str - :param file_get_properties_from_compute_node_options: Additional - parameters for the operation - :type file_get_properties_from_compute_node_options: - ~azure.batch.models.FileGetPropertiesFromComputeNodeOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if file_get_properties_from_compute_node_options is not None: - timeout = file_get_properties_from_compute_node_options.timeout - client_request_id = None - if file_get_properties_from_compute_node_options is not None: - client_request_id = file_get_properties_from_compute_node_options.client_request_id - return_client_request_id = None - if file_get_properties_from_compute_node_options is not None: - return_client_request_id = file_get_properties_from_compute_node_options.return_client_request_id - ocp_date = None - if file_get_properties_from_compute_node_options is not None: - ocp_date = file_get_properties_from_compute_node_options.ocp_date - if_modified_since = None - if file_get_properties_from_compute_node_options is not None: - if_modified_since = file_get_properties_from_compute_node_options.if_modified_since - if_unmodified_since = None - if file_get_properties_from_compute_node_options is not None: - if_unmodified_since = file_get_properties_from_compute_node_options.if_unmodified_since - - # Construct URL - url = self.get_properties_from_compute_node.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str'), - 'filePath': self._serialize.url("file_path", file_path, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'ocp-creation-time': 'rfc-1123', - 'ocp-batch-file-isdirectory': 'bool', - 'ocp-batch-file-url': 'str', - 'ocp-batch-file-mode': 'str', - 'Content-Type': 'str', - 'Content-Length': 'long', - }) - return client_raw_response - get_properties_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} - - def list_from_task( - self, job_id, task_id, recursive=None, file_list_from_task_options=None, custom_headers=None, raw=False, **operation_config): - """Lists the files in a Task's directory on its Compute Node. - - :param job_id: The ID of the Job that contains the Task. - :type job_id: str - :param task_id: The ID of the Task whose files you want to list. - :type task_id: str - :param recursive: Whether to list children of the Task directory. This - parameter can be used in combination with the filter parameter to list - specific type of files. - :type recursive: bool - :param file_list_from_task_options: Additional parameters for the - operation - :type file_list_from_task_options: - ~azure.batch.models.FileListFromTaskOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: An iterator like instance of NodeFile - :rtype: - ~azure.batch.models.NodeFilePaged[~azure.batch.models.NodeFile] - :raises: - :class:`BatchErrorException` - """ - filter = None - if file_list_from_task_options is not None: - filter = file_list_from_task_options.filter - max_results = None - if file_list_from_task_options is not None: - max_results = file_list_from_task_options.max_results - timeout = None - if file_list_from_task_options is not None: - timeout = file_list_from_task_options.timeout - client_request_id = None - if file_list_from_task_options is not None: - client_request_id = file_list_from_task_options.client_request_id - return_client_request_id = None - if file_list_from_task_options is not None: - return_client_request_id = file_list_from_task_options.return_client_request_id - ocp_date = None - if file_list_from_task_options is not None: - ocp_date = file_list_from_task_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list_from_task.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str'), - 'taskId': self._serialize.url("task_id", task_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if recursive is not None: - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if filter is not None: - query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.NodeFilePaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - 
list_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files'} - - def list_from_compute_node( - self, pool_id, node_id, recursive=None, file_list_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): - """Lists all of the files in Task directories on the specified Compute - Node. - - :param pool_id: The ID of the Pool that contains the Compute Node. - :type pool_id: str - :param node_id: The ID of the Compute Node whose files you want to - list. - :type node_id: str - :param recursive: Whether to list children of a directory. - :type recursive: bool - :param file_list_from_compute_node_options: Additional parameters for - the operation - :type file_list_from_compute_node_options: - ~azure.batch.models.FileListFromComputeNodeOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: An iterator like instance of NodeFile - :rtype: - ~azure.batch.models.NodeFilePaged[~azure.batch.models.NodeFile] - :raises: - :class:`BatchErrorException` - """ - filter = None - if file_list_from_compute_node_options is not None: - filter = file_list_from_compute_node_options.filter - max_results = None - if file_list_from_compute_node_options is not None: - max_results = file_list_from_compute_node_options.max_results - timeout = None - if file_list_from_compute_node_options is not None: - timeout = file_list_from_compute_node_options.timeout - client_request_id = None - if file_list_from_compute_node_options is not None: - client_request_id = file_list_from_compute_node_options.client_request_id - return_client_request_id = None - if file_list_from_compute_node_options is not None: - return_client_request_id = file_list_from_compute_node_options.return_client_request_id - ocp_date = None - if file_list_from_compute_node_options is not None: - ocp_date = file_list_from_compute_node_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list_from_compute_node.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str'), - 'nodeId': self._serialize.url("node_id", node_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if recursive is not None: - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if filter is not None: - query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.NodeFilePaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files'} diff --git a/sdk/batch/azure-batch/azure/batch/operations/_job_operations.py b/sdk/batch/azure-batch/azure/batch/operations/_job_operations.py deleted file mode 100644 index fa07dbcdf2ff..000000000000 --- a/sdk/batch/azure-batch/azure/batch/operations/_job_operations.py +++ /dev/null @@ -1,1351 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -import uuid -from msrest.pipeline import ClientRawResponse - -from .. import models - - -class JobOperations(object): - """JobOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar api_version: The API version to use for the request. Constant value: "2024-02-01.19.0". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self.api_version = "2024-02-01.19.0" - - self.config = config - - def delete( - self, job_id, job_delete_options=None, custom_headers=None, raw=False, **operation_config): - """Deletes a Job. - - Deleting a Job also deletes all Tasks that are part of that Job, and - all Job statistics. This also overrides the retention period for Task - data; that is, if the Job contains Tasks which are still retained on - Compute Nodes, the Batch services deletes those Tasks' working - directories and all their contents. When a Delete Job request is - received, the Batch service sets the Job to the deleting state. 
All - update operations on a Job that is in deleting state will fail with - status code 409 (Conflict), with additional information indicating that - the Job is being deleted. - - :param job_id: The ID of the Job to delete. - :type job_id: str - :param job_delete_options: Additional parameters for the operation - :type job_delete_options: ~azure.batch.models.JobDeleteOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_delete_options is not None: - timeout = job_delete_options.timeout - client_request_id = None - if job_delete_options is not None: - client_request_id = job_delete_options.client_request_id - return_client_request_id = None - if job_delete_options is not None: - return_client_request_id = job_delete_options.return_client_request_id - ocp_date = None - if job_delete_options is not None: - ocp_date = job_delete_options.ocp_date - if_match = None - if job_delete_options is not None: - if_match = job_delete_options.if_match - if_none_match = None - if job_delete_options is not None: - if_none_match = job_delete_options.if_none_match - if_modified_since = None - if job_delete_options is not None: - if_modified_since = job_delete_options.if_modified_since - if_unmodified_since = None - if job_delete_options is not None: - if_unmodified_since = job_delete_options.if_unmodified_since - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - 
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - }) - return client_raw_response - delete.metadata = {'url': '/jobs/{jobId}'} - - def get( - self, job_id, job_get_options=None, custom_headers=None, raw=False, **operation_config): - """Gets information about the specified Job. - - :param job_id: The ID of the Job. - :type job_id: str - :param job_get_options: Additional parameters for the operation - :type job_get_options: ~azure.batch.models.JobGetOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: CloudJob or ClientRawResponse if raw=true - :rtype: ~azure.batch.models.CloudJob or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - select = None - if job_get_options is not None: - select = job_get_options.select - expand = None - if job_get_options is not None: - expand = job_get_options.expand - timeout = None - if job_get_options is not None: - timeout = job_get_options.timeout - client_request_id = None - if job_get_options is not None: - client_request_id = job_get_options.client_request_id - return_client_request_id = None - if job_get_options is not None: - return_client_request_id = job_get_options.return_client_request_id - ocp_date = None - if job_get_options is not None: - ocp_date = job_get_options.ocp_date - if_match = None - if job_get_options is not None: - if_match = job_get_options.if_match - if_none_match = None - if job_get_options is not None: - if_none_match = job_get_options.if_none_match - if_modified_since = None - if job_get_options is not None: - if_modified_since = job_get_options.if_modified_since - if_unmodified_since = None - if job_get_options is not None: - if_unmodified_since = job_get_options.if_unmodified_since - - # Construct URL - url = self.get.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if expand is not None: - query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - 
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('CloudJob', response) - header_dict = { - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - } - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - get.metadata = {'url': '/jobs/{jobId}'} - - def patch( - self, job_id, job_patch_parameter, job_patch_options=None, custom_headers=None, raw=False, **operation_config): - """Updates the properties of the specified Job. - - This replaces only the Job properties specified in the request. For - example, if the Job has constraints, and a request does not specify the - constraints element, then the Job keeps the existing constraints. - - :param job_id: The ID of the Job whose properties you want to update. - :type job_id: str - :param job_patch_parameter: The parameters for the request. - :type job_patch_parameter: ~azure.batch.models.JobPatchParameter - :param job_patch_options: Additional parameters for the operation - :type job_patch_options: ~azure.batch.models.JobPatchOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
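As a hedged illustration of the patch semantics described in the docstring above (only the properties set on the request are changed), a call against the removed msrest-based client might have looked roughly like the following; the account name, key, endpoint, and job ID are placeholders, and SharedKeyCredentials is the legacy shared-key helper from azure.batch.batch_auth:

from azure.batch import BatchServiceClient, models
from azure.batch.batch_auth import SharedKeyCredentials

client = BatchServiceClient(
    SharedKeyCredentials("<account>", "<key>"),  # placeholder credentials
    batch_url="https://<account>.<region>.batch.azure.com",  # placeholder endpoint
)

# Only priority is sent, so constraints, pool info and metadata keep their
# existing values, per the partial-update (patch) semantics above.
client.job.patch("job-1", models.JobPatchParameter(priority=100))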
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_patch_options is not None: - timeout = job_patch_options.timeout - client_request_id = None - if job_patch_options is not None: - client_request_id = job_patch_options.client_request_id - return_client_request_id = None - if job_patch_options is not None: - return_client_request_id = job_patch_options.return_client_request_id - ocp_date = None - if job_patch_options is not None: - ocp_date = job_patch_options.ocp_date - if_match = None - if job_patch_options is not None: - if_match = job_patch_options.if_match - if_none_match = None - if job_patch_options is not None: - if_none_match = job_patch_options.if_none_match - if_modified_since = None - if job_patch_options is not None: - if_modified_since = job_patch_options.if_modified_since - if_unmodified_since = None - if job_patch_options is not None: - if_unmodified_since = job_patch_options.if_unmodified_since - - # Construct URL - url = self.patch.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(job_patch_parameter, 'JobPatchParameter') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response 
= ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - patch.metadata = {'url': '/jobs/{jobId}'} - - def update( - self, job_id, job_update_parameter, job_update_options=None, custom_headers=None, raw=False, **operation_config): - """Updates the properties of the specified Job. - - This fully replaces all the updatable properties of the Job. For - example, if the Job has constraints associated with it and if - constraints is not specified with this request, then the Batch service - will remove the existing constraints. - - :param job_id: The ID of the Job whose properties you want to update. - :type job_id: str - :param job_update_parameter: The parameters for the request. - :type job_update_parameter: ~azure.batch.models.JobUpdateParameter - :param job_update_options: Additional parameters for the operation - :type job_update_options: ~azure.batch.models.JobUpdateOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_update_options is not None: - timeout = job_update_options.timeout - client_request_id = None - if job_update_options is not None: - client_request_id = job_update_options.client_request_id - return_client_request_id = None - if job_update_options is not None: - return_client_request_id = job_update_options.return_client_request_id - ocp_date = None - if job_update_options is not None: - ocp_date = job_update_options.ocp_date - if_match = None - if job_update_options is not None: - if_match = job_update_options.if_match - if_none_match = None - if job_update_options is not None: - if_none_match = job_update_options.if_none_match - if_modified_since = None - if job_update_options is not None: - if_modified_since = job_update_options.if_modified_since - if_unmodified_since = None - if job_update_options is not None: - if_unmodified_since = job_update_options.if_unmodified_since - - # Construct URL - url = self.update.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", 
client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(job_update_parameter, 'JobUpdateParameter') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - update.metadata = {'url': '/jobs/{jobId}'} - - def disable( - self, job_id, disable_tasks, job_disable_options=None, custom_headers=None, raw=False, **operation_config): - """Disables the specified Job, preventing new Tasks from running. - - The Batch Service immediately moves the Job to the disabling state. - Batch then uses the disableTasks parameter to determine what to do with - the currently running Tasks of the Job. The Job remains in the - disabling state until the disable operation is completed and all Tasks - have been dealt with according to the disableTasks option; the Job then - moves to the disabled state. No new Tasks are started under the Job - until it moves back to active state. If you try to disable a Job that - is in any state other than active, disabling, or disabled, the request - fails with status code 409. - - :param job_id: The ID of the Job to disable. - :type job_id: str - :param disable_tasks: Possible values include: 'requeue', 'terminate', - 'wait' - :type disable_tasks: str or ~azure.batch.models.DisableJobOption - :param job_disable_options: Additional parameters for the operation - :type job_disable_options: ~azure.batch.models.JobDisableOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
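A minimal sketch of driving the removed disable operation shown above; "requeue" is one of the documented disableTasks values ('requeue', 'terminate', 'wait'), and the account values are placeholders:

from azure.batch import BatchServiceClient
from azure.batch.batch_auth import SharedKeyCredentials

client = BatchServiceClient(
    SharedKeyCredentials("<account>", "<key>"),  # placeholder credentials
    batch_url="https://<account>.<region>.batch.azure.com",
)

# Moves the Job to 'disabling'; running Tasks are requeued before the Job
# reaches the 'disabled' state, as described in the docstring above.
client.job.disable("job-1", disable_tasks="requeue")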
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_disable_options is not None: - timeout = job_disable_options.timeout - client_request_id = None - if job_disable_options is not None: - client_request_id = job_disable_options.client_request_id - return_client_request_id = None - if job_disable_options is not None: - return_client_request_id = job_disable_options.return_client_request_id - ocp_date = None - if job_disable_options is not None: - ocp_date = job_disable_options.ocp_date - if_match = None - if job_disable_options is not None: - if_match = job_disable_options.if_match - if_none_match = None - if job_disable_options is not None: - if_none_match = job_disable_options.if_none_match - if_modified_since = None - if job_disable_options is not None: - if_modified_since = job_disable_options.if_modified_since - if_unmodified_since = None - if job_disable_options is not None: - if_unmodified_since = job_disable_options.if_unmodified_since - job_disable_parameter = models.JobDisableParameter(disable_tasks=disable_tasks) - - # Construct URL - url = self.disable.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(job_disable_parameter, 'JobDisableParameter') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if 
response.status_code not in [202]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - disable.metadata = {'url': '/jobs/{jobId}/disable'} - - def enable( - self, job_id, job_enable_options=None, custom_headers=None, raw=False, **operation_config): - """Enables the specified Job, allowing new Tasks to run. - - When you call this API, the Batch service sets a disabled Job to the - enabling state. After the this operation is completed, the Job moves to - the active state, and scheduling of new Tasks under the Job resumes. - The Batch service does not allow a Task to remain in the active state - for more than 180 days. Therefore, if you enable a Job containing - active Tasks which were added more than 180 days ago, those Tasks will - not run. - - :param job_id: The ID of the Job to enable. - :type job_id: str - :param job_enable_options: Additional parameters for the operation - :type job_enable_options: ~azure.batch.models.JobEnableOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_enable_options is not None: - timeout = job_enable_options.timeout - client_request_id = None - if job_enable_options is not None: - client_request_id = job_enable_options.client_request_id - return_client_request_id = None - if job_enable_options is not None: - return_client_request_id = job_enable_options.return_client_request_id - ocp_date = None - if job_enable_options is not None: - ocp_date = job_enable_options.ocp_date - if_match = None - if job_enable_options is not None: - if_match = job_enable_options.if_match - if_none_match = None - if job_enable_options is not None: - if_none_match = job_enable_options.if_none_match - if_modified_since = None - if job_enable_options is not None: - if_modified_since = job_enable_options.if_modified_since - if_unmodified_since = None - if job_enable_options is not None: - if_unmodified_since = job_enable_options.if_unmodified_since - - # Construct URL - url = self.enable.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - 
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - enable.metadata = {'url': '/jobs/{jobId}/enable'} - - def terminate( - self, job_id, terminate_reason=None, job_terminate_options=None, custom_headers=None, raw=False, **operation_config): - """Terminates the specified Job, marking it as completed. - - When a Terminate Job request is received, the Batch service sets the - Job to the terminating state. The Batch service then terminates any - running Tasks associated with the Job and runs any required Job release - Tasks. Then the Job moves into the completed state. If there are any - Tasks in the Job in the active state, they will remain in the active - state. Once a Job is terminated, new Tasks cannot be added and any - remaining active Tasks will not be scheduled. - - :param job_id: The ID of the Job to terminate. - :type job_id: str - :param terminate_reason: - :type terminate_reason: str - :param job_terminate_options: Additional parameters for the operation - :type job_terminate_options: ~azure.batch.models.JobTerminateOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
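For reference, the removed terminate operation above could be invoked roughly as follows; terminate_reason is optional and the values shown are placeholders:

from azure.batch import BatchServiceClient
from azure.batch.batch_auth import SharedKeyCredentials

client = BatchServiceClient(
    SharedKeyCredentials("<account>", "<key>"),  # placeholder credentials
    batch_url="https://<account>.<region>.batch.azure.com",
)

# Marks the Job as completed; remaining active Tasks will not be scheduled.
client.job.terminate("job-1", terminate_reason="Completed by operator")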
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_terminate_options is not None: - timeout = job_terminate_options.timeout - client_request_id = None - if job_terminate_options is not None: - client_request_id = job_terminate_options.client_request_id - return_client_request_id = None - if job_terminate_options is not None: - return_client_request_id = job_terminate_options.return_client_request_id - ocp_date = None - if job_terminate_options is not None: - ocp_date = job_terminate_options.ocp_date - if_match = None - if job_terminate_options is not None: - if_match = job_terminate_options.if_match - if_none_match = None - if job_terminate_options is not None: - if_none_match = job_terminate_options.if_none_match - if_modified_since = None - if job_terminate_options is not None: - if_modified_since = job_terminate_options.if_modified_since - if_unmodified_since = None - if job_terminate_options is not None: - if_unmodified_since = job_terminate_options.if_unmodified_since - job_terminate_parameter = None - if terminate_reason is not None: - job_terminate_parameter = models.JobTerminateParameter(terminate_reason=terminate_reason) - - # Construct URL - url = self.terminate.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - if job_terminate_parameter is not None: - body_content = self._serialize.body(job_terminate_parameter, 'JobTerminateParameter') - else: - body_content = None - - # Construct and send 
request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - terminate.metadata = {'url': '/jobs/{jobId}/terminate'} - - def add( - self, job, job_add_options=None, custom_headers=None, raw=False, **operation_config): - """Adds a Job to the specified Account. - - The Batch service supports two ways to control the work done as part of - a Job. In the first approach, the user specifies a Job Manager Task. - The Batch service launches this Task when it is ready to start the Job. - The Job Manager Task controls all other Tasks that run under this Job, - by using the Task APIs. In the second approach, the user directly - controls the execution of Tasks under an active Job, by using the Task - APIs. Also note: when naming Jobs, avoid including sensitive - information such as user names or secret project names. This - information may appear in telemetry logs accessible to Microsoft - Support engineers. - - :param job: The Job to be added. - :type job: ~azure.batch.models.JobAddParameter - :param job_add_options: Additional parameters for the operation - :type job_add_options: ~azure.batch.models.JobAddOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
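A hedged sketch of adding a Job through the removed add operation above, using the JobAddParameter and PoolInformation models referenced in its signature; IDs and account values are placeholders:

from azure.batch import BatchServiceClient, models
from azure.batch.batch_auth import SharedKeyCredentials

client = BatchServiceClient(
    SharedKeyCredentials("<account>", "<key>"),  # placeholder credentials
    batch_url="https://<account>.<region>.batch.azure.com",
)

# A Job needs an ID and a Pool to run on; Tasks are then added separately,
# either by a Job Manager Task or directly through the Task APIs.
client.job.add(models.JobAddParameter(
    id="job-1",
    pool_info=models.PoolInformation(pool_id="pool-1"),
))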
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_add_options is not None: - timeout = job_add_options.timeout - client_request_id = None - if job_add_options is not None: - client_request_id = job_add_options.client_request_id - return_client_request_id = None - if job_add_options is not None: - return_client_request_id = job_add_options.return_client_request_id - ocp_date = None - if job_add_options is not None: - ocp_date = job_add_options.ocp_date - - # Construct URL - url = self.add.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(job, 'JobAddParameter') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [201]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - add.metadata = {'url': '/jobs'} - - def list( - self, job_list_options=None, custom_headers=None, raw=False, **operation_config): - """Lists all of the Jobs in the specified Account. - - :param job_list_options: Additional parameters for the operation - :type job_list_options: ~azure.batch.models.JobListOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
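To illustrate the removed list operation above, which returned a CloudJobPaged iterator, a small sketch with a placeholder OData filter; account values are placeholders:

from azure.batch import BatchServiceClient, models
from azure.batch.batch_auth import SharedKeyCredentials

client = BatchServiceClient(
    SharedKeyCredentials("<account>", "<key>"),  # placeholder credentials
    batch_url="https://<account>.<region>.batch.azure.com",
)

# The paged result is iterable; the filter below limits results to active Jobs.
for cloud_job in client.job.list(models.JobListOptions(filter="state eq 'active'")):
    print(cloud_job.id, cloud_job.state)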
- :return: An iterator like instance of CloudJob - :rtype: - ~azure.batch.models.CloudJobPaged[~azure.batch.models.CloudJob] - :raises: - :class:`BatchErrorException` - """ - filter = None - if job_list_options is not None: - filter = job_list_options.filter - select = None - if job_list_options is not None: - select = job_list_options.select - expand = None - if job_list_options is not None: - expand = job_list_options.expand - max_results = None - if job_list_options is not None: - max_results = job_list_options.max_results - timeout = None - if job_list_options is not None: - timeout = job_list_options.timeout - client_request_id = None - if job_list_options is not None: - client_request_id = job_list_options.client_request_id - return_client_request_id = None - if job_list_options is not None: - return_client_request_id = job_list_options.return_client_request_id - ocp_date = None - if job_list_options is not None: - ocp_date = job_list_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if filter is not None: - query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if expand is not None: - query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.CloudJobPaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - 
list.metadata = {'url': '/jobs'} - - def list_from_job_schedule( - self, job_schedule_id, job_list_from_job_schedule_options=None, custom_headers=None, raw=False, **operation_config): - """Lists the Jobs that have been created under the specified Job Schedule. - - :param job_schedule_id: The ID of the Job Schedule from which you want - to get a list of Jobs. - :type job_schedule_id: str - :param job_list_from_job_schedule_options: Additional parameters for - the operation - :type job_list_from_job_schedule_options: - ~azure.batch.models.JobListFromJobScheduleOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: An iterator like instance of CloudJob - :rtype: - ~azure.batch.models.CloudJobPaged[~azure.batch.models.CloudJob] - :raises: - :class:`BatchErrorException` - """ - filter = None - if job_list_from_job_schedule_options is not None: - filter = job_list_from_job_schedule_options.filter - select = None - if job_list_from_job_schedule_options is not None: - select = job_list_from_job_schedule_options.select - expand = None - if job_list_from_job_schedule_options is not None: - expand = job_list_from_job_schedule_options.expand - max_results = None - if job_list_from_job_schedule_options is not None: - max_results = job_list_from_job_schedule_options.max_results - timeout = None - if job_list_from_job_schedule_options is not None: - timeout = job_list_from_job_schedule_options.timeout - client_request_id = None - if job_list_from_job_schedule_options is not None: - client_request_id = job_list_from_job_schedule_options.client_request_id - return_client_request_id = None - if job_list_from_job_schedule_options is not None: - return_client_request_id = job_list_from_job_schedule_options.return_client_request_id - ocp_date = None - if job_list_from_job_schedule_options is not None: - ocp_date = job_list_from_job_schedule_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list_from_job_schedule.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if filter is not None: - query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if expand is not None: - query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is 
not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.CloudJobPaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list_from_job_schedule.metadata = {'url': '/jobschedules/{jobScheduleId}/jobs'} - - def list_preparation_and_release_task_status( - self, job_id, job_list_preparation_and_release_task_status_options=None, custom_headers=None, raw=False, **operation_config): - """Lists the execution status of the Job Preparation and Job Release Task - for the specified Job across the Compute Nodes where the Job has run. - - This API returns the Job Preparation and Job Release Task status on all - Compute Nodes that have run the Job Preparation or Job Release Task. - This includes Compute Nodes which have since been removed from the - Pool. If this API is invoked on a Job which has no Job Preparation or - Job Release Task, the Batch service returns HTTP status code 409 - (Conflict) with an error code of JobPreparationTaskNotSpecified. - - :param job_id: The ID of the Job. - :type job_id: str - :param job_list_preparation_and_release_task_status_options: - Additional parameters for the operation - :type job_list_preparation_and_release_task_status_options: - ~azure.batch.models.JobListPreparationAndReleaseTaskStatusOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
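As a rough sketch of the removed operation above, which pages one JobPreparationAndReleaseTaskExecutionInformation entry per Compute Node; the job ID and account values are placeholders:

from azure.batch import BatchServiceClient
from azure.batch.batch_auth import SharedKeyCredentials

client = BatchServiceClient(
    SharedKeyCredentials("<account>", "<key>"),  # placeholder credentials
    batch_url="https://<account>.<region>.batch.azure.com",
)

# One entry per Compute Node that has run the Job Preparation or Release Task.
for entry in client.job.list_preparation_and_release_task_status("job-1"):
    prep = entry.job_preparation_task_execution_info
    print(entry.node_id, prep.state if prep else "no preparation task info")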
- :return: An iterator like instance of - JobPreparationAndReleaseTaskExecutionInformation - :rtype: - ~azure.batch.models.JobPreparationAndReleaseTaskExecutionInformationPaged[~azure.batch.models.JobPreparationAndReleaseTaskExecutionInformation] - :raises: - :class:`BatchErrorException` - """ - filter = None - if job_list_preparation_and_release_task_status_options is not None: - filter = job_list_preparation_and_release_task_status_options.filter - select = None - if job_list_preparation_and_release_task_status_options is not None: - select = job_list_preparation_and_release_task_status_options.select - max_results = None - if job_list_preparation_and_release_task_status_options is not None: - max_results = job_list_preparation_and_release_task_status_options.max_results - timeout = None - if job_list_preparation_and_release_task_status_options is not None: - timeout = job_list_preparation_and_release_task_status_options.timeout - client_request_id = None - if job_list_preparation_and_release_task_status_options is not None: - client_request_id = job_list_preparation_and_release_task_status_options.client_request_id - return_client_request_id = None - if job_list_preparation_and_release_task_status_options is not None: - return_client_request_id = job_list_preparation_and_release_task_status_options.return_client_request_id - ocp_date = None - if job_list_preparation_and_release_task_status_options is not None: - ocp_date = job_list_preparation_and_release_task_status_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list_preparation_and_release_task_status.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if filter is not None: - query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, 
query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.JobPreparationAndReleaseTaskExecutionInformationPaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list_preparation_and_release_task_status.metadata = {'url': '/jobs/{jobId}/jobpreparationandreleasetaskstatus'} - - def get_task_counts( - self, job_id, job_get_task_counts_options=None, custom_headers=None, raw=False, **operation_config): - """Gets the Task counts for the specified Job. - - Task counts provide a count of the Tasks by active, running or - completed Task state, and a count of Tasks which succeeded or failed. - Tasks in the preparing state are counted as running. Note that the - numbers returned may not always be up to date. If you need exact task - counts, use a list query. - - :param job_id: The ID of the Job. - :type job_id: str - :param job_get_task_counts_options: Additional parameters for the - operation - :type job_get_task_counts_options: - ~azure.batch.models.JobGetTaskCountsOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: TaskCountsResult or ClientRawResponse if raw=true - :rtype: ~azure.batch.models.TaskCountsResult or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_get_task_counts_options is not None: - timeout = job_get_task_counts_options.timeout - client_request_id = None - if job_get_task_counts_options is not None: - client_request_id = job_get_task_counts_options.client_request_id - return_client_request_id = None - if job_get_task_counts_options is not None: - return_client_request_id = job_get_task_counts_options.return_client_request_id - ocp_date = None - if job_get_task_counts_options is not None: - ocp_date = job_get_task_counts_options.ocp_date - - # Construct URL - url = self.get_task_counts.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if 
return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('TaskCountsResult', response) - header_dict = { - 'client-request-id': 'str', - 'request-id': 'str', - } - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - get_task_counts.metadata = {'url': '/jobs/{jobId}/taskcounts'} diff --git a/sdk/batch/azure-batch/azure/batch/operations/_job_schedule_operations.py b/sdk/batch/azure-batch/azure/batch/operations/_job_schedule_operations.py deleted file mode 100644 index fd66edffabee..000000000000 --- a/sdk/batch/azure-batch/azure/batch/operations/_job_schedule_operations.py +++ /dev/null @@ -1,1096 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -import uuid -from msrest.pipeline import ClientRawResponse - -from .. import models - - -class JobScheduleOperations(object): - """JobScheduleOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar api_version: The API version to use for the request. Constant value: "2024-02-01.19.0". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self.api_version = "2024-02-01.19.0" - - self.config = config - - def exists( - self, job_schedule_id, job_schedule_exists_options=None, custom_headers=None, raw=False, **operation_config): - """Checks the specified Job Schedule exists. - - :param job_schedule_id: The ID of the Job Schedule which you want to - check. - :type job_schedule_id: str - :param job_schedule_exists_options: Additional parameters for the - operation - :type job_schedule_exists_options: - ~azure.batch.models.JobScheduleExistsOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
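Tying back to the get_task_counts description a little earlier (counts by Task state, with the preparing state counted as running, and values that may lag slightly), a hedged sketch against the removed client; field names follow the removed TaskCountsResult model and the account values are placeholders:

from azure.batch import BatchServiceClient
from azure.batch.batch_auth import SharedKeyCredentials

client = BatchServiceClient(
    SharedKeyCredentials("<account>", "<key>"),  # placeholder credentials
    batch_url="https://<account>.<region>.batch.azure.com",
)

# Approximate counts; use a list query when exact numbers are required.
counts = client.job.get_task_counts("job-1")
print(counts.task_counts.active, counts.task_counts.running, counts.task_counts.completed)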
- :return: bool or ClientRawResponse if raw=true - :rtype: bool or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_schedule_exists_options is not None: - timeout = job_schedule_exists_options.timeout - client_request_id = None - if job_schedule_exists_options is not None: - client_request_id = job_schedule_exists_options.client_request_id - return_client_request_id = None - if job_schedule_exists_options is not None: - return_client_request_id = job_schedule_exists_options.return_client_request_id - ocp_date = None - if job_schedule_exists_options is not None: - ocp_date = job_schedule_exists_options.ocp_date - if_match = None - if job_schedule_exists_options is not None: - if_match = job_schedule_exists_options.if_match - if_none_match = None - if job_schedule_exists_options is not None: - if_none_match = job_schedule_exists_options.if_none_match - if_modified_since = None - if job_schedule_exists_options is not None: - if_modified_since = job_schedule_exists_options.if_modified_since - if_unmodified_since = None - if job_schedule_exists_options is not None: - if_unmodified_since = job_schedule_exists_options.if_unmodified_since - - # Construct URL - url = self.exists.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 404]: - raise models.BatchErrorException(self._deserialize, response) - - deserialized = (response.status_code == 200) - 
if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - }) - return client_raw_response - return deserialized - exists.metadata = {'url': '/jobschedules/{jobScheduleId}'} - - def delete( - self, job_schedule_id, job_schedule_delete_options=None, custom_headers=None, raw=False, **operation_config): - """Deletes a Job Schedule from the specified Account. - - When you delete a Job Schedule, this also deletes all Jobs and Tasks - under that schedule. When Tasks are deleted, all the files in their - working directories on the Compute Nodes are also deleted (the - retention period is ignored). The Job Schedule statistics are no longer - accessible once the Job Schedule is deleted, though they are still - counted towards Account lifetime statistics. - - :param job_schedule_id: The ID of the Job Schedule to delete. - :type job_schedule_id: str - :param job_schedule_delete_options: Additional parameters for the - operation - :type job_schedule_delete_options: - ~azure.batch.models.JobScheduleDeleteOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_schedule_delete_options is not None: - timeout = job_schedule_delete_options.timeout - client_request_id = None - if job_schedule_delete_options is not None: - client_request_id = job_schedule_delete_options.client_request_id - return_client_request_id = None - if job_schedule_delete_options is not None: - return_client_request_id = job_schedule_delete_options.return_client_request_id - ocp_date = None - if job_schedule_delete_options is not None: - ocp_date = job_schedule_delete_options.ocp_date - if_match = None - if job_schedule_delete_options is not None: - if_match = job_schedule_delete_options.if_match - if_none_match = None - if job_schedule_delete_options is not None: - if_none_match = job_schedule_delete_options.if_none_match - if_modified_since = None - if job_schedule_delete_options is not None: - if_modified_since = job_schedule_delete_options.if_modified_since - if_unmodified_since = None - if job_schedule_delete_options is not None: - if_unmodified_since = job_schedule_delete_options.if_unmodified_since - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - }) - return client_raw_response - delete.metadata = {'url': '/jobschedules/{jobScheduleId}'} - - def get( - self, job_schedule_id, job_schedule_get_options=None, custom_headers=None, raw=False, **operation_config): - """Gets information about the specified Job Schedule. - - :param job_schedule_id: The ID of the Job Schedule to get. - :type job_schedule_id: str - :param job_schedule_get_options: Additional parameters for the - operation - :type job_schedule_get_options: - ~azure.batch.models.JobScheduleGetOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: CloudJobSchedule or ClientRawResponse if raw=true - :rtype: ~azure.batch.models.CloudJobSchedule or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - select = None - if job_schedule_get_options is not None: - select = job_schedule_get_options.select - expand = None - if job_schedule_get_options is not None: - expand = job_schedule_get_options.expand - timeout = None - if job_schedule_get_options is not None: - timeout = job_schedule_get_options.timeout - client_request_id = None - if job_schedule_get_options is not None: - client_request_id = job_schedule_get_options.client_request_id - return_client_request_id = None - if job_schedule_get_options is not None: - return_client_request_id = job_schedule_get_options.return_client_request_id - ocp_date = None - if job_schedule_get_options is not None: - ocp_date = job_schedule_get_options.ocp_date - if_match = None - if job_schedule_get_options is not None: - if_match = job_schedule_get_options.if_match - if_none_match = None - if job_schedule_get_options is not None: - if_none_match = job_schedule_get_options.if_none_match - if_modified_since = None - if job_schedule_get_options is not None: - if_modified_since = job_schedule_get_options.if_modified_since - if_unmodified_since = None - if job_schedule_get_options is not None: - if_unmodified_since = job_schedule_get_options.if_unmodified_since - - # Construct URL - url = self.get.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if expand is not None: - query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - 
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('CloudJobSchedule', response) - header_dict = { - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - } - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - get.metadata = {'url': '/jobschedules/{jobScheduleId}'} - - def patch( - self, job_schedule_id, job_schedule_patch_parameter, job_schedule_patch_options=None, custom_headers=None, raw=False, **operation_config): - """Updates the properties of the specified Job Schedule. - - This replaces only the Job Schedule properties specified in the - request. For example, if the schedule property is not specified with - this request, then the Batch service will keep the existing schedule. - Changes to a Job Schedule only impact Jobs created by the schedule - after the update has taken place; currently running Jobs are - unaffected. - - :param job_schedule_id: The ID of the Job Schedule to update. - :type job_schedule_id: str - :param job_schedule_patch_parameter: The parameters for the request. - :type job_schedule_patch_parameter: - ~azure.batch.models.JobSchedulePatchParameter - :param job_schedule_patch_options: Additional parameters for the - operation - :type job_schedule_patch_options: - ~azure.batch.models.JobSchedulePatchOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
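# A minimal sketch of get(): it returns a deserialized CloudJobSchedule on HTTP 200,
# and $select / $expand are driven by JobScheduleGetOptions. The keyword-argument
# constructor and the printed attributes are assumptions based on the documented
# option and model names; `client` is the BatchServiceClient from the earlier sketch.
from azure.batch import models

opts = models.JobScheduleGetOptions(select="id,state", expand="stats")
schedule = client.job_schedule.get("weekly-reporting-schedule", job_schedule_get_options=opts)
print(schedule.id, schedule.state)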
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_schedule_patch_options is not None: - timeout = job_schedule_patch_options.timeout - client_request_id = None - if job_schedule_patch_options is not None: - client_request_id = job_schedule_patch_options.client_request_id - return_client_request_id = None - if job_schedule_patch_options is not None: - return_client_request_id = job_schedule_patch_options.return_client_request_id - ocp_date = None - if job_schedule_patch_options is not None: - ocp_date = job_schedule_patch_options.ocp_date - if_match = None - if job_schedule_patch_options is not None: - if_match = job_schedule_patch_options.if_match - if_none_match = None - if job_schedule_patch_options is not None: - if_none_match = job_schedule_patch_options.if_none_match - if_modified_since = None - if job_schedule_patch_options is not None: - if_modified_since = job_schedule_patch_options.if_modified_since - if_unmodified_since = None - if job_schedule_patch_options is not None: - if_unmodified_since = job_schedule_patch_options.if_unmodified_since - - # Construct URL - url = self.patch.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(job_schedule_patch_parameter, 'JobSchedulePatchParameter') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters, body_content) - response = 
self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - patch.metadata = {'url': '/jobschedules/{jobScheduleId}'} - - def update( - self, job_schedule_id, job_schedule_update_parameter, job_schedule_update_options=None, custom_headers=None, raw=False, **operation_config): - """Updates the properties of the specified Job Schedule. - - This fully replaces all the updatable properties of the Job Schedule. - For example, if the schedule property is not specified with this - request, then the Batch service will remove the existing schedule. - Changes to a Job Schedule only impact Jobs created by the schedule - after the update has taken place; currently running Jobs are - unaffected. - - :param job_schedule_id: The ID of the Job Schedule to update. - :type job_schedule_id: str - :param job_schedule_update_parameter: The parameters for the request. - :type job_schedule_update_parameter: - ~azure.batch.models.JobScheduleUpdateParameter - :param job_schedule_update_options: Additional parameters for the - operation - :type job_schedule_update_options: - ~azure.batch.models.JobScheduleUpdateOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_schedule_update_options is not None: - timeout = job_schedule_update_options.timeout - client_request_id = None - if job_schedule_update_options is not None: - client_request_id = job_schedule_update_options.client_request_id - return_client_request_id = None - if job_schedule_update_options is not None: - return_client_request_id = job_schedule_update_options.return_client_request_id - ocp_date = None - if job_schedule_update_options is not None: - ocp_date = job_schedule_update_options.ocp_date - if_match = None - if job_schedule_update_options is not None: - if_match = job_schedule_update_options.if_match - if_none_match = None - if job_schedule_update_options is not None: - if_none_match = job_schedule_update_options.if_none_match - if_modified_since = None - if job_schedule_update_options is not None: - if_modified_since = job_schedule_update_options.if_modified_since - if_unmodified_since = None - if job_schedule_update_options is not None: - if_unmodified_since = job_schedule_update_options.if_unmodified_since - - # Construct URL - url = self.update.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - 
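# A minimal sketch contrasting patch() and update() as the two docstrings describe:
# patch() changes only the properties present in the body (an omitted schedule is kept),
# while update() fully replaces the updatable properties (an omitted schedule is removed).
# The model field names used below (schedule, job_specification, pool_info) are assumptions
# not spelled out in this diff; `client` is the BatchServiceClient from the earlier sketch.
import datetime
from azure.batch import models

# Partial update: only the recurrence interval changes, everything else is preserved.
client.job_schedule.patch(
    "weekly-reporting-schedule",
    models.JobSchedulePatchParameter(
        schedule=models.Schedule(recurrence_interval=datetime.timedelta(days=1))),
)

# Full replace: any updatable property not supplied here is reset by the service.
client.job_schedule.update(
    "weekly-reporting-schedule",
    models.JobScheduleUpdateParameter(
        schedule=models.Schedule(recurrence_interval=datetime.timedelta(days=7)),
        job_specification=models.JobSpecification(
            pool_info=models.PoolInformation(pool_id="reporting-pool")),
    ),
)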
header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(job_schedule_update_parameter, 'JobScheduleUpdateParameter') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - update.metadata = {'url': '/jobschedules/{jobScheduleId}'} - - def disable( - self, job_schedule_id, job_schedule_disable_options=None, custom_headers=None, raw=False, **operation_config): - """Disables a Job Schedule. - - No new Jobs will be created until the Job Schedule is enabled again. - - :param job_schedule_id: The ID of the Job Schedule to disable. - :type job_schedule_id: str - :param job_schedule_disable_options: Additional parameters for the - operation - :type job_schedule_disable_options: - ~azure.batch.models.JobScheduleDisableOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_schedule_disable_options is not None: - timeout = job_schedule_disable_options.timeout - client_request_id = None - if job_schedule_disable_options is not None: - client_request_id = job_schedule_disable_options.client_request_id - return_client_request_id = None - if job_schedule_disable_options is not None: - return_client_request_id = job_schedule_disable_options.return_client_request_id - ocp_date = None - if job_schedule_disable_options is not None: - ocp_date = job_schedule_disable_options.ocp_date - if_match = None - if job_schedule_disable_options is not None: - if_match = job_schedule_disable_options.if_match - if_none_match = None - if job_schedule_disable_options is not None: - if_none_match = job_schedule_disable_options.if_none_match - if_modified_since = None - if job_schedule_disable_options is not None: - if_modified_since = job_schedule_disable_options.if_modified_since - if_unmodified_since = None - if job_schedule_disable_options is not None: - if_unmodified_since = job_schedule_disable_options.if_unmodified_since - - # Construct URL - url = self.disable.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [204]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = 
ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - disable.metadata = {'url': '/jobschedules/{jobScheduleId}/disable'} - - def enable( - self, job_schedule_id, job_schedule_enable_options=None, custom_headers=None, raw=False, **operation_config): - """Enables a Job Schedule. - - :param job_schedule_id: The ID of the Job Schedule to enable. - :type job_schedule_id: str - :param job_schedule_enable_options: Additional parameters for the - operation - :type job_schedule_enable_options: - ~azure.batch.models.JobScheduleEnableOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_schedule_enable_options is not None: - timeout = job_schedule_enable_options.timeout - client_request_id = None - if job_schedule_enable_options is not None: - client_request_id = job_schedule_enable_options.client_request_id - return_client_request_id = None - if job_schedule_enable_options is not None: - return_client_request_id = job_schedule_enable_options.return_client_request_id - ocp_date = None - if job_schedule_enable_options is not None: - ocp_date = job_schedule_enable_options.ocp_date - if_match = None - if job_schedule_enable_options is not None: - if_match = job_schedule_enable_options.if_match - if_none_match = None - if job_schedule_enable_options is not None: - if_none_match = job_schedule_enable_options.if_none_match - if_modified_since = None - if job_schedule_enable_options is not None: - if_modified_since = job_schedule_enable_options.if_modified_since - if_unmodified_since = None - if job_schedule_enable_options is not None: - if_unmodified_since = job_schedule_enable_options.if_unmodified_since - - # Construct URL - url = self.enable.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = 
self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [204]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - enable.metadata = {'url': '/jobschedules/{jobScheduleId}/enable'} - - def terminate( - self, job_schedule_id, job_schedule_terminate_options=None, custom_headers=None, raw=False, **operation_config): - """Terminates a Job Schedule. - - :param job_schedule_id: The ID of the Job Schedule to terminates. - :type job_schedule_id: str - :param job_schedule_terminate_options: Additional parameters for the - operation - :type job_schedule_terminate_options: - ~azure.batch.models.JobScheduleTerminateOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_schedule_terminate_options is not None: - timeout = job_schedule_terminate_options.timeout - client_request_id = None - if job_schedule_terminate_options is not None: - client_request_id = job_schedule_terminate_options.client_request_id - return_client_request_id = None - if job_schedule_terminate_options is not None: - return_client_request_id = job_schedule_terminate_options.return_client_request_id - ocp_date = None - if job_schedule_terminate_options is not None: - ocp_date = job_schedule_terminate_options.ocp_date - if_match = None - if job_schedule_terminate_options is not None: - if_match = job_schedule_terminate_options.if_match - if_none_match = None - if job_schedule_terminate_options is not None: - if_none_match = job_schedule_terminate_options.if_none_match - if_modified_since = None - if job_schedule_terminate_options is not None: - if_modified_since = job_schedule_terminate_options.if_modified_since - if_unmodified_since = None - if job_schedule_terminate_options is not None: - if_unmodified_since = job_schedule_terminate_options.if_unmodified_since - - # Construct URL - url = self.terminate.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.BatchErrorException(self._deserialize, response) - - if 
raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - terminate.metadata = {'url': '/jobschedules/{jobScheduleId}/terminate'} - - def add( - self, cloud_job_schedule, job_schedule_add_options=None, custom_headers=None, raw=False, **operation_config): - """Adds a Job Schedule to the specified Account. - - :param cloud_job_schedule: The Job Schedule to be added. - :type cloud_job_schedule: ~azure.batch.models.JobScheduleAddParameter - :param job_schedule_add_options: Additional parameters for the - operation - :type job_schedule_add_options: - ~azure.batch.models.JobScheduleAddOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if job_schedule_add_options is not None: - timeout = job_schedule_add_options.timeout - client_request_id = None - if job_schedule_add_options is not None: - client_request_id = job_schedule_add_options.client_request_id - return_client_request_id = None - if job_schedule_add_options is not None: - return_client_request_id = job_schedule_add_options.return_client_request_id - ocp_date = None - if job_schedule_add_options is not None: - ocp_date = job_schedule_add_options.ocp_date - - # Construct URL - url = self.add.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(cloud_job_schedule, 'JobScheduleAddParameter') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [201]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - 
client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - add.metadata = {'url': '/jobschedules'} - - def list( - self, job_schedule_list_options=None, custom_headers=None, raw=False, **operation_config): - """Lists all of the Job Schedules in the specified Account. - - :param job_schedule_list_options: Additional parameters for the - operation - :type job_schedule_list_options: - ~azure.batch.models.JobScheduleListOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: An iterator like instance of CloudJobSchedule - :rtype: - ~azure.batch.models.CloudJobSchedulePaged[~azure.batch.models.CloudJobSchedule] - :raises: - :class:`BatchErrorException` - """ - filter = None - if job_schedule_list_options is not None: - filter = job_schedule_list_options.filter - select = None - if job_schedule_list_options is not None: - select = job_schedule_list_options.select - expand = None - if job_schedule_list_options is not None: - expand = job_schedule_list_options.expand - max_results = None - if job_schedule_list_options is not None: - max_results = job_schedule_list_options.max_results - timeout = None - if job_schedule_list_options is not None: - timeout = job_schedule_list_options.timeout - client_request_id = None - if job_schedule_list_options is not None: - client_request_id = job_schedule_list_options.client_request_id - return_client_request_id = None - if job_schedule_list_options is not None: - return_client_request_id = job_schedule_list_options.return_client_request_id - ocp_date = None - if job_schedule_list_options is not None: - ocp_date = job_schedule_list_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if filter is not None: - query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if expand is not None: - query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not 
None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.CloudJobSchedulePaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list.metadata = {'url': '/jobschedules'} diff --git a/sdk/batch/azure-batch/azure/batch/operations/_pool_operations.py b/sdk/batch/azure-batch/azure/batch/operations/_pool_operations.py deleted file mode 100644 index 31942e803c5b..000000000000 --- a/sdk/batch/azure-batch/azure/batch/operations/_pool_operations.py +++ /dev/null @@ -1,1540 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -import uuid -from msrest.pipeline import ClientRawResponse - -from .. import models - - -class PoolOperations(object): - """PoolOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar api_version: The API version to use for the request. Constant value: "2024-02-01.19.0". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self.api_version = "2024-02-01.19.0" - - self.config = config - - def list_usage_metrics( - self, pool_list_usage_metrics_options=None, custom_headers=None, raw=False, **operation_config): - """Lists the usage metrics, aggregated by Pool across individual time - intervals, for the specified Account. - - If you do not specify a $filter clause including a poolId, the response - includes all Pools that existed in the Account in the time range of the - returned aggregation intervals. If you do not specify a $filter clause - including a startTime or endTime these filters default to the start and - end times of the last aggregation interval currently available; that - is, only the last aggregation interval is returned. 
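# A minimal sketch of list_usage_metrics(): as explained above, omitting startTime and
# endTime restricts the result to the last aggregation interval, and a $filter on poolId
# narrows it to one Pool. The call returns a paged iterator (PoolUsageMetricsPaged), so a
# plain for-loop walks every page, the same pattern job_schedule.list() uses above. The
# keyword-argument option constructor and the metric attribute names are assumptions;
# `client` is the BatchServiceClient from the earlier sketch.
import datetime
from azure.batch import models

opts = models.PoolListUsageMetricsOptions(
    start_time=datetime.datetime.utcnow() - datetime.timedelta(days=7),
    end_time=datetime.datetime.utcnow(),
    filter="poolId eq 'reporting-pool'",
)
for metrics in client.pool.list_usage_metrics(pool_list_usage_metrics_options=opts):
    print(metrics.pool_id, metrics.total_core_hours)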
- - :param pool_list_usage_metrics_options: Additional parameters for the - operation - :type pool_list_usage_metrics_options: - ~azure.batch.models.PoolListUsageMetricsOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: An iterator like instance of PoolUsageMetrics - :rtype: - ~azure.batch.models.PoolUsageMetricsPaged[~azure.batch.models.PoolUsageMetrics] - :raises: - :class:`BatchErrorException` - """ - start_time = None - if pool_list_usage_metrics_options is not None: - start_time = pool_list_usage_metrics_options.start_time - end_time = None - if pool_list_usage_metrics_options is not None: - end_time = pool_list_usage_metrics_options.end_time - filter = None - if pool_list_usage_metrics_options is not None: - filter = pool_list_usage_metrics_options.filter - max_results = None - if pool_list_usage_metrics_options is not None: - max_results = pool_list_usage_metrics_options.max_results - timeout = None - if pool_list_usage_metrics_options is not None: - timeout = pool_list_usage_metrics_options.timeout - client_request_id = None - if pool_list_usage_metrics_options is not None: - client_request_id = pool_list_usage_metrics_options.client_request_id - return_client_request_id = None - if pool_list_usage_metrics_options is not None: - return_client_request_id = pool_list_usage_metrics_options.return_client_request_id - ocp_date = None - if pool_list_usage_metrics_options is not None: - ocp_date = pool_list_usage_metrics_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list_usage_metrics.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if start_time is not None: - query_parameters['starttime'] = self._serialize.query("start_time", start_time, 'iso-8601') - if end_time is not None: - query_parameters['endtime'] = self._serialize.query("end_time", end_time, 'iso-8601') - if filter is not None: - query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 
'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.PoolUsageMetricsPaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list_usage_metrics.metadata = {'url': '/poolusagemetrics'} - - def add( - self, pool, pool_add_options=None, custom_headers=None, raw=False, **operation_config): - """Adds a Pool to the specified Account. - - When naming Pools, avoid including sensitive information such as user - names or secret project names. This information may appear in telemetry - logs accessible to Microsoft Support engineers. - - :param pool: The Pool to be added. - :type pool: ~azure.batch.models.PoolAddParameter - :param pool_add_options: Additional parameters for the operation - :type pool_add_options: ~azure.batch.models.PoolAddOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if pool_add_options is not None: - timeout = pool_add_options.timeout - client_request_id = None - if pool_add_options is not None: - client_request_id = pool_add_options.client_request_id - return_client_request_id = None - if pool_add_options is not None: - return_client_request_id = pool_add_options.return_client_request_id - ocp_date = None - if pool_add_options is not None: - ocp_date = pool_add_options.ocp_date - - # Construct URL - url = self.add.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - 
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(pool, 'PoolAddParameter') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [201]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - add.metadata = {'url': '/pools'} - - def list( - self, pool_list_options=None, custom_headers=None, raw=False, **operation_config): - """Lists all of the Pools in the specified Account. - - :param pool_list_options: Additional parameters for the operation - :type pool_list_options: ~azure.batch.models.PoolListOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: An iterator like instance of CloudPool - :rtype: - ~azure.batch.models.CloudPoolPaged[~azure.batch.models.CloudPool] - :raises: - :class:`BatchErrorException` - """ - filter = None - if pool_list_options is not None: - filter = pool_list_options.filter - select = None - if pool_list_options is not None: - select = pool_list_options.select - expand = None - if pool_list_options is not None: - expand = pool_list_options.expand - max_results = None - if pool_list_options is not None: - max_results = pool_list_options.max_results - timeout = None - if pool_list_options is not None: - timeout = pool_list_options.timeout - client_request_id = None - if pool_list_options is not None: - client_request_id = pool_list_options.client_request_id - return_client_request_id = None - if pool_list_options is not None: - return_client_request_id = pool_list_options.return_client_request_id - ocp_date = None - if pool_list_options is not None: - ocp_date = pool_list_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if filter is not None: - query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if expand is not None: - query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = 
str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.CloudPoolPaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list.metadata = {'url': '/pools'} - - def delete( - self, pool_id, pool_delete_options=None, custom_headers=None, raw=False, **operation_config): - """Deletes a Pool from the specified Account. - - When you request that a Pool be deleted, the following actions occur: - the Pool state is set to deleting; any ongoing resize operation on the - Pool are stopped; the Batch service starts resizing the Pool to zero - Compute Nodes; any Tasks running on existing Compute Nodes are - terminated and requeued (as if a resize Pool operation had been - requested with the default requeue option); finally, the Pool is - removed from the system. Because running Tasks are requeued, the user - can rerun these Tasks by updating their Job to target a different Pool. - The Tasks can then run on the new Pool. If you want to override the - requeue behavior, then you should call resize Pool explicitly to shrink - the Pool to zero size before deleting the Pool. If you call an Update, - Patch or Delete API on a Pool in the deleting state, it will fail with - HTTP status code 409 with error code PoolBeingDeleted. - - :param pool_id: The ID of the Pool to delete. - :type pool_id: str - :param pool_delete_options: Additional parameters for the operation - :type pool_delete_options: ~azure.batch.models.PoolDeleteOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
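# A minimal sketch of pool.add() and pool.list(): add() creates the Pool (HTTP 201), and,
# as the add() docstring above warns, the Pool ID should not carry user names or secrets
# because it can surface in support telemetry. list() returns a paged iterator of
# CloudPool. Every PoolAddParameter field below (vm_size, virtual_machine_configuration,
# image_reference, node_agent_sku_id, target_dedicated_nodes) is an assumption not shown
# in this diff; `client` is the BatchServiceClient from the earlier sketch.
from azure.batch import models

client.pool.add(models.PoolAddParameter(
    id="reporting-pool",
    vm_size="STANDARD_D2S_V3",
    virtual_machine_configuration=models.VirtualMachineConfiguration(
        image_reference=models.ImageReference(
            publisher="canonical", offer="0001-com-ubuntu-server-jammy", sku="22_04-lts"),
        node_agent_sku_id="batch.node.ubuntu 22.04",
    ),
    target_dedicated_nodes=1,
))

for pool in client.pool.list(pool_list_options=models.PoolListOptions(filter="state eq 'active'")):
    print(pool.id, pool.state)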
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if pool_delete_options is not None: - timeout = pool_delete_options.timeout - client_request_id = None - if pool_delete_options is not None: - client_request_id = pool_delete_options.client_request_id - return_client_request_id = None - if pool_delete_options is not None: - return_client_request_id = pool_delete_options.return_client_request_id - ocp_date = None - if pool_delete_options is not None: - ocp_date = pool_delete_options.ocp_date - if_match = None - if pool_delete_options is not None: - if_match = pool_delete_options.if_match - if_none_match = None - if pool_delete_options is not None: - if_none_match = pool_delete_options.if_none_match - if_modified_since = None - if pool_delete_options is not None: - if_modified_since = pool_delete_options.if_modified_since - if_unmodified_since = None - if pool_delete_options is not None: - if_unmodified_since = pool_delete_options.if_unmodified_since - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - }) - return client_raw_response - 
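# A minimal sketch of pool.delete(): the call returns on HTTP 202 while the Pool sits in
# the deleting state, running Tasks are requeued, and further Update/Patch/Delete calls
# fail with 409 PoolBeingDeleted, as the docstring above explains. One way to wait for the
# deletion to finish is to poll exists() until it reports False; the 30-second interval is
# arbitrary. `client` is the BatchServiceClient from the earlier sketch.
import time

client.pool.delete("reporting-pool")
while client.pool.exists("reporting-pool"):
    time.sleep(30)
print("Pool fully removed")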
delete.metadata = {'url': '/pools/{poolId}'} - - def exists( - self, pool_id, pool_exists_options=None, custom_headers=None, raw=False, **operation_config): - """Gets basic properties of a Pool. - - :param pool_id: The ID of the Pool to get. - :type pool_id: str - :param pool_exists_options: Additional parameters for the operation - :type pool_exists_options: ~azure.batch.models.PoolExistsOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: bool or ClientRawResponse if raw=true - :rtype: bool or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if pool_exists_options is not None: - timeout = pool_exists_options.timeout - client_request_id = None - if pool_exists_options is not None: - client_request_id = pool_exists_options.client_request_id - return_client_request_id = None - if pool_exists_options is not None: - return_client_request_id = pool_exists_options.return_client_request_id - ocp_date = None - if pool_exists_options is not None: - ocp_date = pool_exists_options.ocp_date - if_match = None - if pool_exists_options is not None: - if_match = pool_exists_options.if_match - if_none_match = None - if pool_exists_options is not None: - if_none_match = pool_exists_options.if_none_match - if_modified_since = None - if pool_exists_options is not None: - if_modified_since = pool_exists_options.if_modified_since - if_unmodified_since = None - if pool_exists_options is not None: - if_unmodified_since = pool_exists_options.if_unmodified_since - - # Construct URL - url = self.exists.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is 
not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 404]: - raise models.BatchErrorException(self._deserialize, response) - - deserialized = (response.status_code == 200) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - }) - return client_raw_response - return deserialized - exists.metadata = {'url': '/pools/{poolId}'} - - def get( - self, pool_id, pool_get_options=None, custom_headers=None, raw=False, **operation_config): - """Gets information about the specified Pool. - - :param pool_id: The ID of the Pool to get. - :type pool_id: str - :param pool_get_options: Additional parameters for the operation - :type pool_get_options: ~azure.batch.models.PoolGetOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: CloudPool or ClientRawResponse if raw=true - :rtype: ~azure.batch.models.CloudPool or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - select = None - if pool_get_options is not None: - select = pool_get_options.select - expand = None - if pool_get_options is not None: - expand = pool_get_options.expand - timeout = None - if pool_get_options is not None: - timeout = pool_get_options.timeout - client_request_id = None - if pool_get_options is not None: - client_request_id = pool_get_options.client_request_id - return_client_request_id = None - if pool_get_options is not None: - return_client_request_id = pool_get_options.return_client_request_id - ocp_date = None - if pool_get_options is not None: - ocp_date = pool_get_options.ocp_date - if_match = None - if pool_get_options is not None: - if_match = pool_get_options.if_match - if_none_match = None - if pool_get_options is not None: - if_none_match = pool_get_options.if_none_match - if_modified_since = None - if pool_get_options is not None: - if_modified_since = pool_get_options.if_modified_since - if_unmodified_since = None - if pool_get_options is not None: - if_unmodified_since = pool_get_options.if_unmodified_since - - # Construct URL - url = self.get.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if expand is not None: - query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - 
header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('CloudPool', response) - header_dict = { - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - } - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - get.metadata = {'url': '/pools/{poolId}'} - - def patch( - self, pool_id, pool_patch_parameter, pool_patch_options=None, custom_headers=None, raw=False, **operation_config): - """Updates the properties of the specified Pool. - - This only replaces the Pool properties specified in the request. For - example, if the Pool has a StartTask associated with it, and a request - does not specify a StartTask element, then the Pool keeps the existing - StartTask. - - :param pool_id: The ID of the Pool to update. - :type pool_id: str - :param pool_patch_parameter: The parameters for the request. - :type pool_patch_parameter: ~azure.batch.models.PoolPatchParameter - :param pool_patch_options: Additional parameters for the operation - :type pool_patch_options: ~azure.batch.models.PoolPatchOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if pool_patch_options is not None: - timeout = pool_patch_options.timeout - client_request_id = None - if pool_patch_options is not None: - client_request_id = pool_patch_options.client_request_id - return_client_request_id = None - if pool_patch_options is not None: - return_client_request_id = pool_patch_options.return_client_request_id - ocp_date = None - if pool_patch_options is not None: - ocp_date = pool_patch_options.ocp_date - if_match = None - if pool_patch_options is not None: - if_match = pool_patch_options.if_match - if_none_match = None - if pool_patch_options is not None: - if_none_match = pool_patch_options.if_none_match - if_modified_since = None - if pool_patch_options is not None: - if_modified_since = pool_patch_options.if_modified_since - if_unmodified_since = None - if pool_patch_options is not None: - if_unmodified_since = pool_patch_options.if_unmodified_since - - # Construct URL - url = self.patch.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(pool_patch_parameter, 'PoolPatchParameter') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: 
- client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - patch.metadata = {'url': '/pools/{poolId}'} - - def disable_auto_scale( - self, pool_id, pool_disable_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): - """Disables automatic scaling for a Pool. - - :param pool_id: The ID of the Pool on which to disable automatic - scaling. - :type pool_id: str - :param pool_disable_auto_scale_options: Additional parameters for the - operation - :type pool_disable_auto_scale_options: - ~azure.batch.models.PoolDisableAutoScaleOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if pool_disable_auto_scale_options is not None: - timeout = pool_disable_auto_scale_options.timeout - client_request_id = None - if pool_disable_auto_scale_options is not None: - client_request_id = pool_disable_auto_scale_options.client_request_id - return_client_request_id = None - if pool_disable_auto_scale_options is not None: - return_client_request_id = pool_disable_auto_scale_options.return_client_request_id - ocp_date = None - if pool_disable_auto_scale_options is not None: - ocp_date = pool_disable_auto_scale_options.ocp_date - - # Construct URL - url = self.disable_auto_scale.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 
'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - disable_auto_scale.metadata = {'url': '/pools/{poolId}/disableautoscale'} - - def enable_auto_scale( - self, pool_id, auto_scale_formula=None, auto_scale_evaluation_interval=None, pool_enable_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): - """Enables automatic scaling for a Pool. - - You cannot enable automatic scaling on a Pool if a resize operation is - in progress on the Pool. If automatic scaling of the Pool is currently - disabled, you must specify a valid autoscale formula as part of the - request. If automatic scaling of the Pool is already enabled, you may - specify a new autoscale formula and/or a new evaluation interval. You - cannot call this API for the same Pool more than once every 30 seconds. - - :param pool_id: The ID of the Pool on which to enable automatic - scaling. - :type pool_id: str - :param auto_scale_formula: The formula is checked for validity before - it is applied to the Pool. If the formula is not valid, the Batch - service rejects the request with detailed error information. For more - information about specifying this formula, see Automatically scale - Compute Nodes in an Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - :type auto_scale_formula: str - :param auto_scale_evaluation_interval: The default value is 15 - minutes. The minimum and maximum value are 5 minutes and 168 hours - respectively. If you specify a value less than 5 minutes or greater - than 168 hours, the Batch service rejects the request with an invalid - property value error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). If you specify a new interval, - then the existing autoscale evaluation schedule will be stopped and a - new autoscale evaluation schedule will be started, with its starting - time being the time when this request was issued. - :type auto_scale_evaluation_interval: timedelta - :param pool_enable_auto_scale_options: Additional parameters for the - operation - :type pool_enable_auto_scale_options: - ~azure.batch.models.PoolEnableAutoScaleOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if pool_enable_auto_scale_options is not None: - timeout = pool_enable_auto_scale_options.timeout - client_request_id = None - if pool_enable_auto_scale_options is not None: - client_request_id = pool_enable_auto_scale_options.client_request_id - return_client_request_id = None - if pool_enable_auto_scale_options is not None: - return_client_request_id = pool_enable_auto_scale_options.return_client_request_id - ocp_date = None - if pool_enable_auto_scale_options is not None: - ocp_date = pool_enable_auto_scale_options.ocp_date - if_match = None - if pool_enable_auto_scale_options is not None: - if_match = pool_enable_auto_scale_options.if_match - if_none_match = None - if pool_enable_auto_scale_options is not None: - if_none_match = pool_enable_auto_scale_options.if_none_match - if_modified_since = None - if pool_enable_auto_scale_options is not None: - if_modified_since = pool_enable_auto_scale_options.if_modified_since - if_unmodified_since = None - if pool_enable_auto_scale_options is not None: - if_unmodified_since = pool_enable_auto_scale_options.if_unmodified_since - pool_enable_auto_scale_parameter = models.PoolEnableAutoScaleParameter(auto_scale_formula=auto_scale_formula, auto_scale_evaluation_interval=auto_scale_evaluation_interval) - - # Construct URL - url = self.enable_auto_scale.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - body_content 
= self._serialize.body(pool_enable_auto_scale_parameter, 'PoolEnableAutoScaleParameter') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - enable_auto_scale.metadata = {'url': '/pools/{poolId}/enableautoscale'} - - def evaluate_auto_scale( - self, pool_id, auto_scale_formula, pool_evaluate_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): - """Gets the result of evaluating an automatic scaling formula on the Pool. - - This API is primarily for validating an autoscale formula, as it simply - returns the result without applying the formula to the Pool. The Pool - must have auto scaling enabled in order to evaluate a formula. - - :param pool_id: The ID of the Pool on which to evaluate the automatic - scaling formula. - :type pool_id: str - :param auto_scale_formula: The formula is validated and its results - calculated, but it is not applied to the Pool. To apply the formula to - the Pool, 'Enable automatic scaling on a Pool'. For more information - about specifying this formula, see Automatically scale Compute Nodes - in an Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - :type auto_scale_formula: str - :param pool_evaluate_auto_scale_options: Additional parameters for the - operation - :type pool_evaluate_auto_scale_options: - ~azure.batch.models.PoolEvaluateAutoScaleOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: AutoScaleRun or ClientRawResponse if raw=true - :rtype: ~azure.batch.models.AutoScaleRun or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if pool_evaluate_auto_scale_options is not None: - timeout = pool_evaluate_auto_scale_options.timeout - client_request_id = None - if pool_evaluate_auto_scale_options is not None: - client_request_id = pool_evaluate_auto_scale_options.client_request_id - return_client_request_id = None - if pool_evaluate_auto_scale_options is not None: - return_client_request_id = pool_evaluate_auto_scale_options.return_client_request_id - ocp_date = None - if pool_evaluate_auto_scale_options is not None: - ocp_date = pool_evaluate_auto_scale_options.ocp_date - pool_evaluate_auto_scale_parameter = models.PoolEvaluateAutoScaleParameter(auto_scale_formula=auto_scale_formula) - - # Construct URL - url = self.evaluate_auto_scale.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(pool_evaluate_auto_scale_parameter, 'PoolEvaluateAutoScaleParameter') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('AutoScaleRun', response) - header_dict = { - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - } - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - evaluate_auto_scale.metadata = {'url': '/pools/{poolId}/evaluateautoscale'} - - def resize( - self, pool_id, pool_resize_parameter, pool_resize_options=None, custom_headers=None, raw=False, **operation_config): 
- """Changes the number of Compute Nodes that are assigned to a Pool. - - You can only resize a Pool when its allocation state is steady. If the - Pool is already resizing, the request fails with status code 409. When - you resize a Pool, the Pool's allocation state changes from steady to - resizing. You cannot resize Pools which are configured for automatic - scaling. If you try to do this, the Batch service returns an error 409. - If you resize a Pool downwards, the Batch service chooses which Compute - Nodes to remove. To remove specific Compute Nodes, use the Pool remove - Compute Nodes API instead. - - :param pool_id: The ID of the Pool to resize. - :type pool_id: str - :param pool_resize_parameter: The parameters for the request. - :type pool_resize_parameter: ~azure.batch.models.PoolResizeParameter - :param pool_resize_options: Additional parameters for the operation - :type pool_resize_options: ~azure.batch.models.PoolResizeOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if pool_resize_options is not None: - timeout = pool_resize_options.timeout - client_request_id = None - if pool_resize_options is not None: - client_request_id = pool_resize_options.client_request_id - return_client_request_id = None - if pool_resize_options is not None: - return_client_request_id = pool_resize_options.return_client_request_id - ocp_date = None - if pool_resize_options is not None: - ocp_date = pool_resize_options.ocp_date - if_match = None - if pool_resize_options is not None: - if_match = pool_resize_options.if_match - if_none_match = None - if pool_resize_options is not None: - if_none_match = pool_resize_options.if_none_match - if_modified_since = None - if pool_resize_options is not None: - if_modified_since = pool_resize_options.if_modified_since - if_unmodified_since = None - if pool_resize_options is not None: - if_unmodified_since = pool_resize_options.if_unmodified_since - - # Construct URL - url = self.resize.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - 
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(pool_resize_parameter, 'PoolResizeParameter') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - resize.metadata = {'url': '/pools/{poolId}/resize'} - - def stop_resize( - self, pool_id, pool_stop_resize_options=None, custom_headers=None, raw=False, **operation_config): - """Stops an ongoing resize operation on the Pool. - - This does not restore the Pool to its previous state before the resize - operation: it only stops any further changes being made, and the Pool - maintains its current state. After stopping, the Pool stabilizes at the - number of Compute Nodes it was at when the stop operation was done. - During the stop operation, the Pool allocation state changes first to - stopping and then to steady. A resize operation need not be an explicit - resize Pool request; this API can also be used to halt the initial - sizing of the Pool when it is created. - - :param pool_id: The ID of the Pool whose resizing you want to stop. - :type pool_id: str - :param pool_stop_resize_options: Additional parameters for the - operation - :type pool_stop_resize_options: - ~azure.batch.models.PoolStopResizeOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if pool_stop_resize_options is not None: - timeout = pool_stop_resize_options.timeout - client_request_id = None - if pool_stop_resize_options is not None: - client_request_id = pool_stop_resize_options.client_request_id - return_client_request_id = None - if pool_stop_resize_options is not None: - return_client_request_id = pool_stop_resize_options.return_client_request_id - ocp_date = None - if pool_stop_resize_options is not None: - ocp_date = pool_stop_resize_options.ocp_date - if_match = None - if pool_stop_resize_options is not None: - if_match = pool_stop_resize_options.if_match - if_none_match = None - if pool_stop_resize_options is not None: - if_none_match = pool_stop_resize_options.if_none_match - if_modified_since = None - if pool_stop_resize_options is not None: - if_modified_since = pool_stop_resize_options.if_modified_since - if_unmodified_since = None - if pool_stop_resize_options is not None: - if_unmodified_since = pool_stop_resize_options.if_unmodified_since - - # Construct URL - url = self.stop_resize.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 
'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - stop_resize.metadata = {'url': '/pools/{poolId}/stopresize'} - - def update_properties( - self, pool_id, pool_update_properties_parameter, pool_update_properties_options=None, custom_headers=None, raw=False, **operation_config): - """Updates the properties of the specified Pool. - - This fully replaces all the updatable properties of the Pool. For - example, if the Pool has a StartTask associated with it and if - StartTask is not specified with this request, then the Batch service - will remove the existing StartTask. - - :param pool_id: The ID of the Pool to update. - :type pool_id: str - :param pool_update_properties_parameter: The parameters for the - request. - :type pool_update_properties_parameter: - ~azure.batch.models.PoolUpdatePropertiesParameter - :param pool_update_properties_options: Additional parameters for the - operation - :type pool_update_properties_options: - ~azure.batch.models.PoolUpdatePropertiesOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if pool_update_properties_options is not None: - timeout = pool_update_properties_options.timeout - client_request_id = None - if pool_update_properties_options is not None: - client_request_id = pool_update_properties_options.client_request_id - return_client_request_id = None - if pool_update_properties_options is not None: - return_client_request_id = pool_update_properties_options.return_client_request_id - ocp_date = None - if pool_update_properties_options is not None: - ocp_date = pool_update_properties_options.ocp_date - - # Construct URL - url = self.update_properties.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') 
- - # Construct body - body_content = self._serialize.body(pool_update_properties_parameter, 'PoolUpdatePropertiesParameter') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [204]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - update_properties.metadata = {'url': '/pools/{poolId}/updateproperties'} - - def remove_nodes( - self, pool_id, node_remove_parameter, pool_remove_nodes_options=None, custom_headers=None, raw=False, **operation_config): - """Removes Compute Nodes from the specified Pool. - - This operation can only run when the allocation state of the Pool is - steady. When this operation runs, the allocation state changes from - steady to resizing. Each request may remove up to 100 nodes. - - :param pool_id: The ID of the Pool from which you want to remove - Compute Nodes. - :type pool_id: str - :param node_remove_parameter: The parameters for the request. - :type node_remove_parameter: ~azure.batch.models.NodeRemoveParameter - :param pool_remove_nodes_options: Additional parameters for the - operation - :type pool_remove_nodes_options: - ~azure.batch.models.PoolRemoveNodesOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if pool_remove_nodes_options is not None: - timeout = pool_remove_nodes_options.timeout - client_request_id = None - if pool_remove_nodes_options is not None: - client_request_id = pool_remove_nodes_options.client_request_id - return_client_request_id = None - if pool_remove_nodes_options is not None: - return_client_request_id = pool_remove_nodes_options.return_client_request_id - ocp_date = None - if pool_remove_nodes_options is not None: - ocp_date = pool_remove_nodes_options.ocp_date - if_match = None - if pool_remove_nodes_options is not None: - if_match = pool_remove_nodes_options.if_match - if_none_match = None - if pool_remove_nodes_options is not None: - if_none_match = pool_remove_nodes_options.if_none_match - if_modified_since = None - if pool_remove_nodes_options is not None: - if_modified_since = pool_remove_nodes_options.if_modified_since - if_unmodified_since = None - if pool_remove_nodes_options is not None: - if_unmodified_since = pool_remove_nodes_options.if_unmodified_since - - # Construct URL - url = self.remove_nodes.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'poolId': self._serialize.url("pool_id", pool_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(node_remove_parameter, 'NodeRemoveParameter') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, 
**operation_config) - - if response.status_code not in [202]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - remove_nodes.metadata = {'url': '/pools/{poolId}/removenodes'} diff --git a/sdk/batch/azure-batch/azure/batch/operations/_task_operations.py b/sdk/batch/azure-batch/azure/batch/operations/_task_operations.py deleted file mode 100644 index 9a3f731e3ca4..000000000000 --- a/sdk/batch/azure-batch/azure/batch/operations/_task_operations.py +++ /dev/null @@ -1,1027 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -import uuid -from msrest.pipeline import ClientRawResponse - -from .. import models - - -class TaskOperations(object): - """TaskOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar api_version: The API version to use for the request. Constant value: "2024-02-01.19.0". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self.api_version = "2024-02-01.19.0" - - self.config = config - - def add( - self, job_id, task, task_add_options=None, custom_headers=None, raw=False, **operation_config): - """Adds a Task to the specified Job. - - The maximum lifetime of a Task from addition to completion is 180 days. - If a Task has not completed within 180 days of being added it will be - terminated by the Batch service and left in whatever state it was in at - that time. - - :param job_id: The ID of the Job to which the Task is to be added. - :type job_id: str - :param task: The Task to be added. - :type task: ~azure.batch.models.TaskAddParameter - :param task_add_options: Additional parameters for the operation - :type task_add_options: ~azure.batch.models.TaskAddOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if task_add_options is not None: - timeout = task_add_options.timeout - client_request_id = None - if task_add_options is not None: - client_request_id = task_add_options.client_request_id - return_client_request_id = None - if task_add_options is not None: - return_client_request_id = task_add_options.return_client_request_id - ocp_date = None - if task_add_options is not None: - ocp_date = task_add_options.ocp_date - - # Construct URL - url = self.add.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(task, 'TaskAddParameter') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [201]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - add.metadata = {'url': '/jobs/{jobId}/tasks'} - - def list( - self, job_id, task_list_options=None, custom_headers=None, raw=False, **operation_config): - """Lists all of the Tasks that are associated with the specified Job. - - For multi-instance Tasks, information such as affinityId, executionInfo - and nodeInfo refer to the primary Task. Use the list subtasks API to - retrieve information about subtasks. - - :param job_id: The ID of the Job. 
- :type job_id: str - :param task_list_options: Additional parameters for the operation - :type task_list_options: ~azure.batch.models.TaskListOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: An iterator like instance of CloudTask - :rtype: - ~azure.batch.models.CloudTaskPaged[~azure.batch.models.CloudTask] - :raises: - :class:`BatchErrorException` - """ - filter = None - if task_list_options is not None: - filter = task_list_options.filter - select = None - if task_list_options is not None: - select = task_list_options.select - expand = None - if task_list_options is not None: - expand = task_list_options.expand - max_results = None - if task_list_options is not None: - max_results = task_list_options.max_results - timeout = None - if task_list_options is not None: - timeout = task_list_options.timeout - client_request_id = None - if task_list_options is not None: - client_request_id = task_list_options.client_request_id - return_client_request_id = None - if task_list_options is not None: - return_client_request_id = task_list_options.return_client_request_id - ocp_date = None - if task_list_options is not None: - ocp_date = task_list_options.ocp_date - - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if filter is not None: - query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if expand is not None: - query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') - if max_results is not None: - query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def 
internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.CloudTaskPaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list.metadata = {'url': '/jobs/{jobId}/tasks'} - - def add_collection( - self, job_id, value, task_add_collection_options=None, custom_headers=None, raw=False, **operation_config): - """Adds a collection of Tasks to the specified Job. - - Note that each Task must have a unique ID. The Batch service may not - return the results for each Task in the same order the Tasks were - submitted in this request. If the server times out or the connection is - closed during the request, the request may have been partially or fully - processed, or not at all. In such cases, the user should re-issue the - request. Note that it is up to the user to correctly handle failures - when re-issuing a request. For example, you should use the same Task - IDs during a retry so that if the prior operation succeeded, the retry - will not create extra Tasks unexpectedly. If the response contains any - Tasks which failed to add, a client can retry the request. In a retry, - it is most efficient to resubmit only Tasks that failed to add, and to - omit Tasks that were successfully added on the first attempt. The - maximum lifetime of a Task from addition to completion is 180 days. If - a Task has not completed within 180 days of being added it will be - terminated by the Batch service and left in whatever state it was in at - that time. - - :param job_id: The ID of the Job to which the Task collection is to be - added. - :type job_id: str - :param value: The total serialized size of this collection must be - less than 1MB. If it is greater than 1MB (for example if each Task has - 100's of resource files or environment variables), the request will - fail with code 'RequestBodyTooLarge' and should be retried again with - fewer Tasks. - :type value: list[~azure.batch.models.TaskAddParameter] - :param task_add_collection_options: Additional parameters for the - operation - :type task_add_collection_options: - ~azure.batch.models.TaskAddCollectionOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
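# --- Review note -------------------------------------------------------------
# The docstring above fixes the retry contract for add_collection: keep the
# same task IDs on a retry and resubmit only the tasks that failed to add. A
# hedged sketch of that loop against the removed surface (TaskAddResult /
# TaskAddStatus are the generated models); `client`, `job_id` and `tasks` are
# assumed to exist.
from azure.batch import models

to_add = list(tasks)
for _ in range(3):  # bound the number of retries
    result = client.task.add_collection(job_id, to_add)
    failed = {
        r.task_id for r in result.value
        if r.status != models.TaskAddStatus.success
    }
    if not failed:
        break
    # Reusing the original IDs means a task that actually made it server-side
    # is not duplicated on the retry.
    to_add = [t for t in to_add if t.id in failed]
# -----------------------------------------------------------------------------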
- :return: TaskAddCollectionResult or ClientRawResponse if raw=true - :rtype: ~azure.batch.models.TaskAddCollectionResult or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if task_add_collection_options is not None: - timeout = task_add_collection_options.timeout - client_request_id = None - if task_add_collection_options is not None: - client_request_id = task_add_collection_options.client_request_id - return_client_request_id = None - if task_add_collection_options is not None: - return_client_request_id = task_add_collection_options.return_client_request_id - ocp_date = None - if task_add_collection_options is not None: - ocp_date = task_add_collection_options.ocp_date - task_collection = models.TaskAddCollectionParameter(value=value) - - # Construct URL - url = self.add_collection.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(task_collection, 'TaskAddCollectionParameter') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('TaskAddCollectionResult', response) - header_dict = { - 'client-request-id': 'str', - 'request-id': 'str', - } - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - add_collection.metadata = {'url': '/jobs/{jobId}/addtaskcollection'} - - def delete( - self, job_id, task_id, task_delete_options=None, custom_headers=None, raw=False, **operation_config): - """Deletes a Task from the specified Job. 
- - When a Task is deleted, all of the files in its directory on the - Compute Node where it ran are also deleted (regardless of the retention - time). For multi-instance Tasks, the delete Task operation applies - synchronously to the primary task; subtasks and their files are then - deleted asynchronously in the background. - - :param job_id: The ID of the Job from which to delete the Task. - :type job_id: str - :param task_id: The ID of the Task to delete. - :type task_id: str - :param task_delete_options: Additional parameters for the operation - :type task_delete_options: ~azure.batch.models.TaskDeleteOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if task_delete_options is not None: - timeout = task_delete_options.timeout - client_request_id = None - if task_delete_options is not None: - client_request_id = task_delete_options.client_request_id - return_client_request_id = None - if task_delete_options is not None: - return_client_request_id = task_delete_options.return_client_request_id - ocp_date = None - if task_delete_options is not None: - ocp_date = task_delete_options.ocp_date - if_match = None - if task_delete_options is not None: - if_match = task_delete_options.if_match - if_none_match = None - if task_delete_options is not None: - if_none_match = task_delete_options.if_none_match - if_modified_since = None - if task_delete_options is not None: - if_modified_since = task_delete_options.if_modified_since - if_unmodified_since = None - if task_delete_options is not None: - if_unmodified_since = task_delete_options.if_unmodified_since - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str'), - 'taskId': self._serialize.url("task_id", task_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - 
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - }) - return client_raw_response - delete.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} - - def get( - self, job_id, task_id, task_get_options=None, custom_headers=None, raw=False, **operation_config): - """Gets information about the specified Task. - - For multi-instance Tasks, information such as affinityId, executionInfo - and nodeInfo refer to the primary Task. Use the list subtasks API to - retrieve information about subtasks. - - :param job_id: The ID of the Job that contains the Task. - :type job_id: str - :param task_id: The ID of the Task to get information about. - :type task_id: str - :param task_get_options: Additional parameters for the operation - :type task_get_options: ~azure.batch.models.TaskGetOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
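# --- Review note -------------------------------------------------------------
# delete() accepts the usual conditional headers, so the ETag from a prior
# get() can guard against racing with another writer. Hedged sketch against
# the removed surface; TaskDeleteOptions is the generated options model, and
# `client`, `job_id` and `task_id` are assumed to exist.
from azure.batch import models

task = client.task.get(job_id, task_id)
client.task.delete(
    job_id, task_id,
    task_delete_options=models.TaskDeleteOptions(if_match=task.e_tag),
)
# -----------------------------------------------------------------------------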
- :return: CloudTask or ClientRawResponse if raw=true - :rtype: ~azure.batch.models.CloudTask or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - select = None - if task_get_options is not None: - select = task_get_options.select - expand = None - if task_get_options is not None: - expand = task_get_options.expand - timeout = None - if task_get_options is not None: - timeout = task_get_options.timeout - client_request_id = None - if task_get_options is not None: - client_request_id = task_get_options.client_request_id - return_client_request_id = None - if task_get_options is not None: - return_client_request_id = task_get_options.return_client_request_id - ocp_date = None - if task_get_options is not None: - ocp_date = task_get_options.ocp_date - if_match = None - if task_get_options is not None: - if_match = task_get_options.if_match - if_none_match = None - if task_get_options is not None: - if_none_match = task_get_options.if_none_match - if_modified_since = None - if task_get_options is not None: - if_modified_since = task_get_options.if_modified_since - if_unmodified_since = None - if task_get_options is not None: - if_unmodified_since = task_get_options.if_unmodified_since - - # Construct URL - url = self.get.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str'), - 'taskId': self._serialize.url("task_id", task_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if expand is not None: - query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send 
request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('CloudTask', response) - header_dict = { - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - } - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - get.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} - - def update( - self, job_id, task_id, constraints=None, task_update_options=None, custom_headers=None, raw=False, **operation_config): - """Updates the properties of the specified Task. - - :param job_id: The ID of the Job containing the Task. - :type job_id: str - :param task_id: The ID of the Task to update. - :type task_id: str - :param constraints: Constraints that apply to this Task. If omitted, - the Task is given the default constraints. For multi-instance Tasks, - updating the retention time applies only to the primary Task and not - subtasks. - :type constraints: ~azure.batch.models.TaskConstraints - :param task_update_options: Additional parameters for the operation - :type task_update_options: ~azure.batch.models.TaskUpdateOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if task_update_options is not None: - timeout = task_update_options.timeout - client_request_id = None - if task_update_options is not None: - client_request_id = task_update_options.client_request_id - return_client_request_id = None - if task_update_options is not None: - return_client_request_id = task_update_options.return_client_request_id - ocp_date = None - if task_update_options is not None: - ocp_date = task_update_options.ocp_date - if_match = None - if task_update_options is not None: - if_match = task_update_options.if_match - if_none_match = None - if task_update_options is not None: - if_none_match = task_update_options.if_none_match - if_modified_since = None - if task_update_options is not None: - if_modified_since = task_update_options.if_modified_since - if_unmodified_since = None - if task_update_options is not None: - if_unmodified_since = task_update_options.if_unmodified_since - task_update_parameter = models.TaskUpdateParameter(constraints=constraints) - - # Construct URL - url = self.update.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str'), - 'taskId': self._serialize.url("task_id", task_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') 
- - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(task_update_parameter, 'TaskUpdateParameter') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - update.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} - - def list_subtasks( - self, job_id, task_id, task_list_subtasks_options=None, custom_headers=None, raw=False, **operation_config): - """Lists all of the subtasks that are associated with the specified - multi-instance Task. - - If the Task is not a multi-instance Task then this returns an empty - collection. - - :param job_id: The ID of the Job. - :type job_id: str - :param task_id: The ID of the Task. - :type task_id: str - :param task_list_subtasks_options: Additional parameters for the - operation - :type task_list_subtasks_options: - ~azure.batch.models.TaskListSubtasksOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
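# --- Review note -------------------------------------------------------------
# update() only takes the constraints bag (it is wrapped into a
# TaskUpdateParameter above); leaving it out resets the task to the default
# constraints. Illustrative call against the removed surface, with `client`,
# `job_id` and `task_id` assumed to exist.
import datetime
from azure.batch import models

client.task.update(
    job_id, task_id,
    constraints=models.TaskConstraints(
        max_task_retry_count=3,
        max_wall_clock_time=datetime.timedelta(hours=1),
        retention_time=datetime.timedelta(days=7),
    ),
)  # returns None on HTTP 200
# -----------------------------------------------------------------------------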
- :return: CloudTaskListSubtasksResult or ClientRawResponse if raw=true - :rtype: ~azure.batch.models.CloudTaskListSubtasksResult or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - select = None - if task_list_subtasks_options is not None: - select = task_list_subtasks_options.select - timeout = None - if task_list_subtasks_options is not None: - timeout = task_list_subtasks_options.timeout - client_request_id = None - if task_list_subtasks_options is not None: - client_request_id = task_list_subtasks_options.client_request_id - return_client_request_id = None - if task_list_subtasks_options is not None: - return_client_request_id = task_list_subtasks_options.return_client_request_id - ocp_date = None - if task_list_subtasks_options is not None: - ocp_date = task_list_subtasks_options.ocp_date - - # Construct URL - url = self.list_subtasks.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str'), - 'taskId': self._serialize.url("task_id", task_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.BatchErrorException(self._deserialize, response) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('CloudTaskListSubtasksResult', response) - header_dict = { - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - } - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - client_raw_response.add_headers(header_dict) - return client_raw_response - - return deserialized - list_subtasks.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/subtasksinfo'} - - def terminate( - self, job_id, task_id, task_terminate_options=None, custom_headers=None, raw=False, **operation_config): - """Terminates the specified Task. - - When the Task has been terminated, it moves to the completed state. 
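# --- Review note -------------------------------------------------------------
# list_subtasks() returns an empty collection for anything that is not a
# multi-instance task, so it is safe to call unconditionally. Sketch against
# the removed surface (CloudTaskListSubtasksResult.value holds
# SubtaskInformation entries); `client`, `job_id` and `task_id` are assumed.
subtasks = client.task.list_subtasks(job_id, task_id)
for sub in subtasks.value or []:
    print(sub.id, sub.state)
# -----------------------------------------------------------------------------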
For - multi-instance Tasks, the terminate Task operation applies - synchronously to the primary task; subtasks are then terminated - asynchronously in the background. - - :param job_id: The ID of the Job containing the Task. - :type job_id: str - :param task_id: The ID of the Task to terminate. - :type task_id: str - :param task_terminate_options: Additional parameters for the operation - :type task_terminate_options: ~azure.batch.models.TaskTerminateOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if task_terminate_options is not None: - timeout = task_terminate_options.timeout - client_request_id = None - if task_terminate_options is not None: - client_request_id = task_terminate_options.client_request_id - return_client_request_id = None - if task_terminate_options is not None: - return_client_request_id = task_terminate_options.return_client_request_id - ocp_date = None - if task_terminate_options is not None: - ocp_date = task_terminate_options.ocp_date - if_match = None - if task_terminate_options is not None: - if_match = task_terminate_options.if_match - if_none_match = None - if task_terminate_options is not None: - if_none_match = task_terminate_options.if_none_match - if_modified_since = None - if task_terminate_options is not None: - if_modified_since = task_terminate_options.if_modified_since - if_unmodified_since = None - if task_terminate_options is not None: - if_unmodified_since = task_terminate_options.if_unmodified_since - - # Construct URL - url = self.terminate.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str'), - 'taskId': self._serialize.url("task_id", task_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if 
if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [204]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - terminate.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/terminate'} - - def reactivate( - self, job_id, task_id, task_reactivate_options=None, custom_headers=None, raw=False, **operation_config): - """Reactivates a Task, allowing it to run again even if its retry count - has been exhausted. - - Reactivation makes a Task eligible to be retried again up to its - maximum retry count. The Task's state is changed to active. As the Task - is no longer in the completed state, any previous exit code or failure - information is no longer available after reactivation. Each time a Task - is reactivated, its retry count is reset to 0. Reactivation will fail - for Tasks that are not completed or that previously completed - successfully (with an exit code of 0). Additionally, it will fail if - the Job has completed (or is terminating or deleting). - - :param job_id: The ID of the Job containing the Task. - :type job_id: str - :param task_id: The ID of the Task to reactivate. - :type task_id: str - :param task_reactivate_options: Additional parameters for the - operation - :type task_reactivate_options: - ~azure.batch.models.TaskReactivateOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
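# --- Review note -------------------------------------------------------------
# terminate() and reactivate() both return None on a 204 and raise
# BatchErrorException otherwise. Per the docstring above, reactivation resets
# the retry count to 0 and is rejected for tasks that have not completed, that
# completed successfully, or whose job is completing. Minimal sketch; `client`,
# `job_id` and `task_id` are assumed to exist.
from azure.batch import models

client.task.terminate(job_id, task_id)  # task moves to the completed state
try:
    client.task.reactivate(job_id, task_id)  # back to active, retry count 0
except models.BatchErrorException as err:
    print(err.error.code)  # e.g. the task had completed successfully
# -----------------------------------------------------------------------------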
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if task_reactivate_options is not None: - timeout = task_reactivate_options.timeout - client_request_id = None - if task_reactivate_options is not None: - client_request_id = task_reactivate_options.client_request_id - return_client_request_id = None - if task_reactivate_options is not None: - return_client_request_id = task_reactivate_options.return_client_request_id - ocp_date = None - if task_reactivate_options is not None: - ocp_date = task_reactivate_options.ocp_date - if_match = None - if task_reactivate_options is not None: - if_match = task_reactivate_options.if_match - if_none_match = None - if task_reactivate_options is not None: - if_none_match = task_reactivate_options.if_none_match - if_modified_since = None - if task_reactivate_options is not None: - if_modified_since = task_reactivate_options.if_modified_since - if_unmodified_since = None - if task_reactivate_options is not None: - if_unmodified_since = task_reactivate_options.if_unmodified_since - - # Construct URL - url = self.reactivate.metadata['url'] - path_format_arguments = { - 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), - 'jobId': self._serialize.url("job_id", job_id, 'str'), - 'taskId': self._serialize.url("task_id", task_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [204]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - 
client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - reactivate.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/reactivate'} diff --git a/sdk/batch/azure-batch/azure/batch/py.typed b/sdk/batch/azure-batch/azure/batch/py.typed index e69de29bb2d1..e5aff4f83af8 100644 --- a/sdk/batch/azure-batch/azure/batch/py.typed +++ b/sdk/batch/azure-batch/azure/batch/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/batch/azure-batch/setup.py b/sdk/batch/azure-batch/setup.py index 5cc77edf7781..c8218eb0bece 100644 --- a/sdk/batch/azure-batch/setup.py +++ b/sdk/batch/azure-batch/setup.py @@ -1,74 +1,70 @@ -#!/usr/bin/env python - -#------------------------------------------------------------------------- +# coding=utf-8 +# -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -#-------------------------------------------------------------------------- +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# coding: utf-8 +import os import re -import os.path -from io import open -from setuptools import find_packages, setup +from setuptools import setup, find_packages + -# Change the PACKAGE_NAME only to change folder and different name PACKAGE_NAME = "azure-batch" -PACKAGE_PPRINT_NAME = "Batch" +PACKAGE_PPRINT_NAME = "Azure Batch" # a-b-c => a/b/c -package_folder_path = PACKAGE_NAME.replace('-', '/') -# a-b-c => a.b.c -namespace_name = PACKAGE_NAME.replace('-', '.') +package_folder_path = PACKAGE_NAME.replace("-", "/") # Version extraction inspired from 'requests' -with open(os.path.join(package_folder_path, '_version.py'), 'r') as fd: - version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', - fd.read(), re.MULTILINE).group(1) +with open(os.path.join(package_folder_path, "_version.py"), "r") as fd: + version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) if not version: - raise RuntimeError('Cannot find version information') + raise RuntimeError("Cannot find version information") -with open('README.md', encoding='utf-8') as f: - readme = f.read() -with open('CHANGELOG.md', encoding='utf-8') as f: - changelog = f.read() setup( name=PACKAGE_NAME, version=version, - description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME), - long_description=readme + '\n\n' + changelog, - long_description_content_type='text/markdown', - license='MIT License', - author='Microsoft Corporation', - author_email='azpysdkhelp@microsoft.com', - url='https://github.com/Azure/azure-sdk-for-python', + description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME), + long_description=open("README.md", "r").read(), + long_description_content_type="text/markdown", + license="MIT License", + author="Microsoft Corporation", + author_email="azpysdkhelp@microsoft.com", + url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk", keywords="azure, azure sdk", classifiers=[ - "Development Status :: 5 - Production/Stable", - 
'Programming Language :: Python', - 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - 'License :: OSI Approved :: MIT License', + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "License :: OSI Approved :: MIT License", ], zip_safe=False, - packages=find_packages(exclude=[ - 'tests', - # Exclude packages that will be covered by PEP420 or nspkg - 'azure', - ]), + packages=find_packages( + exclude=[ + "tests", + # Exclude packages that will be covered by PEP420 or nspkg + "azure", + ] + ), include_package_data=True, package_data={ - 'pytyped': ['py.typed'], + "azure.batch": ["py.typed"], }, - python_requires=">=3.6", install_requires=[ - 'msrestazure>=0.4.32,<2.0.0', - 'azure-common~=1.1', + "isodate>=0.6.1", + "azure-core>=1.30.0", + "typing-extensions>=4.6.0", ], + python_requires=">=3.8", ) diff --git a/sdk/batch/azure-batch/tests/batch_preparers.py b/sdk/batch/azure-batch/tests/batch_preparers.py index 3e195a145928..6137a1de28c6 100644 --- a/sdk/batch/azure-batch/tests/batch_preparers.py +++ b/sdk/batch/azure-batch/tests/batch_preparers.py @@ -13,92 +13,87 @@ from devtools_testutils.fake_credentials import BATCH_TEST_PASSWORD from devtools_testutils.resource_testcase import RESOURCE_GROUP_PARAM -AZURE_ARM_ENDPOINT = 'https://management.azure.com' -BATCH_ACCOUNT_PARAM = 'batch_account' -STORAGE_ACCOUNT_PARAM = 'storage_account' -FakeAccount = namedtuple( - 'FakeResource', - ['name', 'account_endpoint'] -) +AZURE_ARM_ENDPOINT = "https://management.azure.com" +BATCH_ACCOUNT_PARAM = "batch_account" +STORAGE_ACCOUNT_PARAM = "storage_account" +FakeAccount = namedtuple("FakeResource", ["name", "account_endpoint"]) + class AccountPreparer(AzureMgmtPreparer): - def __init__(self, - name_prefix='batch', - location='westus', - parameter_name=BATCH_ACCOUNT_PARAM, - resource_group_parameter_name=RESOURCE_GROUP_PARAM, - disable_recording=True, - playback_fake_resource=None, - batch_environment=None, # Set to "pilotprod1" or "pilotprod2" if testing in PPE - client_kwargs=None): - super(AccountPreparer, self).__init__(name_prefix, 24, - disable_recording=disable_recording, - playback_fake_resource=playback_fake_resource, - client_kwargs=client_kwargs) + def __init__( + self, + name_prefix="batch", + location="westus", + parameter_name=BATCH_ACCOUNT_PARAM, + resource_group_parameter_name=RESOURCE_GROUP_PARAM, + disable_recording=True, + playback_fake_resource=None, + batch_environment=None, # Set to "pilotprod1" or "pilotprod2" if testing in PPE + client_kwargs=None, + ): + super(AccountPreparer, self).__init__( + name_prefix, + 24, + disable_recording=disable_recording, + playback_fake_resource=playback_fake_resource, + client_kwargs=client_kwargs, + ) self.location = location self.resource_group_parameter_name = resource_group_parameter_name self.parameter_name = parameter_name - self.creds_parameter = 'credentials' - self.parameter_name_for_location='location' - self.resource_moniker=name_prefix + self.creds_parameter = 
"credentials" + self.parameter_name_for_location = "location" + self.resource_moniker = name_prefix self.batch_environment = batch_environment def _get_resource_group(self, **kwargs): try: return kwargs[self.resource_group_parameter_name] except KeyError: - template = 'To create a batch account a resource group is required. Please add ' \ - 'decorator @{} in front of this storage account preparer.' + template = ( + "To create a batch account a resource group is required. Please add " + "decorator @{} in front of this storage account preparer." + ) raise AzureTestError(template.format(ResourceGroupPreparer.__name__)) def _get_storage_account(self, **kwargs): return kwargs.get(STORAGE_ACCOUNT_PARAM) def _add_app_package(self, group_name, batch_name): - self.client.application.create( - group_name, batch_name, 'application_id') - package_ref = self.client.application_package.create( - group_name, batch_name, 'application_id', 'v1.0') + self.client.application.create(group_name, batch_name, "application_id") + package_ref = self.client.application_package.create(group_name, batch_name, "application_id", "v1.0") try: - with io.BytesIO(b'Hello World') as f: - headers = {'x-ms-blob-type': 'BlockBlob'} + with io.BytesIO(b"Hello World") as f: + headers = {"x-ms-blob-type": "BlockBlob"} upload = requests.put(package_ref.storage_url, headers=headers, data=f.read()) if not upload: - raise ValueError('Upload failed: {!r}'.format(upload)) + raise ValueError("Upload failed: {!r}".format(upload)) except Exception as err: - raise AzureTestError('Failed to upload test package: {}'.format(err)) + raise AzureTestError("Failed to upload test package: {}".format(err)) else: - self.client.application_package.activate(group_name, batch_name, 'application_id', 'v1.0', {'format': 'zip'}) + self.client.application_package.activate( + group_name, batch_name, "application_id", "v1.0", {"format": "zip"} + ) def create_resource(self, name, **kwargs): if self.is_live: - self.client = self.create_mgmt_client( - azure.mgmt.batch.BatchManagementClient, - base_url=AZURE_ARM_ENDPOINT) + self.client = self.create_mgmt_client(azure.mgmt.batch.BatchManagementClient, base_url=AZURE_ARM_ENDPOINT) group = self._get_resource_group(**kwargs) batch_account = models.BatchAccountCreateParameters( location=self.location, ) storage = self._get_storage_account(**kwargs) if storage: - storage_resource = '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}'.format( - self.test_class_instance.settings.get("AZURE_SUBSCRIPTION_ID"), - group.name, - storage.name + storage_resource = ( + "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}".format( + self.test_class_instance.settings.get("AZURE_SUBSCRIPTION_ID"), group.name, storage.name + ) ) - batch_account.auto_storage=models.AutoStorageBaseProperties(storage_account_id=storage_resource) - account_setup = self.client.batch_account.begin_create( - group.name, - name, - batch_account) + batch_account.auto_storage = models.AutoStorageBaseProperties(storage_account_id=storage_resource) + account_setup = self.client.batch_account.begin_create(group.name, name, batch_account) self.resource = account_setup.result() - keys = self.client.batch_account.get_keys( - group.name, - name - ) - credentials = SharedKeyCredentials( - keys.account_name, - keys.primary) + keys = self.client.batch_account.get_keys(group.name, name) + credentials = SharedKeyCredentials(keys.account_name, keys.primary) if storage: self._add_app_package(group.name, name) 
else: @@ -107,15 +102,10 @@ def create_resource(self, name, **kwargs): env_prefix = "" if self.batch_environment is None else ".{}".format(self.batch_environment) self.resource = FakeAccount( - name=name, - account_endpoint="https://{}{}.{}.batch.azure.com".format(name, env_prefix, self.location)) - credentials = SharedKeyCredentials( - name, - 'ZmFrZV9hY29jdW50X2tleQ==') - return { - self.parameter_name: self.resource, - self.creds_parameter: credentials - } + name=name, account_endpoint="https://{}{}.{}.batch.azure.com".format(name, env_prefix, self.location) + ) + credentials = SharedKeyCredentials(name, "ZmFrZV9hY29jdW50X2tleQ==") + return {self.parameter_name: self.resource, self.creds_parameter: credentials} def remove_resource(self, name, **kwargs): if self.is_live: @@ -128,21 +118,27 @@ def remove_resource(self, name, **kwargs): class PoolPreparer(AzureMgmtPreparer): - def __init__(self, - name_prefix='', - size=0, - os='Linux', - config='iaas', - parameter_name='batch_pool', - location=None, - resource_group_parameter_name=RESOURCE_GROUP_PARAM, - batch_account_parameter_name=BATCH_ACCOUNT_PARAM, - disable_recording=True, playback_fake_resource=None, - client_kwargs=None): - super(PoolPreparer, self).__init__(name_prefix, 24, - disable_recording=disable_recording, - playback_fake_resource=playback_fake_resource, - client_kwargs=client_kwargs) + def __init__( + self, + name_prefix="", + size=0, + os="Linux", + config="iaas", + parameter_name="batch_pool", + location=None, + resource_group_parameter_name=RESOURCE_GROUP_PARAM, + batch_account_parameter_name=BATCH_ACCOUNT_PARAM, + disable_recording=True, + playback_fake_resource=None, + client_kwargs=None, + ): + super(PoolPreparer, self).__init__( + name_prefix, + 24, + disable_recording=disable_recording, + playback_fake_resource=playback_fake_resource, + client_kwargs=client_kwargs, + ) self.size = size self.os = os self.config = config @@ -154,67 +150,70 @@ def _get_resource_group(self, **kwargs): try: return kwargs[self.resource_group_parameter_name] except KeyError: - template = 'To create a batch account a resource group is required. Please add ' \ - 'decorator @{} in front of this storage account preparer.' + template = ( + "To create a batch account a resource group is required. Please add " + "decorator @{} in front of this storage account preparer." + ) raise AzureTestError(template.format(ResourceGroupPreparer.__name__)) def _get_batch_account(self, **kwargs): try: return kwargs[self.batch_account_parameter_name] except KeyError: - template = 'To create a batch poool, a batch account is required. Please add ' \ - 'decorator @AccountPreparer in front of this pool preparer.' + template = ( + "To create a batch poool, a batch account is required. Please add " + "decorator @AccountPreparer in front of this pool preparer." 
+ ) raise AzureTestError(template) def create_resource(self, name, **kwargs): if self.is_live: - self.client = self.create_mgmt_client( - azure.mgmt.batch.BatchManagementClient, - base_url=AZURE_ARM_ENDPOINT) + self.client = self.create_mgmt_client(azure.mgmt.batch.BatchManagementClient, base_url=AZURE_ARM_ENDPOINT) group = self._get_resource_group(**kwargs) batch_account = self._get_batch_account(**kwargs) - user = models.UserAccount(name='task-user', password=BATCH_TEST_PASSWORD, elevation_level=models.ElevationLevel.admin) - vm_size = 'standard_d2_v2' + user = models.UserAccount( + name="task-user", password=BATCH_TEST_PASSWORD, elevation_level=models.ElevationLevel.admin + ) + vm_size = "standard_d2_v2" - if self.config == 'paas': - vm_size = 'small' + if self.config == "paas": + vm_size = "small" deployment = models.DeploymentConfiguration( - cloud_service_configuration=models.CloudServiceConfiguration( - os_family='5')) - elif self.os == 'Windows': + cloud_service_configuration=models.CloudServiceConfiguration(os_family="5") + ) + elif self.os == "Windows": deployment = models.DeploymentConfiguration( virtual_machine_configuration=models.VirtualMachineConfiguration( image_reference=models.ImageReference( - publisher='MicrosoftWindowsServer', - offer='WindowsServer', - sku='2016-Datacenter-smalldisk' + publisher="MicrosoftWindowsServer", offer="WindowsServer", sku="2016-Datacenter-smalldisk" ), - node_agent_sku_id='batch.node.windows amd64')) + node_agent_sku_id="batch.node.windows amd64", + ) + ) else: deployment = models.DeploymentConfiguration( virtual_machine_configuration=models.VirtualMachineConfiguration( image_reference=models.ImageReference( - publisher='Canonical', - offer='UbuntuServer', - sku='18.04-LTS' + publisher="Canonical", offer="UbuntuServer", sku="18.04-LTS" ), - node_agent_sku_id='batch.node.ubuntu 18.04')) + node_agent_sku_id="batch.node.ubuntu 18.04", + ) + ) parameters = models.Pool( display_name="test_pool", vm_size=vm_size, user_accounts=[user], deployment_configuration=deployment, scale_settings=models.ScaleSettings( - fixed_scale=models.FixedScaleSettings( - target_dedicated_nodes=self.size - ) - ) + fixed_scale=models.FixedScaleSettings(target_dedicated_nodes=self.size) + ), ) - self.resource = self.client.pool.create( - group.name, batch_account.name, name, parameters) - while (self.resource.allocation_state != models.AllocationState.steady - and self.resource.current_dedicated_nodes < self.size): + self.resource = self.client.pool.create(group.name, batch_account.name, name, parameters) + while ( + self.resource.allocation_state != models.AllocationState.steady + and self.resource.current_dedicated_nodes < self.size + ): time.sleep(10) self.resource = self.client.pool.get(group.name, batch_account.name, name) else: @@ -235,34 +234,42 @@ def remove_resource(self, name, **kwargs): class JobPreparer(AzureMgmtPreparer): - def __init__(self, - name_prefix='batch', - parameter_name='batch_job', - batch_account_parameter_name=BATCH_ACCOUNT_PARAM, - batch_credentials_parameter_name='credentials', - batch_pool_parameter_name='batch_pool', - disable_recording=True, playback_fake_resource=None, - client_kwargs=None, **extra_args): - super(JobPreparer, self).__init__(name_prefix, 24, - disable_recording=disable_recording, - playback_fake_resource=playback_fake_resource, - client_kwargs=client_kwargs) + def __init__( + self, + name_prefix="batch", + parameter_name="batch_job", + batch_account_parameter_name=BATCH_ACCOUNT_PARAM, + 
batch_credentials_parameter_name="credentials", + batch_pool_parameter_name="batch_pool", + disable_recording=True, + playback_fake_resource=None, + client_kwargs=None, + **extra_args + ): + super(JobPreparer, self).__init__( + name_prefix, + 24, + disable_recording=disable_recording, + playback_fake_resource=playback_fake_resource, + client_kwargs=client_kwargs, + ) self.parameter_name = parameter_name self.batch_account_parameter_name = batch_account_parameter_name self.batch_credentials_parameter_name = batch_credentials_parameter_name self.batch_pool_parameter_name = batch_pool_parameter_name self.extra_args = extra_args - self.resource_moniker=name_prefix + self.resource_moniker = name_prefix def _get_batch_client(self, **kwargs): try: account = kwargs[self.batch_account_parameter_name] credentials = kwargs[self.batch_credentials_parameter_name] - return azure.batch.BatchServiceClient( - credentials, batch_url='https://' + account.account_endpoint) + return azure.batch.BatchServiceClient(credentials, batch_url="https://" + account.account_endpoint) except KeyError: - template = 'To create a batch job, a batch account is required. Please add ' \ - 'decorator @AccountPreparer in front of this job preparer.' + template = ( + "To create a batch job, a batch account is required. Please add " + "decorator @AccountPreparer in front of this job preparer." + ) raise AzureTestError(template) def _get_batch_pool_id(self, **kwargs): @@ -273,25 +280,17 @@ def _get_batch_pool_id(self, **kwargs): auto_pool = azure.batch.models.AutoPoolSpecification( pool_lifetime_option=azure.batch.models.PoolLifetimeOption.job, pool=azure.batch.models.PoolSpecification( - vm_size='small', - cloud_service_configuration=azure.batch.models.CloudServiceConfiguration( - os_family='5' - ) - ) - ) - return azure.batch.models.PoolInformation( - auto_pool_specification=auto_pool + vm_size="small", + cloud_service_configuration=azure.batch.models.CloudServiceConfiguration(os_family="5"), + ), ) + return azure.batch.models.PoolInformation(auto_pool_specification=auto_pool) def create_resource(self, name, **kwargs): if self.is_live: self.client = self._get_batch_client(**kwargs) pool = self._get_batch_pool_id(**kwargs) - self.resource = azure.batch.models.JobAddParameter( - id=name, - pool_info=pool, - **self.extra_args - ) + self.resource = azure.batch.models.JobAddParameter(id=name, pool_info=pool, **self.extra_args) try: self.client.job.add(self.resource) except azure.batch.models.BatchErrorException as e: @@ -307,4 +306,4 @@ def create_resource(self, name, **kwargs): def remove_resource(self, name, **kwargs): if self.is_live: - self.client.job.delete(name) \ No newline at end of file + self.client.job.delete(name) diff --git a/sdk/batch/azure-batch/tests/conftest.py b/sdk/batch/azure-batch/tests/conftest.py index af75f6837247..f68a10217239 100644 --- a/sdk/batch/azure-batch/tests/conftest.py +++ b/sdk/batch/azure-batch/tests/conftest.py @@ -1,7 +1,8 @@ import pytest from devtools_testutils import test_proxy + # autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method @pytest.fixture(scope="session", autouse=True) def start_proxy(test_proxy): - return \ No newline at end of file + return diff --git a/sdk/batch/azure-batch/tests/test_batch.py b/sdk/batch/azure-batch/tests/test_batch.py index 9f9509130ac9..ef0c8f477770 100644 --- a/sdk/batch/azure-batch/tests/test_batch.py +++ b/sdk/batch/azure-batch/tests/test_batch.py @@ -1,10 +1,10 @@ # coding: utf-8 
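# --- Review note -------------------------------------------------------------
# The preparers above hand each test a SharedKeyCredentials plus an account
# endpoint, and create_sharedkey_client below builds the client the same way
# _get_batch_client does. Hedged sketch; the SharedKeyCredentials import path
# (azure.batch.batch_auth) and the account name/key are assumptions.
import azure.batch
from azure.batch.batch_auth import SharedKeyCredentials

credentials = SharedKeyCredentials("myaccount", "<base64-account-key>")
client = azure.batch.BatchServiceClient(
    credentials, batch_url="https://myaccount.eastus.batch.azure.com"
)
print(len(list(client.job.list())))  # every operation group hangs off the client
# -----------------------------------------------------------------------------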
-#------------------------------------------------------------------------- +# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. -#-------------------------------------------------------------------------- +# -------------------------------------------------------------------------- import datetime import io import time @@ -13,49 +13,40 @@ from azure.batch import models import pytest -from batch_preparers import ( - AccountPreparer, - PoolPreparer, - JobPreparer -) +from batch_preparers import AccountPreparer, PoolPreparer, JobPreparer from devtools_testutils import ( AzureMgmtRecordedTestCase, ResourceGroupPreparer, StorageAccountPreparer, - CachedResourceGroupPreparer + CachedResourceGroupPreparer, ) from devtools_testutils.fake_credentials import BATCH_TEST_PASSWORD -AZURE_LOCATION = 'eastus' +AZURE_LOCATION = "eastus" BATCH_ENVIRONMENT = None # Set this to None if testing against prod -BATCH_RESOURCE = 'https://batch.core.windows.net/' -DEFAULT_VM_SIZE = 'standard_d2_v2' +BATCH_RESOURCE = "https://batch.core.windows.net/" +DEFAULT_VM_SIZE = "standard_d2_v2" class TestBatch(AzureMgmtRecordedTestCase): def _batch_url(self, batch): - if batch.account_endpoint.startswith('https://'): + if batch.account_endpoint.startswith("https://"): return batch.account_endpoint else: - return 'https://' + batch.account_endpoint + return "https://" + batch.account_endpoint def create_aad_client(self, batch_account, **kwargs): credentials = self.settings.get_credentials(resource=BATCH_RESOURCE) client = self.create_basic_client( - azure.batch.BatchServiceClient, - credentials=credentials, - batch_url=self._batch_url(batch_account) + azure.batch.BatchServiceClient, credentials=credentials, batch_url=self._batch_url(batch_account) ) return client def create_sharedkey_client(self, batch_account, credentials, **kwargs): - client = azure.batch.BatchServiceClient( - credentials=credentials, - batch_url=self._batch_url(batch_account) - ) + client = azure.batch.BatchServiceClient(credentials=credentials, batch_url=self._batch_url(batch_account)) return client def assertBatchError(self, code, func, *args, **kwargs): @@ -83,7 +74,7 @@ def assertCreateTasksError(self, code, func, *args, **kwargs): @pytest.mark.live_test_only("Can't use recordings until tests use the test proxy") @ResourceGroupPreparer(location=AZURE_LOCATION) - @StorageAccountPreparer(name_prefix='batch1', location=AZURE_LOCATION) + @StorageAccountPreparer(name_prefix="batch1", location=AZURE_LOCATION) @AccountPreparer(location=AZURE_LOCATION, batch_environment=BATCH_ENVIRONMENT) @JobPreparer() def test_batch_applications(self, **kwargs): @@ -94,17 +85,19 @@ def test_batch_applications(self, **kwargs): assert len(apps) == 1 # Test Get Application - app = client.application.get('application_id') + app = client.application.get("application_id") assert isinstance(app, models.ApplicationSummary) - assert app.id == 'application_id' - assert app.versions == ['v1.0'] + assert app.id == "application_id" + assert app.versions == ["v1.0"] # Test Create Task with Application Package - task_id = 'python_task_with_app_package' + task_id = "python_task_with_app_package" task = models.TaskAddParameter( id=task_id, command_line='cmd /c "echo hello world"', - application_package_references=[models.ApplicationPackageReference(application_id='application_id', version='v1.0')] + 
application_package_references=[ + models.ApplicationPackageReference(application_id="application_id", version="v1.0") + ], ) response = client.task.add(batch_job.id, task) assert response is None @@ -112,7 +105,7 @@ def test_batch_applications(self, **kwargs): # Test Get Task with Application Package task = client.task.get(batch_job.id, task_id) assert isinstance(task, models.CloudTask) - assert task.application_package_references[0].application_id == 'application_id' + assert task.application_package_references[0].application_id == "application_id" @pytest.mark.live_test_only("Can't use recordings until tests use the test proxy") @ResourceGroupPreparer(location=AZURE_LOCATION) @@ -121,37 +114,38 @@ def test_batch_certificates(self, **kwargs): client = self.create_sharedkey_client(**kwargs) # Test Add Certificate certificate = models.CertificateAddParameter( - thumbprint='cff2ab63c8c955aaf71989efa641b906558d9fb7', - thumbprint_algorithm='sha1', - data='MIIGMQIBAzCCBe0GCSqGSIb3DQEHAaCCBd4EggXaMIIF1jCCA8AGCSqGSIb3DQEHAaCCA7EEggOtMIIDqTCCA6UGCyqGSIb3DQEMCgECoIICtjCCArIwHAYKKoZIhvcNAQwBAzAOBAhyd3xCtln3iQICB9AEggKQhe5P10V9iV1BsDlwWT561Yu2hVq3JT8ae/ebx1ZR/gMApVereDKkS9Zg4vFyssusHebbK5pDpU8vfAqle0TM4m7wGsRj453ZorSPUfMpHvQnAOn+2pEpWdMThU7xvZ6DVpwhDOQk9166z+KnKdHGuJKh4haMT7Rw/6xZ1rsBt2423cwTrQVMQyACrEkianpuujubKltN99qRoFAxhQcnYE2KlYKw7lRcExq6mDSYAyk5xJZ1ZFdLj6MAryZroQit/0g5eyhoNEKwWbi8px5j71pRTf7yjN+deMGQKwbGl+3OgaL1UZ5fCjypbVL60kpIBxLZwIJ7p3jJ+q9pbq9zSdzshPYor5lxyUfXqaso/0/91ayNoBzg4hQGh618PhFI6RMGjwkzhB9xk74iweJ9HQyIHf8yx2RCSI22JuCMitPMWSGvOszhbNx3AEDLuiiAOHg391mprEtKZguOIr9LrJwem/YmcHbwyz5YAbZmiseKPkllfC7dafFfCFEkj6R2oegIsZo0pEKYisAXBqT0g+6/jGwuhlZcBo0f7UIZm88iA3MrJCjlXEgV5OcQdoWj+hq0lKEdnhtCKr03AIfukN6+4vjjarZeW1bs0swq0l3XFf5RHa11otshMS4mpewshB9iO9MuKWpRxuxeng4PlKZ/zuBqmPeUrjJ9454oK35Pq+dghfemt7AUpBH/KycDNIZgfdEWUZrRKBGnc519C+RTqxyt5hWL18nJk4LvSd3QKlJ1iyJxClhhb/NWEzPqNdyA5cxen+2T9bd/EqJ2KzRv5/BPVwTQkHH9W/TZElFyvFfOFIW2+03RKbVGw72Mr/0xKZ+awAnEfoU+SL/2Gj2m6PHkqFX2sOCi/tN9EA4xgdswEwYJKoZIhvcNAQkVMQYEBAEAAAAwXQYJKwYBBAGCNxEBMVAeTgBNAGkAYwByAG8AcwBvAGYAdAAgAFMAdAByAG8AbgBnACAAQwByAHkAcAB0AG8AZwByAGEAcABoAGkAYwAgAFAAcgBvAHYAaQBkAGUAcjBlBgkqhkiG9w0BCRQxWB5WAFAAdgBrAFQAbQBwADoANABjAGUANgAwADQAZABhAC0AMAA2ADgAMQAtADQANAAxADUALQBhADIAYwBhAC0ANQA3ADcAMwAwADgAZQA2AGQAOQBhAGMwggIOBgkqhkiG9w0BBwGgggH/BIIB+zCCAfcwggHzBgsqhkiG9w0BDAoBA6CCAcswggHHBgoqhkiG9w0BCRYBoIIBtwSCAbMwggGvMIIBXaADAgECAhAdka3aTQsIsUphgIXGUmeRMAkGBSsOAwIdBQAwFjEUMBIGA1UEAxMLUm9vdCBBZ2VuY3kwHhcNMTYwMTAxMDcwMDAwWhcNMTgwMTAxMDcwMDAwWjASMRAwDgYDVQQDEwdub2Rlc2RrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC5fhcxbJHxxBEIDzVOMc56s04U6k4GPY7yMR1m+rBGVRiAyV4RjY6U936dqXHCVD36ps2Q0Z+OeEgyCInkIyVeB1EwXcToOcyeS2YcUb0vRWZDouC3tuFdHwiK1Ed5iW/LksmXDotyV7kpqzaPhOFiMtBuMEwNJcPge9k17hRgRQIDAQABo0swSTBHBgNVHQEEQDA+gBAS5AktBh0dTwCNYSHcFmRjoRgwFjEUMBIGA1UEAxMLUm9vdCBBZ2VuY3mCEAY3bACqAGSKEc+41KpcNfQwCQYFKw4DAh0FAANBAHl2M97QbpzdnwO5HoRBsiEExOcLTNg+GKCr7HUsbzfvrUivw+JLL7qjHAIc5phnK+F5bQ8HKe0L9YXBSKl+fvwxFTATBgkqhkiG9w0BCRUxBgQEAQAAADA7MB8wBwYFKw4DAhoEFGVtyGMqiBd32fGpzlGZQoRM6UQwBBTI0YHFFqTS4Go8CoLgswn29EiuUQICB9A=', + thumbprint="cff2ab63c8c955aaf71989efa641b906558d9fb7", + thumbprint_algorithm="sha1", + 
data="MIIGMQIBAzCCBe0GCSqGSIb3DQEHAaCCBd4EggXaMIIF1jCCA8AGCSqGSIb3DQEHAaCCA7EEggOtMIIDqTCCA6UGCyqGSIb3DQEMCgECoIICtjCCArIwHAYKKoZIhvcNAQwBAzAOBAhyd3xCtln3iQICB9AEggKQhe5P10V9iV1BsDlwWT561Yu2hVq3JT8ae/ebx1ZR/gMApVereDKkS9Zg4vFyssusHebbK5pDpU8vfAqle0TM4m7wGsRj453ZorSPUfMpHvQnAOn+2pEpWdMThU7xvZ6DVpwhDOQk9166z+KnKdHGuJKh4haMT7Rw/6xZ1rsBt2423cwTrQVMQyACrEkianpuujubKltN99qRoFAxhQcnYE2KlYKw7lRcExq6mDSYAyk5xJZ1ZFdLj6MAryZroQit/0g5eyhoNEKwWbi8px5j71pRTf7yjN+deMGQKwbGl+3OgaL1UZ5fCjypbVL60kpIBxLZwIJ7p3jJ+q9pbq9zSdzshPYor5lxyUfXqaso/0/91ayNoBzg4hQGh618PhFI6RMGjwkzhB9xk74iweJ9HQyIHf8yx2RCSI22JuCMitPMWSGvOszhbNx3AEDLuiiAOHg391mprEtKZguOIr9LrJwem/YmcHbwyz5YAbZmiseKPkllfC7dafFfCFEkj6R2oegIsZo0pEKYisAXBqT0g+6/jGwuhlZcBo0f7UIZm88iA3MrJCjlXEgV5OcQdoWj+hq0lKEdnhtCKr03AIfukN6+4vjjarZeW1bs0swq0l3XFf5RHa11otshMS4mpewshB9iO9MuKWpRxuxeng4PlKZ/zuBqmPeUrjJ9454oK35Pq+dghfemt7AUpBH/KycDNIZgfdEWUZrRKBGnc519C+RTqxyt5hWL18nJk4LvSd3QKlJ1iyJxClhhb/NWEzPqNdyA5cxen+2T9bd/EqJ2KzRv5/BPVwTQkHH9W/TZElFyvFfOFIW2+03RKbVGw72Mr/0xKZ+awAnEfoU+SL/2Gj2m6PHkqFX2sOCi/tN9EA4xgdswEwYJKoZIhvcNAQkVMQYEBAEAAAAwXQYJKwYBBAGCNxEBMVAeTgBNAGkAYwByAG8AcwBvAGYAdAAgAFMAdAByAG8AbgBnACAAQwByAHkAcAB0AG8AZwByAGEAcABoAGkAYwAgAFAAcgBvAHYAaQBkAGUAcjBlBgkqhkiG9w0BCRQxWB5WAFAAdgBrAFQAbQBwADoANABjAGUANgAwADQAZABhAC0AMAA2ADgAMQAtADQANAAxADUALQBhADIAYwBhAC0ANQA3ADcAMwAwADgAZQA2AGQAOQBhAGMwggIOBgkqhkiG9w0BBwGgggH/BIIB+zCCAfcwggHzBgsqhkiG9w0BDAoBA6CCAcswggHHBgoqhkiG9w0BCRYBoIIBtwSCAbMwggGvMIIBXaADAgECAhAdka3aTQsIsUphgIXGUmeRMAkGBSsOAwIdBQAwFjEUMBIGA1UEAxMLUm9vdCBBZ2VuY3kwHhcNMTYwMTAxMDcwMDAwWhcNMTgwMTAxMDcwMDAwWjASMRAwDgYDVQQDEwdub2Rlc2RrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC5fhcxbJHxxBEIDzVOMc56s04U6k4GPY7yMR1m+rBGVRiAyV4RjY6U936dqXHCVD36ps2Q0Z+OeEgyCInkIyVeB1EwXcToOcyeS2YcUb0vRWZDouC3tuFdHwiK1Ed5iW/LksmXDotyV7kpqzaPhOFiMtBuMEwNJcPge9k17hRgRQIDAQABo0swSTBHBgNVHQEEQDA+gBAS5AktBh0dTwCNYSHcFmRjoRgwFjEUMBIGA1UEAxMLUm9vdCBBZ2VuY3mCEAY3bACqAGSKEc+41KpcNfQwCQYFKw4DAh0FAANBAHl2M97QbpzdnwO5HoRBsiEExOcLTNg+GKCr7HUsbzfvrUivw+JLL7qjHAIc5phnK+F5bQ8HKe0L9YXBSKl+fvwxFTATBgkqhkiG9w0BCRUxBgQEAQAAADA7MB8wBwYFKw4DAhoEFGVtyGMqiBd32fGpzlGZQoRM6UQwBBTI0YHFFqTS4Go8CoLgswn29EiuUQICB9A=", certificate_format=models.CertificateFormat.pfx, - password='nodesdk') + password="nodesdk", + ) response = client.certificate.add(certificate) assert response is None # Test List Certificates certs = client.certificate.list() - test_cert = [c for c in certs if c.thumbprint == 'cff2ab63c8c955aaf71989efa641b906558d9fb7'] + test_cert = [c for c in certs if c.thumbprint == "cff2ab63c8c955aaf71989efa641b906558d9fb7"] assert len(test_cert) == 1 # Test Get Certificate - cert = client.certificate.get('sha1', 'cff2ab63c8c955aaf71989efa641b906558d9fb7') + cert = client.certificate.get("sha1", "cff2ab63c8c955aaf71989efa641b906558d9fb7") assert isinstance(cert, models.Certificate) - assert cert.thumbprint == 'cff2ab63c8c955aaf71989efa641b906558d9fb7' - assert cert.thumbprint_algorithm == 'sha1' + assert cert.thumbprint == "cff2ab63c8c955aaf71989efa641b906558d9fb7" + assert cert.thumbprint_algorithm == "sha1" assert cert.delete_certificate_error is None # Test Cancel Certificate Delete - self.assertBatchError('CertificateStateActive', - client.certificate.cancel_deletion, - 'sha1', - 'cff2ab63c8c955aaf71989efa641b906558d9fb7') + self.assertBatchError( + "CertificateStateActive", + client.certificate.cancel_deletion, + "sha1", + "cff2ab63c8c955aaf71989efa641b906558d9fb7", + ) # Test Delete Certificate - response = client.certificate.delete( - 'sha1', - 'cff2ab63c8c955aaf71989efa641b906558d9fb7') + response = 
client.certificate.delete("sha1", "cff2ab63c8c955aaf71989efa641b906558d9fb7") assert response is None @pytest.mark.live_test_only("Can't use recordings until tests use the test proxy") @@ -167,22 +161,23 @@ def test_batch_create_pools(self, **kwargs): # Test Create Iaas Pool users = [ - models.UserAccount(name='test-user-1', password=BATCH_TEST_PASSWORD), - models.UserAccount(name='test-user-2', password=BATCH_TEST_PASSWORD, elevation_level=models.ElevationLevel.admin) + models.UserAccount(name="test-user-1", password=BATCH_TEST_PASSWORD), + models.UserAccount( + name="test-user-2", password=BATCH_TEST_PASSWORD, elevation_level=models.ElevationLevel.admin + ), ] test_iaas_pool = models.PoolAddParameter( - id=self.get_resource_name('batch_iaas_'), + id=self.get_resource_name("batch_iaas_"), vm_size=DEFAULT_VM_SIZE, virtual_machine_configuration=models.VirtualMachineConfiguration( image_reference=models.ImageReference( - publisher='MicrosoftWindowsServer', - offer='WindowsServer', - sku='2016-Datacenter-smalldisk' + publisher="MicrosoftWindowsServer", offer="WindowsServer", sku="2016-Datacenter-smalldisk" ), - node_agent_sku_id='batch.node.windows amd64', - windows_configuration=models.WindowsConfiguration(enable_automatic_updates=True)), + node_agent_sku_id="batch.node.windows amd64", + windows_configuration=models.WindowsConfiguration(enable_automatic_updates=True), + ), task_scheduling_policy=models.TaskSchedulingPolicy(node_fill_type=models.ComputeNodeFillType.pack), - user_accounts=users + user_accounts=users, ) response = client.pool.add(test_iaas_pool) assert response is None @@ -198,56 +193,56 @@ def test_batch_create_pools(self, **kwargs): assert counts[0].low_priority.total == 0 # Test Create Pool with Network Configuration - #TODO Public IP tests - network_config = models.NetworkConfiguration(subnet_id='/subscriptions/00000000-0000-0000-0000-000000000000' - '/resourceGroups/test' - '/providers/Microsoft.Network' - '/virtualNetworks/vnet1' - '/subnets/subnet1') + # TODO Public IP tests + network_config = models.NetworkConfiguration( + subnet_id="/subscriptions/00000000-0000-0000-0000-000000000000" + "/resourceGroups/test" + "/providers/Microsoft.Network" + "/virtualNetworks/vnet1" + "/subnets/subnet1" + ) test_network_pool = models.PoolAddParameter( - id=self.get_resource_name('batch_network_'), + id=self.get_resource_name("batch_network_"), vm_size=DEFAULT_VM_SIZE, network_configuration=network_config, virtual_machine_configuration=models.VirtualMachineConfiguration( - image_reference=models.ImageReference( - publisher='Canonical', - offer='UbuntuServer', - sku='18.04-LTS' - ), - node_agent_sku_id='batch.node.ubuntu 18.04') + image_reference=models.ImageReference(publisher="Canonical", offer="UbuntuServer", sku="18.04-LTS"), + node_agent_sku_id="batch.node.ubuntu 18.04", + ), + ) + self.assertBatchError( + "InvalidPropertyValue", client.pool.add, test_network_pool, models.PoolAddOptions(timeout=45) ) - self.assertBatchError('InvalidPropertyValue', client.pool.add, test_network_pool, models.PoolAddOptions(timeout=45)) test_image_pool = models.PoolAddParameter( - id=self.get_resource_name('batch_image_'), + id=self.get_resource_name("batch_image_"), vm_size=DEFAULT_VM_SIZE, virtual_machine_configuration=models.VirtualMachineConfiguration( image_reference=models.ImageReference( virtual_machine_image_id="/subscriptions/00000000-0000-0000-0000-000000000000" - "/resourceGroups/test" - "/providers/Microsoft.Compute" - "/gallery/FakeGallery" - "/images/FakeImage" - "/versions/version" + 
"/resourceGroups/test" + "/providers/Microsoft.Compute" + "/gallery/FakeGallery" + "/images/FakeImage" + "/versions/version" ), - node_agent_sku_id='batch.node.ubuntu 18.04' - ) + node_agent_sku_id="batch.node.ubuntu 18.04", + ), + ) + self.assertBatchError( + "InvalidPropertyValue", client.pool.add, test_image_pool, models.PoolAddOptions(timeout=45) ) - self.assertBatchError('InvalidPropertyValue', client.pool.add, test_image_pool, models.PoolAddOptions(timeout=45)) # Test Create Pool with Data Disk data_disk = models.DataDisk(lun=1, disk_size_gb=50) test_disk_pool = models.PoolAddParameter( - id=self.get_resource_name('batch_disk_'), + id=self.get_resource_name("batch_disk_"), vm_size=DEFAULT_VM_SIZE, virtual_machine_configuration=models.VirtualMachineConfiguration( - image_reference=models.ImageReference( - publisher='Canonical', - offer='UbuntuServer', - sku='18.04-LTS' - ), - node_agent_sku_id='batch.node.ubuntu 18.04', - data_disks=[data_disk]) + image_reference=models.ImageReference(publisher="Canonical", offer="UbuntuServer", sku="18.04-LTS"), + node_agent_sku_id="batch.node.ubuntu 18.04", + data_disks=[data_disk], + ), ) response = client.pool.add(test_disk_pool) assert response is None @@ -257,18 +252,15 @@ def test_batch_create_pools(self, **kwargs): # Test Create Pool with Azure Disk Encryption test_ade_pool = models.PoolAddParameter( - id=self.get_resource_name('batch_ade_'), + id=self.get_resource_name("batch_ade_"), vm_size=DEFAULT_VM_SIZE, virtual_machine_configuration=models.VirtualMachineConfiguration( - image_reference=models.ImageReference( - publisher='Canonical', - offer='UbuntuServer', - sku='18.04-LTS' - ), + image_reference=models.ImageReference(publisher="Canonical", offer="UbuntuServer", sku="18.04-LTS"), disk_encryption_configuration=models.DiskEncryptionConfiguration( targets=[models.DiskEncryptionTarget.temporary_disk] ), - node_agent_sku_id='batch.node.ubuntu 18.04') + node_agent_sku_id="batch.node.ubuntu 18.04", + ), ) response = client.pool.add(test_ade_pool) assert response is None @@ -278,22 +270,24 @@ def test_batch_create_pools(self, **kwargs): # Test Create Pool with Virtual Machine Configuration With Extensions test_vmextension_pool = models.PoolAddParameter( - id=self.get_resource_name('batch_vmextension_'), + id=self.get_resource_name("batch_vmextension_"), vm_size=DEFAULT_VM_SIZE, virtual_machine_configuration=models.VirtualMachineConfiguration( image_reference=models.ImageReference( - publisher='microsoftwindowsserver', - offer='windowsserver', - sku='2022-datacenter' + publisher="microsoftwindowsserver", offer="windowsserver", sku="2022-datacenter" ), - extensions=[models.VMExtension( - name="CustomExtension", - publisher="Microsoft.Azure.Geneva", - type="GenevaMonitoring", - type_handler_version="2.0", - auto_upgrade_minor_version=True, - enable_automatic_upgrade=True)], - node_agent_sku_id='batch.node.windows amd64') + extensions=[ + models.VMExtension( + name="CustomExtension", + publisher="Microsoft.Azure.Geneva", + type="GenevaMonitoring", + type_handler_version="2.0", + auto_upgrade_minor_version=True, + enable_automatic_upgrade=True, + ) + ], + node_agent_sku_id="batch.node.windows amd64", + ), ) response = client.pool.add(test_vmextension_pool) assert response is None @@ -302,24 +296,17 @@ def test_batch_create_pools(self, **kwargs): # Test Create Pool with Trusted Launch security type test_trustedlaunch_pool = models.PoolAddParameter( - id=self.get_resource_name('batch_trustedlaunch_'), - vm_size='standard_d2s_v3', + 
id=self.get_resource_name("batch_trustedlaunch_"), + vm_size="standard_d2s_v3", virtual_machine_configuration=models.VirtualMachineConfiguration( - image_reference=models.ImageReference( - publisher='Canonical', - offer='UbuntuServer', - sku='18.04-LTS' - ), - node_agent_sku_id='batch.node.ubuntu 18.04', + image_reference=models.ImageReference(publisher="Canonical", offer="UbuntuServer", sku="18.04-LTS"), + node_agent_sku_id="batch.node.ubuntu 18.04", security_profile=models.SecurityProfile( security_type=models.SecurityTypes.trusted_launch, encryption_at_host=True, - uefi_settings=models.UefiSettings( - secure_boot_enabled=True, - v_tpm_enabled=True - ) - ) - ) + uefi_settings=models.UefiSettings(secure_boot_enabled=True, v_tpm_enabled=True), + ), + ), ) response = client.pool.add(test_trustedlaunch_pool) assert response is None @@ -332,23 +319,19 @@ def test_batch_create_pools(self, **kwargs): # Test Create Pool with custom OS Disk configuration test_osdisk_pool = models.PoolAddParameter( - id=self.get_resource_name('batch_osdisk_'), + id=self.get_resource_name("batch_osdisk_"), vm_size=DEFAULT_VM_SIZE, virtual_machine_configuration=models.VirtualMachineConfiguration( - image_reference=models.ImageReference( - publisher='Canonical', - offer='UbuntuServer', - sku='18.04-LTS' - ), - node_agent_sku_id='batch.node.ubuntu 18.04', + image_reference=models.ImageReference(publisher="Canonical", offer="UbuntuServer", sku="18.04-LTS"), + node_agent_sku_id="batch.node.ubuntu 18.04", os_disk=models.OSDisk( caching=models.CachingType.read_only, managed_disk=models.ManagedDisk( storage_account_type=models.StorageAccountType.premium_lrs, ), - disk_size_gb=10 - ) - ) + disk_size_gb=10, + ), + ), ) response = client.pool.add(test_osdisk_pool) assert response is None @@ -357,21 +340,16 @@ def test_batch_create_pools(self, **kwargs): assert osdisk_pool.virtual_machine_configuration.os_disk.managed_disk.storage_account_type == "premium_lrs" assert osdisk_pool.virtual_machine_configuration.os_disk.disk_size_gb == 10 - # Test Create Pool with Upgrade Policy test_upgradepolicy_pool = models.PoolAddParameter( - id=self.get_resource_name('batch_upgradepolicy_'), - vm_size='standard_d2s_v3', + id=self.get_resource_name("batch_upgradepolicy_"), + vm_size="standard_d2s_v3", virtual_machine_configuration=models.VirtualMachineConfiguration( - image_reference=models.ImageReference( - publisher='Canonical', - offer='UbuntuServer', - sku='18.04-LTS' - ), + image_reference=models.ImageReference(publisher="Canonical", offer="UbuntuServer", sku="18.04-LTS"), node_placement_configuration=models.NodePlacementConfiguration( policy=models.NodePlacementPolicyType.zonal ), - node_agent_sku_id='batch.node.ubuntu 18.04' + node_agent_sku_id="batch.node.ubuntu 18.04", ), upgrade_policy=models.UpgradePolicy( mode=models.UpgradeMode.automatic, @@ -379,7 +357,7 @@ def test_batch_create_pools(self, **kwargs): disable_automatic_rollback=True, enable_automatic_os_upgrade=True, use_rolling_upgrade_policy=True, - os_rolling_upgrade_deferral=True + os_rolling_upgrade_deferral=True, ), rolling_upgrade_policy=models.RollingUpgradePolicy( enable_cross_zone_upgrade=True, @@ -388,11 +366,11 @@ def test_batch_create_pools(self, **kwargs): max_unhealthy_upgraded_instance_percent=20, pause_time_between_batches=datetime.timedelta(seconds=5), prioritize_unhealthy_instances=False, - rollback_failed_instances_on_policy_breach=False - ) - ) + rollback_failed_instances_on_policy_breach=False, + ), + ), ) - + response = 
client.pool.add(test_upgradepolicy_pool) assert response is None upgradepolicy_pool = client.pool.get(test_upgradepolicy_pool.id) @@ -414,10 +392,7 @@ def test_batch_create_pools(self, **kwargs): assert len(pools.current_page) == 1 # Test List Pools with Filter - options = models.PoolListOptions( - filter='startswith(id,\'batch_disk_\')', - select='id,state', - expand='stats') + options = models.PoolListOptions(filter="startswith(id,'batch_disk_')", select="id,state", expand="stats") pools = list(client.pool.list(options)) assert len(pools) == 1 @@ -428,25 +403,26 @@ def test_batch_create_pool_with_blobfuse_mount(self, **kwargs): client = self.create_sharedkey_client(**kwargs) # Test Create Iaas Pool test_iaas_pool = models.PoolAddParameter( - id=self.get_resource_name('batch_iaas_'), + id=self.get_resource_name("batch_iaas_"), vm_size=DEFAULT_VM_SIZE, virtual_machine_configuration=models.VirtualMachineConfiguration( image_reference=models.ImageReference( - publisher='MicrosoftWindowsServer', - offer='WindowsServer', - sku='2016-Datacenter-smalldisk' + publisher="MicrosoftWindowsServer", offer="WindowsServer", sku="2016-Datacenter-smalldisk" ), - node_agent_sku_id='batch.node.windows amd64', - windows_configuration=models.WindowsConfiguration(enable_automatic_updates=True)), + node_agent_sku_id="batch.node.windows amd64", + windows_configuration=models.WindowsConfiguration(enable_automatic_updates=True), + ), task_scheduling_policy=models.TaskSchedulingPolicy(node_fill_type=models.ComputeNodeFillType.pack), - mount_configuration=[models.MountConfiguration( - azure_blob_file_system_configuration=models.AzureBlobFileSystemConfiguration( - account_name='test', - container_name='https://test.blob.core.windows.net:443/test-container', - relative_mount_path='foo', - account_key='fake_account_key' + mount_configuration=[ + models.MountConfiguration( + azure_blob_file_system_configuration=models.AzureBlobFileSystemConfiguration( + account_name="test", + container_name="https://test.blob.core.windows.net:443/test-container", + relative_mount_path="foo", + account_key="fake_account_key", + ) ) - )] + ], ) response = client.pool.add(test_iaas_pool) assert response is None @@ -464,35 +440,32 @@ def test_batch_update_pools(self, **kwargs): client = self.create_sharedkey_client(**kwargs) # Test Create Paas Pool test_paas_pool = models.PoolAddParameter( - id=self.get_resource_name('batch_paas_'), + id=self.get_resource_name("batch_paas_"), vm_size=DEFAULT_VM_SIZE, - cloud_service_configuration=models.CloudServiceConfiguration( - os_family='5' - ), + cloud_service_configuration=models.CloudServiceConfiguration(os_family="5"), start_task=models.StartTask( - command_line="cmd.exe /c \"echo hello world\"", - resource_files=[models.ResourceFile(http_url='https://blobsource.com', file_path='filename.txt')], - environment_settings=[models.EnvironmentSetting(name='ENV_VAR', value='env_value')], + command_line='cmd.exe /c "echo hello world"', + resource_files=[models.ResourceFile(http_url="https://blobsource.com", file_path="filename.txt")], + environment_settings=[models.EnvironmentSetting(name="ENV_VAR", value="env_value")], user_identity=models.UserIdentity( - auto_user=models.AutoUserSpecification( - elevation_level=models.ElevationLevel.admin - ) - ) - ) + auto_user=models.AutoUserSpecification(elevation_level=models.ElevationLevel.admin) + ), + ), ) response = client.pool.add(test_paas_pool) assert response is None # Test Update Pool Parameters params = models.PoolUpdatePropertiesParameter( - 
certificate_references=[], - application_package_references=[], - metadata=[models.MetadataItem(name='foo', value='bar')]) + certificate_references=[], + application_package_references=[], + metadata=[models.MetadataItem(name="foo", value="bar")], + ) response = client.pool.update_properties(test_paas_pool.id, params) assert response is None # Test Patch Pool Parameters - params = models.PoolPatchParameter(metadata=[models.MetadataItem(name='foo2', value='bar2')]) + params = models.PoolPatchParameter(metadata=[models.MetadataItem(name="foo2", value="bar2")]) response = client.pool.patch(test_paas_pool.id, params) assert response is None @@ -506,14 +479,14 @@ def test_batch_update_pools(self, **kwargs): assert pool.id == test_paas_pool.id assert pool.state == models.PoolState.active assert pool.allocation_state == models.AllocationState.steady - assert pool.cloud_service_configuration.os_family == '5' + assert pool.cloud_service_configuration.os_family == "5" assert pool.vm_size == DEFAULT_VM_SIZE assert pool.start_task is None - assert pool.metadata[0].name == 'foo2' - assert pool.metadata[0].value == 'bar2' + assert pool.metadata[0].name == "foo2" + assert pool.metadata[0].value == "bar2" # Test Get Pool with OData Clauses - options = models.PoolGetOptions(select='id,state', expand='stats') + options = models.PoolGetOptions(select="id,state", expand="stats") pool = client.pool.get(test_paas_pool.id, options) assert isinstance(pool, models.CloudPool) assert pool.id == test_paas_pool.id @@ -531,24 +504,23 @@ def test_batch_update_pools(self, **kwargs): @PoolPreparer(location=AZURE_LOCATION) def test_batch_scale_pools(self, **kwargs): - time.sleep(10) # temporary fix for timeout issue - + time.sleep(10) # temporary fix for timeout issue + batch_pool = kwargs.pop("batch_pool") client = self.create_sharedkey_client(**kwargs) # Test Enable Autoscale interval = datetime.timedelta(minutes=6) response = client.pool.enable_auto_scale( - batch_pool.name, - auto_scale_formula='$TargetDedicatedNodes=2', - auto_scale_evaluation_interval=interval) + batch_pool.name, auto_scale_formula="$TargetDedicatedNodes=2", auto_scale_evaluation_interval=interval + ) assert response is None # Test Evaluate Autoscale - result = client.pool.evaluate_auto_scale(batch_pool.name, '$TargetDedicatedNodes=3') + result = client.pool.evaluate_auto_scale(batch_pool.name, "$TargetDedicatedNodes=3") assert isinstance(result, models.AutoScaleRun) - assert result.results == '$TargetDedicatedNodes=3;$TargetLowPriorityNodes=0;$NodeDeallocationOption=requeue' - + assert result.results == "$TargetDedicatedNodes=3;$TargetLowPriorityNodes=0;$NodeDeallocationOption=requeue" + # Test Disable Autoscale pool = client.pool.get(batch_pool.name) while self.is_live and pool.allocation_state != models.AllocationState.steady: @@ -584,21 +556,16 @@ def test_batch_scale_pools(self, **kwargs): def test_batch_job_schedules(self, **kwargs): client = self.create_sharedkey_client(**kwargs) # Test Create Job Schedule - schedule_id = self.get_resource_name('batch_schedule_') + schedule_id = self.get_resource_name("batch_schedule_") job_spec = models.JobSpecification( pool_info=models.PoolInformation(pool_id="pool_id"), constraints=models.JobConstraints(max_task_retry_count=2), - on_all_tasks_complete=models.OnAllTasksComplete.terminate_job + on_all_tasks_complete=models.OnAllTasksComplete.terminate_job, ) schedule = models.Schedule( - start_window=datetime.timedelta(hours=1), - recurrence_interval=datetime.timedelta(days=1) - ) - params = 
models.JobScheduleAddParameter( - id=schedule_id, - schedule=schedule, - job_specification=job_spec + start_window=datetime.timedelta(hours=1), recurrence_interval=datetime.timedelta(days=1) ) + params = models.JobScheduleAddParameter(id=schedule_id, schedule=schedule, job_specification=job_spec) response = client.job_schedule.add(params) assert response is None @@ -629,20 +596,14 @@ def test_batch_job_schedules(self, **kwargs): assert response is None # Test Update Job Schedule - job_spec = models.JobSpecification( - pool_info=models.PoolInformation(pool_id='pool_id') - ) - schedule = models.Schedule( - recurrence_interval=datetime.timedelta(hours=10) - ) + job_spec = models.JobSpecification(pool_info=models.PoolInformation(pool_id="pool_id")) + schedule = models.Schedule(recurrence_interval=datetime.timedelta(hours=10)) params = models.JobScheduleUpdateParameter(schedule=schedule, job_specification=job_spec) response = client.job_schedule.update(schedule_id, params) assert response is None # Test Patch Job Schedule - schedule = models.Schedule( - recurrence_interval=datetime.timedelta(hours=5) - ) + schedule = models.Schedule(recurrence_interval=datetime.timedelta(hours=5)) params = models.JobSchedulePatchParameter(schedule=schedule) response = client.job_schedule.patch(schedule_id, params) assert response is None @@ -674,26 +635,23 @@ def test_batch_network_configuration(self, **kwargs): models.NetworkSecurityGroupRule( priority=150, access=models.NetworkSecurityGroupRuleAccess.allow, - source_address_prefix='*' + source_address_prefix="*", ) - ] + ], ) ] ) ) virtual_machine_config = models.VirtualMachineConfiguration( node_agent_sku_id="batch.node.ubuntu 18.04", - image_reference=models.ImageReference( - publisher="Canonical", - offer="UbuntuServer", - sku="18.04-LTS") + image_reference=models.ImageReference(publisher="Canonical", offer="UbuntuServer", sku="18.04-LTS"), ) pool = models.PoolAddParameter( - id=self.get_resource_name('batch_network_'), + id=self.get_resource_name("batch_network_"), target_dedicated_nodes=1, vm_size=DEFAULT_VM_SIZE, virtual_machine_configuration=virtual_machine_config, - network_configuration=network_config + network_configuration=network_config, ) client.pool.add(pool) @@ -707,14 +665,14 @@ def test_batch_network_configuration(self, **kwargs): assert len(nodes) == 1 assert isinstance(nodes[0], models.ComputeNode) assert len(nodes[0].endpoint_configuration.inbound_endpoints) == 2 - assert nodes[0].endpoint_configuration.inbound_endpoints[0].name == 'TestEndpointConfig.0' - assert nodes[0].endpoint_configuration.inbound_endpoints[0].protocol.value == 'udp' + assert nodes[0].endpoint_configuration.inbound_endpoints[0].name == "TestEndpointConfig.0" + assert nodes[0].endpoint_configuration.inbound_endpoints[0].protocol.value == "udp" @pytest.mark.live_test_only("Can't use recordings until tests use the test proxy") @ResourceGroupPreparer(location=AZURE_LOCATION) @AccountPreparer(location=AZURE_LOCATION, batch_environment=BATCH_ENVIRONMENT) def test_batch_network_configuration_acceleratednetworking(self, **kwargs): - client = self.create_sharedkey_client(**kwargs) + client = self.create_sharedkey_client(**kwargs) # Test Network enableAcceleratedNetworking configuration network_config = models.NetworkConfiguration(enable_accelerated_networking=True) virtual_machine_config = models.VirtualMachineConfiguration( @@ -723,14 +681,15 @@ def test_batch_network_configuration_acceleratednetworking(self, **kwargs): publisher="MicrosoftWindowsServer", 
offer="WindowsServer", sku="2016-datacenter-smalldisk", - version="latest") + version="latest", + ), ) pool = models.PoolAddParameter( - id=self.get_resource_name('batch_network_acceleratednetworking_'), + id=self.get_resource_name("batch_network_acceleratednetworking_"), target_dedicated_nodes=2, vm_size=DEFAULT_VM_SIZE, virtual_machine_configuration=virtual_machine_config, - network_configuration=network_config + network_configuration=network_config, ) client.pool.add(pool) @@ -744,7 +703,7 @@ def test_batch_network_configuration_acceleratednetworking(self, **kwargs): @pytest.mark.live_test_only("Can't use recordings until tests use the test proxy") @ResourceGroupPreparer(location=AZURE_LOCATION) @AccountPreparer(location=AZURE_LOCATION, batch_environment=BATCH_ENVIRONMENT) - @PoolPreparer(location=AZURE_LOCATION, size=2, config='iaas') + @PoolPreparer(location=AZURE_LOCATION, size=2, config="iaas") def test_batch_compute_nodes(self, **kwargs): batch_pool = kwargs.pop("batch_pool") client = self.create_sharedkey_client(**kwargs) @@ -766,8 +725,9 @@ def test_batch_compute_nodes(self, **kwargs): # Test Upload Log config = models.UploadBatchServiceLogsConfiguration( - container_url = "https://computecontainer.blob.core.windows.net/", - start_time = datetime.datetime.utcnow() - datetime.timedelta(minutes=6)) + container_url="https://computecontainer.blob.core.windows.net/", + start_time=datetime.datetime.utcnow() - datetime.timedelta(minutes=6), + ) result = client.compute_node.upload_batch_service_logs(batch_pool.name, nodes[0].id, config) assert result is not None assert result.number_of_files_uploaded > 0 @@ -783,15 +743,18 @@ def test_batch_compute_nodes(self, **kwargs): # Test Reboot Node response = client.compute_node.reboot( - batch_pool.name, nodes[0].id, node_reboot_option=models.ComputeNodeRebootOption.terminate) + batch_pool.name, nodes[0].id, node_reboot_option=models.ComputeNodeRebootOption.terminate + ) assert response is None # Test Reimage Node - self.assertBatchError('OperationNotValidOnNode', - client.compute_node.reimage, - batch_pool.name, - nodes[1].id, - node_reimage_option=models.ComputeNodeReimageOption.terminate) + self.assertBatchError( + "OperationNotValidOnNode", + client.compute_node.reimage, + batch_pool.name, + nodes[1].id, + node_reimage_option=models.ComputeNodeReimageOption.terminate, + ) # Test Remove Nodes options = models.NodeRemoveParameter(node_list=[n.id for n in nodes]) @@ -812,14 +775,14 @@ def test_batch_compute_node_user(self, **kwargs): assert len(nodes) == 1 # Test Add User - user_name = 'BatchPythonSDKUser' + user_name = "BatchPythonSDKUser" nodes = list(client.compute_node.list(batch_pool.name)) user = models.ComputeNodeUser(name=user_name, password=BATCH_TEST_PASSWORD, is_admin=False) response = client.compute_node.add_user(batch_pool.name, nodes[0].id, user) assert response is None # Test Update User - user = models.NodeUpdateUserParameter(password='liilef#$DdRGSa_ewkjh') + user = models.NodeUpdateUserParameter(password="liilef#$DdRGSa_ewkjh") response = client.compute_node.update_user(batch_pool.name, nodes[0].id, user_name, user) assert response is None @@ -835,8 +798,8 @@ def test_batch_compute_node_user(self, **kwargs): @pytest.mark.live_test_only("Can't use recordings until tests use the test proxy") @ResourceGroupPreparer(location=AZURE_LOCATION) - @StorageAccountPreparer(name_prefix='batch4', location=AZURE_LOCATION) - @AccountPreparer(location=AZURE_LOCATION, batch_environment=BATCH_ENVIRONMENT, name_prefix='batch4') + 
@StorageAccountPreparer(name_prefix="batch4", location=AZURE_LOCATION) + @AccountPreparer(location=AZURE_LOCATION, batch_environment=BATCH_ENVIRONMENT, name_prefix="batch4") @PoolPreparer(size=1) @JobPreparer() def test_batch_files(self, **kwargs): @@ -849,7 +812,7 @@ def test_batch_files(self, **kwargs): nodes = list(client.compute_node.list(batch_pool.name)) assert len(nodes) == 1 node = nodes[0].id - task_id = 'test_task' + task_id = "test_task" task_param = models.TaskAddParameter(id=task_id, command_line='cmd /c "echo hello world"') response = client.task.add(batch_job.id, task_param) assert response is None @@ -864,10 +827,9 @@ def test_batch_files(self, **kwargs): assert len(only_files) >= 2 # Test File Properties from Compute Node - props = client.file.get_properties_from_compute_node( - batch_pool.name, node, only_files[0].name, raw=True) - assert 'Content-Length' in props.headers - assert 'Content-Type' in props.headers + props = client.file.get_properties_from_compute_node(batch_pool.name, node, only_files[0].name, raw=True) + assert "Content-Length" in props.headers + assert "Content-Type" in props.headers # Test Get File from Compute Node file_length = 0 @@ -875,7 +837,7 @@ def test_batch_files(self, **kwargs): response = client.file.get_from_compute_node(batch_pool.name, node, only_files[0].name) for data in response: file_length += len(data) - assert file_length == props.headers['Content-Length'] + assert file_length == props.headers["Content-Length"] # Test Delete File from Compute Node response = client.file.delete_from_compute_node(batch_pool.name, node, only_files[0].name) @@ -887,10 +849,9 @@ def test_batch_files(self, **kwargs): assert len(only_files) >= 1 # Test File Properties from Task - props = client.file.get_properties_from_task( - batch_job.id, task_id, only_files[0].name, raw=True) - assert 'Content-Length' in props.headers - assert 'Content-Type' in props.headers + props = client.file.get_properties_from_task(batch_job.id, task_id, only_files[0].name, raw=True) + assert "Content-Length" in props.headers + assert "Content-Type" in props.headers # Test Get File from Task file_length = 0 @@ -898,7 +859,7 @@ def test_batch_files(self, **kwargs): response = client.file.get_from_task(batch_job.id, task_id, only_files[0].name) for data in response: file_length += len(data) - assert file_length == props.headers['Content-Length'] + assert file_length == props.headers["Content-Length"] # Test Delete File from Task response = client.file.delete_from_task(batch_job.id, task_id, only_files[0].name) @@ -914,13 +875,20 @@ def test_batch_tasks(self, **kwargs): # Test Create Task with Auto Complete exit_conditions = models.ExitConditions( - exit_codes=[models.ExitCodeMapping(code=1, exit_options=models.ExitOptions(job_action=models.JobAction.terminate))], - exit_code_ranges=[models.ExitCodeRangeMapping(start=2, end=4, exit_options=models.ExitOptions(job_action=models.JobAction.disable))], - default=models.ExitOptions(job_action=models.JobAction.none)) + exit_codes=[ + models.ExitCodeMapping(code=1, exit_options=models.ExitOptions(job_action=models.JobAction.terminate)) + ], + exit_code_ranges=[ + models.ExitCodeRangeMapping( + start=2, end=4, exit_options=models.ExitOptions(job_action=models.JobAction.disable) + ) + ], + default=models.ExitOptions(job_action=models.JobAction.none), + ) task_param = models.TaskAddParameter( - id=self.get_resource_name('batch_task1_'), + id=self.get_resource_name("batch_task1_"), command_line='cmd /c "echo hello world"', - 
exit_conditions=exit_conditions + exit_conditions=exit_conditions, ) try: client.task.add(batch_job.id, task_param) @@ -942,21 +910,27 @@ def test_batch_tasks(self, **kwargs): file_pattern="../stdout.txt", destination=models.OutputFileDestination( container=models.OutputFileBlobContainerDestination( - container_url=container_url, path="taskLogs/output.txt")), + container_url=container_url, path="taskLogs/output.txt" + ) + ), upload_options=models.OutputFileUploadOptions( - upload_condition=models.OutputFileUploadCondition.task_completion)), + upload_condition=models.OutputFileUploadCondition.task_completion + ), + ), models.OutputFile( file_pattern="../stderr.txt", destination=models.OutputFileDestination( container=models.OutputFileBlobContainerDestination( - container_url=container_url, path="taskLogs/error.txt")), + container_url=container_url, path="taskLogs/error.txt" + ) + ), upload_options=models.OutputFileUploadOptions( - upload_condition=models.OutputFileUploadCondition.task_failure)), + upload_condition=models.OutputFileUploadCondition.task_failure + ), + ), ] task_param = models.TaskAddParameter( - id=self.get_resource_name('batch_task2_'), - command_line='cmd /c "echo hello world"', - output_files=outputs + id=self.get_resource_name("batch_task2_"), command_line='cmd /c "echo hello world"', output_files=outputs ) client.task.add(batch_job.id, task_param) task = client.task.get(batch_job.id, task_param.id) @@ -965,12 +939,12 @@ def test_batch_tasks(self, **kwargs): # Test Create Task with Auto User auto_user = models.AutoUserSpecification( - scope=models.AutoUserScope.task, - elevation_level=models.ElevationLevel.admin) + scope=models.AutoUserScope.task, elevation_level=models.ElevationLevel.admin + ) task_param = models.TaskAddParameter( - id=self.get_resource_name('batch_task3_'), + id=self.get_resource_name("batch_task3_"), command_line='cmd /c "echo hello world"', - user_identity=models.UserIdentity(auto_user=auto_user) + user_identity=models.UserIdentity(auto_user=auto_user), ) client.task.add(batch_job.id, task_param) task = client.task.get(batch_job.id, task_param.id) @@ -980,10 +954,9 @@ def test_batch_tasks(self, **kwargs): # Test Create Task with Token Settings task_param = models.TaskAddParameter( - id=self.get_resource_name('batch_task4_'), + id=self.get_resource_name("batch_task4_"), command_line='cmd /c "echo hello world"', - authentication_token_settings=models.AuthenticationTokenSettings( - access=[models.AccessScope.job]) + authentication_token_settings=models.AuthenticationTokenSettings(access=[models.AccessScope.job]), ) client.task.add(batch_job.id, task_param) task = client.task.get(batch_job.id, task_param.id) @@ -992,35 +965,38 @@ def test_batch_tasks(self, **kwargs): # Test Create Task with Container Settings task_param = models.TaskAddParameter( - id=self.get_resource_name('batch_task5_'), + id=self.get_resource_name("batch_task5_"), command_line='cmd /c "echo hello world"', container_settings=models.TaskContainerSettings( - image_name='windows_container:latest', - registry=models.ContainerRegistry(user_name='username', password='password')) + image_name="windows_container:latest", + registry=models.ContainerRegistry(user_name="username", password="password"), + ), ) client.task.add(batch_job.id, task_param) task = client.task.get(batch_job.id, task_param.id) assert isinstance(task, models.CloudTask) - assert task.container_settings.image_name == 'windows_container:latest' - assert task.container_settings.registry.user_name == 'username' + assert 
task.container_settings.image_name == "windows_container:latest" + assert task.container_settings.registry.user_name == "username" # Test Create Task with Run-As-User task_param = models.TaskAddParameter( - id=self.get_resource_name('batch_task6_'), + id=self.get_resource_name("batch_task6_"), command_line='cmd /c "echo hello world"', - user_identity=models.UserIdentity(user_name='task-user') + user_identity=models.UserIdentity(user_name="task-user"), ) client.task.add(batch_job.id, task_param) task = client.task.get(batch_job.id, task_param.id) assert isinstance(task, models.CloudTask) - assert task.user_identity.user_name == 'task-user' + assert task.user_identity.user_name == "task-user" # Test Add Task Collection tasks = [] for i in range(7, 10): - tasks.append(models.TaskAddParameter( - id=self.get_resource_name('batch_task{}_'.format(i)), - command_line='cmd /c "echo hello world"')) + tasks.append( + models.TaskAddParameter( + id=self.get_resource_name("batch_task{}_".format(i)), command_line='cmd /c "echo hello world"' + ) + ) result = client.task.add_collection(batch_job.id, tasks) assert isinstance(result, models.TaskAddCollectionResult) assert len(result.value) == 3 @@ -1050,8 +1026,8 @@ def test_batch_tasks(self, **kwargs): # Test Update Task response = client.task.update( - batch_job.id, task_param.id, - constraints=models.TaskConstraints(max_task_retry_count=1)) + batch_job.id, task_param.id, constraints=models.TaskConstraints(max_task_retry_count=1) + ) assert response is None # Test Get Subtasks @@ -1071,24 +1047,15 @@ def test_batch_tasks(self, **kwargs): for i in range(10000): resource_file = models.ResourceFile( http_url="https://mystorageaccount.blob.core.windows.net/files/resourceFile{}".format(str(i)), - file_path="resourceFile{}".format(str(i))) + file_path="resourceFile{}".format(str(i)), + ) resource_files.append(resource_file) - task = models.TaskAddParameter( - id=task_id, - command_line="sleep 1", - resource_files=resource_files) + task = models.TaskAddParameter(id=task_id, command_line="sleep 1", resource_files=resource_files) tasks_to_add.append(task) + self.assertCreateTasksError("RequestBodyTooLarge", client.task.add_collection, batch_job.id, tasks_to_add) self.assertCreateTasksError( - "RequestBodyTooLarge", - client.task.add_collection, - batch_job.id, - tasks_to_add) - self.assertCreateTasksError( - "RequestBodyTooLarge", - client.task.add_collection, - batch_job.id, - tasks_to_add, - threads=3) + "RequestBodyTooLarge", client.task.add_collection, batch_job.id, tasks_to_add, threads=3 + ) # Test Bulk Add Task Success task_id = "mytask" @@ -1097,13 +1064,11 @@ def test_batch_tasks(self, **kwargs): for i in range(100): resource_file = models.ResourceFile( http_url="https://mystorageaccount.blob.core.windows.net/files/resourceFile" + str(i), - file_path="resourceFile"+str(i)) + file_path="resourceFile" + str(i), + ) resource_files.append(resource_file) for i in range(733): - task = models.TaskAddParameter( - id=task_id + str(i), - command_line="sleep 1", - resource_files=resource_files) + task = models.TaskAddParameter(id=task_id + str(i), command_line="sleep 1", resource_files=resource_files) tasks_to_add.append(task) result = client.task.add_collection(batch_job.id, tasks_to_add) assert isinstance(result, models.TaskAddCollectionResult) @@ -1120,21 +1085,16 @@ def test_batch_jobs(self, **kwargs): auto_pool = models.AutoPoolSpecification( pool_lifetime_option=models.PoolLifetimeOption.job, pool=models.PoolSpecification( - vm_size=DEFAULT_VM_SIZE, - 
cloud_service_configuration=models.CloudServiceConfiguration( - os_family='5' - ) - ) + vm_size=DEFAULT_VM_SIZE, cloud_service_configuration=models.CloudServiceConfiguration(os_family="5") + ), ) - job_prep = models.JobPreparationTask(command_line="cmd /c \"echo hello world\"") - job_release = models.JobReleaseTask(command_line="cmd /c \"echo goodbye world\"") + job_prep = models.JobPreparationTask(command_line='cmd /c "echo hello world"') + job_release = models.JobReleaseTask(command_line='cmd /c "echo goodbye world"') job_param = models.JobAddParameter( - id=self.get_resource_name('batch_job1_'), - pool_info=models.PoolInformation( - auto_pool_specification=auto_pool - ), + id=self.get_resource_name("batch_job1_"), + pool_info=models.PoolInformation(auto_pool_specification=auto_pool), job_preparation_task=job_prep, - job_release_task=job_release + job_release_task=job_release, ) response = client.job.add(job_param) assert response is None @@ -1142,11 +1102,7 @@ def test_batch_jobs(self, **kwargs): # Test Update Job constraints = models.JobConstraints(max_task_retry_count=3) options = models.JobUpdateParameter( - priority=500, - constraints=constraints, - pool_info=models.PoolInformation( - auto_pool_specification=auto_pool - ) + priority=500, constraints=constraints, pool_info=models.PoolInformation(auto_pool_specification=auto_pool) ) response = client.job.update(job_param.id, options) assert response is None @@ -1164,12 +1120,10 @@ def test_batch_jobs(self, **kwargs): # Test Create Job with Auto Complete job_auto_param = models.JobAddParameter( - id=self.get_resource_name('batch_job2_'), + id=self.get_resource_name("batch_job2_"), on_all_tasks_complete=models.OnAllTasksComplete.terminate_job, on_task_failure=models.OnTaskFailure.perform_exit_options_job_action, - pool_info=models.PoolInformation( - auto_pool_specification=auto_pool - ) + pool_info=models.PoolInformation(auto_pool_specification=auto_pool), ) response = client.job.add(job_auto_param) assert response is None diff --git a/sdk/batch/azure-batch/tsp-location.yaml b/sdk/batch/azure-batch/tsp-location.yaml new file mode 100644 index 000000000000..aa53731faad3 --- /dev/null +++ b/sdk/batch/azure-batch/tsp-location.yaml @@ -0,0 +1,4 @@ +directory: specification/batch/Azure.Batch +commit: e8136c11848f05e79597bab310539c506b4af9df +repo: test-repo-billy/azure-rest-api-specs +additionalDirectories: