# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
[docs]class AccountListPoolNodeCountsOptions(Model):
"""Additional parameters for list_pool_node_counts operation.
:param filter: An OData $filter clause. For more information on
constructing this filter, see
https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch.
:type filter: str
:param max_results: The maximum number of items to return in the response.
Default value: 10 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'filter': {'key': '', 'type': 'str'},
'max_results': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, filter: str=None, max_results: int=10, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(AccountListPoolNodeCountsOptions, self).__init__(**kwargs)
self.filter = filter
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class AccountListSupportedImagesOptions(Model):
"""Additional parameters for list_supported_images operation.
:param filter: An OData $filter clause. For more information on
constructing this filter, see
https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images.
:type filter: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 results will be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'filter': {'key': '', 'type': 'str'},
'max_results': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(AccountListSupportedImagesOptions, self).__init__(**kwargs)
self.filter = filter
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ApplicationGetOptions(Model):
"""Additional parameters for get operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(ApplicationGetOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ApplicationListOptions(Model):
"""Additional parameters for list operation.
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 applications can be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'max_results': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(ApplicationListOptions, self).__init__(**kwargs)
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ApplicationPackageReference(Model):
"""A reference to an Package to be deployed to Compute Nodes.
All required parameters must be populated in order to send to Azure.
:param application_id: Required. The ID of the application to deploy.
:type application_id: str
:param version: The version of the application to deploy. If omitted, the
default version is deployed. If this is omitted on a Pool, and no default
version is specified for this application, the request fails with the
error code InvalidApplicationPackageReferences and HTTP status code 409.
If this is omitted on a Task, and no default version is specified for this
application, the Task fails with a pre-processing error.
:type version: str
"""
_validation = {
'application_id': {'required': True},
}
_attribute_map = {
'application_id': {'key': 'applicationId', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(self, *, application_id: str, version: str=None, **kwargs) -> None:
super(ApplicationPackageReference, self).__init__(**kwargs)
self.application_id = application_id
self.version = version
[docs]class ApplicationSummary(Model):
"""Contains information about an application in an Azure Batch Account.
All required parameters must be populated in order to send to Azure.
:param id: Required. A string that uniquely identifies the application
within the Account.
:type id: str
:param display_name: Required. The display name for the application.
:type display_name: str
:param versions: Required. The list of available versions of the
application.
:type versions: list[str]
"""
_validation = {
'id': {'required': True},
'display_name': {'required': True},
'versions': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'versions': {'key': 'versions', 'type': '[str]'},
}
def __init__(self, *, id: str, display_name: str, versions, **kwargs) -> None:
super(ApplicationSummary, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.versions = versions
[docs]class AuthenticationTokenSettings(Model):
"""The settings for an authentication token that the Task can use to perform
Batch service operations.
:param access: The Batch resources to which the token grants access. The
authentication token grants access to a limited set of Batch service
operations. Currently the only supported value for the access property is
'job', which grants access to all operations related to the Job which
contains the Task.
:type access: list[str or ~azure.batch.models.AccessScope]
"""
_attribute_map = {
'access': {'key': 'access', 'type': '[AccessScope]'},
}
def __init__(self, *, access=None, **kwargs) -> None:
super(AuthenticationTokenSettings, self).__init__(**kwargs)
self.access = access
[docs]class AutoPoolSpecification(Model):
"""Specifies characteristics for a temporary 'auto pool'. The Batch service
will create this auto Pool when the Job is submitted.
All required parameters must be populated in order to send to Azure.
:param auto_pool_id_prefix: A prefix to be added to the unique identifier
when a Pool is automatically created. The Batch service assigns each auto
Pool a unique identifier on creation. To distinguish between Pools created
for different purposes, you can specify this element to add a prefix to
the ID that is assigned. The prefix can be up to 20 characters long.
:type auto_pool_id_prefix: str
:param pool_lifetime_option: Required. The minimum lifetime of created
auto Pools, and how multiple Jobs on a schedule are assigned to Pools.
Possible values include: 'jobSchedule', 'job'
:type pool_lifetime_option: str or ~azure.batch.models.PoolLifetimeOption
:param keep_alive: Whether to keep an auto Pool alive after its lifetime
expires. If false, the Batch service deletes the Pool once its lifetime
(as determined by the poolLifetimeOption setting) expires; that is, when
the Job or Job Schedule completes. If true, the Batch service does not
delete the Pool automatically. It is up to the user to delete auto Pools
created with this option.
:type keep_alive: bool
:param pool: The Pool specification for the auto Pool.
:type pool: ~azure.batch.models.PoolSpecification
"""
_validation = {
'pool_lifetime_option': {'required': True},
}
_attribute_map = {
'auto_pool_id_prefix': {'key': 'autoPoolIdPrefix', 'type': 'str'},
'pool_lifetime_option': {'key': 'poolLifetimeOption', 'type': 'PoolLifetimeOption'},
'keep_alive': {'key': 'keepAlive', 'type': 'bool'},
'pool': {'key': 'pool', 'type': 'PoolSpecification'},
}
def __init__(self, *, pool_lifetime_option, auto_pool_id_prefix: str=None, keep_alive: bool=None, pool=None, **kwargs) -> None:
super(AutoPoolSpecification, self).__init__(**kwargs)
self.auto_pool_id_prefix = auto_pool_id_prefix
self.pool_lifetime_option = pool_lifetime_option
self.keep_alive = keep_alive
self.pool = pool
[docs]class AutoScaleRun(Model):
"""The results and errors from an execution of a Pool autoscale formula.
All required parameters must be populated in order to send to Azure.
:param timestamp: Required. The time at which the autoscale formula was
last evaluated.
:type timestamp: datetime
:param results: The final values of all variables used in the evaluation
of the autoscale formula. Each variable value is returned in the form
$variable=value, and variables are separated by semicolons.
:type results: str
:param error: Details of the error encountered evaluating the autoscale
formula on the Pool, if the evaluation was unsuccessful.
:type error: ~azure.batch.models.AutoScaleRunError
"""
_validation = {
'timestamp': {'required': True},
}
_attribute_map = {
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'results': {'key': 'results', 'type': 'str'},
'error': {'key': 'error', 'type': 'AutoScaleRunError'},
}
def __init__(self, *, timestamp, results: str=None, error=None, **kwargs) -> None:
super(AutoScaleRun, self).__init__(**kwargs)
self.timestamp = timestamp
self.results = results
self.error = error
[docs]class AutoScaleRunError(Model):
"""An error that occurred when executing or evaluating a Pool autoscale
formula.
:param code: An identifier for the autoscale error. Codes are invariant
and are intended to be consumed programmatically.
:type code: str
:param message: A message describing the autoscale error, intended to be
suitable for display in a user interface.
:type message: str
:param values: A list of additional error details related to the autoscale
error.
:type values: list[~azure.batch.models.NameValuePair]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'values': {'key': 'values', 'type': '[NameValuePair]'},
}
def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None:
super(AutoScaleRunError, self).__init__(**kwargs)
self.code = code
self.message = message
self.values = values
[docs]class AutoUserSpecification(Model):
"""Specifies the parameters for the auto user that runs a Task on the Batch
service.
:param scope: The scope for the auto user. The default value is pool. If
the pool is running Windows a value of Task should be specified if
stricter isolation between tasks is required. For example, if the task
mutates the registry in a way which could impact other tasks, or if
certificates have been specified on the pool which should not be
accessible by normal tasks but should be accessible by StartTasks.
Possible values include: 'task', 'pool'
:type scope: str or ~azure.batch.models.AutoUserScope
:param elevation_level: The elevation level of the auto user. The default
value is nonAdmin. Possible values include: 'nonAdmin', 'admin'
:type elevation_level: str or ~azure.batch.models.ElevationLevel
"""
_attribute_map = {
'scope': {'key': 'scope', 'type': 'AutoUserScope'},
'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'},
}
def __init__(self, *, scope=None, elevation_level=None, **kwargs) -> None:
super(AutoUserSpecification, self).__init__(**kwargs)
self.scope = scope
self.elevation_level = elevation_level
[docs]class AzureBlobFileSystemConfiguration(Model):
"""Information used to connect to an Azure Storage Container using Blobfuse.
All required parameters must be populated in order to send to Azure.
:param account_name: Required. The Azure Storage Account name.
:type account_name: str
:param container_name: Required. The Azure Blob Storage Container name.
:type container_name: str
:param account_key: The Azure Storage Account key. This property is
mutually exclusive with sasKey and one must be specified.
:type account_key: str
:param sas_key: The Azure Storage SAS token. This property is mutually
exclusive with accountKey and one must be specified.
:type sas_key: str
:param blobfuse_options: Additional command line options to pass to the
mount command. These are 'net use' options in Windows and 'mount' options
in Linux.
:type blobfuse_options: str
:param relative_mount_path: Required. The relative path on the compute
node where the file system will be mounted. All file systems are mounted
relative to the Batch mounts directory, accessible via the
AZ_BATCH_NODE_MOUNTS_DIR environment variable.
:type relative_mount_path: str
"""
_validation = {
'account_name': {'required': True},
'container_name': {'required': True},
'relative_mount_path': {'required': True},
}
_attribute_map = {
'account_name': {'key': 'accountName', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
'account_key': {'key': 'accountKey', 'type': 'str'},
'sas_key': {'key': 'sasKey', 'type': 'str'},
'blobfuse_options': {'key': 'blobfuseOptions', 'type': 'str'},
'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'},
}
def __init__(self, *, account_name: str, container_name: str, relative_mount_path: str, account_key: str=None, sas_key: str=None, blobfuse_options: str=None, **kwargs) -> None:
super(AzureBlobFileSystemConfiguration, self).__init__(**kwargs)
self.account_name = account_name
self.container_name = container_name
self.account_key = account_key
self.sas_key = sas_key
self.blobfuse_options = blobfuse_options
self.relative_mount_path = relative_mount_path
[docs]class AzureFileShareConfiguration(Model):
"""Information used to connect to an Azure Fileshare.
All required parameters must be populated in order to send to Azure.
:param account_name: Required. The Azure Storage account name.
:type account_name: str
:param azure_file_url: Required. The Azure Files URL. This is of the form
'https://{account}.file.core.windows.net/'.
:type azure_file_url: str
:param account_key: Required. The Azure Storage account key.
:type account_key: str
:param relative_mount_path: Required. The relative path on the compute
node where the file system will be mounted. All file systems are mounted
relative to the Batch mounts directory, accessible via the
AZ_BATCH_NODE_MOUNTS_DIR environment variable.
:type relative_mount_path: str
:param mount_options: Additional command line options to pass to the mount
command. These are 'net use' options in Windows and 'mount' options in
Linux.
:type mount_options: str
"""
_validation = {
'account_name': {'required': True},
'azure_file_url': {'required': True},
'account_key': {'required': True},
'relative_mount_path': {'required': True},
}
_attribute_map = {
'account_name': {'key': 'accountName', 'type': 'str'},
'azure_file_url': {'key': 'azureFileUrl', 'type': 'str'},
'account_key': {'key': 'accountKey', 'type': 'str'},
'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'},
'mount_options': {'key': 'mountOptions', 'type': 'str'},
}
def __init__(self, *, account_name: str, azure_file_url: str, account_key: str, relative_mount_path: str, mount_options: str=None, **kwargs) -> None:
super(AzureFileShareConfiguration, self).__init__(**kwargs)
self.account_name = account_name
self.azure_file_url = azure_file_url
self.account_key = account_key
self.relative_mount_path = relative_mount_path
self.mount_options = mount_options
[docs]class BatchError(Model):
"""An error response received from the Azure Batch service.
:param code: An identifier for the error. Codes are invariant and are
intended to be consumed programmatically.
:type code: str
:param message: A message describing the error, intended to be suitable
for display in a user interface.
:type message: ~azure.batch.models.ErrorMessage
:param values: A collection of key-value pairs containing additional
details about the error.
:type values: list[~azure.batch.models.BatchErrorDetail]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'ErrorMessage'},
'values': {'key': 'values', 'type': '[BatchErrorDetail]'},
}
def __init__(self, *, code: str=None, message=None, values=None, **kwargs) -> None:
super(BatchError, self).__init__(**kwargs)
self.code = code
self.message = message
self.values = values
[docs]class BatchErrorException(HttpOperationError):
"""Server responsed with exception of type: 'BatchError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(BatchErrorException, self).__init__(deserialize, response, 'BatchError', *args)
[docs]class BatchErrorDetail(Model):
"""An item of additional information included in an Azure Batch error
response.
:param key: An identifier specifying the meaning of the Value property.
:type key: str
:param value: The additional information included with the error response.
:type value: str
"""
_attribute_map = {
'key': {'key': 'key', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(self, *, key: str=None, value: str=None, **kwargs) -> None:
super(BatchErrorDetail, self).__init__(**kwargs)
self.key = key
self.value = value
[docs]class Certificate(Model):
"""A Certificate that can be installed on Compute Nodes and can be used to
authenticate operations on the machine.
:param thumbprint: The X.509 thumbprint of the Certificate. This is a
sequence of up to 40 hex digits.
:type thumbprint: str
:param thumbprint_algorithm: The algorithm used to derive the thumbprint.
:type thumbprint_algorithm: str
:param url: The URL of the Certificate.
:type url: str
:param state: The current state of the Certificate. Possible values
include: 'active', 'deleting', 'deleteFailed'
:type state: str or ~azure.batch.models.CertificateState
:param state_transition_time: The time at which the Certificate entered
its current state.
:type state_transition_time: datetime
:param previous_state: The previous state of the Certificate. This
property is not set if the Certificate is in its initial active state.
Possible values include: 'active', 'deleting', 'deleteFailed'
:type previous_state: str or ~azure.batch.models.CertificateState
:param previous_state_transition_time: The time at which the Certificate
entered its previous state. This property is not set if the Certificate is
in its initial Active state.
:type previous_state_transition_time: datetime
:param public_data: The public part of the Certificate as a base-64
encoded .cer file.
:type public_data: str
:param delete_certificate_error: The error that occurred on the last
attempt to delete this Certificate. This property is set only if the
Certificate is in the DeleteFailed state.
:type delete_certificate_error: ~azure.batch.models.DeleteCertificateError
"""
_attribute_map = {
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'state': {'key': 'state', 'type': 'CertificateState'},
'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
'previous_state': {'key': 'previousState', 'type': 'CertificateState'},
'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
'public_data': {'key': 'publicData', 'type': 'str'},
'delete_certificate_error': {'key': 'deleteCertificateError', 'type': 'DeleteCertificateError'},
}
def __init__(self, *, thumbprint: str=None, thumbprint_algorithm: str=None, url: str=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, public_data: str=None, delete_certificate_error=None, **kwargs) -> None:
super(Certificate, self).__init__(**kwargs)
self.thumbprint = thumbprint
self.thumbprint_algorithm = thumbprint_algorithm
self.url = url
self.state = state
self.state_transition_time = state_transition_time
self.previous_state = previous_state
self.previous_state_transition_time = previous_state_transition_time
self.public_data = public_data
self.delete_certificate_error = delete_certificate_error
[docs]class CertificateAddOptions(Model):
"""Additional parameters for add operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(CertificateAddOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class CertificateAddParameter(Model):
"""A Certificate that can be installed on Compute Nodes and can be used to
authenticate operations on the machine.
All required parameters must be populated in order to send to Azure.
:param thumbprint: Required. The X.509 thumbprint of the Certificate. This
is a sequence of up to 40 hex digits (it may include spaces but these are
removed).
:type thumbprint: str
:param thumbprint_algorithm: Required. The algorithm used to derive the
thumbprint. This must be sha1.
:type thumbprint_algorithm: str
:param data: Required. The base64-encoded contents of the Certificate. The
maximum size is 10KB.
:type data: str
:param certificate_format: The format of the Certificate data. Possible
values include: 'pfx', 'cer'
:type certificate_format: str or ~azure.batch.models.CertificateFormat
:param password: The password to access the Certificate's private key.
This must be omitted if the Certificate format is cer.
:type password: str
"""
_validation = {
'thumbprint': {'required': True},
'thumbprint_algorithm': {'required': True},
'data': {'required': True},
}
_attribute_map = {
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'},
'data': {'key': 'data', 'type': 'str'},
'certificate_format': {'key': 'certificateFormat', 'type': 'CertificateFormat'},
'password': {'key': 'password', 'type': 'str'},
}
def __init__(self, *, thumbprint: str, thumbprint_algorithm: str, data: str, certificate_format=None, password: str=None, **kwargs) -> None:
super(CertificateAddParameter, self).__init__(**kwargs)
self.thumbprint = thumbprint
self.thumbprint_algorithm = thumbprint_algorithm
self.data = data
self.certificate_format = certificate_format
self.password = password
[docs]class CertificateCancelDeletionOptions(Model):
"""Additional parameters for cancel_deletion operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(CertificateCancelDeletionOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class CertificateDeleteOptions(Model):
"""Additional parameters for delete operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(CertificateDeleteOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class CertificateGetOptions(Model):
"""Additional parameters for get operation.
:param select: An OData $select clause.
:type select: str
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'select': {'key': '', 'type': 'str'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(CertificateGetOptions, self).__init__(**kwargs)
self.select = select
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class CertificateListOptions(Model):
"""Additional parameters for list operation.
:param filter: An OData $filter clause. For more information on
constructing this filter, see
https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates.
:type filter: str
:param select: An OData $select clause.
:type select: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 Certificates can be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'filter': {'key': '', 'type': 'str'},
'select': {'key': '', 'type': 'str'},
'max_results': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(CertificateListOptions, self).__init__(**kwargs)
self.filter = filter
self.select = select
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class CertificateReference(Model):
"""A reference to a Certificate to be installed on Compute Nodes in a Pool.
All required parameters must be populated in order to send to Azure.
:param thumbprint: Required. The thumbprint of the Certificate.
:type thumbprint: str
:param thumbprint_algorithm: Required. The algorithm with which the
thumbprint is associated. This must be sha1.
:type thumbprint_algorithm: str
:param store_location: The location of the Certificate store on the
Compute Node into which to install the Certificate. The default value is
currentuser. This property is applicable only for Pools configured with
Windows Compute Nodes (that is, created with cloudServiceConfiguration, or
with virtualMachineConfiguration using a Windows Image reference). For
Linux Compute Nodes, the Certificates are stored in a directory inside the
Task working directory and an environment variable
AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this
location. For Certificates with visibility of 'remoteUser', a 'certs'
directory is created in the user's home directory (e.g.,
/home/{user-name}/certs) and Certificates are placed in that directory.
Possible values include: 'currentUser', 'localMachine'
:type store_location: str or ~azure.batch.models.CertificateStoreLocation
:param store_name: The name of the Certificate store on the Compute Node
into which to install the Certificate. This property is applicable only
for Pools configured with Windows Compute Nodes (that is, created with
cloudServiceConfiguration, or with virtualMachineConfiguration using a
Windows Image reference). Common store names include: My, Root, CA, Trust,
Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but
any custom store name can also be used. The default value is My.
:type store_name: str
:param visibility: Which user Accounts on the Compute Node should have
access to the private data of the Certificate. You can specify more than
one visibility in this collection. The default is all Accounts.
:type visibility: list[str or ~azure.batch.models.CertificateVisibility]
"""
_validation = {
'thumbprint': {'required': True},
'thumbprint_algorithm': {'required': True},
}
_attribute_map = {
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'},
'store_location': {'key': 'storeLocation', 'type': 'CertificateStoreLocation'},
'store_name': {'key': 'storeName', 'type': 'str'},
'visibility': {'key': 'visibility', 'type': '[CertificateVisibility]'},
}
def __init__(self, *, thumbprint: str, thumbprint_algorithm: str, store_location=None, store_name: str=None, visibility=None, **kwargs) -> None:
super(CertificateReference, self).__init__(**kwargs)
self.thumbprint = thumbprint
self.thumbprint_algorithm = thumbprint_algorithm
self.store_location = store_location
self.store_name = store_name
self.visibility = visibility
[docs]class CIFSMountConfiguration(Model):
"""Information used to connect to a CIFS file system.
All required parameters must be populated in order to send to Azure.
:param username: Required. The user to use for authentication against the
CIFS file system.
:type username: str
:param source: Required. The URI of the file system to mount.
:type source: str
:param relative_mount_path: Required. The relative path on the compute
node where the file system will be mounted. All file systems are mounted
relative to the Batch mounts directory, accessible via the
AZ_BATCH_NODE_MOUNTS_DIR environment variable.
:type relative_mount_path: str
:param mount_options: Additional command line options to pass to the mount
command. These are 'net use' options in Windows and 'mount' options in
Linux.
:type mount_options: str
:param password: Required. The password to use for authentication against
the CIFS file system.
:type password: str
"""
_validation = {
'username': {'required': True},
'source': {'required': True},
'relative_mount_path': {'required': True},
'password': {'required': True},
}
_attribute_map = {
'username': {'key': 'username', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'},
'mount_options': {'key': 'mountOptions', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
}
def __init__(self, *, username: str, source: str, relative_mount_path: str, password: str, mount_options: str=None, **kwargs) -> None:
super(CIFSMountConfiguration, self).__init__(**kwargs)
self.username = username
self.source = source
self.relative_mount_path = relative_mount_path
self.mount_options = mount_options
self.password = password
class CloudError(Model):
"""CloudError.
"""
_attribute_map = {
}
[docs]class CloudJob(Model):
"""An Azure Batch Job.
:param id: A string that uniquely identifies the Job within the Account.
The ID is case-preserving and case-insensitive (that is, you may not have
two IDs within an Account that differ only by case).
:type id: str
:param display_name: The display name for the Job.
:type display_name: str
:param uses_task_dependencies: Whether Tasks in the Job can define
dependencies on each other. The default is false.
:type uses_task_dependencies: bool
:param url: The URL of the Job.
:type url: str
:param e_tag: The ETag of the Job. This is an opaque string. You can use
it to detect whether the Job has changed between requests. In particular,
you can be pass the ETag when updating a Job to specify that your changes
should take effect only if nobody else has modified the Job in the
meantime.
:type e_tag: str
:param last_modified: The last modified time of the Job. This is the last
time at which the Job level data, such as the Job state or priority,
changed. It does not factor in task-level changes such as adding new Tasks
or Tasks changing state.
:type last_modified: datetime
:param creation_time: The creation time of the Job.
:type creation_time: datetime
:param state: The current state of the Job. Possible values include:
'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed',
'deleting'
:type state: str or ~azure.batch.models.JobState
:param state_transition_time: The time at which the Job entered its
current state.
:type state_transition_time: datetime
:param previous_state: The previous state of the Job. This property is not
set if the Job is in its initial Active state. Possible values include:
'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed',
'deleting'
:type previous_state: str or ~azure.batch.models.JobState
:param previous_state_transition_time: The time at which the Job entered
its previous state. This property is not set if the Job is in its initial
Active state.
:type previous_state_transition_time: datetime
:param priority: The priority of the Job. Priority values can range from
-1000 to 1000, with -1000 being the lowest priority and 1000 being the
highest priority. The default value is 0.
:type priority: int
:param constraints: The execution constraints for the Job.
:type constraints: ~azure.batch.models.JobConstraints
:param job_manager_task: Details of a Job Manager Task to be launched when
the Job is started.
:type job_manager_task: ~azure.batch.models.JobManagerTask
:param job_preparation_task: The Job Preparation Task. The Job Preparation
Task is a special Task run on each Compute Node before any other Task of
the Job.
:type job_preparation_task: ~azure.batch.models.JobPreparationTask
:param job_release_task: The Job Release Task. The Job Release Task is a
special Task run at the end of the Job on each Compute Node that has run
any other Task of the Job.
:type job_release_task: ~azure.batch.models.JobReleaseTask
:param common_environment_settings: The list of common environment
variable settings. These environment variables are set for all Tasks in
the Job (including the Job Manager, Job Preparation and Job Release
Tasks). Individual Tasks can override an environment setting specified
here by specifying the same setting name with a different value.
:type common_environment_settings:
list[~azure.batch.models.EnvironmentSetting]
:param pool_info: The Pool settings associated with the Job.
:type pool_info: ~azure.batch.models.PoolInformation
:param on_all_tasks_complete: The action the Batch service should take
when all Tasks in the Job are in the completed state. The default is
noaction. Possible values include: 'noAction', 'terminateJob'
:type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete
:param on_task_failure: The action the Batch service should take when any
Task in the Job fails. A Task is considered to have failed if has a
failureInfo. A failureInfo is set if the Task completes with a non-zero
exit code after exhausting its retry count, or if there was an error
starting the Task, for example due to a resource file download error. The
default is noaction. Possible values include: 'noAction',
'performExitOptionsJobAction'
:type on_task_failure: str or ~azure.batch.models.OnTaskFailure
:param network_configuration: The network configuration for the Job.
:type network_configuration: ~azure.batch.models.JobNetworkConfiguration
:param metadata: A list of name-value pairs associated with the Job as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list[~azure.batch.models.MetadataItem]
:param execution_info: The execution information for the Job.
:type execution_info: ~azure.batch.models.JobExecutionInformation
:param stats: Resource usage statistics for the entire lifetime of the
Job. This property is populated only if the CloudJob was retrieved with an
expand clause including the 'stats' attribute; otherwise it is null. The
statistics may not be immediately available. The Batch service performs
periodic roll-up of statistics. The typical delay is about 30 minutes.
:type stats: ~azure.batch.models.JobStatistics
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
'url': {'key': 'url', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'JobState'},
'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
'previous_state': {'key': 'previousState', 'type': 'JobState'},
'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
'priority': {'key': 'priority', 'type': 'int'},
'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'},
'stats': {'key': 'stats', 'type': 'JobStatistics'},
}
def __init__(self, *, id: str=None, display_name: str=None, uses_task_dependencies: bool=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, priority: int=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, pool_info=None, on_all_tasks_complete=None, on_task_failure=None, network_configuration=None, metadata=None, execution_info=None, stats=None, **kwargs) -> None:
super(CloudJob, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.uses_task_dependencies = uses_task_dependencies
self.url = url
self.e_tag = e_tag
self.last_modified = last_modified
self.creation_time = creation_time
self.state = state
self.state_transition_time = state_transition_time
self.previous_state = previous_state
self.previous_state_transition_time = previous_state_transition_time
self.priority = priority
self.constraints = constraints
self.job_manager_task = job_manager_task
self.job_preparation_task = job_preparation_task
self.job_release_task = job_release_task
self.common_environment_settings = common_environment_settings
self.pool_info = pool_info
self.on_all_tasks_complete = on_all_tasks_complete
self.on_task_failure = on_task_failure
self.network_configuration = network_configuration
self.metadata = metadata
self.execution_info = execution_info
self.stats = stats
[docs]class CloudJobSchedule(Model):
"""A Job Schedule that allows recurring Jobs by specifying when to run Jobs
and a specification used to create each Job.
:param id: A string that uniquely identifies the schedule within the
Account.
:type id: str
:param display_name: The display name for the schedule.
:type display_name: str
:param url: The URL of the Job Schedule.
:type url: str
:param e_tag: The ETag of the Job Schedule. This is an opaque string. You
can use it to detect whether the Job Schedule has changed between
requests. In particular, you can be pass the ETag with an Update Job
Schedule request to specify that your changes should take effect only if
nobody else has modified the schedule in the meantime.
:type e_tag: str
:param last_modified: The last modified time of the Job Schedule. This is
the last time at which the schedule level data, such as the Job
specification or recurrence information, changed. It does not factor in
job-level changes such as new Jobs being created or Jobs changing state.
:type last_modified: datetime
:param creation_time: The creation time of the Job Schedule.
:type creation_time: datetime
:param state: The current state of the Job Schedule. Possible values
include: 'active', 'completed', 'disabled', 'terminating', 'deleting'
:type state: str or ~azure.batch.models.JobScheduleState
:param state_transition_time: The time at which the Job Schedule entered
the current state.
:type state_transition_time: datetime
:param previous_state: The previous state of the Job Schedule. This
property is not present if the Job Schedule is in its initial active
state. Possible values include: 'active', 'completed', 'disabled',
'terminating', 'deleting'
:type previous_state: str or ~azure.batch.models.JobScheduleState
:param previous_state_transition_time: The time at which the Job Schedule
entered its previous state. This property is not present if the Job
Schedule is in its initial active state.
:type previous_state_transition_time: datetime
:param schedule: The schedule according to which Jobs will be created.
:type schedule: ~azure.batch.models.Schedule
:param job_specification: The details of the Jobs to be created on this
schedule.
:type job_specification: ~azure.batch.models.JobSpecification
:param execution_info: Information about Jobs that have been and will be
run under this schedule.
:type execution_info: ~azure.batch.models.JobScheduleExecutionInformation
:param metadata: A list of name-value pairs associated with the schedule
as metadata. The Batch service does not assign any meaning to metadata; it
is solely for the use of user code.
:type metadata: list[~azure.batch.models.MetadataItem]
:param stats: The lifetime resource usage statistics for the Job Schedule.
The statistics may not be immediately available. The Batch service
performs periodic roll-up of statistics. The typical delay is about 30
minutes.
:type stats: ~azure.batch.models.JobScheduleStatistics
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'JobScheduleState'},
'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
'previous_state': {'key': 'previousState', 'type': 'JobScheduleState'},
'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
'schedule': {'key': 'schedule', 'type': 'Schedule'},
'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
'execution_info': {'key': 'executionInfo', 'type': 'JobScheduleExecutionInformation'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'stats': {'key': 'stats', 'type': 'JobScheduleStatistics'},
}
def __init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, schedule=None, job_specification=None, execution_info=None, metadata=None, stats=None, **kwargs) -> None:
super(CloudJobSchedule, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.url = url
self.e_tag = e_tag
self.last_modified = last_modified
self.creation_time = creation_time
self.state = state
self.state_transition_time = state_transition_time
self.previous_state = previous_state
self.previous_state_transition_time = previous_state_transition_time
self.schedule = schedule
self.job_specification = job_specification
self.execution_info = execution_info
self.metadata = metadata
self.stats = stats
[docs]class CloudPool(Model):
"""A Pool in the Azure Batch service.
:param id: A string that uniquely identifies the Pool within the Account.
The ID can contain any combination of alphanumeric characters including
hyphens and underscores, and cannot contain more than 64 characters. The
ID is case-preserving and case-insensitive (that is, you may not have two
IDs within an Account that differ only by case).
:type id: str
:param display_name: The display name for the Pool. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param url: The URL of the Pool.
:type url: str
:param e_tag: The ETag of the Pool. This is an opaque string. You can use
it to detect whether the Pool has changed between requests. In particular,
you can be pass the ETag when updating a Pool to specify that your changes
should take effect only if nobody else has modified the Pool in the
meantime.
:type e_tag: str
:param last_modified: The last modified time of the Pool. This is the last
time at which the Pool level data, such as the targetDedicatedNodes or
enableAutoscale settings, changed. It does not factor in node-level
changes such as a Compute Node changing state.
:type last_modified: datetime
:param creation_time: The creation time of the Pool.
:type creation_time: datetime
:param state: The current state of the Pool. Possible values include:
'active', 'deleting'
:type state: str or ~azure.batch.models.PoolState
:param state_transition_time: The time at which the Pool entered its
current state.
:type state_transition_time: datetime
:param allocation_state: Whether the Pool is resizing. Possible values
include: 'steady', 'resizing', 'stopping'
:type allocation_state: str or ~azure.batch.models.AllocationState
:param allocation_state_transition_time: The time at which the Pool
entered its current allocation state.
:type allocation_state_transition_time: datetime
:param vm_size: The size of virtual machines in the Pool. All virtual
machines in a Pool are the same size. For information about available
sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes
in an Azure Batch Pool
(https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
:type vm_size: str
:param cloud_service_configuration: The cloud service configuration for
the Pool. This property and virtualMachineConfiguration are mutually
exclusive and one of the properties must be specified. This property
cannot be specified if the Batch Account was created with its
poolAllocationMode property set to 'UserSubscription'.
:type cloud_service_configuration:
~azure.batch.models.CloudServiceConfiguration
:param virtual_machine_configuration: The virtual machine configuration
for the Pool. This property and cloudServiceConfiguration are mutually
exclusive and one of the properties must be specified.
:type virtual_machine_configuration:
~azure.batch.models.VirtualMachineConfiguration
:param resize_timeout: The timeout for allocation of Compute Nodes to the
Pool. This is the timeout for the most recent resize operation. (The
initial sizing when the Pool is created counts as a resize.) The default
value is 15 minutes.
:type resize_timeout: timedelta
:param resize_errors: A list of errors encountered while performing the
last resize on the Pool. This property is set only if one or more errors
occurred during the last Pool resize, and only when the Pool
allocationState is Steady.
:type resize_errors: list[~azure.batch.models.ResizeError]
:param current_dedicated_nodes: The number of dedicated Compute Nodes
currently in the Pool.
:type current_dedicated_nodes: int
:param current_low_priority_nodes: The number of low-priority Compute
Nodes currently in the Pool. Low-priority Compute Nodes which have been
preempted are included in this count.
:type current_low_priority_nodes: int
:param target_dedicated_nodes: The desired number of dedicated Compute
Nodes in the Pool.
:type target_dedicated_nodes: int
:param target_low_priority_nodes: The desired number of low-priority
Compute Nodes in the Pool.
:type target_low_priority_nodes: int
:param enable_auto_scale: Whether the Pool size should automatically
adjust over time. If false, at least one of targetDedicateNodes and
targetLowPriorityNodes must be specified. If true, the autoScaleFormula
property is required and the Pool automatically resizes according to the
formula. The default value is false.
:type enable_auto_scale: bool
:param auto_scale_formula: A formula for the desired number of Compute
Nodes in the Pool. This property is set only if the Pool automatically
scales, i.e. enableAutoScale is true.
:type auto_scale_formula: str
:param auto_scale_evaluation_interval: The time interval at which to
automatically adjust the Pool size according to the autoscale formula.
This property is set only if the Pool automatically scales, i.e.
enableAutoScale is true.
:type auto_scale_evaluation_interval: timedelta
:param auto_scale_run: The results and errors from the last execution of
the autoscale formula. This property is set only if the Pool automatically
scales, i.e. enableAutoScale is true.
:type auto_scale_run: ~azure.batch.models.AutoScaleRun
:param enable_inter_node_communication: Whether the Pool permits direct
communication between Compute Nodes. This imposes restrictions on which
Compute Nodes can be assigned to the Pool. Specifying this value can
reduce the chance of the requested number of Compute Nodes to be allocated
in the Pool.
:type enable_inter_node_communication: bool
:param network_configuration: The network configuration for the Pool.
:type network_configuration: ~azure.batch.models.NetworkConfiguration
:param start_task: A Task specified to run on each Compute Node as it
joins the Pool.
:type start_task: ~azure.batch.models.StartTask
:param certificate_references: The list of Certificates to be installed on
each Compute Node in the Pool. For Windows Nodes, the Batch service
installs the Certificates to the specified Certificate store and location.
For Linux Compute Nodes, the Certificates are stored in a directory inside
the Task working directory and an environment variable
AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this
location. For Certificates with visibility of 'remoteUser', a 'certs'
directory is created in the user's home directory (e.g.,
/home/{user-name}/certs) and Certificates are placed in that directory.
:type certificate_references:
list[~azure.batch.models.CertificateReference]
:param application_package_references: The list of Packages to be
installed on each Compute Node in the Pool. Changes to Package references
affect all new Nodes joining the Pool, but do not affect Compute Nodes
that are already in the Pool until they are rebooted or reimaged. There is
a maximum of 10 Package references on any given Pool.
:type application_package_references:
list[~azure.batch.models.ApplicationPackageReference]
:param application_licenses: The list of application licenses the Batch
service will make available on each Compute Node in the Pool. The list of
application licenses must be a subset of available Batch service
application licenses. If a license is requested which is not supported,
Pool creation will fail.
:type application_licenses: list[str]
:param max_tasks_per_node: The maximum number of Tasks that can run
concurrently on a single Compute Node in the Pool. The default value is 1.
The maximum value is the smaller of 4 times the number of cores of the
vmSize of the Pool or 256.
:type max_tasks_per_node: int
:param task_scheduling_policy: How Tasks are distributed across Compute
Nodes in a Pool. If not specified, the default is spread.
:type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy
:param user_accounts: The list of user Accounts to be created on each
Compute Node in the Pool.
:type user_accounts: list[~azure.batch.models.UserAccount]
:param metadata: A list of name-value pairs associated with the Pool as
metadata.
:type metadata: list[~azure.batch.models.MetadataItem]
:param stats: Utilization and resource usage statistics for the entire
lifetime of the Pool. This property is populated only if the CloudPool was
retrieved with an expand clause including the 'stats' attribute; otherwise
it is null. The statistics may not be immediately available. The Batch
service performs periodic roll-up of statistics. The typical delay is
about 30 minutes.
:type stats: ~azure.batch.models.PoolStatistics
:param mount_configuration: A list of file systems to mount on each node
in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse.
:type mount_configuration: list[~azure.batch.models.MountConfiguration]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'PoolState'},
'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
'allocation_state': {'key': 'allocationState', 'type': 'AllocationState'},
'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'},
'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
'resize_errors': {'key': 'resizeErrors', 'type': '[ResizeError]'},
'current_dedicated_nodes': {'key': 'currentDedicatedNodes', 'type': 'int'},
'current_low_priority_nodes': {'key': 'currentLowPriorityNodes', 'type': 'int'},
'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
'auto_scale_run': {'key': 'autoScaleRun', 'type': 'AutoScaleRun'},
'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
'start_task': {'key': 'startTask', 'type': 'StartTask'},
'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'stats': {'key': 'stats', 'type': 'PoolStatistics'},
'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'},
}
def __init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, allocation_state=None, allocation_state_transition_time=None, vm_size: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, resize_errors=None, current_dedicated_nodes: int=None, current_low_priority_nodes: int=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, auto_scale_run=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, max_tasks_per_node: int=None, task_scheduling_policy=None, user_accounts=None, metadata=None, stats=None, mount_configuration=None, **kwargs) -> None:
super(CloudPool, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.url = url
self.e_tag = e_tag
self.last_modified = last_modified
self.creation_time = creation_time
self.state = state
self.state_transition_time = state_transition_time
self.allocation_state = allocation_state
self.allocation_state_transition_time = allocation_state_transition_time
self.vm_size = vm_size
self.cloud_service_configuration = cloud_service_configuration
self.virtual_machine_configuration = virtual_machine_configuration
self.resize_timeout = resize_timeout
self.resize_errors = resize_errors
self.current_dedicated_nodes = current_dedicated_nodes
self.current_low_priority_nodes = current_low_priority_nodes
self.target_dedicated_nodes = target_dedicated_nodes
self.target_low_priority_nodes = target_low_priority_nodes
self.enable_auto_scale = enable_auto_scale
self.auto_scale_formula = auto_scale_formula
self.auto_scale_evaluation_interval = auto_scale_evaluation_interval
self.auto_scale_run = auto_scale_run
self.enable_inter_node_communication = enable_inter_node_communication
self.network_configuration = network_configuration
self.start_task = start_task
self.certificate_references = certificate_references
self.application_package_references = application_package_references
self.application_licenses = application_licenses
self.max_tasks_per_node = max_tasks_per_node
self.task_scheduling_policy = task_scheduling_policy
self.user_accounts = user_accounts
self.metadata = metadata
self.stats = stats
self.mount_configuration = mount_configuration
[docs]class CloudServiceConfiguration(Model):
"""The configuration for Compute Nodes in a Pool based on the Azure Cloud
Services platform.
All required parameters must be populated in order to send to Azure.
:param os_family: Required. The Azure Guest OS family to be installed on
the virtual machines in the Pool. Possible values are:
2 - OS Family 2, equivalent to Windows Server 2008 R2 SP1.
3 - OS Family 3, equivalent to Windows Server 2012.
4 - OS Family 4, equivalent to Windows Server 2012 R2.
5 - OS Family 5, equivalent to Windows Server 2016.
6 - OS Family 6, equivalent to Windows Server 2019. For more information,
see Azure Guest OS Releases
(https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases).
:type os_family: str
:param os_version: The Azure Guest OS version to be installed on the
virtual machines in the Pool. The default value is * which specifies the
latest operating system version for the specified OS family.
:type os_version: str
"""
_validation = {
'os_family': {'required': True},
}
_attribute_map = {
'os_family': {'key': 'osFamily', 'type': 'str'},
'os_version': {'key': 'osVersion', 'type': 'str'},
}
def __init__(self, *, os_family: str, os_version: str=None, **kwargs) -> None:
super(CloudServiceConfiguration, self).__init__(**kwargs)
self.os_family = os_family
self.os_version = os_version
[docs]class CloudTask(Model):
"""An Azure Batch Task.
Batch will retry Tasks when a recovery operation is triggered on a Node.
Examples of recovery operations include (but are not limited to) when an
unhealthy Node is rebooted or a Compute Node disappeared due to host
failure. Retries due to recovery operations are independent of and are not
counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0,
an internal retry due to a recovery operation may occur. Because of this,
all Tasks should be idempotent. This means Tasks need to tolerate being
interrupted and restarted without causing any corruption or duplicate data.
The best practice for long running Tasks is to use some form of
checkpointing.
:param id: A string that uniquely identifies the Task within the Job. The
ID can contain any combination of alphanumeric characters including
hyphens and underscores, and cannot contain more than 64 characters.
:type id: str
:param display_name: A display name for the Task. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param url: The URL of the Task.
:type url: str
:param e_tag: The ETag of the Task. This is an opaque string. You can use
it to detect whether the Task has changed between requests. In particular,
you can be pass the ETag when updating a Task to specify that your changes
should take effect only if nobody else has modified the Task in the
meantime.
:type e_tag: str
:param last_modified: The last modified time of the Task.
:type last_modified: datetime
:param creation_time: The creation time of the Task.
:type creation_time: datetime
:param exit_conditions: How the Batch service should respond when the Task
completes.
:type exit_conditions: ~azure.batch.models.ExitConditions
:param state: The current state of the Task. Possible values include:
'active', 'preparing', 'running', 'completed'
:type state: str or ~azure.batch.models.TaskState
:param state_transition_time: The time at which the Task entered its
current state.
:type state_transition_time: datetime
:param previous_state: The previous state of the Task. This property is
not set if the Task is in its initial Active state. Possible values
include: 'active', 'preparing', 'running', 'completed'
:type previous_state: str or ~azure.batch.models.TaskState
:param previous_state_transition_time: The time at which the Task entered
its previous state. This property is not set if the Task is in its initial
Active state.
:type previous_state_transition_time: datetime
:param command_line: The command line of the Task. For multi-instance
Tasks, the command line is executed as the primary Task, after the primary
Task and all subtasks have finished executing the coordination command
line. The command line does not run under a shell, and therefore cannot
take advantage of shell features such as environment variable expansion.
If you want to take advantage of such features, you should invoke the
shell in the command line, for example using "cmd /c MyCommand" in Windows
or "/bin/sh -c MyCommand" in Linux. If the command line refers to file
paths, it should use a relative path (relative to the Task working
directory), or use the Batch provided environment variable
(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
:type command_line: str
:param container_settings: The settings for the container under which the
Task runs. If the Pool that will run this Task has containerConfiguration
set, this must be set as well. If the Pool that will run this Task doesn't
have containerConfiguration set, this must not be set. When this is
specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR
(the root of Azure Batch directories on the node) are mapped into the
container, all Task environment variables are mapped into the container,
and the Task command line is executed in the container. Files produced in
the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to
the host disk, meaning that Batch file APIs will not be able to access
those files.
:type container_settings: ~azure.batch.models.TaskContainerSettings
:param resource_files: A list of files that the Batch service will
download to the Compute Node before running the command line. For
multi-instance Tasks, the resource files will only be downloaded to the
Compute Node on which the primary Task is executed. There is a maximum
size for the list of resource files. When the max size is exceeded, the
request will fail and the response error code will be
RequestEntityTooLarge. If this occurs, the collection of ResourceFiles
must be reduced in size. This can be achieved using .zip files,
Application Packages, or Docker Containers.
:type resource_files: list[~azure.batch.models.ResourceFile]
:param output_files: A list of files that the Batch service will upload
from the Compute Node after running the command line. For multi-instance
Tasks, the files will only be uploaded from the Compute Node on which the
primary Task is executed.
:type output_files: list[~azure.batch.models.OutputFile]
:param environment_settings: A list of environment variable settings for
the Task.
:type environment_settings: list[~azure.batch.models.EnvironmentSetting]
:param affinity_info: A locality hint that can be used by the Batch
service to select a Compute Node on which to start the new Task.
:type affinity_info: ~azure.batch.models.AffinityInformation
:param constraints: The execution constraints that apply to this Task.
:type constraints: ~azure.batch.models.TaskConstraints
:param user_identity: The user identity under which the Task runs. If
omitted, the Task runs as a non-administrative user unique to the Task.
:type user_identity: ~azure.batch.models.UserIdentity
:param execution_info: Information about the execution of the Task.
:type execution_info: ~azure.batch.models.TaskExecutionInformation
:param node_info: Information about the Compute Node on which the Task
ran.
:type node_info: ~azure.batch.models.ComputeNodeInformation
:param multi_instance_settings: An object that indicates that the Task is
a multi-instance Task, and contains information about how to run the
multi-instance Task.
:type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings
:param stats: Resource usage statistics for the Task.
:type stats: ~azure.batch.models.TaskStatistics
:param depends_on: The Tasks that this Task depends on. This Task will not
be scheduled until all Tasks that it depends on have completed
successfully. If any of those Tasks fail and exhaust their retry counts,
this Task will never be scheduled.
:type depends_on: ~azure.batch.models.TaskDependencies
:param application_package_references: A list of Packages that the Batch
service will deploy to the Compute Node before running the command line.
Application packages are downloaded and deployed to a shared directory,
not the Task working directory. Therefore, if a referenced package is
already on the Node, and is up to date, then it is not re-downloaded; the
existing copy on the Compute Node is used. If a referenced Package cannot
be installed, for example because the package has been deleted or because
download failed, the Task fails.
:type application_package_references:
list[~azure.batch.models.ApplicationPackageReference]
:param authentication_token_settings: The settings for an authentication
token that the Task can use to perform Batch service operations. If this
property is set, the Batch service provides the Task with an
authentication token which can be used to authenticate Batch service
operations without requiring an Account access key. The token is provided
via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations
that the Task can carry out using the token depend on the settings. For
example, a Task can request Job permissions in order to add other Tasks to
the Job, or check the status of the Job or of other Tasks under the Job.
:type authentication_token_settings:
~azure.batch.models.AuthenticationTokenSettings
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'},
'state': {'key': 'state', 'type': 'TaskState'},
'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
'previous_state': {'key': 'previousState', 'type': 'TaskState'},
'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'},
'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'},
'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'},
'stats': {'key': 'stats', 'type': 'TaskStatistics'},
'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'},
}
def __init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, exit_conditions=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, command_line: str=None, container_settings=None, resource_files=None, output_files=None, environment_settings=None, affinity_info=None, constraints=None, user_identity=None, execution_info=None, node_info=None, multi_instance_settings=None, stats=None, depends_on=None, application_package_references=None, authentication_token_settings=None, **kwargs) -> None:
super(CloudTask, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.url = url
self.e_tag = e_tag
self.last_modified = last_modified
self.creation_time = creation_time
self.exit_conditions = exit_conditions
self.state = state
self.state_transition_time = state_transition_time
self.previous_state = previous_state
self.previous_state_transition_time = previous_state_transition_time
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.output_files = output_files
self.environment_settings = environment_settings
self.affinity_info = affinity_info
self.constraints = constraints
self.user_identity = user_identity
self.execution_info = execution_info
self.node_info = node_info
self.multi_instance_settings = multi_instance_settings
self.stats = stats
self.depends_on = depends_on
self.application_package_references = application_package_references
self.authentication_token_settings = authentication_token_settings
[docs]class CloudTaskListSubtasksResult(Model):
"""The result of listing the subtasks of a Task.
:param value: The list of subtasks.
:type value: list[~azure.batch.models.SubtaskInformation]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[SubtaskInformation]'},
}
def __init__(self, *, value=None, **kwargs) -> None:
super(CloudTaskListSubtasksResult, self).__init__(**kwargs)
self.value = value
[docs]class ComputeNode(Model):
"""A Compute Node in the Batch service.
:param id: The ID of the Compute Node. Every Compute Node that is added to
a Pool is assigned a unique ID. Whenever a Compute Node is removed from a
Pool, all of its local files are deleted, and the ID is reclaimed and
could be reused for new Compute Nodes.
:type id: str
:param url: The URL of the Compute Node.
:type url: str
:param state: The current state of the Compute Node. The low-priority
Compute Node has been preempted. Tasks which were running on the Compute
Node when it was preempted will be rescheduled when another Compute Node
becomes available. Possible values include: 'idle', 'rebooting',
'reimaging', 'running', 'unusable', 'creating', 'starting',
'waitingForStartTask', 'startTaskFailed', 'unknown', 'leavingPool',
'offline', 'preempted'
:type state: str or ~azure.batch.models.ComputeNodeState
:param scheduling_state: Whether the Compute Node is available for Task
scheduling. Possible values include: 'enabled', 'disabled'
:type scheduling_state: str or ~azure.batch.models.SchedulingState
:param state_transition_time: The time at which the Compute Node entered
its current state.
:type state_transition_time: datetime
:param last_boot_time: The last time at which the Compute Node was
started. This property may not be present if the Compute Node state is
unusable.
:type last_boot_time: datetime
:param allocation_time: The time at which this Compute Node was allocated
to the Pool. This is the time when the Compute Node was initially
allocated and doesn't change once set. It is not updated when the Compute
Node is service healed or preempted.
:type allocation_time: datetime
:param ip_address: The IP address that other Nodes can use to communicate
with this Compute Node. Every Compute Node that is added to a Pool is
assigned a unique IP address. Whenever a Compute Node is removed from a
Pool, all of its local files are deleted, and the IP address is reclaimed
and could be reused for new Compute Nodes.
:type ip_address: str
:param affinity_id: An identifier which can be passed when adding a Task
to request that the Task be scheduled on this Compute Node. Note that this
is just a soft affinity. If the target Compute Node is busy or unavailable
at the time the Task is scheduled, then the Task will be scheduled
elsewhere.
:type affinity_id: str
:param vm_size: The size of the virtual machine hosting the Compute Node.
For information about available sizes of virtual machines in Pools, see
Choose a VM size for Compute Nodes in an Azure Batch Pool
(https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
:type vm_size: str
:param total_tasks_run: The total number of Job Tasks completed on the
Compute Node. This includes Job Manager Tasks and normal Tasks, but not
Job Preparation, Job Release or Start Tasks.
:type total_tasks_run: int
:param running_tasks_count: The total number of currently running Job
Tasks on the Compute Node. This includes Job Manager Tasks and normal
Tasks, but not Job Preparation, Job Release or Start Tasks.
:type running_tasks_count: int
:param total_tasks_succeeded: The total number of Job Tasks which
completed successfully (with exitCode 0) on the Compute Node. This
includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job
Release or Start Tasks.
:type total_tasks_succeeded: int
:param recent_tasks: A list of Tasks whose state has recently changed.
This property is present only if at least one Task has run on this Compute
Node since it was assigned to the Pool.
:type recent_tasks: list[~azure.batch.models.TaskInformation]
:param start_task: The Task specified to run on the Compute Node as it
joins the Pool.
:type start_task: ~azure.batch.models.StartTask
:param start_task_info: Runtime information about the execution of the
StartTask on the Compute Node.
:type start_task_info: ~azure.batch.models.StartTaskInformation
:param certificate_references: The list of Certificates installed on the
Compute Node. For Windows Nodes, the Batch service installs the
Certificates to the specified Certificate store and location. For Linux
Compute Nodes, the Certificates are stored in a directory inside the Task
working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is
supplied to the Task to query for this location. For Certificates with
visibility of 'remoteUser', a 'certs' directory is created in the user's
home directory (e.g., /home/{user-name}/certs) and Certificates are placed
in that directory.
:type certificate_references:
list[~azure.batch.models.CertificateReference]
:param errors: The list of errors that are currently being encountered by
the Compute Node.
:type errors: list[~azure.batch.models.ComputeNodeError]
:param is_dedicated: Whether this Compute Node is a dedicated Compute
Node. If false, the Compute Node is a low-priority Compute Node.
:type is_dedicated: bool
:param endpoint_configuration: The endpoint configuration for the Compute
Node.
:type endpoint_configuration:
~azure.batch.models.ComputeNodeEndpointConfiguration
:param node_agent_info: Information about the Compute Node agent version
and the time the Compute Node upgraded to a new version.
:type node_agent_info: ~azure.batch.models.NodeAgentInformation
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'state': {'key': 'state', 'type': 'ComputeNodeState'},
'scheduling_state': {'key': 'schedulingState', 'type': 'SchedulingState'},
'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
'last_boot_time': {'key': 'lastBootTime', 'type': 'iso-8601'},
'allocation_time': {'key': 'allocationTime', 'type': 'iso-8601'},
'ip_address': {'key': 'ipAddress', 'type': 'str'},
'affinity_id': {'key': 'affinityId', 'type': 'str'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'total_tasks_run': {'key': 'totalTasksRun', 'type': 'int'},
'running_tasks_count': {'key': 'runningTasksCount', 'type': 'int'},
'total_tasks_succeeded': {'key': 'totalTasksSucceeded', 'type': 'int'},
'recent_tasks': {'key': 'recentTasks', 'type': '[TaskInformation]'},
'start_task': {'key': 'startTask', 'type': 'StartTask'},
'start_task_info': {'key': 'startTaskInfo', 'type': 'StartTaskInformation'},
'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
'errors': {'key': 'errors', 'type': '[ComputeNodeError]'},
'is_dedicated': {'key': 'isDedicated', 'type': 'bool'},
'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'ComputeNodeEndpointConfiguration'},
'node_agent_info': {'key': 'nodeAgentInfo', 'type': 'NodeAgentInformation'},
}
def __init__(self, *, id: str=None, url: str=None, state=None, scheduling_state=None, state_transition_time=None, last_boot_time=None, allocation_time=None, ip_address: str=None, affinity_id: str=None, vm_size: str=None, total_tasks_run: int=None, running_tasks_count: int=None, total_tasks_succeeded: int=None, recent_tasks=None, start_task=None, start_task_info=None, certificate_references=None, errors=None, is_dedicated: bool=None, endpoint_configuration=None, node_agent_info=None, **kwargs) -> None:
super(ComputeNode, self).__init__(**kwargs)
self.id = id
self.url = url
self.state = state
self.scheduling_state = scheduling_state
self.state_transition_time = state_transition_time
self.last_boot_time = last_boot_time
self.allocation_time = allocation_time
self.ip_address = ip_address
self.affinity_id = affinity_id
self.vm_size = vm_size
self.total_tasks_run = total_tasks_run
self.running_tasks_count = running_tasks_count
self.total_tasks_succeeded = total_tasks_succeeded
self.recent_tasks = recent_tasks
self.start_task = start_task
self.start_task_info = start_task_info
self.certificate_references = certificate_references
self.errors = errors
self.is_dedicated = is_dedicated
self.endpoint_configuration = endpoint_configuration
self.node_agent_info = node_agent_info
[docs]class ComputeNodeAddUserOptions(Model):
"""Additional parameters for add_user operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(ComputeNodeAddUserOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ComputeNodeDeleteUserOptions(Model):
"""Additional parameters for delete_user operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(ComputeNodeDeleteUserOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ComputeNodeDisableSchedulingOptions(Model):
"""Additional parameters for disable_scheduling operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(ComputeNodeDisableSchedulingOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ComputeNodeEnableSchedulingOptions(Model):
"""Additional parameters for enable_scheduling operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(ComputeNodeEnableSchedulingOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ComputeNodeEndpointConfiguration(Model):
"""The endpoint configuration for the Compute Node.
All required parameters must be populated in order to send to Azure.
:param inbound_endpoints: Required. The list of inbound endpoints that are
accessible on the Compute Node.
:type inbound_endpoints: list[~azure.batch.models.InboundEndpoint]
"""
_validation = {
'inbound_endpoints': {'required': True},
}
_attribute_map = {
'inbound_endpoints': {'key': 'inboundEndpoints', 'type': '[InboundEndpoint]'},
}
def __init__(self, *, inbound_endpoints, **kwargs) -> None:
super(ComputeNodeEndpointConfiguration, self).__init__(**kwargs)
self.inbound_endpoints = inbound_endpoints
[docs]class ComputeNodeError(Model):
"""An error encountered by a Compute Node.
:param code: An identifier for the Compute Node error. Codes are invariant
and are intended to be consumed programmatically.
:type code: str
:param message: A message describing the Compute Node error, intended to
be suitable for display in a user interface.
:type message: str
:param error_details: The list of additional error details related to the
Compute Node error.
:type error_details: list[~azure.batch.models.NameValuePair]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'error_details': {'key': 'errorDetails', 'type': '[NameValuePair]'},
}
def __init__(self, *, code: str=None, message: str=None, error_details=None, **kwargs) -> None:
super(ComputeNodeError, self).__init__(**kwargs)
self.code = code
self.message = message
self.error_details = error_details
[docs]class ComputeNodeGetOptions(Model):
"""Additional parameters for get operation.
:param select: An OData $select clause.
:type select: str
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'select': {'key': '', 'type': 'str'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(ComputeNodeGetOptions, self).__init__(**kwargs)
self.select = select
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ComputeNodeGetRemoteDesktopOptions(Model):
"""Additional parameters for get_remote_desktop operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(ComputeNodeGetRemoteDesktopOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ComputeNodeGetRemoteLoginSettingsOptions(Model):
"""Additional parameters for get_remote_login_settings operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(ComputeNodeGetRemoteLoginSettingsOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ComputeNodeGetRemoteLoginSettingsResult(Model):
"""The remote login settings for a Compute Node.
All required parameters must be populated in order to send to Azure.
:param remote_login_ip_address: Required. The IP address used for remote
login to the Compute Node.
:type remote_login_ip_address: str
:param remote_login_port: Required. The port used for remote login to the
Compute Node.
:type remote_login_port: int
"""
_validation = {
'remote_login_ip_address': {'required': True},
'remote_login_port': {'required': True},
}
_attribute_map = {
'remote_login_ip_address': {'key': 'remoteLoginIPAddress', 'type': 'str'},
'remote_login_port': {'key': 'remoteLoginPort', 'type': 'int'},
}
def __init__(self, *, remote_login_ip_address: str, remote_login_port: int, **kwargs) -> None:
super(ComputeNodeGetRemoteLoginSettingsResult, self).__init__(**kwargs)
self.remote_login_ip_address = remote_login_ip_address
self.remote_login_port = remote_login_port
[docs]class ComputeNodeListOptions(Model):
"""Additional parameters for list operation.
:param filter: An OData $filter clause. For more information on
constructing this filter, see
https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool.
:type filter: str
:param select: An OData $select clause.
:type select: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 Compute Nodes can be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'filter': {'key': '', 'type': 'str'},
'select': {'key': '', 'type': 'str'},
'max_results': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(ComputeNodeListOptions, self).__init__(**kwargs)
self.filter = filter
self.select = select
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ComputeNodeRebootOptions(Model):
"""Additional parameters for reboot operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(ComputeNodeRebootOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ComputeNodeReimageOptions(Model):
"""Additional parameters for reimage operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(ComputeNodeReimageOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ComputeNodeUpdateUserOptions(Model):
"""Additional parameters for update_user operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(ComputeNodeUpdateUserOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ComputeNodeUploadBatchServiceLogsOptions(Model):
"""Additional parameters for upload_batch_service_logs operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(ComputeNodeUploadBatchServiceLogsOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class ComputeNodeUser(Model):
"""A user Account for RDP or SSH access on a Compute Node.
All required parameters must be populated in order to send to Azure.
:param name: Required. The user name of the Account.
:type name: str
:param is_admin: Whether the Account should be an administrator on the
Compute Node. The default value is false.
:type is_admin: bool
:param expiry_time: The time at which the Account should expire. If
omitted, the default is 1 day from the current time. For Linux Compute
Nodes, the expiryTime has a precision up to a day.
:type expiry_time: datetime
:param password: The password of the Account. The password is required for
Windows Compute Nodes (those created with 'cloudServiceConfiguration', or
created with 'virtualMachineConfiguration' using a Windows Image
reference). For Linux Compute Nodes, the password can optionally be
specified along with the sshPublicKey property.
:type password: str
:param ssh_public_key: The SSH public key that can be used for remote
login to the Compute Node. The public key should be compatible with
OpenSSH encoding and should be base 64 encoded. This property can be
specified only for Linux Compute Nodes. If this is specified for a Windows
Compute Node, then the Batch service rejects the request; if you are
calling the REST API directly, the HTTP status code is 400 (Bad Request).
:type ssh_public_key: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'},
'password': {'key': 'password', 'type': 'str'},
'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'},
}
def __init__(self, *, name: str, is_admin: bool=None, expiry_time=None, password: str=None, ssh_public_key: str=None, **kwargs) -> None:
super(ComputeNodeUser, self).__init__(**kwargs)
self.name = name
self.is_admin = is_admin
self.expiry_time = expiry_time
self.password = password
self.ssh_public_key = ssh_public_key
[docs]class ContainerConfiguration(Model):
"""The configuration for container-enabled Pools.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. The container technology to be used. Default value:
"dockerCompatible" .
:vartype type: str
:param container_image_names: The collection of container Image names.
This is the full Image reference, as would be specified to "docker pull".
An Image will be sourced from the default Docker registry unless the Image
is fully qualified with an alternative registry.
:type container_image_names: list[str]
:param container_registries: Additional private registries from which
containers can be pulled. If any Images must be downloaded from a private
registry which requires credentials, then those credentials must be
provided here.
:type container_registries: list[~azure.batch.models.ContainerRegistry]
"""
_validation = {
'type': {'required': True, 'constant': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'container_image_names': {'key': 'containerImageNames', 'type': '[str]'},
'container_registries': {'key': 'containerRegistries', 'type': '[ContainerRegistry]'},
}
type = "dockerCompatible"
def __init__(self, *, container_image_names=None, container_registries=None, **kwargs) -> None:
super(ContainerConfiguration, self).__init__(**kwargs)
self.container_image_names = container_image_names
self.container_registries = container_registries
[docs]class ContainerRegistry(Model):
"""A private container registry.
All required parameters must be populated in order to send to Azure.
:param registry_server: The registry URL. If omitted, the default is
"docker.io".
:type registry_server: str
:param user_name: Required. The user name to log into the registry server.
:type user_name: str
:param password: Required. The password to log into the registry server.
:type password: str
"""
_validation = {
'user_name': {'required': True},
'password': {'required': True},
}
_attribute_map = {
'registry_server': {'key': 'registryServer', 'type': 'str'},
'user_name': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
}
def __init__(self, *, user_name: str, password: str, registry_server: str=None, **kwargs) -> None:
super(ContainerRegistry, self).__init__(**kwargs)
self.registry_server = registry_server
self.user_name = user_name
self.password = password
[docs]class DataDisk(Model):
"""Settings which will be used by the data disks associated to Compute Nodes
in the Pool. When using attached data disks, you need to mount and format
the disks from within a VM to use them.
All required parameters must be populated in order to send to Azure.
:param lun: Required. The logical unit number. The lun is used to uniquely
identify each data disk. If attaching multiple disks, each should have a
distinct lun.
:type lun: int
:param caching: The type of caching to be enabled for the data disks. The
default value for caching is readwrite. For information about the caching
options see:
https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/.
Possible values include: 'none', 'readOnly', 'readWrite'
:type caching: str or ~azure.batch.models.CachingType
:param disk_size_gb: Required. The initial disk size in gigabytes.
:type disk_size_gb: int
:param storage_account_type: The storage Account type to be used for the
data disk. If omitted, the default is "standard_lrs". Possible values
include: 'StandardLRS', 'PremiumLRS'
:type storage_account_type: str or ~azure.batch.models.StorageAccountType
"""
_validation = {
'lun': {'required': True},
'disk_size_gb': {'required': True},
}
_attribute_map = {
'lun': {'key': 'lun', 'type': 'int'},
'caching': {'key': 'caching', 'type': 'CachingType'},
'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'},
}
def __init__(self, *, lun: int, disk_size_gb: int, caching=None, storage_account_type=None, **kwargs) -> None:
super(DataDisk, self).__init__(**kwargs)
self.lun = lun
self.caching = caching
self.disk_size_gb = disk_size_gb
self.storage_account_type = storage_account_type
[docs]class DeleteCertificateError(Model):
"""An error encountered by the Batch service when deleting a Certificate.
:param code: An identifier for the Certificate deletion error. Codes are
invariant and are intended to be consumed programmatically.
:type code: str
:param message: A message describing the Certificate deletion error,
intended to be suitable for display in a user interface.
:type message: str
:param values: A list of additional error details related to the
Certificate deletion error. This list includes details such as the active
Pools and Compute Nodes referencing this Certificate. However, if a large
number of resources reference the Certificate, the list contains only
about the first hundred.
:type values: list[~azure.batch.models.NameValuePair]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'values': {'key': 'values', 'type': '[NameValuePair]'},
}
def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None:
super(DeleteCertificateError, self).__init__(**kwargs)
self.code = code
self.message = message
self.values = values
[docs]class DiskEncryptionConfiguration(Model):
"""The disk encryption configuration applied on compute nodes in the pool.
Disk encryption configuration is not supported on Linux pool created with
Shared Image Gallery Image.
:param targets: The list of disk targets Batch Service will encrypt on the
compute node. If omitted, no disks on the compute nodes in the pool will
be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows
pool, "OsDisk" and "TemporaryDisk" must be specified.
:type targets: list[str or ~azure.batch.models.DiskEncryptionTarget]
"""
_attribute_map = {
'targets': {'key': 'targets', 'type': '[DiskEncryptionTarget]'},
}
def __init__(self, *, targets=None, **kwargs) -> None:
super(DiskEncryptionConfiguration, self).__init__(**kwargs)
self.targets = targets
[docs]class EnvironmentSetting(Model):
"""An environment variable to be set on a Task process.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the environment variable.
:type name: str
:param value: The value of the environment variable.
:type value: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(self, *, name: str, value: str=None, **kwargs) -> None:
super(EnvironmentSetting, self).__init__(**kwargs)
self.name = name
self.value = value
[docs]class ErrorMessage(Model):
"""An error message received in an Azure Batch error response.
:param lang: The language code of the error message.
:type lang: str
:param value: The text of the message.
:type value: str
"""
_attribute_map = {
'lang': {'key': 'lang', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(self, *, lang: str=None, value: str=None, **kwargs) -> None:
super(ErrorMessage, self).__init__(**kwargs)
self.lang = lang
self.value = value
[docs]class ExitCodeMapping(Model):
"""How the Batch service should respond if a Task exits with a particular exit
code.
All required parameters must be populated in order to send to Azure.
:param code: Required. A process exit code.
:type code: int
:param exit_options: Required. How the Batch service should respond if the
Task exits with this exit code.
:type exit_options: ~azure.batch.models.ExitOptions
"""
_validation = {
'code': {'required': True},
'exit_options': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'int'},
'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'},
}
def __init__(self, *, code: int, exit_options, **kwargs) -> None:
super(ExitCodeMapping, self).__init__(**kwargs)
self.code = code
self.exit_options = exit_options
[docs]class ExitCodeRangeMapping(Model):
"""A range of exit codes and how the Batch service should respond to exit
codes within that range.
All required parameters must be populated in order to send to Azure.
:param start: Required. The first exit code in the range.
:type start: int
:param end: Required. The last exit code in the range.
:type end: int
:param exit_options: Required. How the Batch service should respond if the
Task exits with an exit code in the range start to end (inclusive).
:type exit_options: ~azure.batch.models.ExitOptions
"""
_validation = {
'start': {'required': True},
'end': {'required': True},
'exit_options': {'required': True},
}
_attribute_map = {
'start': {'key': 'start', 'type': 'int'},
'end': {'key': 'end', 'type': 'int'},
'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'},
}
def __init__(self, *, start: int, end: int, exit_options, **kwargs) -> None:
super(ExitCodeRangeMapping, self).__init__(**kwargs)
self.start = start
self.end = end
self.exit_options = exit_options
[docs]class ExitConditions(Model):
"""Specifies how the Batch service should respond when the Task completes.
:param exit_codes: A list of individual Task exit codes and how the Batch
service should respond to them.
:type exit_codes: list[~azure.batch.models.ExitCodeMapping]
:param exit_code_ranges: A list of Task exit code ranges and how the Batch
service should respond to them.
:type exit_code_ranges: list[~azure.batch.models.ExitCodeRangeMapping]
:param pre_processing_error: How the Batch service should respond if the
Task fails to start due to an error.
:type pre_processing_error: ~azure.batch.models.ExitOptions
:param file_upload_error: How the Batch service should respond if a file
upload error occurs. If the Task exited with an exit code that was
specified via exitCodes or exitCodeRanges, and then encountered a file
upload error, then the action specified by the exit code takes precedence.
:type file_upload_error: ~azure.batch.models.ExitOptions
:param default: How the Batch service should respond if the Task fails
with an exit condition not covered by any of the other properties. This
value is used if the Task exits with any nonzero exit code not listed in
the exitCodes or exitCodeRanges collection, with a pre-processing error if
the preProcessingError property is not present, or with a file upload
error if the fileUploadError property is not present. If you want
non-default behavior on exit code 0, you must list it explicitly using the
exitCodes or exitCodeRanges collection.
:type default: ~azure.batch.models.ExitOptions
"""
_attribute_map = {
'exit_codes': {'key': 'exitCodes', 'type': '[ExitCodeMapping]'},
'exit_code_ranges': {'key': 'exitCodeRanges', 'type': '[ExitCodeRangeMapping]'},
'pre_processing_error': {'key': 'preProcessingError', 'type': 'ExitOptions'},
'file_upload_error': {'key': 'fileUploadError', 'type': 'ExitOptions'},
'default': {'key': 'default', 'type': 'ExitOptions'},
}
def __init__(self, *, exit_codes=None, exit_code_ranges=None, pre_processing_error=None, file_upload_error=None, default=None, **kwargs) -> None:
super(ExitConditions, self).__init__(**kwargs)
self.exit_codes = exit_codes
self.exit_code_ranges = exit_code_ranges
self.pre_processing_error = pre_processing_error
self.file_upload_error = file_upload_error
self.default = default
[docs]class ExitOptions(Model):
"""Specifies how the Batch service responds to a particular exit condition.
:param job_action: An action to take on the Job containing the Task, if
the Task completes with the given exit condition and the Job's
onTaskFailed property is 'performExitOptionsJobAction'. The default is
none for exit code 0 and terminate for all other exit conditions. If the
Job's onTaskFailed property is noaction, then specifying this property
returns an error and the add Task request fails with an invalid property
value error; if you are calling the REST API directly, the HTTP status
code is 400 (Bad Request). Possible values include: 'none', 'disable',
'terminate'
:type job_action: str or ~azure.batch.models.JobAction
:param dependency_action: An action that the Batch service performs on
Tasks that depend on this Task. Possible values are 'satisfy' (allowing
dependent tasks to progress) and 'block' (dependent tasks continue to
wait). Batch does not yet support cancellation of dependent tasks.
Possible values include: 'satisfy', 'block'
:type dependency_action: str or ~azure.batch.models.DependencyAction
"""
_attribute_map = {
'job_action': {'key': 'jobAction', 'type': 'JobAction'},
'dependency_action': {'key': 'dependencyAction', 'type': 'DependencyAction'},
}
def __init__(self, *, job_action=None, dependency_action=None, **kwargs) -> None:
super(ExitOptions, self).__init__(**kwargs)
self.job_action = job_action
self.dependency_action = dependency_action
[docs]class FileDeleteFromComputeNodeOptions(Model):
"""Additional parameters for delete_from_compute_node operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(FileDeleteFromComputeNodeOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class FileDeleteFromTaskOptions(Model):
"""Additional parameters for delete_from_task operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(FileDeleteFromTaskOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class FileGetFromComputeNodeOptions(Model):
"""Additional parameters for get_from_compute_node operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param ocp_range: The byte range to be retrieved. The default is to
retrieve the entire file. The format is bytes=startRange-endRange.
:type ocp_range: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'ocp_range': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, ocp_range: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(FileGetFromComputeNodeOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.ocp_range = ocp_range
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class FileGetFromTaskOptions(Model):
"""Additional parameters for get_from_task operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param ocp_range: The byte range to be retrieved. The default is to
retrieve the entire file. The format is bytes=startRange-endRange.
:type ocp_range: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'ocp_range': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, ocp_range: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(FileGetFromTaskOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.ocp_range = ocp_range
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class FileGetPropertiesFromComputeNodeOptions(Model):
"""Additional parameters for get_properties_from_compute_node operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(FileGetPropertiesFromComputeNodeOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class FileGetPropertiesFromTaskOptions(Model):
"""Additional parameters for get_properties_from_task operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(FileGetPropertiesFromTaskOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class FileListFromComputeNodeOptions(Model):
"""Additional parameters for list_from_compute_node operation.
:param filter: An OData $filter clause. For more information on
constructing this filter, see
https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files.
:type filter: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 files can be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'filter': {'key': '', 'type': 'str'},
'max_results': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(FileListFromComputeNodeOptions, self).__init__(**kwargs)
self.filter = filter
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class FileListFromTaskOptions(Model):
"""Additional parameters for list_from_task operation.
:param filter: An OData $filter clause. For more information on
constructing this filter, see
https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files.
:type filter: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 files can be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'filter': {'key': '', 'type': 'str'},
'max_results': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(FileListFromTaskOptions, self).__init__(**kwargs)
self.filter = filter
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class FileProperties(Model):
"""The properties of a file on a Compute Node.
All required parameters must be populated in order to send to Azure.
:param creation_time: The file creation time. The creation time is not
returned for files on Linux Compute Nodes.
:type creation_time: datetime
:param last_modified: Required. The time at which the file was last
modified.
:type last_modified: datetime
:param content_length: Required. The length of the file.
:type content_length: long
:param content_type: The content type of the file.
:type content_type: str
:param file_mode: The file mode attribute in octal format. The file mode
is returned only for files on Linux Compute Nodes.
:type file_mode: str
"""
_validation = {
'last_modified': {'required': True},
'content_length': {'required': True},
}
_attribute_map = {
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
'content_length': {'key': 'contentLength', 'type': 'long'},
'content_type': {'key': 'contentType', 'type': 'str'},
'file_mode': {'key': 'fileMode', 'type': 'str'},
}
def __init__(self, *, last_modified, content_length: int, creation_time=None, content_type: str=None, file_mode: str=None, **kwargs) -> None:
super(FileProperties, self).__init__(**kwargs)
self.creation_time = creation_time
self.last_modified = last_modified
self.content_length = content_length
self.content_type = content_type
self.file_mode = file_mode
[docs]class ImageReference(Model):
"""A reference to an Azure Virtual Machines Marketplace Image or a custom
Azure Virtual Machine Image. To get the list of all Azure Marketplace Image
references verified by Azure Batch, see the 'List supported Images'
operation.
:param publisher: The publisher of the Azure Virtual Machines Marketplace
Image. For example, Canonical or MicrosoftWindowsServer.
:type publisher: str
:param offer: The offer type of the Azure Virtual Machines Marketplace
Image. For example, UbuntuServer or WindowsServer.
:type offer: str
:param sku: The SKU of the Azure Virtual Machines Marketplace Image. For
example, 18.04-LTS or 2019-Datacenter.
:type sku: str
:param version: The version of the Azure Virtual Machines Marketplace
Image. A value of 'latest' can be specified to select the latest version
of an Image. If omitted, the default is 'latest'.
:type version: str
:param virtual_machine_image_id: The ARM resource identifier of the Shared
Image Gallery Image. Compute Nodes in the Pool will be created using this
Image Id. This is of the
form/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{versionId}.
This property is mutually exclusive with other ImageReference properties.
For Virtual Machine Image it must be in the same region and subscription
as the Azure Batch account. The Shared Image Gallery Image must have
replicas in the same region as the Azure Batch account. For information
about the firewall settings for the Batch Compute Node agent to
communicate with the Batch service see
https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.
:type virtual_machine_image_id: str
"""
_attribute_map = {
'publisher': {'key': 'publisher', 'type': 'str'},
'offer': {'key': 'offer', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'virtual_machine_image_id': {'key': 'virtualMachineImageId', 'type': 'str'},
}
def __init__(self, *, publisher: str=None, offer: str=None, sku: str=None, version: str=None, virtual_machine_image_id: str=None, **kwargs) -> None:
super(ImageReference, self).__init__(**kwargs)
self.publisher = publisher
self.offer = offer
self.sku = sku
self.version = version
self.virtual_machine_image_id = virtual_machine_image_id
[docs]class InboundEndpoint(Model):
"""An inbound endpoint on a Compute Node.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the endpoint.
:type name: str
:param protocol: Required. The protocol of the endpoint. Possible values
include: 'tcp', 'udp'
:type protocol: str or ~azure.batch.models.InboundEndpointProtocol
:param public_ip_address: Required. The public IP address of the Compute
Node.
:type public_ip_address: str
:param public_fqdn: Required. The public fully qualified domain name for
the Compute Node.
:type public_fqdn: str
:param frontend_port: Required. The public port number of the endpoint.
:type frontend_port: int
:param backend_port: Required. The backend port number of the endpoint.
:type backend_port: int
"""
_validation = {
'name': {'required': True},
'protocol': {'required': True},
'public_ip_address': {'required': True},
'public_fqdn': {'required': True},
'frontend_port': {'required': True},
'backend_port': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'},
'public_ip_address': {'key': 'publicIPAddress', 'type': 'str'},
'public_fqdn': {'key': 'publicFQDN', 'type': 'str'},
'frontend_port': {'key': 'frontendPort', 'type': 'int'},
'backend_port': {'key': 'backendPort', 'type': 'int'},
}
def __init__(self, *, name: str, protocol, public_ip_address: str, public_fqdn: str, frontend_port: int, backend_port: int, **kwargs) -> None:
super(InboundEndpoint, self).__init__(**kwargs)
self.name = name
self.protocol = protocol
self.public_ip_address = public_ip_address
self.public_fqdn = public_fqdn
self.frontend_port = frontend_port
self.backend_port = backend_port
[docs]class InboundNATPool(Model):
"""A inbound NAT Pool that can be used to address specific ports on Compute
Nodes in a Batch Pool externally.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the endpoint. The name must be unique
within a Batch Pool, can contain letters, numbers, underscores, periods,
and hyphens. Names must start with a letter or number, must end with a
letter, number, or underscore, and cannot exceed 77 characters. If any
invalid values are provided the request fails with HTTP status code 400.
:type name: str
:param protocol: Required. The protocol of the endpoint. Possible values
include: 'tcp', 'udp'
:type protocol: str or ~azure.batch.models.InboundEndpointProtocol
:param backend_port: Required. The port number on the Compute Node. This
must be unique within a Batch Pool. Acceptable values are between 1 and
65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any
reserved values are provided the request fails with HTTP status code 400.
:type backend_port: int
:param frontend_port_range_start: Required. The first port number in the
range of external ports that will be used to provide inbound access to the
backendPort on individual Compute Nodes. Acceptable values range between 1
and 65534 except ports from 50000 to 55000 which are reserved. All ranges
within a Pool must be distinct and cannot overlap. Each range must contain
at least 40 ports. If any reserved or overlapping values are provided the
request fails with HTTP status code 400.
:type frontend_port_range_start: int
:param frontend_port_range_end: Required. The last port number in the
range of external ports that will be used to provide inbound access to the
backendPort on individual Compute Nodes. Acceptable values range between 1
and 65534 except ports from 50000 to 55000 which are reserved by the Batch
service. All ranges within a Pool must be distinct and cannot overlap.
Each range must contain at least 40 ports. If any reserved or overlapping
values are provided the request fails with HTTP status code 400.
:type frontend_port_range_end: int
:param network_security_group_rules: A list of network security group
rules that will be applied to the endpoint. The maximum number of rules
that can be specified across all the endpoints on a Batch Pool is 25. If
no network security group rules are specified, a default rule will be
created to allow inbound access to the specified backendPort. If the
maximum number of network security group rules is exceeded the request
fails with HTTP status code 400.
:type network_security_group_rules:
list[~azure.batch.models.NetworkSecurityGroupRule]
"""
_validation = {
'name': {'required': True},
'protocol': {'required': True},
'backend_port': {'required': True},
'frontend_port_range_start': {'required': True},
'frontend_port_range_end': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'},
'backend_port': {'key': 'backendPort', 'type': 'int'},
'frontend_port_range_start': {'key': 'frontendPortRangeStart', 'type': 'int'},
'frontend_port_range_end': {'key': 'frontendPortRangeEnd', 'type': 'int'},
'network_security_group_rules': {'key': 'networkSecurityGroupRules', 'type': '[NetworkSecurityGroupRule]'},
}
def __init__(self, *, name: str, protocol, backend_port: int, frontend_port_range_start: int, frontend_port_range_end: int, network_security_group_rules=None, **kwargs) -> None:
super(InboundNATPool, self).__init__(**kwargs)
self.name = name
self.protocol = protocol
self.backend_port = backend_port
self.frontend_port_range_start = frontend_port_range_start
self.frontend_port_range_end = frontend_port_range_end
self.network_security_group_rules = network_security_group_rules
[docs]class JobAddOptions(Model):
"""Additional parameters for add operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(JobAddOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class JobAddParameter(Model):
"""An Azure Batch Job to add.
All required parameters must be populated in order to send to Azure.
:param id: Required. A string that uniquely identifies the Job within the
Account. The ID can contain any combination of alphanumeric characters
including hyphens and underscores, and cannot contain more than 64
characters. The ID is case-preserving and case-insensitive (that is, you
may not have two IDs within an Account that differ only by case).
:type id: str
:param display_name: The display name for the Job. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param priority: The priority of the Job. Priority values can range from
-1000 to 1000, with -1000 being the lowest priority and 1000 being the
highest priority. The default value is 0.
:type priority: int
:param constraints: The execution constraints for the Job.
:type constraints: ~azure.batch.models.JobConstraints
:param job_manager_task: Details of a Job Manager Task to be launched when
the Job is started. If the Job does not specify a Job Manager Task, the
user must explicitly add Tasks to the Job. If the Job does specify a Job
Manager Task, the Batch service creates the Job Manager Task when the Job
is created, and will try to schedule the Job Manager Task before
scheduling other Tasks in the Job. The Job Manager Task's typical purpose
is to control and/or monitor Job execution, for example by deciding what
additional Tasks to run, determining when the work is complete, etc.
(However, a Job Manager Task is not restricted to these activities - it is
a fully-fledged Task in the system and perform whatever actions are
required for the Job.) For example, a Job Manager Task might download a
file specified as a parameter, analyze the contents of that file and
submit additional Tasks based on those contents.
:type job_manager_task: ~azure.batch.models.JobManagerTask
:param job_preparation_task: The Job Preparation Task. If a Job has a Job
Preparation Task, the Batch service will run the Job Preparation Task on a
Node before starting any Tasks of that Job on that Compute Node.
:type job_preparation_task: ~azure.batch.models.JobPreparationTask
:param job_release_task: The Job Release Task. A Job Release Task cannot
be specified without also specifying a Job Preparation Task for the Job.
The Batch service runs the Job Release Task on the Nodes that have run the
Job Preparation Task. The primary purpose of the Job Release Task is to
undo changes to Compute Nodes made by the Job Preparation Task. Example
activities include deleting local files, or shutting down services that
were started as part of Job preparation.
:type job_release_task: ~azure.batch.models.JobReleaseTask
:param common_environment_settings: The list of common environment
variable settings. These environment variables are set for all Tasks in
the Job (including the Job Manager, Job Preparation and Job Release
Tasks). Individual Tasks can override an environment setting specified
here by specifying the same setting name with a different value.
:type common_environment_settings:
list[~azure.batch.models.EnvironmentSetting]
:param pool_info: Required. The Pool on which the Batch service runs the
Job's Tasks.
:type pool_info: ~azure.batch.models.PoolInformation
:param on_all_tasks_complete: The action the Batch service should take
when all Tasks in the Job are in the completed state. Note that if a Job
contains no Tasks, then all Tasks are considered complete. This option is
therefore most commonly used with a Job Manager task; if you want to use
automatic Job termination without a Job Manager, you should initially set
onAllTasksComplete to noaction and update the Job properties to set
onAllTasksComplete to terminatejob once you have finished adding Tasks.
The default is noaction. Possible values include: 'noAction',
'terminateJob'
:type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete
:param on_task_failure: The action the Batch service should take when any
Task in the Job fails. A Task is considered to have failed if has a
failureInfo. A failureInfo is set if the Task completes with a non-zero
exit code after exhausting its retry count, or if there was an error
starting the Task, for example due to a resource file download error. The
default is noaction. Possible values include: 'noAction',
'performExitOptionsJobAction'
:type on_task_failure: str or ~azure.batch.models.OnTaskFailure
:param metadata: A list of name-value pairs associated with the Job as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list[~azure.batch.models.MetadataItem]
:param uses_task_dependencies: Whether Tasks in the Job can define
dependencies on each other. The default is false.
:type uses_task_dependencies: bool
:param network_configuration: The network configuration for the Job.
:type network_configuration: ~azure.batch.models.JobNetworkConfiguration
"""
_validation = {
'id': {'required': True},
'pool_info': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'priority': {'key': 'priority', 'type': 'int'},
'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'},
}
def __init__(self, *, id: str, pool_info, display_name: str=None, priority: int=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, on_all_tasks_complete=None, on_task_failure=None, metadata=None, uses_task_dependencies: bool=None, network_configuration=None, **kwargs) -> None:
super(JobAddParameter, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.priority = priority
self.constraints = constraints
self.job_manager_task = job_manager_task
self.job_preparation_task = job_preparation_task
self.job_release_task = job_release_task
self.common_environment_settings = common_environment_settings
self.pool_info = pool_info
self.on_all_tasks_complete = on_all_tasks_complete
self.on_task_failure = on_task_failure
self.metadata = metadata
self.uses_task_dependencies = uses_task_dependencies
self.network_configuration = network_configuration
[docs]class JobConstraints(Model):
"""The execution constraints for a Job.
:param max_wall_clock_time: The maximum elapsed time that the Job may run,
measured from the time the Job is created. If the Job does not complete
within the time limit, the Batch service terminates it and any Tasks that
are still running. In this case, the termination reason will be
MaxWallClockTimeExpiry. If this property is not specified, there is no
time limit on how long the Job may run.
:type max_wall_clock_time: timedelta
:param max_task_retry_count: The maximum number of times each Task may be
retried. The Batch service retries a Task if its exit code is nonzero.
Note that this value specifically controls the number of retries. The
Batch service will try each Task once, and may then retry up to this
limit. For example, if the maximum retry count is 3, Batch tries a Task up
to 4 times (one initial try and 3 retries). If the maximum retry count is
0, the Batch service does not retry Tasks. If the maximum retry count is
-1, the Batch service retries Tasks without limit. The default value is 0
(no retries).
:type max_task_retry_count: int
"""
_attribute_map = {
'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'},
'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'},
}
def __init__(self, *, max_wall_clock_time=None, max_task_retry_count: int=None, **kwargs) -> None:
super(JobConstraints, self).__init__(**kwargs)
self.max_wall_clock_time = max_wall_clock_time
self.max_task_retry_count = max_task_retry_count
[docs]class JobDeleteOptions(Model):
"""Additional parameters for delete operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobDeleteOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobDisableOptions(Model):
"""Additional parameters for disable operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobDisableOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobDisableParameter(Model):
"""Options when disabling a Job.
All required parameters must be populated in order to send to Azure.
:param disable_tasks: Required. What to do with active Tasks associated
with the Job. Possible values include: 'requeue', 'terminate', 'wait'
:type disable_tasks: str or ~azure.batch.models.DisableJobOption
"""
_validation = {
'disable_tasks': {'required': True},
}
_attribute_map = {
'disable_tasks': {'key': 'disableTasks', 'type': 'DisableJobOption'},
}
def __init__(self, *, disable_tasks, **kwargs) -> None:
super(JobDisableParameter, self).__init__(**kwargs)
self.disable_tasks = disable_tasks
[docs]class JobEnableOptions(Model):
"""Additional parameters for enable operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobEnableOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobGetAllLifetimeStatisticsOptions(Model):
"""Additional parameters for get_all_lifetime_statistics operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(JobGetAllLifetimeStatisticsOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class JobGetOptions(Model):
"""Additional parameters for get operation.
:param select: An OData $select clause.
:type select: str
:param expand: An OData $expand clause.
:type expand: str
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'select': {'key': '', 'type': 'str'},
'expand': {'key': '', 'type': 'str'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobGetOptions, self).__init__(**kwargs)
self.select = select
self.expand = expand
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobGetTaskCountsOptions(Model):
"""Additional parameters for get_task_counts operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(JobGetTaskCountsOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class JobListFromJobScheduleOptions(Model):
"""Additional parameters for list_from_job_schedule operation.
:param filter: An OData $filter clause. For more information on
constructing this filter, see
https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule.
:type filter: str
:param select: An OData $select clause.
:type select: str
:param expand: An OData $expand clause.
:type expand: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 Jobs can be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'filter': {'key': '', 'type': 'str'},
'select': {'key': '', 'type': 'str'},
'expand': {'key': '', 'type': 'str'},
'max_results': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(JobListFromJobScheduleOptions, self).__init__(**kwargs)
self.filter = filter
self.select = select
self.expand = expand
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class JobListOptions(Model):
"""Additional parameters for list operation.
:param filter: An OData $filter clause. For more information on
constructing this filter, see
https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs.
:type filter: str
:param select: An OData $select clause.
:type select: str
:param expand: An OData $expand clause.
:type expand: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 Jobs can be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'filter': {'key': '', 'type': 'str'},
'select': {'key': '', 'type': 'str'},
'expand': {'key': '', 'type': 'str'},
'max_results': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(JobListOptions, self).__init__(**kwargs)
self.filter = filter
self.select = select
self.expand = expand
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class JobListPreparationAndReleaseTaskStatusOptions(Model):
"""Additional parameters for list_preparation_and_release_task_status
operation.
:param filter: An OData $filter clause. For more information on
constructing this filter, see
https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status.
:type filter: str
:param select: An OData $select clause.
:type select: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 Tasks can be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'filter': {'key': '', 'type': 'str'},
'select': {'key': '', 'type': 'str'},
'max_results': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(JobListPreparationAndReleaseTaskStatusOptions, self).__init__(**kwargs)
self.filter = filter
self.select = select
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class JobManagerTask(Model):
"""Specifies details of a Job Manager Task.
The Job Manager Task is automatically started when the Job is created. The
Batch service tries to schedule the Job Manager Task before any other Tasks
in the Job. When shrinking a Pool, the Batch service tries to preserve
Nodes where Job Manager Tasks are running for as long as possible (that is,
Compute Nodes running 'normal' Tasks are removed before Compute Nodes
running Job Manager Tasks). When a Job Manager Task fails and needs to be
restarted, the system tries to schedule it at the highest priority. If
there are no idle Compute Nodes available, the system may terminate one of
the running Tasks in the Pool and return it to the queue in order to make
room for the Job Manager Task to restart. Note that a Job Manager Task in
one Job does not have priority over Tasks in other Jobs. Across Jobs, only
Job level priorities are observed. For example, if a Job Manager in a
priority 0 Job needs to be restarted, it will not displace Tasks of a
priority 1 Job. Batch will retry Tasks when a recovery operation is
triggered on a Node. Examples of recovery operations include (but are not
limited to) when an unhealthy Node is rebooted or a Compute Node
disappeared due to host failure. Retries due to recovery operations are
independent of and are not counted against the maxTaskRetryCount. Even if
the maxTaskRetryCount is 0, an internal retry due to a recovery operation
may occur. Because of this, all Tasks should be idempotent. This means
Tasks need to tolerate being interrupted and restarted without causing any
corruption or duplicate data. The best practice for long running Tasks is
to use some form of checkpointing.
All required parameters must be populated in order to send to Azure.
:param id: Required. A string that uniquely identifies the Job Manager
Task within the Job. The ID can contain any combination of alphanumeric
characters including hyphens and underscores and cannot contain more than
64 characters.
:type id: str
:param display_name: The display name of the Job Manager Task. It need not
be unique and can contain any Unicode characters up to a maximum length of
1024.
:type display_name: str
:param command_line: Required. The command line of the Job Manager Task.
The command line does not run under a shell, and therefore cannot take
advantage of shell features such as environment variable expansion. If you
want to take advantage of such features, you should invoke the shell in
the command line, for example using "cmd /c MyCommand" in Windows or
"/bin/sh -c MyCommand" in Linux. If the command line refers to file paths,
it should use a relative path (relative to the Task working directory), or
use the Batch provided environment variable
(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
:type command_line: str
:param container_settings: The settings for the container under which the
Job Manager Task runs. If the Pool that will run this Task has
containerConfiguration set, this must be set as well. If the Pool that
will run this Task doesn't have containerConfiguration set, this must not
be set. When this is specified, all directories recursively below the
AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node)
are mapped into the container, all Task environment variables are mapped
into the container, and the Task command line is executed in the
container. Files produced in the container outside of
AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning
that Batch file APIs will not be able to access those files.
:type container_settings: ~azure.batch.models.TaskContainerSettings
:param resource_files: A list of files that the Batch service will
download to the Compute Node before running the command line. Files listed
under this element are located in the Task's working directory. There is a
maximum size for the list of resource files. When the max size is
exceeded, the request will fail and the response error code will be
RequestEntityTooLarge. If this occurs, the collection of ResourceFiles
must be reduced in size. This can be achieved using .zip files,
Application Packages, or Docker Containers.
:type resource_files: list[~azure.batch.models.ResourceFile]
:param output_files: A list of files that the Batch service will upload
from the Compute Node after running the command line. For multi-instance
Tasks, the files will only be uploaded from the Compute Node on which the
primary Task is executed.
:type output_files: list[~azure.batch.models.OutputFile]
:param environment_settings: A list of environment variable settings for
the Job Manager Task.
:type environment_settings: list[~azure.batch.models.EnvironmentSetting]
:param constraints: Constraints that apply to the Job Manager Task.
:type constraints: ~azure.batch.models.TaskConstraints
:param kill_job_on_completion: Whether completion of the Job Manager Task
signifies completion of the entire Job. If true, when the Job Manager Task
completes, the Batch service marks the Job as complete. If any Tasks are
still running at this time (other than Job Release), those Tasks are
terminated. If false, the completion of the Job Manager Task does not
affect the Job status. In this case, you should either use the
onAllTasksComplete attribute to terminate the Job, or have a client or
user terminate the Job explicitly. An example of this is if the Job
Manager creates a set of Tasks but then takes no further role in their
execution. The default value is true. If you are using the
onAllTasksComplete and onTaskFailure attributes to control Job lifetime,
and using the Job Manager Task only to create the Tasks for the Job (not
to monitor progress), then it is important to set killJobOnCompletion to
false.
:type kill_job_on_completion: bool
:param user_identity: The user identity under which the Job Manager Task
runs. If omitted, the Task runs as a non-administrative user unique to the
Task.
:type user_identity: ~azure.batch.models.UserIdentity
:param run_exclusive: Whether the Job Manager Task requires exclusive use
of the Compute Node where it runs. If true, no other Tasks will run on the
same Node for as long as the Job Manager is running. If false, other Tasks
can run simultaneously with the Job Manager on a Compute Node. The Job
Manager Task counts normally against the Compute Node's concurrent Task
limit, so this is only relevant if the Compute Node allows multiple
concurrent Tasks. The default value is true.
:type run_exclusive: bool
:param application_package_references: A list of Application Packages that
the Batch service will deploy to the Compute Node before running the
command line. Application Packages are downloaded and deployed to a shared
directory, not the Task working directory. Therefore, if a referenced
Application Package is already on the Compute Node, and is up to date,
then it is not re-downloaded; the existing copy on the Compute Node is
used. If a referenced Application Package cannot be installed, for example
because the package has been deleted or because download failed, the Task
fails.
:type application_package_references:
list[~azure.batch.models.ApplicationPackageReference]
:param authentication_token_settings: The settings for an authentication
token that the Task can use to perform Batch service operations. If this
property is set, the Batch service provides the Task with an
authentication token which can be used to authenticate Batch service
operations without requiring an Account access key. The token is provided
via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations
that the Task can carry out using the token depend on the settings. For
example, a Task can request Job permissions in order to add other Tasks to
the Job, or check the status of the Job or of other Tasks under the Job.
:type authentication_token_settings:
~azure.batch.models.AuthenticationTokenSettings
:param allow_low_priority_node: Whether the Job Manager Task may run on a
low-priority Compute Node. The default value is true.
:type allow_low_priority_node: bool
"""
_validation = {
'id': {'required': True},
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'kill_job_on_completion': {'key': 'killJobOnCompletion', 'type': 'bool'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'run_exclusive': {'key': 'runExclusive', 'type': 'bool'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'},
'allow_low_priority_node': {'key': 'allowLowPriorityNode', 'type': 'bool'},
}
def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None, resource_files=None, output_files=None, environment_settings=None, constraints=None, kill_job_on_completion: bool=None, user_identity=None, run_exclusive: bool=None, application_package_references=None, authentication_token_settings=None, allow_low_priority_node: bool=None, **kwargs) -> None:
super(JobManagerTask, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.output_files = output_files
self.environment_settings = environment_settings
self.constraints = constraints
self.kill_job_on_completion = kill_job_on_completion
self.user_identity = user_identity
self.run_exclusive = run_exclusive
self.application_package_references = application_package_references
self.authentication_token_settings = authentication_token_settings
self.allow_low_priority_node = allow_low_priority_node
[docs]class JobNetworkConfiguration(Model):
"""The network configuration for the Job.
All required parameters must be populated in order to send to Azure.
:param subnet_id: Required. The ARM resource identifier of the virtual
network subnet which Compute Nodes running Tasks from the Job will join
for the duration of the Task. This will only work with a
VirtualMachineConfiguration Pool. The virtual network must be in the same
region and subscription as the Azure Batch Account. The specified subnet
should have enough free IP addresses to accommodate the number of Compute
Nodes which will run Tasks from the Job. This can be up to the number of
Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal
must have the 'Classic Virtual Machine Contributor' Role-Based Access
Control (RBAC) role for the specified VNet so that Azure Batch service can
schedule Tasks on the Nodes. This can be verified by checking if the
specified VNet has any associated Network Security Groups (NSG). If
communication to the Nodes in the specified subnet is denied by an NSG,
then the Batch service will set the state of the Compute Nodes to
unusable. This is of the form
/subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}.
If the specified VNet has any associated Network Security Groups (NSG),
then a few reserved system ports must be enabled for inbound communication
from the Azure Batch service. For Pools created with a Virtual Machine
configuration, enable ports 29876 and 29877, as well as port 22 for Linux
and port 3389 for Windows. Port 443 is also required to be open for
outbound connections for communications to Azure Storage. For more details
see:
https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration
:type subnet_id: str
"""
_validation = {
'subnet_id': {'required': True},
}
_attribute_map = {
'subnet_id': {'key': 'subnetId', 'type': 'str'},
}
def __init__(self, *, subnet_id: str, **kwargs) -> None:
super(JobNetworkConfiguration, self).__init__(**kwargs)
self.subnet_id = subnet_id
[docs]class JobPatchOptions(Model):
"""Additional parameters for patch operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobPatchOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobPatchParameter(Model):
"""The set of changes to be made to a Job.
:param priority: The priority of the Job. Priority values can range from
-1000 to 1000, with -1000 being the lowest priority and 1000 being the
highest priority. If omitted, the priority of the Job is left unchanged.
:type priority: int
:param on_all_tasks_complete: The action the Batch service should take
when all Tasks in the Job are in the completed state. If omitted, the
completion behavior is left unchanged. You may not change the value from
terminatejob to noaction - that is, once you have engaged automatic Job
termination, you cannot turn it off again. If you try to do this, the
request fails with an 'invalid property value' error response; if you are
calling the REST API directly, the HTTP status code is 400 (Bad Request).
Possible values include: 'noAction', 'terminateJob'
:type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete
:param constraints: The execution constraints for the Job. If omitted, the
existing execution constraints are left unchanged.
:type constraints: ~azure.batch.models.JobConstraints
:param pool_info: The Pool on which the Batch service runs the Job's
Tasks. You may change the Pool for a Job only when the Job is disabled.
The Patch Job call will fail if you include the poolInfo element and the
Job is not disabled. If you specify an autoPoolSpecification in the
poolInfo, only the keepAlive property of the autoPoolSpecification can be
updated, and then only if the autoPoolSpecification has a
poolLifetimeOption of Job (other job properties can be updated as normal).
If omitted, the Job continues to run on its current Pool.
:type pool_info: ~azure.batch.models.PoolInformation
:param metadata: A list of name-value pairs associated with the Job as
metadata. If omitted, the existing Job metadata is left unchanged.
:type metadata: list[~azure.batch.models.MetadataItem]
"""
_attribute_map = {
'priority': {'key': 'priority', 'type': 'int'},
'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
}
def __init__(self, *, priority: int=None, on_all_tasks_complete=None, constraints=None, pool_info=None, metadata=None, **kwargs) -> None:
super(JobPatchParameter, self).__init__(**kwargs)
self.priority = priority
self.on_all_tasks_complete = on_all_tasks_complete
self.constraints = constraints
self.pool_info = pool_info
self.metadata = metadata
[docs]class JobPreparationTask(Model):
"""A Job Preparation Task to run before any Tasks of the Job on any given
Compute Node.
You can use Job Preparation to prepare a Node to run Tasks for the Job.
Activities commonly performed in Job Preparation include: Downloading
common resource files used by all the Tasks in the Job. The Job Preparation
Task can download these common resource files to the shared location on the
Node. (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service on the
Node so that all Tasks of that Job can communicate with it. If the Job
Preparation Task fails (that is, exhausts its retry count before exiting
with exit code 0), Batch will not run Tasks of this Job on the Node. The
Compute Node remains ineligible to run Tasks of this Job until it is
reimaged. The Compute Node remains active and can be used for other Jobs.
The Job Preparation Task can run multiple times on the same Node.
Therefore, you should write the Job Preparation Task to handle
re-execution. If the Node is rebooted, the Job Preparation Task is run
again on the Compute Node before scheduling any other Task of the Job, if
rerunOnNodeRebootAfterSuccess is true or if the Job Preparation Task did
not previously complete. If the Node is reimaged, the Job Preparation Task
is run again before scheduling any Task of the Job. Batch will retry Tasks
when a recovery operation is triggered on a Node. Examples of recovery
operations include (but are not limited to) when an unhealthy Node is
rebooted or a Compute Node disappeared due to host failure. Retries due to
recovery operations are independent of and are not counted against the
maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry
due to a recovery operation may occur. Because of this, all Tasks should be
idempotent. This means Tasks need to tolerate being interrupted and
restarted without causing any corruption or duplicate data. The best
practice for long running Tasks is to use some form of checkpointing.
All required parameters must be populated in order to send to Azure.
:param id: A string that uniquely identifies the Job Preparation Task
within the Job. The ID can contain any combination of alphanumeric
characters including hyphens and underscores and cannot contain more than
64 characters. If you do not specify this property, the Batch service
assigns a default value of 'jobpreparation'. No other Task in the Job can
have the same ID as the Job Preparation Task. If you try to submit a Task
with the same id, the Batch service rejects the request with error code
TaskIdSameAsJobPreparationTask; if you are calling the REST API directly,
the HTTP status code is 409 (Conflict).
:type id: str
:param command_line: Required. The command line of the Job Preparation
Task. The command line does not run under a shell, and therefore cannot
take advantage of shell features such as environment variable expansion.
If you want to take advantage of such features, you should invoke the
shell in the command line, for example using "cmd /c MyCommand" in Windows
or "/bin/sh -c MyCommand" in Linux. If the command line refers to file
paths, it should use a relative path (relative to the Task working
directory), or use the Batch provided environment variable
(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
:type command_line: str
:param container_settings: The settings for the container under which the
Job Preparation Task runs. When this is specified, all directories
recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch
directories on the node) are mapped into the container, all Task
environment variables are mapped into the container, and the Task command
line is executed in the container. Files produced in the container outside
of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning
that Batch file APIs will not be able to access those files.
:type container_settings: ~azure.batch.models.TaskContainerSettings
:param resource_files: A list of files that the Batch service will
download to the Compute Node before running the command line. Files listed
under this element are located in the Task's working directory. There is
a maximum size for the list of resource files. When the max size is
exceeded, the request will fail and the response error code will be
RequestEntityTooLarge. If this occurs, the collection of ResourceFiles
must be reduced in size. This can be achieved using .zip files,
Application Packages, or Docker Containers.
:type resource_files: list[~azure.batch.models.ResourceFile]
:param environment_settings: A list of environment variable settings for
the Job Preparation Task.
:type environment_settings: list[~azure.batch.models.EnvironmentSetting]
:param constraints: Constraints that apply to the Job Preparation Task.
:type constraints: ~azure.batch.models.TaskConstraints
:param wait_for_success: Whether the Batch service should wait for the Job
Preparation Task to complete successfully before scheduling any other
Tasks of the Job on the Compute Node. A Job Preparation Task has completed
successfully if it exits with exit code 0. If true and the Job Preparation
Task fails on a Node, the Batch service retries the Job Preparation Task
up to its maximum retry count (as specified in the constraints element).
If the Task has still not completed successfully after all retries, then
the Batch service will not schedule Tasks of the Job to the Node. The Node
remains active and eligible to run Tasks of other Jobs. If false, the
Batch service will not wait for the Job Preparation Task to complete. In
this case, other Tasks of the Job can start executing on the Compute Node
while the Job Preparation Task is still running; and even if the Job
Preparation Task fails, new Tasks will continue to be scheduled on the
Compute Node. The default value is true.
:type wait_for_success: bool
:param user_identity: The user identity under which the Job Preparation
Task runs. If omitted, the Task runs as a non-administrative user unique
to the Task on Windows Compute Nodes, or a non-administrative user unique
to the Pool on Linux Compute Nodes.
:type user_identity: ~azure.batch.models.UserIdentity
:param rerun_on_node_reboot_after_success: Whether the Batch service
should rerun the Job Preparation Task after a Compute Node reboots. The
Job Preparation Task is always rerun if a Compute Node is reimaged, or if
the Job Preparation Task did not complete (e.g. because the reboot
occurred while the Task was running). Therefore, you should always write a
Job Preparation Task to be idempotent and to behave correctly if run
multiple times. The default value is true.
:type rerun_on_node_reboot_after_success: bool
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'},
}
def __init__(self, *, command_line: str, id: str=None, container_settings=None, resource_files=None, environment_settings=None, constraints=None, wait_for_success: bool=None, user_identity=None, rerun_on_node_reboot_after_success: bool=None, **kwargs) -> None:
super(JobPreparationTask, self).__init__(**kwargs)
self.id = id
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.environment_settings = environment_settings
self.constraints = constraints
self.wait_for_success = wait_for_success
self.user_identity = user_identity
self.rerun_on_node_reboot_after_success = rerun_on_node_reboot_after_success
[docs]class JobReleaseTask(Model):
"""A Job Release Task to run on Job completion on any Compute Node where the
Job has run.
The Job Release Task runs when the Job ends, because of one of the
following: The user calls the Terminate Job API, or the Delete Job API
while the Job is still active, the Job's maximum wall clock time constraint
is reached, and the Job is still active, or the Job's Job Manager Task
completed, and the Job is configured to terminate when the Job Manager
completes. The Job Release Task runs on each Node where Tasks of the Job
have run and the Job Preparation Task ran and completed. If you reimage a
Node after it has run the Job Preparation Task, and the Job ends without
any further Tasks of the Job running on that Node (and hence the Job
Preparation Task does not re-run), then the Job Release Task does not run
on that Compute Node. If a Node reboots while the Job Release Task is still
running, the Job Release Task runs again when the Compute Node starts up.
The Job is not marked as complete until all Job Release Tasks have
completed. The Job Release Task runs in the background. It does not occupy
a scheduling slot; that is, it does not count towards the maxTasksPerNode
limit specified on the Pool.
All required parameters must be populated in order to send to Azure.
:param id: A string that uniquely identifies the Job Release Task within
the Job. The ID can contain any combination of alphanumeric characters
including hyphens and underscores and cannot contain more than 64
characters. If you do not specify this property, the Batch service assigns
a default value of 'jobrelease'. No other Task in the Job can have the
same ID as the Job Release Task. If you try to submit a Task with the same
id, the Batch service rejects the request with error code
TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the
HTTP status code is 409 (Conflict).
:type id: str
:param command_line: Required. The command line of the Job Release Task.
The command line does not run under a shell, and therefore cannot take
advantage of shell features such as environment variable expansion. If you
want to take advantage of such features, you should invoke the shell in
the command line, for example using "cmd /c MyCommand" in Windows or
"/bin/sh -c MyCommand" in Linux. If the command line refers to file paths,
it should use a relative path (relative to the Task working directory), or
use the Batch provided environment variable
(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
:type command_line: str
:param container_settings: The settings for the container under which the
Job Release Task runs. When this is specified, all directories recursively
below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on
the node) are mapped into the container, all Task environment variables
are mapped into the container, and the Task command line is executed in
the container. Files produced in the container outside of
AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning
that Batch file APIs will not be able to access those files.
:type container_settings: ~azure.batch.models.TaskContainerSettings
:param resource_files: A list of files that the Batch service will
download to the Compute Node before running the command line. There is a
maximum size for the list of resource files. When the max size is
exceeded, the request will fail and the response error code will be
RequestEntityTooLarge. If this occurs, the collection of ResourceFiles
must be reduced in size. This can be achieved using .zip files,
Application Packages, or Docker Containers. Files listed under this
element are located in the Task's working directory.
:type resource_files: list[~azure.batch.models.ResourceFile]
:param environment_settings: A list of environment variable settings for
the Job Release Task.
:type environment_settings: list[~azure.batch.models.EnvironmentSetting]
:param max_wall_clock_time: The maximum elapsed time that the Job Release
Task may run on a given Compute Node, measured from the time the Task
starts. If the Task does not complete within the time limit, the Batch
service terminates it. The default value is 15 minutes. You may not
specify a timeout longer than 15 minutes. If you do, the Batch service
rejects it with an error; if you are calling the REST API directly, the
HTTP status code is 400 (Bad Request).
:type max_wall_clock_time: timedelta
:param retention_time: The minimum time to retain the Task directory for
the Job Release Task on the Compute Node. After this time, the Batch
service may delete the Task directory and all its contents. The default is
7 days, i.e. the Task directory will be retained for 7 days unless the
Compute Node is removed or the Job is deleted.
:type retention_time: timedelta
:param user_identity: The user identity under which the Job Release Task
runs. If omitted, the Task runs as a non-administrative user unique to the
Task.
:type user_identity: ~azure.batch.models.UserIdentity
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'},
'retention_time': {'key': 'retentionTime', 'type': 'duration'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
}
def __init__(self, *, command_line: str, id: str=None, container_settings=None, resource_files=None, environment_settings=None, max_wall_clock_time=None, retention_time=None, user_identity=None, **kwargs) -> None:
super(JobReleaseTask, self).__init__(**kwargs)
self.id = id
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.environment_settings = environment_settings
self.max_wall_clock_time = max_wall_clock_time
self.retention_time = retention_time
self.user_identity = user_identity
[docs]class JobScheduleAddOptions(Model):
"""Additional parameters for add operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(JobScheduleAddOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class JobScheduleAddParameter(Model):
"""A Job Schedule that allows recurring Jobs by specifying when to run Jobs
and a specification used to create each Job.
All required parameters must be populated in order to send to Azure.
:param id: Required. A string that uniquely identifies the schedule within
the Account. The ID can contain any combination of alphanumeric characters
including hyphens and underscores, and cannot contain more than 64
characters. The ID is case-preserving and case-insensitive (that is, you
may not have two IDs within an Account that differ only by case).
:type id: str
:param display_name: The display name for the schedule. The display name
need not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param schedule: Required. The schedule according to which Jobs will be
created.
:type schedule: ~azure.batch.models.Schedule
:param job_specification: Required. The details of the Jobs to be created
on this schedule.
:type job_specification: ~azure.batch.models.JobSpecification
:param metadata: A list of name-value pairs associated with the schedule
as metadata. The Batch service does not assign any meaning to metadata; it
is solely for the use of user code.
:type metadata: list[~azure.batch.models.MetadataItem]
"""
_validation = {
'id': {'required': True},
'schedule': {'required': True},
'job_specification': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'schedule': {'key': 'schedule', 'type': 'Schedule'},
'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
}
def __init__(self, *, id: str, schedule, job_specification, display_name: str=None, metadata=None, **kwargs) -> None:
super(JobScheduleAddParameter, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.schedule = schedule
self.job_specification = job_specification
self.metadata = metadata
[docs]class JobScheduleDeleteOptions(Model):
"""Additional parameters for delete operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobScheduleDeleteOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobScheduleDisableOptions(Model):
"""Additional parameters for disable operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobScheduleDisableOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobScheduleEnableOptions(Model):
"""Additional parameters for enable operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobScheduleEnableOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobScheduleExistsOptions(Model):
"""Additional parameters for exists operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobScheduleExistsOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobScheduleGetOptions(Model):
"""Additional parameters for get operation.
:param select: An OData $select clause.
:type select: str
:param expand: An OData $expand clause.
:type expand: str
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'select': {'key': '', 'type': 'str'},
'expand': {'key': '', 'type': 'str'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobScheduleGetOptions, self).__init__(**kwargs)
self.select = select
self.expand = expand
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobScheduleListOptions(Model):
"""Additional parameters for list operation.
:param filter: An OData $filter clause. For more information on
constructing this filter, see
https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules.
:type filter: str
:param select: An OData $select clause.
:type select: str
:param expand: An OData $expand clause.
:type expand: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 Job Schedules can be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'filter': {'key': '', 'type': 'str'},
'select': {'key': '', 'type': 'str'},
'expand': {'key': '', 'type': 'str'},
'max_results': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(JobScheduleListOptions, self).__init__(**kwargs)
self.filter = filter
self.select = select
self.expand = expand
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class JobSchedulePatchOptions(Model):
"""Additional parameters for patch operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobSchedulePatchOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobSchedulePatchParameter(Model):
"""The set of changes to be made to a Job Schedule.
:param schedule: The schedule according to which Jobs will be created. If
you do not specify this element, the existing schedule is left unchanged.
:type schedule: ~azure.batch.models.Schedule
:param job_specification: The details of the Jobs to be created on this
schedule. Updates affect only Jobs that are started after the update has
taken place. Any currently active Job continues with the older
specification.
:type job_specification: ~azure.batch.models.JobSpecification
:param metadata: A list of name-value pairs associated with the Job
Schedule as metadata. If you do not specify this element, existing
metadata is left unchanged.
:type metadata: list[~azure.batch.models.MetadataItem]
"""
_attribute_map = {
'schedule': {'key': 'schedule', 'type': 'Schedule'},
'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
}
def __init__(self, *, schedule=None, job_specification=None, metadata=None, **kwargs) -> None:
super(JobSchedulePatchParameter, self).__init__(**kwargs)
self.schedule = schedule
self.job_specification = job_specification
self.metadata = metadata
[docs]class JobScheduleStatistics(Model):
"""Resource usage statistics for a Job Schedule.
All required parameters must be populated in order to send to Azure.
:param url: Required. The URL of the statistics.
:type url: str
:param start_time: Required. The start time of the time range covered by
the statistics.
:type start_time: datetime
:param last_update_time: Required. The time at which the statistics were
last updated. All statistics are limited to the range between startTime
and lastUpdateTime.
:type last_update_time: datetime
:param user_cpu_time: Required. The total user mode CPU time (summed
across all cores and all Compute Nodes) consumed by all Tasks in all Jobs
created under the schedule.
:type user_cpu_time: timedelta
:param kernel_cpu_time: Required. The total kernel mode CPU time (summed
across all cores and all Compute Nodes) consumed by all Tasks in all Jobs
created under the schedule.
:type kernel_cpu_time: timedelta
:param wall_clock_time: Required. The total wall clock time of all the
Tasks in all the Jobs created under the schedule. The wall clock time is
the elapsed time from when the Task started running on a Compute Node to
when it finished (or to the last time the statistics were updated, if the
Task had not finished by then). If a Task was retried, this includes the
wall clock time of all the Task retries.
:type wall_clock_time: timedelta
:param read_iops: Required. The total number of disk read operations made
by all Tasks in all Jobs created under the schedule.
:type read_iops: long
:param write_iops: Required. The total number of disk write operations
made by all Tasks in all Jobs created under the schedule.
:type write_iops: long
:param read_io_gi_b: Required. The total gibibytes read from disk by all
Tasks in all Jobs created under the schedule.
:type read_io_gi_b: float
:param write_io_gi_b: Required. The total gibibytes written to disk by all
Tasks in all Jobs created under the schedule.
:type write_io_gi_b: float
:param num_succeeded_tasks: Required. The total number of Tasks
successfully completed during the given time range in Jobs created under
the schedule. A Task completes successfully if it returns exit code 0.
:type num_succeeded_tasks: long
:param num_failed_tasks: Required. The total number of Tasks that failed
during the given time range in Jobs created under the schedule. A Task
fails if it exhausts its maximum retry count without returning exit code
0.
:type num_failed_tasks: long
:param num_task_retries: Required. The total number of retries during the
given time range on all Tasks in all Jobs created under the schedule.
:type num_task_retries: long
:param wait_time: Required. The total wait time of all Tasks in all Jobs
created under the schedule. The wait time for a Task is defined as the
elapsed time between the creation of the Task and the start of Task
execution. (If the Task is retried due to failures, the wait time is the
time to the most recent Task execution.). This value is only reported in
the Account lifetime statistics; it is not included in the Job statistics.
:type wait_time: timedelta
"""
_validation = {
'url': {'required': True},
'start_time': {'required': True},
'last_update_time': {'required': True},
'user_cpu_time': {'required': True},
'kernel_cpu_time': {'required': True},
'wall_clock_time': {'required': True},
'read_iops': {'required': True},
'write_iops': {'required': True},
'read_io_gi_b': {'required': True},
'write_io_gi_b': {'required': True},
'num_succeeded_tasks': {'required': True},
'num_failed_tasks': {'required': True},
'num_task_retries': {'required': True},
'wait_time': {'required': True},
}
_attribute_map = {
'url': {'key': 'url', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'},
'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'},
'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'},
'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'},
'read_iops': {'key': 'readIOps', 'type': 'long'},
'write_iops': {'key': 'writeIOps', 'type': 'long'},
'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'},
'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'},
'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'},
'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'},
'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'},
'wait_time': {'key': 'waitTime', 'type': 'duration'},
}
def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, write_iops: int, read_io_gi_b: float, write_io_gi_b: float, num_succeeded_tasks: int, num_failed_tasks: int, num_task_retries: int, wait_time, **kwargs) -> None:
super(JobScheduleStatistics, self).__init__(**kwargs)
self.url = url
self.start_time = start_time
self.last_update_time = last_update_time
self.user_cpu_time = user_cpu_time
self.kernel_cpu_time = kernel_cpu_time
self.wall_clock_time = wall_clock_time
self.read_iops = read_iops
self.write_iops = write_iops
self.read_io_gi_b = read_io_gi_b
self.write_io_gi_b = write_io_gi_b
self.num_succeeded_tasks = num_succeeded_tasks
self.num_failed_tasks = num_failed_tasks
self.num_task_retries = num_task_retries
self.wait_time = wait_time
[docs]class JobScheduleTerminateOptions(Model):
"""Additional parameters for terminate operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobScheduleTerminateOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobScheduleUpdateOptions(Model):
"""Additional parameters for update operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobScheduleUpdateOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobScheduleUpdateParameter(Model):
"""The set of changes to be made to a Job Schedule.
All required parameters must be populated in order to send to Azure.
:param schedule: Required. The schedule according to which Jobs will be
created. If you do not specify this element, it is equivalent to passing
the default schedule: that is, a single Job scheduled to run immediately.
:type schedule: ~azure.batch.models.Schedule
:param job_specification: Required. Details of the Jobs to be created on
this schedule. Updates affect only Jobs that are started after the update
has taken place. Any currently active Job continues with the older
specification.
:type job_specification: ~azure.batch.models.JobSpecification
:param metadata: A list of name-value pairs associated with the Job
Schedule as metadata. If you do not specify this element, it takes the
default value of an empty list; in effect, any existing metadata is
deleted.
:type metadata: list[~azure.batch.models.MetadataItem]
"""
_validation = {
'schedule': {'required': True},
'job_specification': {'required': True},
}
_attribute_map = {
'schedule': {'key': 'schedule', 'type': 'Schedule'},
'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
}
def __init__(self, *, schedule, job_specification, metadata=None, **kwargs) -> None:
super(JobScheduleUpdateParameter, self).__init__(**kwargs)
self.schedule = schedule
self.job_specification = job_specification
self.metadata = metadata
[docs]class JobSchedulingError(Model):
"""An error encountered by the Batch service when scheduling a Job.
All required parameters must be populated in order to send to Azure.
:param category: Required. The category of the Job scheduling error.
Possible values include: 'userError', 'serverError'
:type category: str or ~azure.batch.models.ErrorCategory
:param code: An identifier for the Job scheduling error. Codes are
invariant and are intended to be consumed programmatically.
:type code: str
:param message: A message describing the Job scheduling error, intended to
be suitable for display in a user interface.
:type message: str
:param details: A list of additional error details related to the
scheduling error.
:type details: list[~azure.batch.models.NameValuePair]
"""
_validation = {
'category': {'required': True},
}
_attribute_map = {
'category': {'key': 'category', 'type': 'ErrorCategory'},
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[NameValuePair]'},
}
def __init__(self, *, category, code: str=None, message: str=None, details=None, **kwargs) -> None:
super(JobSchedulingError, self).__init__(**kwargs)
self.category = category
self.code = code
self.message = message
self.details = details
[docs]class JobSpecification(Model):
"""Specifies details of the Jobs to be created on a schedule.
All required parameters must be populated in order to send to Azure.
:param priority: The priority of Jobs created under this schedule.
Priority values can range from -1000 to 1000, with -1000 being the lowest
priority and 1000 being the highest priority. The default value is 0. This
priority is used as the default for all Jobs under the Job Schedule. You
can update a Job's priority after it has been created using by using the
update Job API.
:type priority: int
:param display_name: The display name for Jobs created under this
schedule. The name need not be unique and can contain any Unicode
characters up to a maximum length of 1024.
:type display_name: str
:param uses_task_dependencies: Whether Tasks in the Job can define
dependencies on each other. The default is false.
:type uses_task_dependencies: bool
:param on_all_tasks_complete: The action the Batch service should take
when all Tasks in a Job created under this schedule are in the completed
state. Note that if a Job contains no Tasks, then all Tasks are considered
complete. This option is therefore most commonly used with a Job Manager
task; if you want to use automatic Job termination without a Job Manager,
you should initially set onAllTasksComplete to noaction and update the Job
properties to set onAllTasksComplete to terminatejob once you have
finished adding Tasks. The default is noaction. Possible values include:
'noAction', 'terminateJob'
:type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete
:param on_task_failure: The action the Batch service should take when any
Task fails in a Job created under this schedule. A Task is considered to
have failed if it have failed if has a failureInfo. A failureInfo is set
if the Task completes with a non-zero exit code after exhausting its retry
count, or if there was an error starting the Task, for example due to a
resource file download error. The default is noaction. Possible values
include: 'noAction', 'performExitOptionsJobAction'
:type on_task_failure: str or ~azure.batch.models.OnTaskFailure
:param network_configuration: The network configuration for the Job.
:type network_configuration: ~azure.batch.models.JobNetworkConfiguration
:param constraints: The execution constraints for Jobs created under this
schedule.
:type constraints: ~azure.batch.models.JobConstraints
:param job_manager_task: The details of a Job Manager Task to be launched
when a Job is started under this schedule. If the Job does not specify a
Job Manager Task, the user must explicitly add Tasks to the Job using the
Task API. If the Job does specify a Job Manager Task, the Batch service
creates the Job Manager Task when the Job is created, and will try to
schedule the Job Manager Task before scheduling other Tasks in the Job.
:type job_manager_task: ~azure.batch.models.JobManagerTask
:param job_preparation_task: The Job Preparation Task for Jobs created
under this schedule. If a Job has a Job Preparation Task, the Batch
service will run the Job Preparation Task on a Node before starting any
Tasks of that Job on that Compute Node.
:type job_preparation_task: ~azure.batch.models.JobPreparationTask
:param job_release_task: The Job Release Task for Jobs created under this
schedule. The primary purpose of the Job Release Task is to undo changes
to Nodes made by the Job Preparation Task. Example activities include
deleting local files, or shutting down services that were started as part
of Job preparation. A Job Release Task cannot be specified without also
specifying a Job Preparation Task for the Job. The Batch service runs the
Job Release Task on the Compute Nodes that have run the Job Preparation
Task.
:type job_release_task: ~azure.batch.models.JobReleaseTask
:param common_environment_settings: A list of common environment variable
settings. These environment variables are set for all Tasks in Jobs
created under this schedule (including the Job Manager, Job Preparation
and Job Release Tasks). Individual Tasks can override an environment
setting specified here by specifying the same setting name with a
different value.
:type common_environment_settings:
list[~azure.batch.models.EnvironmentSetting]
:param pool_info: Required. The Pool on which the Batch service runs the
Tasks of Jobs created under this schedule.
:type pool_info: ~azure.batch.models.PoolInformation
:param metadata: A list of name-value pairs associated with each Job
created under this schedule as metadata. The Batch service does not assign
any meaning to metadata; it is solely for the use of user code.
:type metadata: list[~azure.batch.models.MetadataItem]
"""
_validation = {
'pool_info': {'required': True},
}
_attribute_map = {
'priority': {'key': 'priority', 'type': 'int'},
'display_name': {'key': 'displayName', 'type': 'str'},
'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'},
'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
}
def __init__(self, *, pool_info, priority: int=None, display_name: str=None, uses_task_dependencies: bool=None, on_all_tasks_complete=None, on_task_failure=None, network_configuration=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, metadata=None, **kwargs) -> None:
super(JobSpecification, self).__init__(**kwargs)
self.priority = priority
self.display_name = display_name
self.uses_task_dependencies = uses_task_dependencies
self.on_all_tasks_complete = on_all_tasks_complete
self.on_task_failure = on_task_failure
self.network_configuration = network_configuration
self.constraints = constraints
self.job_manager_task = job_manager_task
self.job_preparation_task = job_preparation_task
self.job_release_task = job_release_task
self.common_environment_settings = common_environment_settings
self.pool_info = pool_info
self.metadata = metadata
[docs]class JobStatistics(Model):
"""Resource usage statistics for a Job.
All required parameters must be populated in order to send to Azure.
:param url: Required. The URL of the statistics.
:type url: str
:param start_time: Required. The start time of the time range covered by
the statistics.
:type start_time: datetime
:param last_update_time: Required. The time at which the statistics were
last updated. All statistics are limited to the range between startTime
and lastUpdateTime.
:type last_update_time: datetime
:param user_cpu_time: Required. The total user mode CPU time (summed
across all cores and all Compute Nodes) consumed by all Tasks in the Job.
:type user_cpu_time: timedelta
:param kernel_cpu_time: Required. The total kernel mode CPU time (summed
across all cores and all Compute Nodes) consumed by all Tasks in the Job.
:type kernel_cpu_time: timedelta
:param wall_clock_time: Required. The total wall clock time of all Tasks
in the Job. The wall clock time is the elapsed time from when the Task
started running on a Compute Node to when it finished (or to the last time
the statistics were updated, if the Task had not finished by then). If a
Task was retried, this includes the wall clock time of all the Task
retries.
:type wall_clock_time: timedelta
:param read_iops: Required. The total number of disk read operations made
by all Tasks in the Job.
:type read_iops: long
:param write_iops: Required. The total number of disk write operations
made by all Tasks in the Job.
:type write_iops: long
:param read_io_gi_b: Required. The total amount of data in GiB read from
disk by all Tasks in the Job.
:type read_io_gi_b: float
:param write_io_gi_b: Required. The total amount of data in GiB written to
disk by all Tasks in the Job.
:type write_io_gi_b: float
:param num_succeeded_tasks: Required. The total number of Tasks
successfully completed in the Job during the given time range. A Task
completes successfully if it returns exit code 0.
:type num_succeeded_tasks: long
:param num_failed_tasks: Required. The total number of Tasks in the Job
that failed during the given time range. A Task fails if it exhausts its
maximum retry count without returning exit code 0.
:type num_failed_tasks: long
:param num_task_retries: Required. The total number of retries on all the
Tasks in the Job during the given time range.
:type num_task_retries: long
:param wait_time: Required. The total wait time of all Tasks in the Job.
The wait time for a Task is defined as the elapsed time between the
creation of the Task and the start of Task execution. (If the Task is
retried due to failures, the wait time is the time to the most recent Task
execution.) This value is only reported in the Account lifetime
statistics; it is not included in the Job statistics.
:type wait_time: timedelta
"""
_validation = {
'url': {'required': True},
'start_time': {'required': True},
'last_update_time': {'required': True},
'user_cpu_time': {'required': True},
'kernel_cpu_time': {'required': True},
'wall_clock_time': {'required': True},
'read_iops': {'required': True},
'write_iops': {'required': True},
'read_io_gi_b': {'required': True},
'write_io_gi_b': {'required': True},
'num_succeeded_tasks': {'required': True},
'num_failed_tasks': {'required': True},
'num_task_retries': {'required': True},
'wait_time': {'required': True},
}
_attribute_map = {
'url': {'key': 'url', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'},
'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'},
'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'},
'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'},
'read_iops': {'key': 'readIOps', 'type': 'long'},
'write_iops': {'key': 'writeIOps', 'type': 'long'},
'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'},
'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'},
'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'},
'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'},
'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'},
'wait_time': {'key': 'waitTime', 'type': 'duration'},
}
def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, write_iops: int, read_io_gi_b: float, write_io_gi_b: float, num_succeeded_tasks: int, num_failed_tasks: int, num_task_retries: int, wait_time, **kwargs) -> None:
super(JobStatistics, self).__init__(**kwargs)
self.url = url
self.start_time = start_time
self.last_update_time = last_update_time
self.user_cpu_time = user_cpu_time
self.kernel_cpu_time = kernel_cpu_time
self.wall_clock_time = wall_clock_time
self.read_iops = read_iops
self.write_iops = write_iops
self.read_io_gi_b = read_io_gi_b
self.write_io_gi_b = write_io_gi_b
self.num_succeeded_tasks = num_succeeded_tasks
self.num_failed_tasks = num_failed_tasks
self.num_task_retries = num_task_retries
self.wait_time = wait_time
[docs]class JobTerminateOptions(Model):
"""Additional parameters for terminate operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobTerminateOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobTerminateParameter(Model):
"""Options when terminating a Job.
:param terminate_reason: The text you want to appear as the Job's
TerminateReason. The default is 'UserTerminate'.
:type terminate_reason: str
"""
_attribute_map = {
'terminate_reason': {'key': 'terminateReason', 'type': 'str'},
}
def __init__(self, *, terminate_reason: str=None, **kwargs) -> None:
super(JobTerminateParameter, self).__init__(**kwargs)
self.terminate_reason = terminate_reason
[docs]class JobUpdateOptions(Model):
"""Additional parameters for update operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(JobUpdateOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class JobUpdateParameter(Model):
"""The set of changes to be made to a Job.
All required parameters must be populated in order to send to Azure.
:param priority: The priority of the Job. Priority values can range from
-1000 to 1000, with -1000 being the lowest priority and 1000 being the
highest priority. If omitted, it is set to the default value 0.
:type priority: int
:param constraints: The execution constraints for the Job. If omitted, the
constraints are cleared.
:type constraints: ~azure.batch.models.JobConstraints
:param pool_info: Required. The Pool on which the Batch service runs the
Job's Tasks. You may change the Pool for a Job only when the Job is
disabled. The Update Job call will fail if you include the poolInfo
element and the Job is not disabled. If you specify an
autoPoolSpecification in the poolInfo, only the keepAlive property of the
autoPoolSpecification can be updated, and then only if the
autoPoolSpecification has a poolLifetimeOption of Job (other job
properties can be updated as normal).
:type pool_info: ~azure.batch.models.PoolInformation
:param metadata: A list of name-value pairs associated with the Job as
metadata. If omitted, it takes the default value of an empty list; in
effect, any existing metadata is deleted.
:type metadata: list[~azure.batch.models.MetadataItem]
:param on_all_tasks_complete: The action the Batch service should take
when all Tasks in the Job are in the completed state. If omitted, the
completion behavior is set to noaction. If the current value is
terminatejob, this is an error because a Job's completion behavior may not
be changed from terminatejob to noaction. You may not change the value
from terminatejob to noaction - that is, once you have engaged automatic
Job termination, you cannot turn it off again. If you try to do this, the
request fails and Batch returns status code 400 (Bad Request) and an
'invalid property value' error response. If you do not specify this
element in a PUT request, it is equivalent to passing noaction. This is an
error if the current value is terminatejob. Possible values include:
'noAction', 'terminateJob'
:type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete
"""
_validation = {
'pool_info': {'required': True},
}
_attribute_map = {
'priority': {'key': 'priority', 'type': 'int'},
'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
}
def __init__(self, *, pool_info, priority: int=None, constraints=None, metadata=None, on_all_tasks_complete=None, **kwargs) -> None:
super(JobUpdateParameter, self).__init__(**kwargs)
self.priority = priority
self.constraints = constraints
self.pool_info = pool_info
self.metadata = metadata
self.on_all_tasks_complete = on_all_tasks_complete
[docs]class LinuxUserConfiguration(Model):
"""Properties used to create a user Account on a Linux Compute Node.
:param uid: The user ID of the user Account. The uid and gid properties
must be specified together or not at all. If not specified the underlying
operating system picks the uid.
:type uid: int
:param gid: The group ID for the user Account. The uid and gid properties
must be specified together or not at all. If not specified the underlying
operating system picks the gid.
:type gid: int
:param ssh_private_key: The SSH private key for the user Account. The
private key must not be password protected. The private key is used to
automatically configure asymmetric-key based authentication for SSH
between Compute Nodes in a Linux Pool when the Pool's
enableInterNodeCommunication property is true (it is ignored if
enableInterNodeCommunication is false). It does this by placing the key
pair into the user's .ssh directory. If not specified, password-less SSH
is not configured between Compute Nodes (no modification of the user's
.ssh directory is done).
:type ssh_private_key: str
"""
_attribute_map = {
'uid': {'key': 'uid', 'type': 'int'},
'gid': {'key': 'gid', 'type': 'int'},
'ssh_private_key': {'key': 'sshPrivateKey', 'type': 'str'},
}
def __init__(self, *, uid: int=None, gid: int=None, ssh_private_key: str=None, **kwargs) -> None:
super(LinuxUserConfiguration, self).__init__(**kwargs)
self.uid = uid
self.gid = gid
self.ssh_private_key = ssh_private_key
[docs]class MountConfiguration(Model):
"""The file system to mount on each node.
:param azure_blob_file_system_configuration: The Azure Storage Container
to mount using blob FUSE on each node. This property is mutually exclusive
with all other properties.
:type azure_blob_file_system_configuration:
~azure.batch.models.AzureBlobFileSystemConfiguration
:param nfs_mount_configuration: The NFS file system to mount on each node.
This property is mutually exclusive with all other properties.
:type nfs_mount_configuration: ~azure.batch.models.NFSMountConfiguration
:param cifs_mount_configuration: The CIFS/SMB file system to mount on each
node. This property is mutually exclusive with all other properties.
:type cifs_mount_configuration: ~azure.batch.models.CIFSMountConfiguration
:param azure_file_share_configuration: The Azure File Share to mount on
each node. This property is mutually exclusive with all other properties.
:type azure_file_share_configuration:
~azure.batch.models.AzureFileShareConfiguration
"""
_attribute_map = {
'azure_blob_file_system_configuration': {'key': 'azureBlobFileSystemConfiguration', 'type': 'AzureBlobFileSystemConfiguration'},
'nfs_mount_configuration': {'key': 'nfsMountConfiguration', 'type': 'NFSMountConfiguration'},
'cifs_mount_configuration': {'key': 'cifsMountConfiguration', 'type': 'CIFSMountConfiguration'},
'azure_file_share_configuration': {'key': 'azureFileShareConfiguration', 'type': 'AzureFileShareConfiguration'},
}
def __init__(self, *, azure_blob_file_system_configuration=None, nfs_mount_configuration=None, cifs_mount_configuration=None, azure_file_share_configuration=None, **kwargs) -> None:
super(MountConfiguration, self).__init__(**kwargs)
self.azure_blob_file_system_configuration = azure_blob_file_system_configuration
self.nfs_mount_configuration = nfs_mount_configuration
self.cifs_mount_configuration = cifs_mount_configuration
self.azure_file_share_configuration = azure_file_share_configuration
[docs]class MultiInstanceSettings(Model):
"""Settings which specify how to run a multi-instance Task.
Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI
case, if any of the subtasks fail (for example due to exiting with a
non-zero exit code) the entire multi-instance Task fails. The
multi-instance Task is then terminated and retried, up to its retry limit.
All required parameters must be populated in order to send to Azure.
:param number_of_instances: The number of Compute Nodes required by the
Task. If omitted, the default is 1.
:type number_of_instances: int
:param coordination_command_line: Required. The command line to run on all
the Compute Nodes to enable them to coordinate when the primary runs the
main Task command. A typical coordination command line launches a
background service and verifies that the service is ready to process
inter-node messages.
:type coordination_command_line: str
:param common_resource_files: A list of files that the Batch service will
download before running the coordination command line. The difference
between common resource files and Task resource files is that common
resource files are downloaded for all subtasks including the primary,
whereas Task resource files are downloaded only for the primary. Also note
that these resource files are not downloaded to the Task working
directory, but instead are downloaded to the Task root directory (one
directory above the working directory). There is a maximum size for the
list of resource files. When the max size is exceeded, the request will
fail and the response error code will be RequestEntityTooLarge. If this
occurs, the collection of ResourceFiles must be reduced in size. This can
be achieved using .zip files, Application Packages, or Docker Containers.
:type common_resource_files: list[~azure.batch.models.ResourceFile]
"""
_validation = {
'coordination_command_line': {'required': True},
}
_attribute_map = {
'number_of_instances': {'key': 'numberOfInstances', 'type': 'int'},
'coordination_command_line': {'key': 'coordinationCommandLine', 'type': 'str'},
'common_resource_files': {'key': 'commonResourceFiles', 'type': '[ResourceFile]'},
}
def __init__(self, *, coordination_command_line: str, number_of_instances: int=None, common_resource_files=None, **kwargs) -> None:
super(MultiInstanceSettings, self).__init__(**kwargs)
self.number_of_instances = number_of_instances
self.coordination_command_line = coordination_command_line
self.common_resource_files = common_resource_files
[docs]class NameValuePair(Model):
"""Represents a name-value pair.
:param name: The name in the name-value pair.
:type name: str
:param value: The value in the name-value pair.
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(self, *, name: str=None, value: str=None, **kwargs) -> None:
super(NameValuePair, self).__init__(**kwargs)
self.name = name
self.value = value
[docs]class NetworkConfiguration(Model):
"""The network configuration for a Pool.
:param subnet_id: The ARM resource identifier of the virtual network
subnet which the Compute Nodes of the Pool will join. This is of the form
/subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}.
The virtual network must be in the same region and subscription as the
Azure Batch Account. The specified subnet should have enough free IP
addresses to accommodate the number of Compute Nodes in the Pool. If the
subnet doesn't have enough free IP addresses, the Pool will partially
allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch'
service principal must have the 'Classic Virtual Machine Contributor'
Role-Based Access Control (RBAC) role for the specified VNet. The
specified subnet must allow communication from the Azure Batch service to
be able to schedule Tasks on the Nodes. This can be verified by checking
if the specified VNet has any associated Network Security Groups (NSG). If
communication to the Nodes in the specified subnet is denied by an NSG,
then the Batch service will set the state of the Compute Nodes to
unusable. For Pools created with virtualMachineConfiguration only ARM
virtual networks ('Microsoft.Network/virtualNetworks') are supported, but
for Pools created with cloudServiceConfiguration both ARM and classic
virtual networks are supported. If the specified VNet has any associated
Network Security Groups (NSG), then a few reserved system ports must be
enabled for inbound communication. For Pools created with a virtual
machine configuration, enable ports 29876 and 29877, as well as port 22
for Linux and port 3389 for Windows. For Pools created with a cloud
service configuration, enable ports 10100, 20100, and 30100. Also enable
outbound connections to Azure Storage on port 443. For more details see:
https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration
:type subnet_id: str
:param dynamic_vnet_assignment_scope: The scope of dynamic vnet
assignment. Possible values include: 'none', 'job'
:type dynamic_vnet_assignment_scope: str or
~azure.batch.models.DynamicVNetAssignmentScope
:param endpoint_configuration: The configuration for endpoints on Compute
Nodes in the Batch Pool. Pool endpoint configuration is only supported on
Pools with the virtualMachineConfiguration property.
:type endpoint_configuration:
~azure.batch.models.PoolEndpointConfiguration
:param public_ip_address_configuration: The Public IPAddress configuration
for Compute Nodes in the Batch Pool. Public IP configuration property is
only supported on Pools with the virtualMachineConfiguration property.
:type public_ip_address_configuration:
~azure.batch.models.PublicIPAddressConfiguration
"""
_attribute_map = {
'subnet_id': {'key': 'subnetId', 'type': 'str'},
'dynamic_vnet_assignment_scope': {'key': 'dynamicVNetAssignmentScope', 'type': 'DynamicVNetAssignmentScope'},
'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'},
'public_ip_address_configuration': {'key': 'publicIPAddressConfiguration', 'type': 'PublicIPAddressConfiguration'},
}
def __init__(self, *, subnet_id: str=None, dynamic_vnet_assignment_scope=None, endpoint_configuration=None, public_ip_address_configuration=None, **kwargs) -> None:
super(NetworkConfiguration, self).__init__(**kwargs)
self.subnet_id = subnet_id
self.dynamic_vnet_assignment_scope = dynamic_vnet_assignment_scope
self.endpoint_configuration = endpoint_configuration
self.public_ip_address_configuration = public_ip_address_configuration
[docs]class NetworkSecurityGroupRule(Model):
"""A network security group rule to apply to an inbound endpoint.
All required parameters must be populated in order to send to Azure.
:param priority: Required. The priority for this rule. Priorities within a
Pool must be unique and are evaluated in order of priority. The lower the
number the higher the priority. For example, rules could be specified with
order numbers of 150, 250, and 350. The rule with the order number of 150
takes precedence over the rule that has an order of 250. Allowed
priorities are 150 to 4096. If any reserved or duplicate values are
provided the request fails with HTTP status code 400.
:type priority: int
:param access: Required. The action that should be taken for a specified
IP address, subnet range or tag. Possible values include: 'allow', 'deny'
:type access: str or ~azure.batch.models.NetworkSecurityGroupRuleAccess
:param source_address_prefix: Required. The source address prefix or tag
to match for the rule. Valid values are a single IP address (i.e.
10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all
addresses). If any other values are provided the request fails with HTTP
status code 400.
:type source_address_prefix: str
:param source_port_ranges: The source port ranges to match for the rule.
Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22),
or a port range (i.e. 100-200). The ports must be in the range of 0 to
65535. Each entry in this collection must not overlap any other entry
(either a range or an individual port). If any other values are provided
the request fails with HTTP status code 400. The default value is '*'.
:type source_port_ranges: list[str]
"""
_validation = {
'priority': {'required': True},
'access': {'required': True},
'source_address_prefix': {'required': True},
}
_attribute_map = {
'priority': {'key': 'priority', 'type': 'int'},
'access': {'key': 'access', 'type': 'NetworkSecurityGroupRuleAccess'},
'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'},
'source_port_ranges': {'key': 'sourcePortRanges', 'type': '[str]'},
}
def __init__(self, *, priority: int, access, source_address_prefix: str, source_port_ranges=None, **kwargs) -> None:
super(NetworkSecurityGroupRule, self).__init__(**kwargs)
self.priority = priority
self.access = access
self.source_address_prefix = source_address_prefix
self.source_port_ranges = source_port_ranges
[docs]class NFSMountConfiguration(Model):
"""Information used to connect to an NFS file system.
All required parameters must be populated in order to send to Azure.
:param source: Required. The URI of the file system to mount.
:type source: str
:param relative_mount_path: Required. The relative path on the compute
node where the file system will be mounted. All file systems are mounted
relative to the Batch mounts directory, accessible via the
AZ_BATCH_NODE_MOUNTS_DIR environment variable.
:type relative_mount_path: str
:param mount_options: Additional command line options to pass to the mount
command. These are 'net use' options in Windows and 'mount' options in
Linux.
:type mount_options: str
"""
_validation = {
'source': {'required': True},
'relative_mount_path': {'required': True},
}
_attribute_map = {
'source': {'key': 'source', 'type': 'str'},
'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'},
'mount_options': {'key': 'mountOptions', 'type': 'str'},
}
def __init__(self, *, source: str, relative_mount_path: str, mount_options: str=None, **kwargs) -> None:
super(NFSMountConfiguration, self).__init__(**kwargs)
self.source = source
self.relative_mount_path = relative_mount_path
self.mount_options = mount_options
[docs]class NodeCounts(Model):
"""The number of Compute Nodes in each Compute Node state.
All required parameters must be populated in order to send to Azure.
:param creating: Required. The number of Compute Nodes in the creating
state.
:type creating: int
:param idle: Required. The number of Compute Nodes in the idle state.
:type idle: int
:param offline: Required. The number of Compute Nodes in the offline
state.
:type offline: int
:param preempted: Required. The number of Compute Nodes in the preempted
state.
:type preempted: int
:param rebooting: Required. The count of Compute Nodes in the rebooting
state.
:type rebooting: int
:param reimaging: Required. The number of Compute Nodes in the reimaging
state.
:type reimaging: int
:param running: Required. The number of Compute Nodes in the running
state.
:type running: int
:param starting: Required. The number of Compute Nodes in the starting
state.
:type starting: int
:param start_task_failed: Required. The number of Compute Nodes in the
startTaskFailed state.
:type start_task_failed: int
:param leaving_pool: Required. The number of Compute Nodes in the
leavingPool state.
:type leaving_pool: int
:param unknown: Required. The number of Compute Nodes in the unknown
state.
:type unknown: int
:param unusable: Required. The number of Compute Nodes in the unusable
state.
:type unusable: int
:param waiting_for_start_task: Required. The number of Compute Nodes in
the waitingForStartTask state.
:type waiting_for_start_task: int
:param total: Required. The total number of Compute Nodes.
:type total: int
"""
_validation = {
'creating': {'required': True},
'idle': {'required': True},
'offline': {'required': True},
'preempted': {'required': True},
'rebooting': {'required': True},
'reimaging': {'required': True},
'running': {'required': True},
'starting': {'required': True},
'start_task_failed': {'required': True},
'leaving_pool': {'required': True},
'unknown': {'required': True},
'unusable': {'required': True},
'waiting_for_start_task': {'required': True},
'total': {'required': True},
}
_attribute_map = {
'creating': {'key': 'creating', 'type': 'int'},
'idle': {'key': 'idle', 'type': 'int'},
'offline': {'key': 'offline', 'type': 'int'},
'preempted': {'key': 'preempted', 'type': 'int'},
'rebooting': {'key': 'rebooting', 'type': 'int'},
'reimaging': {'key': 'reimaging', 'type': 'int'},
'running': {'key': 'running', 'type': 'int'},
'starting': {'key': 'starting', 'type': 'int'},
'start_task_failed': {'key': 'startTaskFailed', 'type': 'int'},
'leaving_pool': {'key': 'leavingPool', 'type': 'int'},
'unknown': {'key': 'unknown', 'type': 'int'},
'unusable': {'key': 'unusable', 'type': 'int'},
'waiting_for_start_task': {'key': 'waitingForStartTask', 'type': 'int'},
'total': {'key': 'total', 'type': 'int'},
}
def __init__(self, *, creating: int, idle: int, offline: int, preempted: int, rebooting: int, reimaging: int, running: int, starting: int, start_task_failed: int, leaving_pool: int, unknown: int, unusable: int, waiting_for_start_task: int, total: int, **kwargs) -> None:
super(NodeCounts, self).__init__(**kwargs)
self.creating = creating
self.idle = idle
self.offline = offline
self.preempted = preempted
self.rebooting = rebooting
self.reimaging = reimaging
self.running = running
self.starting = starting
self.start_task_failed = start_task_failed
self.leaving_pool = leaving_pool
self.unknown = unknown
self.unusable = unusable
self.waiting_for_start_task = waiting_for_start_task
self.total = total
[docs]class NodeDisableSchedulingParameter(Model):
"""Options for disabling scheduling on a Compute Node.
:param node_disable_scheduling_option: What to do with currently running
Tasks when disabling Task scheduling on the Compute Node. The default
value is requeue. Possible values include: 'requeue', 'terminate',
'taskCompletion'
:type node_disable_scheduling_option: str or
~azure.batch.models.DisableComputeNodeSchedulingOption
"""
_attribute_map = {
'node_disable_scheduling_option': {'key': 'nodeDisableSchedulingOption', 'type': 'DisableComputeNodeSchedulingOption'},
}
def __init__(self, *, node_disable_scheduling_option=None, **kwargs) -> None:
super(NodeDisableSchedulingParameter, self).__init__(**kwargs)
self.node_disable_scheduling_option = node_disable_scheduling_option
[docs]class NodeFile(Model):
"""Information about a file or directory on a Compute Node.
:param name: The file path.
:type name: str
:param url: The URL of the file.
:type url: str
:param is_directory: Whether the object represents a directory.
:type is_directory: bool
:param properties: The file properties.
:type properties: ~azure.batch.models.FileProperties
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'is_directory': {'key': 'isDirectory', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'FileProperties'},
}
def __init__(self, *, name: str=None, url: str=None, is_directory: bool=None, properties=None, **kwargs) -> None:
super(NodeFile, self).__init__(**kwargs)
self.name = name
self.url = url
self.is_directory = is_directory
self.properties = properties
[docs]class NodeRebootParameter(Model):
"""Options for rebooting a Compute Node.
:param node_reboot_option: When to reboot the Compute Node and what to do
with currently running Tasks. The default value is requeue. Possible
values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData'
:type node_reboot_option: str or
~azure.batch.models.ComputeNodeRebootOption
"""
_attribute_map = {
'node_reboot_option': {'key': 'nodeRebootOption', 'type': 'ComputeNodeRebootOption'},
}
def __init__(self, *, node_reboot_option=None, **kwargs) -> None:
super(NodeRebootParameter, self).__init__(**kwargs)
self.node_reboot_option = node_reboot_option
[docs]class NodeReimageParameter(Model):
"""Options for reimaging a Compute Node.
:param node_reimage_option: When to reimage the Compute Node and what to
do with currently running Tasks. The default value is requeue. Possible
values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData'
:type node_reimage_option: str or
~azure.batch.models.ComputeNodeReimageOption
"""
_attribute_map = {
'node_reimage_option': {'key': 'nodeReimageOption', 'type': 'ComputeNodeReimageOption'},
}
def __init__(self, *, node_reimage_option=None, **kwargs) -> None:
super(NodeReimageParameter, self).__init__(**kwargs)
self.node_reimage_option = node_reimage_option
[docs]class NodeRemoveParameter(Model):
"""Options for removing Compute Nodes from a Pool.
All required parameters must be populated in order to send to Azure.
:param node_list: Required. A list containing the IDs of the Compute Nodes
to be removed from the specified Pool.
:type node_list: list[str]
:param resize_timeout: The timeout for removal of Compute Nodes to the
Pool. The default value is 15 minutes. The minimum value is 5 minutes. If
you specify a value less than 5 minutes, the Batch service returns an
error; if you are calling the REST API directly, the HTTP status code is
400 (Bad Request).
:type resize_timeout: timedelta
:param node_deallocation_option: Determines what to do with a Compute Node
and its running task(s) after it has been selected for deallocation. The
default value is requeue. Possible values include: 'requeue', 'terminate',
'taskCompletion', 'retainedData'
:type node_deallocation_option: str or
~azure.batch.models.ComputeNodeDeallocationOption
"""
_validation = {
'node_list': {'required': True, 'max_items': 100},
}
_attribute_map = {
'node_list': {'key': 'nodeList', 'type': '[str]'},
'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'},
}
def __init__(self, *, node_list, resize_timeout=None, node_deallocation_option=None, **kwargs) -> None:
super(NodeRemoveParameter, self).__init__(**kwargs)
self.node_list = node_list
self.resize_timeout = resize_timeout
self.node_deallocation_option = node_deallocation_option
[docs]class NodeUpdateUserParameter(Model):
"""The set of changes to be made to a user Account on a Compute Node.
:param password: The password of the Account. The password is required for
Windows Compute Nodes (those created with 'cloudServiceConfiguration', or
created with 'virtualMachineConfiguration' using a Windows Image
reference). For Linux Compute Nodes, the password can optionally be
specified along with the sshPublicKey property. If omitted, any existing
password is removed.
:type password: str
:param expiry_time: The time at which the Account should expire. If
omitted, the default is 1 day from the current time. For Linux Compute
Nodes, the expiryTime has a precision up to a day.
:type expiry_time: datetime
:param ssh_public_key: The SSH public key that can be used for remote
login to the Compute Node. The public key should be compatible with
OpenSSH encoding and should be base 64 encoded. This property can be
specified only for Linux Compute Nodes. If this is specified for a Windows
Compute Node, then the Batch service rejects the request; if you are
calling the REST API directly, the HTTP status code is 400 (Bad Request).
If omitted, any existing SSH public key is removed.
:type ssh_public_key: str
"""
_attribute_map = {
'password': {'key': 'password', 'type': 'str'},
'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'},
'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'},
}
def __init__(self, *, password: str=None, expiry_time=None, ssh_public_key: str=None, **kwargs) -> None:
super(NodeUpdateUserParameter, self).__init__(**kwargs)
self.password = password
self.expiry_time = expiry_time
self.ssh_public_key = ssh_public_key
[docs]class OutputFile(Model):
"""A specification for uploading files from an Azure Batch Compute Node to
another location after the Batch service has finished executing the Task
process.
All required parameters must be populated in order to send to Azure.
:param file_pattern: Required. A pattern indicating which file(s) to
upload. Both relative and absolute paths are supported. Relative paths are
relative to the Task working directory. The following wildcards are
supported: * matches 0 or more characters (for example pattern abc* would
match abc or abcdef), ** matches any directory, ? matches any single
character, [abc] matches one character in the brackets, and [a-c] matches
one character in the range. Brackets can include a negation to match any
character not specified (for example [!abc] matches any character but a,
b, or c). If a file name starts with "." it is ignored by default but may
be matched by specifying it explicitly (for example *.gif will not match
.a.gif, but .*.gif will). A simple example: **\\*.txt matches any file
that does not start in '.' and ends with .txt in the Task working
directory or any subdirectory. If the filename contains a wildcard
character it can be escaped using brackets (for example abc[*] would match
a file named abc*). Note that both \\ and / are treated as directory
separators on Windows, but only / is on Linux. Environment variables
(%var% on Windows or $var on Linux) are expanded prior to the pattern
being applied.
:type file_pattern: str
:param destination: Required. The destination for the output file(s).
:type destination: ~azure.batch.models.OutputFileDestination
:param upload_options: Required. Additional options for the upload
operation, including under what conditions to perform the upload.
:type upload_options: ~azure.batch.models.OutputFileUploadOptions
"""
_validation = {
'file_pattern': {'required': True},
'destination': {'required': True},
'upload_options': {'required': True},
}
_attribute_map = {
'file_pattern': {'key': 'filePattern', 'type': 'str'},
'destination': {'key': 'destination', 'type': 'OutputFileDestination'},
'upload_options': {'key': 'uploadOptions', 'type': 'OutputFileUploadOptions'},
}
def __init__(self, *, file_pattern: str, destination, upload_options, **kwargs) -> None:
super(OutputFile, self).__init__(**kwargs)
self.file_pattern = file_pattern
self.destination = destination
self.upload_options = upload_options
[docs]class OutputFileBlobContainerDestination(Model):
"""Specifies a file upload destination within an Azure blob storage container.
All required parameters must be populated in order to send to Azure.
:param path: The destination blob or virtual directory within the Azure
Storage container. If filePattern refers to a specific file (i.e. contains
no wildcards), then path is the name of the blob to which to upload that
file. If filePattern contains one or more wildcards (and therefore may
match multiple files), then path is the name of the blob virtual directory
(which is prepended to each blob name) to which to upload the file(s). If
omitted, file(s) are uploaded to the root of the container with a blob
name matching their file name.
:type path: str
:param container_url: Required. The URL of the container within Azure Blob
Storage to which to upload the file(s). The URL must include a Shared
Access Signature (SAS) granting write permissions to the container.
:type container_url: str
"""
_validation = {
'container_url': {'required': True},
}
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'container_url': {'key': 'containerUrl', 'type': 'str'},
}
def __init__(self, *, container_url: str, path: str=None, **kwargs) -> None:
super(OutputFileBlobContainerDestination, self).__init__(**kwargs)
self.path = path
self.container_url = container_url
[docs]class OutputFileDestination(Model):
"""The destination to which a file should be uploaded.
:param container: A location in Azure blob storage to which files are
uploaded.
:type container: ~azure.batch.models.OutputFileBlobContainerDestination
"""
_attribute_map = {
'container': {'key': 'container', 'type': 'OutputFileBlobContainerDestination'},
}
def __init__(self, *, container=None, **kwargs) -> None:
super(OutputFileDestination, self).__init__(**kwargs)
self.container = container
[docs]class OutputFileUploadOptions(Model):
"""Details about an output file upload operation, including under what
conditions to perform the upload.
All required parameters must be populated in order to send to Azure.
:param upload_condition: Required. The conditions under which the Task
output file or set of files should be uploaded. The default is
taskcompletion. Possible values include: 'taskSuccess', 'taskFailure',
'taskCompletion'
:type upload_condition: str or
~azure.batch.models.OutputFileUploadCondition
"""
_validation = {
'upload_condition': {'required': True},
}
_attribute_map = {
'upload_condition': {'key': 'uploadCondition', 'type': 'OutputFileUploadCondition'},
}
def __init__(self, *, upload_condition, **kwargs) -> None:
super(OutputFileUploadOptions, self).__init__(**kwargs)
self.upload_condition = upload_condition
[docs]class PoolAddOptions(Model):
"""Additional parameters for add operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(PoolAddOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class PoolAddParameter(Model):
"""A Pool in the Azure Batch service to add.
All required parameters must be populated in order to send to Azure.
:param id: Required. A string that uniquely identifies the Pool within the
Account. The ID can contain any combination of alphanumeric characters
including hyphens and underscores, and cannot contain more than 64
characters. The ID is case-preserving and case-insensitive (that is, you
may not have two Pool IDs within an Account that differ only by case).
:type id: str
:param display_name: The display name for the Pool. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param vm_size: Required. The size of virtual machines in the Pool. All
virtual machines in a Pool are the same size. For information about
available sizes of virtual machines for Cloud Services Pools (pools
created with cloudServiceConfiguration), see Sizes for Cloud Services
(https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/).
Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and
A2V2. For information about available VM sizes for Pools using Images from
the Virtual Machines Marketplace (pools created with
virtualMachineConfiguration) see Sizes for Virtual Machines (Linux)
(https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/)
or Sizes for Virtual Machines (Windows)
(https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/).
Batch supports all Azure VM sizes except STANDARD_A0 and those with
premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series).
:type vm_size: str
:param cloud_service_configuration: The cloud service configuration for
the Pool. This property and virtualMachineConfiguration are mutually
exclusive and one of the properties must be specified. This property
cannot be specified if the Batch Account was created with its
poolAllocationMode property set to 'UserSubscription'.
:type cloud_service_configuration:
~azure.batch.models.CloudServiceConfiguration
:param virtual_machine_configuration: The virtual machine configuration
for the Pool. This property and cloudServiceConfiguration are mutually
exclusive and one of the properties must be specified.
:type virtual_machine_configuration:
~azure.batch.models.VirtualMachineConfiguration
:param resize_timeout: The timeout for allocation of Compute Nodes to the
Pool. This timeout applies only to manual scaling; it has no effect when
enableAutoScale is set to true. The default value is 15 minutes. The
minimum value is 5 minutes. If you specify a value less than 5 minutes,
the Batch service returns an error; if you are calling the REST API
directly, the HTTP status code is 400 (Bad Request).
:type resize_timeout: timedelta
:param target_dedicated_nodes: The desired number of dedicated Compute
Nodes in the Pool. This property must not be specified if enableAutoScale
is set to true. If enableAutoScale is set to false, then you must set
either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_dedicated_nodes: int
:param target_low_priority_nodes: The desired number of low-priority
Compute Nodes in the Pool. This property must not be specified if
enableAutoScale is set to true. If enableAutoScale is set to false, then
you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_low_priority_nodes: int
:param enable_auto_scale: Whether the Pool size should automatically
adjust over time. If false, at least one of targetDedicateNodes and
targetLowPriorityNodes must be specified. If true, the autoScaleFormula
property is required and the Pool automatically resizes according to the
formula. The default value is false.
:type enable_auto_scale: bool
:param auto_scale_formula: A formula for the desired number of Compute
Nodes in the Pool. This property must not be specified if enableAutoScale
is set to false. It is required if enableAutoScale is set to true. The
formula is checked for validity before the Pool is created. If the formula
is not valid, the Batch service rejects the request with detailed error
information. For more information about specifying this formula, see
'Automatically scale Compute Nodes in an Azure Batch Pool'
(https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/).
:type auto_scale_formula: str
:param auto_scale_evaluation_interval: The time interval at which to
automatically adjust the Pool size according to the autoscale formula. The
default value is 15 minutes. The minimum and maximum value are 5 minutes
and 168 hours respectively. If you specify a value less than 5 minutes or
greater than 168 hours, the Batch service returns an error; if you are
calling the REST API directly, the HTTP status code is 400 (Bad Request).
:type auto_scale_evaluation_interval: timedelta
:param enable_inter_node_communication: Whether the Pool permits direct
communication between Compute Nodes. Enabling inter-node communication
limits the maximum size of the Pool due to deployment restrictions on the
Compute Nodes of the Pool. This may result in the Pool not reaching its
desired size. The default value is false.
:type enable_inter_node_communication: bool
:param network_configuration: The network configuration for the Pool.
:type network_configuration: ~azure.batch.models.NetworkConfiguration
:param start_task: A Task specified to run on each Compute Node as it
joins the Pool. The Task runs when the Compute Node is added to the Pool
or when the Compute Node is restarted.
:type start_task: ~azure.batch.models.StartTask
:param certificate_references: The list of Certificates to be installed on
each Compute Node in the Pool. For Windows Nodes, the Batch service
installs the Certificates to the specified Certificate store and location.
For Linux Compute Nodes, the Certificates are stored in a directory inside
the Task working directory and an environment variable
AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this
location. For Certificates with visibility of 'remoteUser', a 'certs'
directory is created in the user's home directory (e.g.,
/home/{user-name}/certs) and Certificates are placed in that directory.
:type certificate_references:
list[~azure.batch.models.CertificateReference]
:param application_package_references: The list of Packages to be
installed on each Compute Node in the Pool. Changes to Package references
affect all new Nodes joining the Pool, but do not affect Compute Nodes
that are already in the Pool until they are rebooted or reimaged. There is
a maximum of 10 Package references on any given Pool.
:type application_package_references:
list[~azure.batch.models.ApplicationPackageReference]
:param application_licenses: The list of application licenses the Batch
service will make available on each Compute Node in the Pool. The list of
application licenses must be a subset of available Batch service
application licenses. If a license is requested which is not supported,
Pool creation will fail.
:type application_licenses: list[str]
:param max_tasks_per_node: The maximum number of Tasks that can run
concurrently on a single Compute Node in the Pool. The default value is 1.
The maximum value is the smaller of 4 times the number of cores of the
vmSize of the Pool or 256.
:type max_tasks_per_node: int
:param task_scheduling_policy: How Tasks are distributed across Compute
Nodes in a Pool. If not specified, the default is spread.
:type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy
:param user_accounts: The list of user Accounts to be created on each
Compute Node in the Pool.
:type user_accounts: list[~azure.batch.models.UserAccount]
:param metadata: A list of name-value pairs associated with the Pool as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list[~azure.batch.models.MetadataItem]
:param mount_configuration: Mount storage using specified file system for
the entire lifetime of the pool. Mount the storage using Azure fileshare,
NFS, CIFS or Blobfuse based file system.
:type mount_configuration: list[~azure.batch.models.MountConfiguration]
"""
_validation = {
'id': {'required': True},
'vm_size': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'},
'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
'start_task': {'key': 'startTask', 'type': 'StartTask'},
'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'},
}
def __init__(self, *, id: str, vm_size: str, display_name: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, max_tasks_per_node: int=None, task_scheduling_policy=None, user_accounts=None, metadata=None, mount_configuration=None, **kwargs) -> None:
super(PoolAddParameter, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.vm_size = vm_size
self.cloud_service_configuration = cloud_service_configuration
self.virtual_machine_configuration = virtual_machine_configuration
self.resize_timeout = resize_timeout
self.target_dedicated_nodes = target_dedicated_nodes
self.target_low_priority_nodes = target_low_priority_nodes
self.enable_auto_scale = enable_auto_scale
self.auto_scale_formula = auto_scale_formula
self.auto_scale_evaluation_interval = auto_scale_evaluation_interval
self.enable_inter_node_communication = enable_inter_node_communication
self.network_configuration = network_configuration
self.start_task = start_task
self.certificate_references = certificate_references
self.application_package_references = application_package_references
self.application_licenses = application_licenses
self.max_tasks_per_node = max_tasks_per_node
self.task_scheduling_policy = task_scheduling_policy
self.user_accounts = user_accounts
self.metadata = metadata
self.mount_configuration = mount_configuration
[docs]class PoolDeleteOptions(Model):
"""Additional parameters for delete operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(PoolDeleteOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class PoolDisableAutoScaleOptions(Model):
"""Additional parameters for disable_auto_scale operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(PoolDisableAutoScaleOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class PoolEnableAutoScaleOptions(Model):
"""Additional parameters for enable_auto_scale operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(PoolEnableAutoScaleOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class PoolEnableAutoScaleParameter(Model):
"""Options for enabling automatic scaling on a Pool.
:param auto_scale_formula: The formula for the desired number of Compute
Nodes in the Pool. The formula is checked for validity before it is
applied to the Pool. If the formula is not valid, the Batch service
rejects the request with detailed error information. For more information
about specifying this formula, see Automatically scale Compute Nodes in an
Azure Batch Pool
(https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling).
:type auto_scale_formula: str
:param auto_scale_evaluation_interval: The time interval at which to
automatically adjust the Pool size according to the autoscale formula. The
default value is 15 minutes. The minimum and maximum value are 5 minutes
and 168 hours respectively. If you specify a value less than 5 minutes or
greater than 168 hours, the Batch service rejects the request with an
invalid property value error; if you are calling the REST API directly,
the HTTP status code is 400 (Bad Request). If you specify a new interval,
then the existing autoscale evaluation schedule will be stopped and a new
autoscale evaluation schedule will be started, with its starting time
being the time when this request was issued.
:type auto_scale_evaluation_interval: timedelta
"""
_attribute_map = {
'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
}
def __init__(self, *, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, **kwargs) -> None:
super(PoolEnableAutoScaleParameter, self).__init__(**kwargs)
self.auto_scale_formula = auto_scale_formula
self.auto_scale_evaluation_interval = auto_scale_evaluation_interval
[docs]class PoolEndpointConfiguration(Model):
"""The endpoint configuration for a Pool.
All required parameters must be populated in order to send to Azure.
:param inbound_nat_pools: Required. A list of inbound NAT Pools that can
be used to address specific ports on an individual Compute Node
externally. The maximum number of inbound NAT Pools per Batch Pool is 5.
If the maximum number of inbound NAT Pools is exceeded the request fails
with HTTP status code 400.
:type inbound_nat_pools: list[~azure.batch.models.InboundNATPool]
"""
_validation = {
'inbound_nat_pools': {'required': True},
}
_attribute_map = {
'inbound_nat_pools': {'key': 'inboundNATPools', 'type': '[InboundNATPool]'},
}
def __init__(self, *, inbound_nat_pools, **kwargs) -> None:
super(PoolEndpointConfiguration, self).__init__(**kwargs)
self.inbound_nat_pools = inbound_nat_pools
[docs]class PoolEvaluateAutoScaleOptions(Model):
"""Additional parameters for evaluate_auto_scale operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(PoolEvaluateAutoScaleOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class PoolEvaluateAutoScaleParameter(Model):
"""Options for evaluating an automatic scaling formula on a Pool.
All required parameters must be populated in order to send to Azure.
:param auto_scale_formula: Required. The formula for the desired number of
Compute Nodes in the Pool. The formula is validated and its results
calculated, but it is not applied to the Pool. To apply the formula to the
Pool, 'Enable automatic scaling on a Pool'. For more information about
specifying this formula, see Automatically scale Compute Nodes in an Azure
Batch Pool
(https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling).
:type auto_scale_formula: str
"""
_validation = {
'auto_scale_formula': {'required': True},
}
_attribute_map = {
'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
}
def __init__(self, *, auto_scale_formula: str, **kwargs) -> None:
super(PoolEvaluateAutoScaleParameter, self).__init__(**kwargs)
self.auto_scale_formula = auto_scale_formula
[docs]class PoolExistsOptions(Model):
"""Additional parameters for exists operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(PoolExistsOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class PoolGetAllLifetimeStatisticsOptions(Model):
"""Additional parameters for get_all_lifetime_statistics operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(PoolGetAllLifetimeStatisticsOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class PoolGetOptions(Model):
"""Additional parameters for get operation.
:param select: An OData $select clause.
:type select: str
:param expand: An OData $expand clause.
:type expand: str
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'select': {'key': '', 'type': 'str'},
'expand': {'key': '', 'type': 'str'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(PoolGetOptions, self).__init__(**kwargs)
self.select = select
self.expand = expand
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class PoolListOptions(Model):
"""Additional parameters for list operation.
:param filter: An OData $filter clause. For more information on
constructing this filter, see
https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools.
:type filter: str
:param select: An OData $select clause.
:type select: str
:param expand: An OData $expand clause.
:type expand: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 Pools can be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'filter': {'key': '', 'type': 'str'},
'select': {'key': '', 'type': 'str'},
'expand': {'key': '', 'type': 'str'},
'max_results': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(PoolListOptions, self).__init__(**kwargs)
self.filter = filter
self.select = select
self.expand = expand
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class PoolListUsageMetricsOptions(Model):
"""Additional parameters for list_usage_metrics operation.
:param start_time: The earliest time from which to include metrics. This
must be at least two and a half hours before the current time. If not
specified this defaults to the start time of the last aggregation interval
currently available.
:type start_time: datetime
:param end_time: The latest time from which to include metrics. This must
be at least two hours before the current time. If not specified this
defaults to the end time of the last aggregation interval currently
available.
:type end_time: datetime
:param filter: An OData $filter clause. For more information on
constructing this filter, see
https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics.
:type filter: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 results will be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'start_time': {'key': '', 'type': 'iso-8601'},
'end_time': {'key': '', 'type': 'iso-8601'},
'filter': {'key': '', 'type': 'str'},
'max_results': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, start_time=None, end_time=None, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(PoolListUsageMetricsOptions, self).__init__(**kwargs)
self.start_time = start_time
self.end_time = end_time
self.filter = filter
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class PoolNodeCounts(Model):
"""The number of Compute Nodes in each state for a Pool.
All required parameters must be populated in order to send to Azure.
:param pool_id: Required. The ID of the Pool.
:type pool_id: str
:param dedicated: The number of dedicated Compute Nodes in each state.
:type dedicated: ~azure.batch.models.NodeCounts
:param low_priority: The number of low priority Compute Nodes in each
state.
:type low_priority: ~azure.batch.models.NodeCounts
"""
_validation = {
'pool_id': {'required': True},
}
_attribute_map = {
'pool_id': {'key': 'poolId', 'type': 'str'},
'dedicated': {'key': 'dedicated', 'type': 'NodeCounts'},
'low_priority': {'key': 'lowPriority', 'type': 'NodeCounts'},
}
def __init__(self, *, pool_id: str, dedicated=None, low_priority=None, **kwargs) -> None:
super(PoolNodeCounts, self).__init__(**kwargs)
self.pool_id = pool_id
self.dedicated = dedicated
self.low_priority = low_priority
[docs]class PoolPatchOptions(Model):
"""Additional parameters for patch operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(PoolPatchOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class PoolPatchParameter(Model):
"""The set of changes to be made to a Pool.
:param start_task: A Task to run on each Compute Node as it joins the
Pool. The Task runs when the Compute Node is added to the Pool or when the
Compute Node is restarted. If this element is present, it overwrites any
existing StartTask. If omitted, any existing StartTask is left unchanged.
:type start_task: ~azure.batch.models.StartTask
:param certificate_references: A list of Certificates to be installed on
each Compute Node in the Pool. If this element is present, it replaces any
existing Certificate references configured on the Pool. If omitted, any
existing Certificate references are left unchanged. For Windows Nodes, the
Batch service installs the Certificates to the specified Certificate store
and location. For Linux Compute Nodes, the Certificates are stored in a
directory inside the Task working directory and an environment variable
AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this
location. For Certificates with visibility of 'remoteUser', a 'certs'
directory is created in the user's home directory (e.g.,
/home/{user-name}/certs) and Certificates are placed in that directory.
:type certificate_references:
list[~azure.batch.models.CertificateReference]
:param application_package_references: A list of Packages to be installed
on each Compute Node in the Pool. Changes to Package references affect all
new Nodes joining the Pool, but do not affect Compute Nodes that are
already in the Pool until they are rebooted or reimaged. If this element
is present, it replaces any existing Package references. If you specify an
empty collection, then all Package references are removed from the Pool.
If omitted, any existing Package references are left unchanged.
:type application_package_references:
list[~azure.batch.models.ApplicationPackageReference]
:param metadata: A list of name-value pairs associated with the Pool as
metadata. If this element is present, it replaces any existing metadata
configured on the Pool. If you specify an empty collection, any metadata
is removed from the Pool. If omitted, any existing metadata is left
unchanged.
:type metadata: list[~azure.batch.models.MetadataItem]
"""
_attribute_map = {
'start_task': {'key': 'startTask', 'type': 'StartTask'},
'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
}
def __init__(self, *, start_task=None, certificate_references=None, application_package_references=None, metadata=None, **kwargs) -> None:
super(PoolPatchParameter, self).__init__(**kwargs)
self.start_task = start_task
self.certificate_references = certificate_references
self.application_package_references = application_package_references
self.metadata = metadata
[docs]class PoolRemoveNodesOptions(Model):
"""Additional parameters for remove_nodes operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(PoolRemoveNodesOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class PoolResizeOptions(Model):
"""Additional parameters for resize operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(PoolResizeOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class PoolResizeParameter(Model):
"""Options for changing the size of a Pool.
:param target_dedicated_nodes: The desired number of dedicated Compute
Nodes in the Pool.
:type target_dedicated_nodes: int
:param target_low_priority_nodes: The desired number of low-priority
Compute Nodes in the Pool.
:type target_low_priority_nodes: int
:param resize_timeout: The timeout for allocation of Nodes to the Pool or
removal of Compute Nodes from the Pool. The default value is 15 minutes.
The minimum value is 5 minutes. If you specify a value less than 5
minutes, the Batch service returns an error; if you are calling the REST
API directly, the HTTP status code is 400 (Bad Request).
:type resize_timeout: timedelta
:param node_deallocation_option: Determines what to do with a Compute Node
and its running task(s) if the Pool size is decreasing. The default value
is requeue. Possible values include: 'requeue', 'terminate',
'taskCompletion', 'retainedData'
:type node_deallocation_option: str or
~azure.batch.models.ComputeNodeDeallocationOption
"""
_attribute_map = {
'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'},
}
def __init__(self, *, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, resize_timeout=None, node_deallocation_option=None, **kwargs) -> None:
super(PoolResizeParameter, self).__init__(**kwargs)
self.target_dedicated_nodes = target_dedicated_nodes
self.target_low_priority_nodes = target_low_priority_nodes
self.resize_timeout = resize_timeout
self.node_deallocation_option = node_deallocation_option
[docs]class PoolSpecification(Model):
"""Specification for creating a new Pool.
All required parameters must be populated in order to send to Azure.
:param display_name: The display name for the Pool. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param vm_size: Required. The size of the virtual machines in the Pool.
All virtual machines in a Pool are the same size. For information about
available sizes of virtual machines in Pools, see Choose a VM size for
Compute Nodes in an Azure Batch Pool
(https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
:type vm_size: str
:param cloud_service_configuration: The cloud service configuration for
the Pool. This property must be specified if the Pool needs to be created
with Azure PaaS VMs. This property and virtualMachineConfiguration are
mutually exclusive and one of the properties must be specified. If neither
is specified then the Batch service returns an error; if you are calling
the REST API directly, the HTTP status code is 400 (Bad Request). This
property cannot be specified if the Batch Account was created with its
poolAllocationMode property set to 'UserSubscription'.
:type cloud_service_configuration:
~azure.batch.models.CloudServiceConfiguration
:param virtual_machine_configuration: The virtual machine configuration
for the Pool. This property must be specified if the Pool needs to be
created with Azure IaaS VMs. This property and cloudServiceConfiguration
are mutually exclusive and one of the properties must be specified. If
neither is specified then the Batch service returns an error; if you are
calling the REST API directly, the HTTP status code is 400 (Bad Request).
:type virtual_machine_configuration:
~azure.batch.models.VirtualMachineConfiguration
:param max_tasks_per_node: The maximum number of Tasks that can run
concurrently on a single Compute Node in the Pool. The default value is 1.
The maximum value is the smaller of 4 times the number of cores of the
vmSize of the Pool or 256.
:type max_tasks_per_node: int
:param task_scheduling_policy: How Tasks are distributed across Compute
Nodes in a Pool. If not specified, the default is spread.
:type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy
:param resize_timeout: The timeout for allocation of Compute Nodes to the
Pool. This timeout applies only to manual scaling; it has no effect when
enableAutoScale is set to true. The default value is 15 minutes. The
minimum value is 5 minutes. If you specify a value less than 5 minutes,
the Batch service rejects the request with an error; if you are calling
the REST API directly, the HTTP status code is 400 (Bad Request).
:type resize_timeout: timedelta
:param target_dedicated_nodes: The desired number of dedicated Compute
Nodes in the Pool. This property must not be specified if enableAutoScale
is set to true. If enableAutoScale is set to false, then you must set
either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_dedicated_nodes: int
:param target_low_priority_nodes: The desired number of low-priority
Compute Nodes in the Pool. This property must not be specified if
enableAutoScale is set to true. If enableAutoScale is set to false, then
you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_low_priority_nodes: int
:param enable_auto_scale: Whether the Pool size should automatically
adjust over time. If false, at least one of targetDedicateNodes and
targetLowPriorityNodes must be specified. If true, the autoScaleFormula
element is required. The Pool automatically resizes according to the
formula. The default value is false.
:type enable_auto_scale: bool
:param auto_scale_formula: The formula for the desired number of Compute
Nodes in the Pool. This property must not be specified if enableAutoScale
is set to false. It is required if enableAutoScale is set to true. The
formula is checked for validity before the Pool is created. If the formula
is not valid, the Batch service rejects the request with detailed error
information.
:type auto_scale_formula: str
:param auto_scale_evaluation_interval: The time interval at which to
automatically adjust the Pool size according to the autoscale formula. The
default value is 15 minutes. The minimum and maximum value are 5 minutes
and 168 hours respectively. If you specify a value less than 5 minutes or
greater than 168 hours, the Batch service rejects the request with an
invalid property value error; if you are calling the REST API directly,
the HTTP status code is 400 (Bad Request).
:type auto_scale_evaluation_interval: timedelta
:param enable_inter_node_communication: Whether the Pool permits direct
communication between Compute Nodes. Enabling inter-node communication
limits the maximum size of the Pool due to deployment restrictions on the
Compute Nodes of the Pool. This may result in the Pool not reaching its
desired size. The default value is false.
:type enable_inter_node_communication: bool
:param network_configuration: The network configuration for the Pool.
:type network_configuration: ~azure.batch.models.NetworkConfiguration
:param start_task: A Task to run on each Compute Node as it joins the
Pool. The Task runs when the Compute Node is added to the Pool or when the
Compute Node is restarted.
:type start_task: ~azure.batch.models.StartTask
:param certificate_references: A list of Certificates to be installed on
each Compute Node in the Pool. For Windows Nodes, the Batch service
installs the Certificates to the specified Certificate store and location.
For Linux Compute Nodes, the Certificates are stored in a directory inside
the Task working directory and an environment variable
AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this
location. For Certificates with visibility of 'remoteUser', a 'certs'
directory is created in the user's home directory (e.g.,
/home/{user-name}/certs) and Certificates are placed in that directory.
:type certificate_references:
list[~azure.batch.models.CertificateReference]
:param application_package_references: The list of Packages to be
installed on each Compute Node in the Pool. Changes to Package references
affect all new Nodes joining the Pool, but do not affect Compute Nodes
that are already in the Pool until they are rebooted or reimaged. There is
a maximum of 10 Package references on any given Pool.
:type application_package_references:
list[~azure.batch.models.ApplicationPackageReference]
:param application_licenses: The list of application licenses the Batch
service will make available on each Compute Node in the Pool. The list of
application licenses must be a subset of available Batch service
application licenses. If a license is requested which is not supported,
Pool creation will fail. The permitted licenses available on the Pool are
'maya', 'vray', '3dsmax', 'arnold'. An additional charge applies for each
application license added to the Pool.
:type application_licenses: list[str]
:param user_accounts: The list of user Accounts to be created on each
Compute Node in the Pool.
:type user_accounts: list[~azure.batch.models.UserAccount]
:param metadata: A list of name-value pairs associated with the Pool as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list[~azure.batch.models.MetadataItem]
:param mount_configuration: A list of file systems to mount on each node
in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse.
:type mount_configuration: list[~azure.batch.models.MountConfiguration]
"""
_validation = {
'vm_size': {'required': True},
}
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'},
'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
'start_task': {'key': 'startTask', 'type': 'StartTask'},
'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'},
}
def __init__(self, *, vm_size: str, display_name: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, max_tasks_per_node: int=None, task_scheduling_policy=None, resize_timeout=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, user_accounts=None, metadata=None, mount_configuration=None, **kwargs) -> None:
super(PoolSpecification, self).__init__(**kwargs)
self.display_name = display_name
self.vm_size = vm_size
self.cloud_service_configuration = cloud_service_configuration
self.virtual_machine_configuration = virtual_machine_configuration
self.max_tasks_per_node = max_tasks_per_node
self.task_scheduling_policy = task_scheduling_policy
self.resize_timeout = resize_timeout
self.target_dedicated_nodes = target_dedicated_nodes
self.target_low_priority_nodes = target_low_priority_nodes
self.enable_auto_scale = enable_auto_scale
self.auto_scale_formula = auto_scale_formula
self.auto_scale_evaluation_interval = auto_scale_evaluation_interval
self.enable_inter_node_communication = enable_inter_node_communication
self.network_configuration = network_configuration
self.start_task = start_task
self.certificate_references = certificate_references
self.application_package_references = application_package_references
self.application_licenses = application_licenses
self.user_accounts = user_accounts
self.metadata = metadata
self.mount_configuration = mount_configuration
[docs]class PoolStatistics(Model):
"""Contains utilization and resource usage statistics for the lifetime of a
Pool.
All required parameters must be populated in order to send to Azure.
:param url: Required. The URL for the statistics.
:type url: str
:param start_time: Required. The start time of the time range covered by
the statistics.
:type start_time: datetime
:param last_update_time: Required. The time at which the statistics were
last updated. All statistics are limited to the range between startTime
and lastUpdateTime.
:type last_update_time: datetime
:param usage_stats: Statistics related to Pool usage, such as the amount
of core-time used.
:type usage_stats: ~azure.batch.models.UsageStatistics
:param resource_stats: Statistics related to resource consumption by
Compute Nodes in the Pool.
:type resource_stats: ~azure.batch.models.ResourceStatistics
"""
_validation = {
'url': {'required': True},
'start_time': {'required': True},
'last_update_time': {'required': True},
}
_attribute_map = {
'url': {'key': 'url', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'},
'usage_stats': {'key': 'usageStats', 'type': 'UsageStatistics'},
'resource_stats': {'key': 'resourceStats', 'type': 'ResourceStatistics'},
}
def __init__(self, *, url: str, start_time, last_update_time, usage_stats=None, resource_stats=None, **kwargs) -> None:
super(PoolStatistics, self).__init__(**kwargs)
self.url = url
self.start_time = start_time
self.last_update_time = last_update_time
self.usage_stats = usage_stats
self.resource_stats = resource_stats
[docs]class PoolStopResizeOptions(Model):
"""Additional parameters for stop_resize operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(PoolStopResizeOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class PoolUpdatePropertiesOptions(Model):
"""Additional parameters for update_properties operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(PoolUpdatePropertiesOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class PoolUpdatePropertiesParameter(Model):
"""The set of changes to be made to a Pool.
All required parameters must be populated in order to send to Azure.
:param start_task: A Task to run on each Compute Node as it joins the
Pool. The Task runs when the Compute Node is added to the Pool or when the
Compute Node is restarted. If this element is present, it overwrites any
existing StartTask. If omitted, any existing StartTask is removed from the
Pool.
:type start_task: ~azure.batch.models.StartTask
:param certificate_references: Required. A list of Certificates to be
installed on each Compute Node in the Pool. This list replaces any
existing Certificate references configured on the Pool. If you specify an
empty collection, any existing Certificate references are removed from the
Pool. For Windows Nodes, the Batch service installs the Certificates to
the specified Certificate store and location. For Linux Compute Nodes, the
Certificates are stored in a directory inside the Task working directory
and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the
Task to query for this location. For Certificates with visibility of
'remoteUser', a 'certs' directory is created in the user's home directory
(e.g., /home/{user-name}/certs) and Certificates are placed in that
directory.
:type certificate_references:
list[~azure.batch.models.CertificateReference]
:param application_package_references: Required. The list of Application
Packages to be installed on each Compute Node in the Pool. The list
replaces any existing Application Package references on the Pool. Changes
to Application Package references affect all new Compute Nodes joining the
Pool, but do not affect Compute Nodes that are already in the Pool until
they are rebooted or reimaged. There is a maximum of 10 Application
Package references on any given Pool. If omitted, or if you specify an
empty collection, any existing Application Packages references are removed
from the Pool. A maximum of 10 references may be specified on a given
Pool.
:type application_package_references:
list[~azure.batch.models.ApplicationPackageReference]
:param metadata: Required. A list of name-value pairs associated with the
Pool as metadata. This list replaces any existing metadata configured on
the Pool. If omitted, or if you specify an empty collection, any existing
metadata is removed from the Pool.
:type metadata: list[~azure.batch.models.MetadataItem]
"""
_validation = {
'certificate_references': {'required': True},
'application_package_references': {'required': True},
'metadata': {'required': True},
}
_attribute_map = {
'start_task': {'key': 'startTask', 'type': 'StartTask'},
'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
}
def __init__(self, *, certificate_references, application_package_references, metadata, start_task=None, **kwargs) -> None:
super(PoolUpdatePropertiesParameter, self).__init__(**kwargs)
self.start_task = start_task
self.certificate_references = certificate_references
self.application_package_references = application_package_references
self.metadata = metadata
[docs]class PoolUsageMetrics(Model):
"""Usage metrics for a Pool across an aggregation interval.
All required parameters must be populated in order to send to Azure.
:param pool_id: Required. The ID of the Pool whose metrics are aggregated
in this entry.
:type pool_id: str
:param start_time: Required. The start time of the aggregation interval
covered by this entry.
:type start_time: datetime
:param end_time: Required. The end time of the aggregation interval
covered by this entry.
:type end_time: datetime
:param vm_size: Required. The size of virtual machines in the Pool. All
VMs in a Pool are the same size. For information about available sizes of
virtual machines in Pools, see Choose a VM size for Compute Nodes in an
Azure Batch Pool
(https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
:type vm_size: str
:param total_core_hours: Required. The total core hours used in the Pool
during this aggregation interval.
:type total_core_hours: float
"""
_validation = {
'pool_id': {'required': True},
'start_time': {'required': True},
'end_time': {'required': True},
'vm_size': {'required': True},
'total_core_hours': {'required': True},
}
_attribute_map = {
'pool_id': {'key': 'poolId', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'total_core_hours': {'key': 'totalCoreHours', 'type': 'float'},
}
def __init__(self, *, pool_id: str, start_time, end_time, vm_size: str, total_core_hours: float, **kwargs) -> None:
super(PoolUsageMetrics, self).__init__(**kwargs)
self.pool_id = pool_id
self.start_time = start_time
self.end_time = end_time
self.vm_size = vm_size
self.total_core_hours = total_core_hours
[docs]class PublicIPAddressConfiguration(Model):
"""The public IP Address configuration of the networking configuration of a
Pool.
:param provision: The provisioning type for Public IP Addresses for the
Pool. The default value is BatchManaged. Possible values include:
'batchManaged', 'userManaged', 'noPublicIPAddresses'
:type provision: str or ~azure.batch.models.IPAddressProvisioningType
:param ip_address_ids: The list of public IPs which the Batch service will
use when provisioning Compute Nodes. The number of IPs specified here
limits the maximum size of the Pool - 50 dedicated nodes or 20
low-priority nodes can be allocated for each public IP. For example, a
pool needing 150 dedicated VMs would need at least 3 public IPs specified.
Each element of this collection is of the form:
/subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}.
:type ip_address_ids: list[str]
"""
_attribute_map = {
'provision': {'key': 'provision', 'type': 'IPAddressProvisioningType'},
'ip_address_ids': {'key': 'ipAddressIds', 'type': '[str]'},
}
def __init__(self, *, provision=None, ip_address_ids=None, **kwargs) -> None:
super(PublicIPAddressConfiguration, self).__init__(**kwargs)
self.provision = provision
self.ip_address_ids = ip_address_ids
[docs]class RecentJob(Model):
"""Information about the most recent Job to run under the Job Schedule.
:param id: The ID of the Job.
:type id: str
:param url: The URL of the Job.
:type url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
}
def __init__(self, *, id: str=None, url: str=None, **kwargs) -> None:
super(RecentJob, self).__init__(**kwargs)
self.id = id
self.url = url
[docs]class ResizeError(Model):
"""An error that occurred when resizing a Pool.
:param code: An identifier for the Pool resize error. Codes are invariant
and are intended to be consumed programmatically.
:type code: str
:param message: A message describing the Pool resize error, intended to be
suitable for display in a user interface.
:type message: str
:param values: A list of additional error details related to the Pool
resize error.
:type values: list[~azure.batch.models.NameValuePair]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'values': {'key': 'values', 'type': '[NameValuePair]'},
}
def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None:
super(ResizeError, self).__init__(**kwargs)
self.code = code
self.message = message
self.values = values
[docs]class ResourceFile(Model):
"""A single file or multiple files to be downloaded to a Compute Node.
:param auto_storage_container_name: The storage container name in the auto
storage Account. The autoStorageContainerName, storageContainerUrl and
httpUrl properties are mutually exclusive and one of them must be
specified.
:type auto_storage_container_name: str
:param storage_container_url: The URL of the blob container within Azure
Blob Storage. The autoStorageContainerName, storageContainerUrl and
httpUrl properties are mutually exclusive and one of them must be
specified. This URL must be readable and listable using anonymous access;
that is, the Batch service does not present any credentials when
downloading blobs from the container. There are two ways to get such a URL
for a container in Azure storage: include a Shared Access Signature (SAS)
granting read and list permissions on the container, or set the ACL for
the container to allow public access.
:type storage_container_url: str
:param http_url: The URL of the file to download. The
autoStorageContainerName, storageContainerUrl and httpUrl properties are
mutually exclusive and one of them must be specified. If the URL points to
Azure Blob Storage, it must be readable using anonymous access; that is,
the Batch service does not present any credentials when downloading the
blob. There are two ways to get such a URL for a blob in Azure storage:
include a Shared Access Signature (SAS) granting read permissions on the
blob, or set the ACL for the blob or its container to allow public access.
:type http_url: str
:param blob_prefix: The blob prefix to use when downloading blobs from an
Azure Storage container. Only the blobs whose names begin with the
specified prefix will be downloaded. The property is valid only when
autoStorageContainerName or storageContainerUrl is used. This prefix can
be a partial filename or a subdirectory. If a prefix is not specified, all
the files in the container will be downloaded.
:type blob_prefix: str
:param file_path: The location on the Compute Node to which to download
the file(s), relative to the Task's working directory. If the httpUrl
property is specified, the filePath is required and describes the path
which the file will be downloaded to, including the filename. Otherwise,
if the autoStorageContainerName or storageContainerUrl property is
specified, filePath is optional and is the directory to download the files
to. In the case where filePath is used as a directory, any directory
structure already associated with the input data will be retained in full
and appended to the specified filePath directory. The specified relative
path cannot break out of the Task's working directory (for example by
using '..').
:type file_path: str
:param file_mode: The file permission mode attribute in octal format. This
property applies only to files being downloaded to Linux Compute Nodes. It
will be ignored if it is specified for a resourceFile which will be
downloaded to a Windows Compute Node. If this property is not specified
for a Linux Compute Node, then a default value of 0770 is applied to the
file.
:type file_mode: str
"""
_attribute_map = {
'auto_storage_container_name': {'key': 'autoStorageContainerName', 'type': 'str'},
'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'},
'http_url': {'key': 'httpUrl', 'type': 'str'},
'blob_prefix': {'key': 'blobPrefix', 'type': 'str'},
'file_path': {'key': 'filePath', 'type': 'str'},
'file_mode': {'key': 'fileMode', 'type': 'str'},
}
def __init__(self, *, auto_storage_container_name: str=None, storage_container_url: str=None, http_url: str=None, blob_prefix: str=None, file_path: str=None, file_mode: str=None, **kwargs) -> None:
super(ResourceFile, self).__init__(**kwargs)
self.auto_storage_container_name = auto_storage_container_name
self.storage_container_url = storage_container_url
self.http_url = http_url
self.blob_prefix = blob_prefix
self.file_path = file_path
self.file_mode = file_mode
[docs]class ResourceStatistics(Model):
"""Statistics related to resource consumption by Compute Nodes in a Pool.
All required parameters must be populated in order to send to Azure.
:param start_time: Required. The start time of the time range covered by
the statistics.
:type start_time: datetime
:param last_update_time: Required. The time at which the statistics were
last updated. All statistics are limited to the range between startTime
and lastUpdateTime.
:type last_update_time: datetime
:param avg_cpu_percentage: Required. The average CPU usage across all
Compute Nodes in the Pool (percentage per node).
:type avg_cpu_percentage: float
:param avg_memory_gi_b: Required. The average memory usage in GiB across
all Compute Nodes in the Pool.
:type avg_memory_gi_b: float
:param peak_memory_gi_b: Required. The peak memory usage in GiB across all
Compute Nodes in the Pool.
:type peak_memory_gi_b: float
:param avg_disk_gi_b: Required. The average used disk space in GiB across
all Compute Nodes in the Pool.
:type avg_disk_gi_b: float
:param peak_disk_gi_b: Required. The peak used disk space in GiB across
all Compute Nodes in the Pool.
:type peak_disk_gi_b: float
:param disk_read_iops: Required. The total number of disk read operations
across all Compute Nodes in the Pool.
:type disk_read_iops: long
:param disk_write_iops: Required. The total number of disk write
operations across all Compute Nodes in the Pool.
:type disk_write_iops: long
:param disk_read_gi_b: Required. The total amount of data in GiB of disk
reads across all Compute Nodes in the Pool.
:type disk_read_gi_b: float
:param disk_write_gi_b: Required. The total amount of data in GiB of disk
writes across all Compute Nodes in the Pool.
:type disk_write_gi_b: float
:param network_read_gi_b: Required. The total amount of data in GiB of
network reads across all Compute Nodes in the Pool.
:type network_read_gi_b: float
:param network_write_gi_b: Required. The total amount of data in GiB of
network writes across all Compute Nodes in the Pool.
:type network_write_gi_b: float
"""
_validation = {
'start_time': {'required': True},
'last_update_time': {'required': True},
'avg_cpu_percentage': {'required': True},
'avg_memory_gi_b': {'required': True},
'peak_memory_gi_b': {'required': True},
'avg_disk_gi_b': {'required': True},
'peak_disk_gi_b': {'required': True},
'disk_read_iops': {'required': True},
'disk_write_iops': {'required': True},
'disk_read_gi_b': {'required': True},
'disk_write_gi_b': {'required': True},
'network_read_gi_b': {'required': True},
'network_write_gi_b': {'required': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'},
'avg_cpu_percentage': {'key': 'avgCPUPercentage', 'type': 'float'},
'avg_memory_gi_b': {'key': 'avgMemoryGiB', 'type': 'float'},
'peak_memory_gi_b': {'key': 'peakMemoryGiB', 'type': 'float'},
'avg_disk_gi_b': {'key': 'avgDiskGiB', 'type': 'float'},
'peak_disk_gi_b': {'key': 'peakDiskGiB', 'type': 'float'},
'disk_read_iops': {'key': 'diskReadIOps', 'type': 'long'},
'disk_write_iops': {'key': 'diskWriteIOps', 'type': 'long'},
'disk_read_gi_b': {'key': 'diskReadGiB', 'type': 'float'},
'disk_write_gi_b': {'key': 'diskWriteGiB', 'type': 'float'},
'network_read_gi_b': {'key': 'networkReadGiB', 'type': 'float'},
'network_write_gi_b': {'key': 'networkWriteGiB', 'type': 'float'},
}
def __init__(self, *, start_time, last_update_time, avg_cpu_percentage: float, avg_memory_gi_b: float, peak_memory_gi_b: float, avg_disk_gi_b: float, peak_disk_gi_b: float, disk_read_iops: int, disk_write_iops: int, disk_read_gi_b: float, disk_write_gi_b: float, network_read_gi_b: float, network_write_gi_b: float, **kwargs) -> None:
super(ResourceStatistics, self).__init__(**kwargs)
self.start_time = start_time
self.last_update_time = last_update_time
self.avg_cpu_percentage = avg_cpu_percentage
self.avg_memory_gi_b = avg_memory_gi_b
self.peak_memory_gi_b = peak_memory_gi_b
self.avg_disk_gi_b = avg_disk_gi_b
self.peak_disk_gi_b = peak_disk_gi_b
self.disk_read_iops = disk_read_iops
self.disk_write_iops = disk_write_iops
self.disk_read_gi_b = disk_read_gi_b
self.disk_write_gi_b = disk_write_gi_b
self.network_read_gi_b = network_read_gi_b
self.network_write_gi_b = network_write_gi_b
[docs]class Schedule(Model):
"""The schedule according to which Jobs will be created.
:param do_not_run_until: The earliest time at which any Job may be created
under this Job Schedule. If you do not specify a doNotRunUntil time, the
schedule becomes ready to create Jobs immediately.
:type do_not_run_until: datetime
:param do_not_run_after: A time after which no Job will be created under
this Job Schedule. The schedule will move to the completed state as soon
as this deadline is past and there is no active Job under this Job
Schedule. If you do not specify a doNotRunAfter time, and you are creating
a recurring Job Schedule, the Job Schedule will remain active until you
explicitly terminate it.
:type do_not_run_after: datetime
:param start_window: The time interval, starting from the time at which
the schedule indicates a Job should be created, within which a Job must be
created. If a Job is not created within the startWindow interval, then the
'opportunity' is lost; no Job will be created until the next recurrence of
the schedule. If the schedule is recurring, and the startWindow is longer
than the recurrence interval, then this is equivalent to an infinite
startWindow, because the Job that is 'due' in one recurrenceInterval is
not carried forward into the next recurrence interval. The default is
infinite. The minimum value is 1 minute. If you specify a lower value, the
Batch service rejects the schedule with an error; if you are calling the
REST API directly, the HTTP status code is 400 (Bad Request).
:type start_window: timedelta
:param recurrence_interval: The time interval between the start times of
two successive Jobs under the Job Schedule. A Job Schedule can have at
most one active Job under it at any given time. Because a Job Schedule can
have at most one active Job under it at any given time, if it is time to
create a new Job under a Job Schedule, but the previous Job is still
running, the Batch service will not create the new Job until the previous
Job finishes. If the previous Job does not finish within the startWindow
period of the new recurrenceInterval, then no new Job will be scheduled
for that interval. For recurring Jobs, you should normally specify a
jobManagerTask in the jobSpecification. If you do not use jobManagerTask,
you will need an external process to monitor when Jobs are created, add
Tasks to the Jobs and terminate the Jobs ready for the next recurrence.
The default is that the schedule does not recur: one Job is created,
within the startWindow after the doNotRunUntil time, and the schedule is
complete as soon as that Job finishes. The minimum value is 1 minute. If
you specify a lower value, the Batch service rejects the schedule with an
error; if you are calling the REST API directly, the HTTP status code is
400 (Bad Request).
:type recurrence_interval: timedelta
"""
_attribute_map = {
'do_not_run_until': {'key': 'doNotRunUntil', 'type': 'iso-8601'},
'do_not_run_after': {'key': 'doNotRunAfter', 'type': 'iso-8601'},
'start_window': {'key': 'startWindow', 'type': 'duration'},
'recurrence_interval': {'key': 'recurrenceInterval', 'type': 'duration'},
}
def __init__(self, *, do_not_run_until=None, do_not_run_after=None, start_window=None, recurrence_interval=None, **kwargs) -> None:
super(Schedule, self).__init__(**kwargs)
self.do_not_run_until = do_not_run_until
self.do_not_run_after = do_not_run_after
self.start_window = start_window
self.recurrence_interval = recurrence_interval
[docs]class StartTask(Model):
"""A Task which is run when a Node joins a Pool in the Azure Batch service, or
when the Compute Node is rebooted or reimaged.
Batch will retry Tasks when a recovery operation is triggered on a Node.
Examples of recovery operations include (but are not limited to) when an
unhealthy Node is rebooted or a Compute Node disappeared due to host
failure. Retries due to recovery operations are independent of and are not
counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0,
an internal retry due to a recovery operation may occur. Because of this,
all Tasks should be idempotent. This means Tasks need to tolerate being
interrupted and restarted without causing any corruption or duplicate data.
The best practice for long running Tasks is to use some form of
checkpointing. In some cases the StartTask may be re-run even though the
Compute Node was not rebooted. Special care should be taken to avoid
StartTasks which create breakaway process or install/launch services from
the StartTask working directory, as this will block Batch from being able
to re-run the StartTask.
All required parameters must be populated in order to send to Azure.
:param command_line: Required. The command line of the StartTask. The
command line does not run under a shell, and therefore cannot take
advantage of shell features such as environment variable expansion. If you
want to take advantage of such features, you should invoke the shell in
the command line, for example using "cmd /c MyCommand" in Windows or
"/bin/sh -c MyCommand" in Linux. If the command line refers to file paths,
it should use a relative path (relative to the Task working directory), or
use the Batch provided environment variable
(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
:type command_line: str
:param container_settings: The settings for the container under which the
StartTask runs. When this is specified, all directories recursively below
the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the
node) are mapped into the container, all Task environment variables are
mapped into the container, and the Task command line is executed in the
container. Files produced in the container outside of
AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning
that Batch file APIs will not be able to access those files.
:type container_settings: ~azure.batch.models.TaskContainerSettings
:param resource_files: A list of files that the Batch service will
download to the Compute Node before running the command line. There is a
maximum size for the list of resource files. When the max size is
exceeded, the request will fail and the response error code will be
RequestEntityTooLarge. If this occurs, the collection of ResourceFiles
must be reduced in size. This can be achieved using .zip files,
Application Packages, or Docker Containers. Files listed under this
element are located in the Task's working directory.
:type resource_files: list[~azure.batch.models.ResourceFile]
:param environment_settings: A list of environment variable settings for
the StartTask.
:type environment_settings: list[~azure.batch.models.EnvironmentSetting]
:param user_identity: The user identity under which the StartTask runs. If
omitted, the Task runs as a non-administrative user unique to the Task.
:type user_identity: ~azure.batch.models.UserIdentity
:param max_task_retry_count: The maximum number of times the Task may be
retried. The Batch service retries a Task if its exit code is nonzero.
Note that this value specifically controls the number of retries. The
Batch service will try the Task once, and may then retry up to this limit.
For example, if the maximum retry count is 3, Batch tries the Task up to 4
times (one initial try and 3 retries). If the maximum retry count is 0,
the Batch service does not retry the Task. If the maximum retry count is
-1, the Batch service retries the Task without limit.
:type max_task_retry_count: int
:param wait_for_success: Whether the Batch service should wait for the
StartTask to complete successfully (that is, to exit with exit code 0)
before scheduling any Tasks on the Compute Node. If true and the StartTask
fails on a Node, the Batch service retries the StartTask up to its maximum
retry count (maxTaskRetryCount). If the Task has still not completed
successfully after all retries, then the Batch service marks the Node
unusable, and will not schedule Tasks to it. This condition can be
detected via the Compute Node state and failure info details. If false,
the Batch service will not wait for the StartTask to complete. In this
case, other Tasks can start executing on the Compute Node while the
StartTask is still running; and even if the StartTask fails, new Tasks
will continue to be scheduled on the Compute Node. The default is true.
:type wait_for_success: bool
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'},
'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'},
}
def __init__(self, *, command_line: str, container_settings=None, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count: int=None, wait_for_success: bool=None, **kwargs) -> None:
super(StartTask, self).__init__(**kwargs)
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.environment_settings = environment_settings
self.user_identity = user_identity
self.max_task_retry_count = max_task_retry_count
self.wait_for_success = wait_for_success
[docs]class TaskAddCollectionOptions(Model):
"""Additional parameters for add_collection operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(TaskAddCollectionOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class TaskAddCollectionParameter(Model):
"""A collection of Azure Batch Tasks to add.
All required parameters must be populated in order to send to Azure.
:param value: Required. The collection of Tasks to add. The maximum count
of Tasks is 100. The total serialized size of this collection must be less
than 1MB. If it is greater than 1MB (for example if each Task has 100's of
resource files or environment variables), the request will fail with code
'RequestBodyTooLarge' and should be retried again with fewer Tasks.
:type value: list[~azure.batch.models.TaskAddParameter]
"""
_validation = {
'value': {'required': True, 'max_items': 100},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[TaskAddParameter]'},
}
def __init__(self, *, value, **kwargs) -> None:
super(TaskAddCollectionParameter, self).__init__(**kwargs)
self.value = value
[docs]class TaskAddCollectionResult(Model):
"""The result of adding a collection of Tasks to a Job.
:param value: The results of the add Task collection operation.
:type value: list[~azure.batch.models.TaskAddResult]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[TaskAddResult]'},
}
def __init__(self, *, value=None, **kwargs) -> None:
super(TaskAddCollectionResult, self).__init__(**kwargs)
self.value = value
[docs]class TaskAddOptions(Model):
"""Additional parameters for add operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(TaskAddOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class TaskAddParameter(Model):
"""An Azure Batch Task to add.
Batch will retry Tasks when a recovery operation is triggered on a Node.
Examples of recovery operations include (but are not limited to) when an
unhealthy Node is rebooted or a Compute Node disappeared due to host
failure. Retries due to recovery operations are independent of and are not
counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0,
an internal retry due to a recovery operation may occur. Because of this,
all Tasks should be idempotent. This means Tasks need to tolerate being
interrupted and restarted without causing any corruption or duplicate data.
The best practice for long running Tasks is to use some form of
checkpointing.
All required parameters must be populated in order to send to Azure.
:param id: Required. A string that uniquely identifies the Task within the
Job. The ID can contain any combination of alphanumeric characters
including hyphens and underscores, and cannot contain more than 64
characters. The ID is case-preserving and case-insensitive (that is, you
may not have two IDs within a Job that differ only by case).
:type id: str
:param display_name: A display name for the Task. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param command_line: Required. The command line of the Task. For
multi-instance Tasks, the command line is executed as the primary Task,
after the primary Task and all subtasks have finished executing the
coordination command line. The command line does not run under a shell,
and therefore cannot take advantage of shell features such as environment
variable expansion. If you want to take advantage of such features, you
should invoke the shell in the command line, for example using "cmd /c
MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command
line refers to file paths, it should use a relative path (relative to the
Task working directory), or use the Batch provided environment variable
(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
:type command_line: str
:param container_settings: The settings for the container under which the
Task runs. If the Pool that will run this Task has containerConfiguration
set, this must be set as well. If the Pool that will run this Task doesn't
have containerConfiguration set, this must not be set. When this is
specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR
(the root of Azure Batch directories on the node) are mapped into the
container, all Task environment variables are mapped into the container,
and the Task command line is executed in the container. Files produced in
the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to
the host disk, meaning that Batch file APIs will not be able to access
those files.
:type container_settings: ~azure.batch.models.TaskContainerSettings
:param exit_conditions: How the Batch service should respond when the Task
completes.
:type exit_conditions: ~azure.batch.models.ExitConditions
:param resource_files: A list of files that the Batch service will
download to the Compute Node before running the command line. For
multi-instance Tasks, the resource files will only be downloaded to the
Compute Node on which the primary Task is executed. There is a maximum
size for the list of resource files. When the max size is exceeded, the
request will fail and the response error code will be
RequestEntityTooLarge. If this occurs, the collection of ResourceFiles
must be reduced in size. This can be achieved using .zip files,
Application Packages, or Docker Containers.
:type resource_files: list[~azure.batch.models.ResourceFile]
:param output_files: A list of files that the Batch service will upload
from the Compute Node after running the command line. For multi-instance
Tasks, the files will only be uploaded from the Compute Node on which the
primary Task is executed.
:type output_files: list[~azure.batch.models.OutputFile]
:param environment_settings: A list of environment variable settings for
the Task.
:type environment_settings: list[~azure.batch.models.EnvironmentSetting]
:param affinity_info: A locality hint that can be used by the Batch
service to select a Compute Node on which to start the new Task.
:type affinity_info: ~azure.batch.models.AffinityInformation
:param constraints: The execution constraints that apply to this Task. If
you do not specify constraints, the maxTaskRetryCount is the
maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite,
and the retentionTime is 7 days.
:type constraints: ~azure.batch.models.TaskConstraints
:param user_identity: The user identity under which the Task runs. If
omitted, the Task runs as a non-administrative user unique to the Task.
:type user_identity: ~azure.batch.models.UserIdentity
:param multi_instance_settings: An object that indicates that the Task is
a multi-instance Task, and contains information about how to run the
multi-instance Task.
:type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings
:param depends_on: The Tasks that this Task depends on. This Task will not
be scheduled until all Tasks that it depends on have completed
successfully. If any of those Tasks fail and exhaust their retry counts,
this Task will never be scheduled. If the Job does not have
usesTaskDependencies set to true, and this element is present, the request
fails with error code TaskDependenciesNotSpecifiedOnJob.
:type depends_on: ~azure.batch.models.TaskDependencies
:param application_package_references: A list of Packages that the Batch
service will deploy to the Compute Node before running the command line.
Application packages are downloaded and deployed to a shared directory,
not the Task working directory. Therefore, if a referenced package is
already on the Node, and is up to date, then it is not re-downloaded; the
existing copy on the Compute Node is used. If a referenced Package cannot
be installed, for example because the package has been deleted or because
download failed, the Task fails.
:type application_package_references:
list[~azure.batch.models.ApplicationPackageReference]
:param authentication_token_settings: The settings for an authentication
token that the Task can use to perform Batch service operations. If this
property is set, the Batch service provides the Task with an
authentication token which can be used to authenticate Batch service
operations without requiring an Account access key. The token is provided
via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations
that the Task can carry out using the token depend on the settings. For
example, a Task can request Job permissions in order to add other Tasks to
the Job, or check the status of the Job or of other Tasks under the Job.
:type authentication_token_settings:
~azure.batch.models.AuthenticationTokenSettings
"""
_validation = {
'id': {'required': True},
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'},
'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'},
'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'},
}
def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None, exit_conditions=None, resource_files=None, output_files=None, environment_settings=None, affinity_info=None, constraints=None, user_identity=None, multi_instance_settings=None, depends_on=None, application_package_references=None, authentication_token_settings=None, **kwargs) -> None:
super(TaskAddParameter, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.command_line = command_line
self.container_settings = container_settings
self.exit_conditions = exit_conditions
self.resource_files = resource_files
self.output_files = output_files
self.environment_settings = environment_settings
self.affinity_info = affinity_info
self.constraints = constraints
self.user_identity = user_identity
self.multi_instance_settings = multi_instance_settings
self.depends_on = depends_on
self.application_package_references = application_package_references
self.authentication_token_settings = authentication_token_settings
[docs]class TaskAddResult(Model):
"""Result for a single Task added as part of an add Task collection operation.
All required parameters must be populated in order to send to Azure.
:param status: Required. The status of the add Task request. Possible
values include: 'success', 'clientError', 'serverError'
:type status: str or ~azure.batch.models.TaskAddStatus
:param task_id: Required. The ID of the Task for which this is the result.
:type task_id: str
:param e_tag: The ETag of the Task, if the Task was successfully added.
You can use this to detect whether the Task has changed between requests.
In particular, you can be pass the ETag with an Update Task request to
specify that your changes should take effect only if nobody else has
modified the Job in the meantime.
:type e_tag: str
:param last_modified: The last modified time of the Task.
:type last_modified: datetime
:param location: The URL of the Task, if the Task was successfully added.
:type location: str
:param error: The error encountered while attempting to add the Task.
:type error: ~azure.batch.models.BatchError
"""
_validation = {
'status': {'required': True},
'task_id': {'required': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'TaskAddStatus'},
'task_id': {'key': 'taskId', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
'location': {'key': 'location', 'type': 'str'},
'error': {'key': 'error', 'type': 'BatchError'},
}
def __init__(self, *, status, task_id: str, e_tag: str=None, last_modified=None, location: str=None, error=None, **kwargs) -> None:
super(TaskAddResult, self).__init__(**kwargs)
self.status = status
self.task_id = task_id
self.e_tag = e_tag
self.last_modified = last_modified
self.location = location
self.error = error
[docs]class TaskConstraints(Model):
"""Execution constraints to apply to a Task.
:param max_wall_clock_time: The maximum elapsed time that the Task may
run, measured from the time the Task starts. If the Task does not complete
within the time limit, the Batch service terminates it. If this is not
specified, there is no time limit on how long the Task may run.
:type max_wall_clock_time: timedelta
:param retention_time: The minimum time to retain the Task directory on
the Compute Node where it ran, from the time it completes execution. After
this time, the Batch service may delete the Task directory and all its
contents. The default is 7 days, i.e. the Task directory will be retained
for 7 days unless the Compute Node is removed or the Job is deleted.
:type retention_time: timedelta
:param max_task_retry_count: The maximum number of times the Task may be
retried. The Batch service retries a Task if its exit code is nonzero.
Note that this value specifically controls the number of retries for the
Task executable due to a nonzero exit code. The Batch service will try the
Task once, and may then retry up to this limit. For example, if the
maximum retry count is 3, Batch tries the Task up to 4 times (one initial
try and 3 retries). If the maximum retry count is 0, the Batch service
does not retry the Task after the first attempt. If the maximum retry
count is -1, the Batch service retries the Task without limit.
:type max_task_retry_count: int
"""
_attribute_map = {
'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'},
'retention_time': {'key': 'retentionTime', 'type': 'duration'},
'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'},
}
def __init__(self, *, max_wall_clock_time=None, retention_time=None, max_task_retry_count: int=None, **kwargs) -> None:
super(TaskConstraints, self).__init__(**kwargs)
self.max_wall_clock_time = max_wall_clock_time
self.retention_time = retention_time
self.max_task_retry_count = max_task_retry_count
[docs]class TaskContainerSettings(Model):
"""The container settings for a Task.
All required parameters must be populated in order to send to Azure.
:param container_run_options: Additional options to the container create
command. These additional options are supplied as arguments to the "docker
create" command, in addition to those controlled by the Batch Service.
:type container_run_options: str
:param image_name: Required. The Image to use to create the container in
which the Task will run. This is the full Image reference, as would be
specified to "docker pull". If no tag is provided as part of the Image
name, the tag ":latest" is used as a default.
:type image_name: str
:param registry: The private registry which contains the container Image.
This setting can be omitted if was already provided at Pool creation.
:type registry: ~azure.batch.models.ContainerRegistry
:param working_directory: The location of the container Task working
directory. The default is 'taskWorkingDirectory'. Possible values include:
'taskWorkingDirectory', 'containerImageDefault'
:type working_directory: str or
~azure.batch.models.ContainerWorkingDirectory
"""
_validation = {
'image_name': {'required': True},
}
_attribute_map = {
'container_run_options': {'key': 'containerRunOptions', 'type': 'str'},
'image_name': {'key': 'imageName', 'type': 'str'},
'registry': {'key': 'registry', 'type': 'ContainerRegistry'},
'working_directory': {'key': 'workingDirectory', 'type': 'ContainerWorkingDirectory'},
}
def __init__(self, *, image_name: str, container_run_options: str=None, registry=None, working_directory=None, **kwargs) -> None:
super(TaskContainerSettings, self).__init__(**kwargs)
self.container_run_options = container_run_options
self.image_name = image_name
self.registry = registry
self.working_directory = working_directory
[docs]class TaskCounts(Model):
"""The Task counts for a Job.
All required parameters must be populated in order to send to Azure.
:param active: Required. The number of Tasks in the active state.
:type active: int
:param running: Required. The number of Tasks in the running or preparing
state.
:type running: int
:param completed: Required. The number of Tasks in the completed state.
:type completed: int
:param succeeded: Required. The number of Tasks which succeeded. A Task
succeeds if its result (found in the executionInfo property) is 'success'.
:type succeeded: int
:param failed: Required. The number of Tasks which failed. A Task fails if
its result (found in the executionInfo property) is 'failure'.
:type failed: int
"""
_validation = {
'active': {'required': True},
'running': {'required': True},
'completed': {'required': True},
'succeeded': {'required': True},
'failed': {'required': True},
}
_attribute_map = {
'active': {'key': 'active', 'type': 'int'},
'running': {'key': 'running', 'type': 'int'},
'completed': {'key': 'completed', 'type': 'int'},
'succeeded': {'key': 'succeeded', 'type': 'int'},
'failed': {'key': 'failed', 'type': 'int'},
}
def __init__(self, *, active: int, running: int, completed: int, succeeded: int, failed: int, **kwargs) -> None:
super(TaskCounts, self).__init__(**kwargs)
self.active = active
self.running = running
self.completed = completed
self.succeeded = succeeded
self.failed = failed
[docs]class TaskDeleteOptions(Model):
"""Additional parameters for delete operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(TaskDeleteOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class TaskDependencies(Model):
"""Specifies any dependencies of a Task. Any Task that is explicitly specified
or within a dependency range must complete before the dependant Task will
be scheduled.
:param task_ids: The list of Task IDs that this Task depends on. All Tasks
in this list must complete successfully before the dependent Task can be
scheduled. The taskIds collection is limited to 64000 characters total
(i.e. the combined length of all Task IDs). If the taskIds collection
exceeds the maximum length, the Add Task request fails with error code
TaskDependencyListTooLong. In this case consider using Task ID ranges
instead.
:type task_ids: list[str]
:param task_id_ranges: The list of Task ID ranges that this Task depends
on. All Tasks in all ranges must complete successfully before the
dependent Task can be scheduled.
:type task_id_ranges: list[~azure.batch.models.TaskIdRange]
"""
_attribute_map = {
'task_ids': {'key': 'taskIds', 'type': '[str]'},
'task_id_ranges': {'key': 'taskIdRanges', 'type': '[TaskIdRange]'},
}
def __init__(self, *, task_ids=None, task_id_ranges=None, **kwargs) -> None:
super(TaskDependencies, self).__init__(**kwargs)
self.task_ids = task_ids
self.task_id_ranges = task_id_ranges
[docs]class TaskGetOptions(Model):
"""Additional parameters for get operation.
:param select: An OData $select clause.
:type select: str
:param expand: An OData $expand clause.
:type expand: str
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'select': {'key': '', 'type': 'str'},
'expand': {'key': '', 'type': 'str'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(TaskGetOptions, self).__init__(**kwargs)
self.select = select
self.expand = expand
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class TaskIdRange(Model):
"""A range of Task IDs that a Task can depend on. All Tasks with IDs in the
range must complete successfully before the dependent Task can be
scheduled.
The start and end of the range are inclusive. For example, if a range has
start 9 and end 12, then it represents Tasks '9', '10', '11' and '12'.
All required parameters must be populated in order to send to Azure.
:param start: Required. The first Task ID in the range.
:type start: int
:param end: Required. The last Task ID in the range.
:type end: int
"""
_validation = {
'start': {'required': True},
'end': {'required': True},
}
_attribute_map = {
'start': {'key': 'start', 'type': 'int'},
'end': {'key': 'end', 'type': 'int'},
}
def __init__(self, *, start: int, end: int, **kwargs) -> None:
super(TaskIdRange, self).__init__(**kwargs)
self.start = start
self.end = end
[docs]class TaskListOptions(Model):
"""Additional parameters for list operation.
:param filter: An OData $filter clause. For more information on
constructing this filter, see
https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks.
:type filter: str
:param select: An OData $select clause.
:type select: str
:param expand: An OData $expand clause.
:type expand: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 Tasks can be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'filter': {'key': '', 'type': 'str'},
'select': {'key': '', 'type': 'str'},
'expand': {'key': '', 'type': 'str'},
'max_results': {'key': '', 'type': 'int'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(TaskListOptions, self).__init__(**kwargs)
self.filter = filter
self.select = select
self.expand = expand
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class TaskListSubtasksOptions(Model):
"""Additional parameters for list_subtasks operation.
:param select: An OData $select clause.
:type select: str
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'select': {'key': '', 'type': 'str'},
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
super(TaskListSubtasksOptions, self).__init__(**kwargs)
self.select = select
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
[docs]class TaskReactivateOptions(Model):
"""Additional parameters for reactivate operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(TaskReactivateOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class TaskSchedulingPolicy(Model):
"""Specifies how Tasks should be distributed across Compute Nodes.
All required parameters must be populated in order to send to Azure.
:param node_fill_type: Required. How Tasks are distributed across Compute
Nodes in a Pool. If not specified, the default is spread. Possible values
include: 'spread', 'pack'
:type node_fill_type: str or ~azure.batch.models.ComputeNodeFillType
"""
_validation = {
'node_fill_type': {'required': True},
}
_attribute_map = {
'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'},
}
def __init__(self, *, node_fill_type, **kwargs) -> None:
super(TaskSchedulingPolicy, self).__init__(**kwargs)
self.node_fill_type = node_fill_type
[docs]class TaskStatistics(Model):
"""Resource usage statistics for a Task.
All required parameters must be populated in order to send to Azure.
:param url: Required. The URL of the statistics.
:type url: str
:param start_time: Required. The start time of the time range covered by
the statistics.
:type start_time: datetime
:param last_update_time: Required. The time at which the statistics were
last updated. All statistics are limited to the range between startTime
and lastUpdateTime.
:type last_update_time: datetime
:param user_cpu_time: Required. The total user mode CPU time (summed
across all cores and all Compute Nodes) consumed by the Task.
:type user_cpu_time: timedelta
:param kernel_cpu_time: Required. The total kernel mode CPU time (summed
across all cores and all Compute Nodes) consumed by the Task.
:type kernel_cpu_time: timedelta
:param wall_clock_time: Required. The total wall clock time of the Task.
The wall clock time is the elapsed time from when the Task started running
on a Compute Node to when it finished (or to the last time the statistics
were updated, if the Task had not finished by then). If the Task was
retried, this includes the wall clock time of all the Task retries.
:type wall_clock_time: timedelta
:param read_iops: Required. The total number of disk read operations made
by the Task.
:type read_iops: long
:param write_iops: Required. The total number of disk write operations
made by the Task.
:type write_iops: long
:param read_io_gi_b: Required. The total gibibytes read from disk by the
Task.
:type read_io_gi_b: float
:param write_io_gi_b: Required. The total gibibytes written to disk by the
Task.
:type write_io_gi_b: float
:param wait_time: Required. The total wait time of the Task. The wait time
for a Task is defined as the elapsed time between the creation of the Task
and the start of Task execution. (If the Task is retried due to failures,
the wait time is the time to the most recent Task execution.).
:type wait_time: timedelta
"""
_validation = {
'url': {'required': True},
'start_time': {'required': True},
'last_update_time': {'required': True},
'user_cpu_time': {'required': True},
'kernel_cpu_time': {'required': True},
'wall_clock_time': {'required': True},
'read_iops': {'required': True},
'write_iops': {'required': True},
'read_io_gi_b': {'required': True},
'write_io_gi_b': {'required': True},
'wait_time': {'required': True},
}
_attribute_map = {
'url': {'key': 'url', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'},
'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'},
'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'},
'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'},
'read_iops': {'key': 'readIOps', 'type': 'long'},
'write_iops': {'key': 'writeIOps', 'type': 'long'},
'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'},
'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'},
'wait_time': {'key': 'waitTime', 'type': 'duration'},
}
def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, write_iops: int, read_io_gi_b: float, write_io_gi_b: float, wait_time, **kwargs) -> None:
super(TaskStatistics, self).__init__(**kwargs)
self.url = url
self.start_time = start_time
self.last_update_time = last_update_time
self.user_cpu_time = user_cpu_time
self.kernel_cpu_time = kernel_cpu_time
self.wall_clock_time = wall_clock_time
self.read_iops = read_iops
self.write_iops = write_iops
self.read_io_gi_b = read_io_gi_b
self.write_io_gi_b = write_io_gi_b
self.wait_time = wait_time
[docs]class TaskTerminateOptions(Model):
"""Additional parameters for terminate operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(TaskTerminateOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class TaskUpdateOptions(Model):
"""Additional parameters for update operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
'if_match': {'key': '', 'type': 'str'},
'if_none_match': {'key': '', 'type': 'str'},
'if_modified_since': {'key': '', 'type': 'rfc-1123'},
'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
super(TaskUpdateOptions, self).__init__(**kwargs)
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
[docs]class TaskUpdateParameter(Model):
"""The set of changes to be made to a Task.
:param constraints: Constraints that apply to this Task. If omitted, the
Task is given the default constraints. For multi-instance Tasks, updating
the retention time applies only to the primary Task and not subtasks.
:type constraints: ~azure.batch.models.TaskConstraints
"""
_attribute_map = {
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
}
def __init__(self, *, constraints=None, **kwargs) -> None:
super(TaskUpdateParameter, self).__init__(**kwargs)
self.constraints = constraints
[docs]class UploadBatchServiceLogsConfiguration(Model):
"""The Azure Batch service log files upload configuration for a Compute Node.
All required parameters must be populated in order to send to Azure.
:param container_url: Required. The URL of the container within Azure Blob
Storage to which to upload the Batch Service log file(s). The URL must
include a Shared Access Signature (SAS) granting write permissions to the
container. The SAS duration must allow enough time for the upload to
finish. The start time for SAS is optional and recommended to not be
specified.
:type container_url: str
:param start_time: Required. The start of the time range from which to
upload Batch Service log file(s). Any log file containing a log message in
the time range will be uploaded. This means that the operation might
retrieve more logs than have been requested since the entire log file is
always uploaded, but the operation should not retrieve fewer logs than
have been requested.
:type start_time: datetime
:param end_time: The end of the time range from which to upload Batch
Service log file(s). Any log file containing a log message in the time
range will be uploaded. This means that the operation might retrieve more
logs than have been requested since the entire log file is always
uploaded, but the operation should not retrieve fewer logs than have been
requested. If omitted, the default is to upload all logs available after
the startTime.
:type end_time: datetime
"""
_validation = {
'container_url': {'required': True},
'start_time': {'required': True},
}
_attribute_map = {
'container_url': {'key': 'containerUrl', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
}
def __init__(self, *, container_url: str, start_time, end_time=None, **kwargs) -> None:
super(UploadBatchServiceLogsConfiguration, self).__init__(**kwargs)
self.container_url = container_url
self.start_time = start_time
self.end_time = end_time
[docs]class UploadBatchServiceLogsResult(Model):
"""The result of uploading Batch service log files from a specific Compute
Node.
All required parameters must be populated in order to send to Azure.
:param virtual_directory_name: Required. The virtual directory within
Azure Blob Storage container to which the Batch Service log file(s) will
be uploaded. The virtual directory name is part of the blob name for each
log file uploaded, and it is built based poolId, nodeId and a unique
identifier.
:type virtual_directory_name: str
:param number_of_files_uploaded: Required. The number of log files which
will be uploaded.
:type number_of_files_uploaded: int
"""
_validation = {
'virtual_directory_name': {'required': True},
'number_of_files_uploaded': {'required': True},
}
_attribute_map = {
'virtual_directory_name': {'key': 'virtualDirectoryName', 'type': 'str'},
'number_of_files_uploaded': {'key': 'numberOfFilesUploaded', 'type': 'int'},
}
def __init__(self, *, virtual_directory_name: str, number_of_files_uploaded: int, **kwargs) -> None:
super(UploadBatchServiceLogsResult, self).__init__(**kwargs)
self.virtual_directory_name = virtual_directory_name
self.number_of_files_uploaded = number_of_files_uploaded
[docs]class UsageStatistics(Model):
"""Statistics related to Pool usage information.
All required parameters must be populated in order to send to Azure.
:param start_time: Required. The start time of the time range covered by
the statistics.
:type start_time: datetime
:param last_update_time: Required. The time at which the statistics were
last updated. All statistics are limited to the range between startTime
and lastUpdateTime.
:type last_update_time: datetime
:param dedicated_core_time: Required. The aggregated wall-clock time of
the dedicated Compute Node cores being part of the Pool.
:type dedicated_core_time: timedelta
"""
_validation = {
'start_time': {'required': True},
'last_update_time': {'required': True},
'dedicated_core_time': {'required': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'},
'dedicated_core_time': {'key': 'dedicatedCoreTime', 'type': 'duration'},
}
def __init__(self, *, start_time, last_update_time, dedicated_core_time, **kwargs) -> None:
super(UsageStatistics, self).__init__(**kwargs)
self.start_time = start_time
self.last_update_time = last_update_time
self.dedicated_core_time = dedicated_core_time
[docs]class UserAccount(Model):
"""Properties used to create a user used to execute Tasks on an Azure Batch
Compute Node.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the user Account.
:type name: str
:param password: Required. The password for the user Account.
:type password: str
:param elevation_level: The elevation level of the user Account. The
default value is nonAdmin. Possible values include: 'nonAdmin', 'admin'
:type elevation_level: str or ~azure.batch.models.ElevationLevel
:param linux_user_configuration: The Linux-specific user configuration for
the user Account. This property is ignored if specified on a Windows Pool.
If not specified, the user is created with the default options.
:type linux_user_configuration: ~azure.batch.models.LinuxUserConfiguration
:param windows_user_configuration: The Windows-specific user configuration
for the user Account. This property can only be specified if the user is
on a Windows Pool. If not specified and on a Windows Pool, the user is
created with the default options.
:type windows_user_configuration:
~azure.batch.models.WindowsUserConfiguration
"""
_validation = {
'name': {'required': True},
'password': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'},
'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'},
'windows_user_configuration': {'key': 'windowsUserConfiguration', 'type': 'WindowsUserConfiguration'},
}
def __init__(self, *, name: str, password: str, elevation_level=None, linux_user_configuration=None, windows_user_configuration=None, **kwargs) -> None:
super(UserAccount, self).__init__(**kwargs)
self.name = name
self.password = password
self.elevation_level = elevation_level
self.linux_user_configuration = linux_user_configuration
self.windows_user_configuration = windows_user_configuration
[docs]class UserIdentity(Model):
"""The definition of the user identity under which the Task is run.
Specify either the userName or autoUser property, but not both.
:param user_name: The name of the user identity under which the Task is
run. The userName and autoUser properties are mutually exclusive; you must
specify one but not both.
:type user_name: str
:param auto_user: The auto user under which the Task is run. The userName
and autoUser properties are mutually exclusive; you must specify one but
not both.
:type auto_user: ~azure.batch.models.AutoUserSpecification
"""
_attribute_map = {
'user_name': {'key': 'username', 'type': 'str'},
'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'},
}
def __init__(self, *, user_name: str=None, auto_user=None, **kwargs) -> None:
super(UserIdentity, self).__init__(**kwargs)
self.user_name = user_name
self.auto_user = auto_user
[docs]class VirtualMachineConfiguration(Model):
"""The configuration for Compute Nodes in a Pool based on the Azure Virtual
Machines infrastructure.
All required parameters must be populated in order to send to Azure.
:param image_reference: Required. A reference to the Azure Virtual
Machines Marketplace Image or the custom Virtual Machine Image to use.
:type image_reference: ~azure.batch.models.ImageReference
:param node_agent_sku_id: Required. The SKU of the Batch Compute Node
agent to be provisioned on Compute Nodes in the Pool. The Batch Compute
Node agent is a program that runs on each Compute Node in the Pool, and
provides the command-and-control interface between the Compute Node and
the Batch service. There are different implementations of the Compute Node
agent, known as SKUs, for different operating systems. You must specify a
Compute Node agent SKU which matches the selected Image reference. To get
the list of supported Compute Node agent SKUs along with their list of
verified Image references, see the 'List supported Compute Node agent
SKUs' operation.
:type node_agent_sku_id: str
:param windows_configuration: Windows operating system settings on the
virtual machine. This property must not be specified if the imageReference
property specifies a Linux OS Image.
:type windows_configuration: ~azure.batch.models.WindowsConfiguration
:param data_disks: The configuration for data disks attached to the
Compute Nodes in the Pool. This property must be specified if the Compute
Nodes in the Pool need to have empty data disks attached to them. This
cannot be updated. Each Compute Node gets its own disk (the disk is not a
file share). Existing disks cannot be attached, each attached disk is
empty. When the Compute Node is removed from the Pool, the disk and all
data associated with it is also deleted. The disk is not formatted after
being attached, it must be formatted before use - for more information see
https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux
and
https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine.
:type data_disks: list[~azure.batch.models.DataDisk]
:param license_type: The type of on-premises license to be used when
deploying the operating system. This only applies to Images that contain
the Windows operating system, and should only be used when you hold valid
on-premises licenses for the Compute Nodes which will be deployed. If
omitted, no on-premises licensing discount is applied. Values are:
Windows_Server - The on-premises license is for Windows Server.
Windows_Client - The on-premises license is for Windows Client.
:type license_type: str
:param container_configuration: The container configuration for the Pool.
If specified, setup is performed on each Compute Node in the Pool to allow
Tasks to run in containers. All regular Tasks and Job manager Tasks run on
this Pool must specify the containerSettings property, and all other Tasks
may specify it.
:type container_configuration: ~azure.batch.models.ContainerConfiguration
:param disk_encryption_configuration: The disk encryption configuration
for the pool. If specified, encryption is performed on each node in the
pool during node provisioning.
:type disk_encryption_configuration:
~azure.batch.models.DiskEncryptionConfiguration
"""
_validation = {
'image_reference': {'required': True},
'node_agent_sku_id': {'required': True},
}
_attribute_map = {
'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'},
'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'},
'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'},
'license_type': {'key': 'licenseType', 'type': 'str'},
'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'},
'disk_encryption_configuration': {'key': 'diskEncryptionConfiguration', 'type': 'DiskEncryptionConfiguration'},
}
def __init__(self, *, image_reference, node_agent_sku_id: str, windows_configuration=None, data_disks=None, license_type: str=None, container_configuration=None, disk_encryption_configuration=None, **kwargs) -> None:
super(VirtualMachineConfiguration, self).__init__(**kwargs)
self.image_reference = image_reference
self.node_agent_sku_id = node_agent_sku_id
self.windows_configuration = windows_configuration
self.data_disks = data_disks
self.license_type = license_type
self.container_configuration = container_configuration
self.disk_encryption_configuration = disk_encryption_configuration
[docs]class WindowsConfiguration(Model):
"""Windows operating system settings to apply to the virtual machine.
:param enable_automatic_updates: Whether automatic updates are enabled on
the virtual machine. If omitted, the default value is true.
:type enable_automatic_updates: bool
"""
_attribute_map = {
'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'},
}
def __init__(self, *, enable_automatic_updates: bool=None, **kwargs) -> None:
super(WindowsConfiguration, self).__init__(**kwargs)
self.enable_automatic_updates = enable_automatic_updates
[docs]class WindowsUserConfiguration(Model):
"""Properties used to create a user Account on a Windows Compute Node.
:param login_mode: The login mode for the user. The default value for
VirtualMachineConfiguration Pools is 'batch' and for
CloudServiceConfiguration Pools is 'interactive'. Possible values include:
'batch', 'interactive'
:type login_mode: str or ~azure.batch.models.LoginMode
"""
_attribute_map = {
'login_mode': {'key': 'loginMode', 'type': 'LoginMode'},
}
def __init__(self, *, login_mode=None, **kwargs) -> None:
super(WindowsUserConfiguration, self).__init__(**kwargs)
self.login_mode = login_mode