# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from msrest.pipeline import ClientRawResponse
from msrest.exceptions import HttpOperationError
from . import models
class ComputerVisionAPIConfiguration(Configuration):
"""Configuration for ComputerVisionAPI
Note that all parameters used to create this instance are saved as instance
attributes.
:param azure_region: Supported Azure regions for Cognitive Services
endpoints. Possible values include: 'westus', 'westeurope',
'southeastasia', 'eastus2', 'westcentralus', 'westus2', 'eastus',
'southcentralus', 'northeurope', 'eastasia', 'australiaeast',
'brazilsouth'
:type azure_region: str or
~azure.cognitiveservices.vision.computervision.models.AzureRegions
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
"""
def __init__(
self, azure_region, credentials):
if azure_region is None:
raise ValueError("Parameter 'azure_region' must not be None.")
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
base_url = 'https://{AzureRegion}.api.cognitive.microsoft.com/vision/v2.0'
super(ComputerVisionAPIConfiguration, self).__init__(base_url)
self.add_user_agent('azure-cognitiveservices-vision-computervision/{}'.format(VERSION))
self.azure_region = azure_region
self.credentials = credentials


class ComputerVisionAPI(SDKClient):
"""The Computer Vision API provides state-of-the-art algorithms to process images and return information. For example, it can be used to determine if an image contains mature content, or it can be used to find all the faces in an image. It also has other features like estimating dominant and accent colors, categorizing the content of images, and describing an image with complete English sentences. Additionally, it can also intelligently generate images thumbnails for displaying large images effectively.
:ivar config: Configuration for client.
:vartype config: ComputerVisionAPIConfiguration
:param azure_region: Supported Azure regions for Cognitive Services
endpoints. Possible values include: 'westus', 'westeurope',
'southeastasia', 'eastus2', 'westcentralus', 'westus2', 'eastus',
'southcentralus', 'northeurope', 'eastasia', 'australiaeast',
'brazilsouth'
:type azure_region: str or
~azure.cognitiveservices.vision.computervision.models.AzureRegions
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
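
Example (a minimal construction sketch; the region and subscription key
below are illustrative placeholders, and CognitiveServicesCredentials is
the key-based credential class from msrest.authentication)::

    from azure.cognitiveservices.vision.computervision import ComputerVisionAPI
    from msrest.authentication import CognitiveServicesCredentials

    # Replace the region and key with your own Cognitive Services values.
    credentials = CognitiveServicesCredentials('<subscription key>')
    client = ComputerVisionAPI('westus', credentials)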
"""
def __init__(
self, azure_region, credentials):
self.config = ComputerVisionAPIConfiguration(azure_region, credentials)
super(ComputerVisionAPI, self).__init__(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '2.0'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)

def list_models(
self, custom_headers=None, raw=False, **operation_config):
"""This operation returns the list of domain-specific models that are
supported by the Computer Vision API. Currently, the API only supports
one domain-specific model: a celebrity recognizer. A successful
response will be returned in JSON. If the request failed, the response
will contain an error code and a message to help understand what went
wrong.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ListModelsResult or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.computervision.models.ListModelsResult
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ComputerVisionErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException>`
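
Example (a minimal sketch, assuming ``client`` is a ComputerVisionAPI
instance constructed as shown in the class docstring)::

    # Returns a ListModelsResult describing the supported
    # domain-specific models (currently the celebrity recognizer).
    result = client.list_models()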
"""
# Construct URL
url = self.list_models.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ComputerVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ListModelsResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list_models.metadata = {'url': '/models'}

def analyze_image(
self, url, visual_features=None, details=None, language="en", custom_headers=None, raw=False, **operation_config):
"""This operation extracts a rich set of visual features based on the
image content. Two input methods are supported -- (1) Uploading an
image or (2) specifying an image URL. Within your request, there is an
optional parameter to allow you to choose which features to return. By
default, image categories are returned in the response.
:param url: Publicly reachable URL of an image
:type url: str
:param visual_features: A string indicating what visual feature types
to return. Multiple values should be comma-separated. Valid visual
feature types include: Categories - categorizes image content according
to a taxonomy defined in documentation. Tags - tags the image with a
detailed list of words related to the image content. Description -
describes the image content with a complete English sentence. Faces -
detects if faces are present. If present, generates coordinates, gender
and age. ImageType - detects if the image is clipart or a line drawing.
Color - determines the accent color, dominant color, and whether an
image is black & white. Adult - detects if the image is pornographic in
nature (depicts nudity or a sex act). Sexually suggestive content is
also detected.
:type visual_features: list[str or
~azure.cognitiveservices.vision.computervision.models.VisualFeatureTypes]
:param details: A string indicating which domain-specific details to
return. Multiple values should be comma-separated. Valid details
include: Celebrities - identifies celebrities if detected in the
image.
:type details: list[str or
~azure.cognitiveservices.vision.computervision.models.Details]
:param language: The desired language for output generation. If this
parameter is not specified, the default value is "en". Supported
languages: en - English (default), es - Spanish, ja - Japanese,
pt - Portuguese, zh - Simplified Chinese. Possible values include:
'en', 'es', 'ja', 'pt', 'zh'
:type language: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImageAnalysis or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.computervision.models.ImageAnalysis or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ComputerVisionErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException>`
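
Example (a minimal sketch, assuming ``client`` is a ComputerVisionAPI
instance constructed as shown in the class docstring; the image URL is
an illustrative placeholder)::

    analysis = client.analyze_image(
        'https://example.com/photo.jpg',
        visual_features=['Categories', 'Description', 'Color'])
    # analysis is an ImageAnalysis model; when Description is requested,
    # the generated captions are available under analysis.description.
    if analysis.description and analysis.description.captions:
        print(analysis.description.captions[0].text)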
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = self.analyze_image.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if visual_features is not None:
query_parameters['visualFeatures'] = self._serialize.query("visual_features", visual_features, '[VisualFeatureTypes]', div=',')
if details is not None:
query_parameters['details'] = self._serialize.query("details", details, '[Details]', div=',')
if language is not None:
query_parameters['language'] = self._serialize.query("language", language, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ComputerVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImageAnalysis', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
analyze_image.metadata = {'url': '/analyze'}

def generate_thumbnail(
self, width, height, url, smart_cropping=False, custom_headers=None, raw=False, callback=None, **operation_config):
"""This operation generates a thumbnail image with the user-specified
width and height. By default, the service analyzes the image,
identifies the region of interest (ROI), and generates smart cropping
coordinates based on the ROI. Smart cropping helps when you specify an
aspect ratio that differs from that of the input image. A successful
response contains the thumbnail image binary. If the request failed,
the response contains an error code and a message to help determine
what went wrong.
:param width: Width of the thumbnail. It must be between 1 and 1024.
Recommended minimum of 50.
:type width: int
:param height: Height of the thumbnail. It must be between 1 and 1024.
Recommended minimum of 50.
:type height: int
:param url: Publicly reachable URL of an image
:type url: str
:param smart_cropping: Boolean flag for enabling smart cropping.
:type smart_cropping: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param callback: When specified, will be called with each chunk of
data that is streamed. The callback should take two arguments, the
bytes of the current chunk of data and the response object. If the
data is uploading, response will be None.
:type callback: Callable[Bytes, response=None]
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: object or ClientRawResponse if raw=true
:rtype: Generator or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
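
Example (a minimal sketch, assuming ``client`` is a ComputerVisionAPI
instance constructed as shown in the class docstring; the image URL and
output file name are illustrative placeholders)::

    thumbnail = client.generate_thumbnail(
        100, 100, 'https://example.com/photo.jpg', smart_cropping=True)
    # The thumbnail binary is streamed back in chunks.
    with open('thumbnail.jpg', 'wb') as output:
        for chunk in thumbnail:
            output.write(chunk)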
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = self.generate_thumbnail.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['width'] = self._serialize.query("width", width, 'int', maximum=1023, minimum=1)
query_parameters['height'] = self._serialize.query("height", height, 'int', maximum=1023, minimum=1)
if smart_cropping is not None:
query_parameters['smartCropping'] = self._serialize.query("smart_cropping", smart_cropping, 'bool')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=True, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._client.stream_download(response, callback)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
generate_thumbnail.metadata = {'url': '/generateThumbnail'}

def recognize_printed_text(
self, url, detect_orientation=True, language="unk", custom_headers=None, raw=False, **operation_config):
"""Optical Character Recognition (OCR) detects printed text in an image
and extracts the recognized characters into a machine-usable character
stream. Upon success, the OCR results will be returned. Upon failure,
the error code together with an error message will be returned. The
error code can be one of InvalidImageUrl, InvalidImageFormat,
InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or
InternalServerError.
:param detect_orientation: Whether to detect the text orientation in the
image. With detectOrientation=true the OCR service tries to detect the
image orientation and correct it before further processing (e.g. if
it's upside-down).
:type detect_orientation: bool
:param url: Publicly reachable URL of an image
:type url: str
:param language: The BCP-47 language code of the text to be detected
in the image. The default value is 'unk'. Possible values include:
'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de',
'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv',
'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk'
:type language: str or
~azure.cognitiveservices.vision.computervision.models.OcrLanguages
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: OcrResult or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.computervision.models.OcrResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ComputerVisionErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException>`
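
Example (a minimal sketch, assuming ``client`` is a ComputerVisionAPI
instance constructed as shown in the class docstring; the image URL is
an illustrative placeholder)::

    ocr = client.recognize_printed_text(
        'https://example.com/sign.jpg', language='en')
    # OcrResult groups the recognized text into regions, lines and words.
    for region in ocr.regions:
        for line in region.lines:
            print(' '.join(word.text for word in line.words))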
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = self.recognize_printed_text.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['detectOrientation'] = self._serialize.query("detect_orientation", detect_orientation, 'bool')
if language is not None:
query_parameters['language'] = self._serialize.query("language", language, 'OcrLanguages')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ComputerVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OcrResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
recognize_printed_text.metadata = {'url': '/ocr'}

def describe_image(
self, url, max_candidates="1", language="en", custom_headers=None, raw=False, **operation_config):
"""This operation generates a description of an image in human readable
language with complete sentences. The description is based on a
collection of content tags, which are also returned by the operation.
More than one description can be generated for each image.
Descriptions are ordered by their confidence score. All descriptions
are in English. Two input methods are supported -- (1) Uploading an
image or (2) specifying an image URL. A successful response will be
returned in JSON. If the request failed, the response will contain an
error code and a message to help understand what went wrong.
:param url: Publicly reachable URL of an image
:type url: str
:param max_candidates: Maximum number of candidate descriptions to be
returned. The default is 1.
:type max_candidates: str
:param language: The desired language for output generation. If this
parameter is not specified, the default value is "en". Supported
languages: en - English (default), es - Spanish, ja - Japanese,
pt - Portuguese, zh - Simplified Chinese. Possible values include:
'en', 'es', 'ja', 'pt', 'zh'
:type language: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImageDescription or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.computervision.models.ImageDescription
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ComputerVisionErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException>`
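
Example (a minimal sketch, assuming ``client`` is a ComputerVisionAPI
instance constructed as shown in the class docstring; the image URL is
an illustrative placeholder)::

    description = client.describe_image(
        'https://example.com/photo.jpg', max_candidates="3")
    # Candidate captions are ordered by confidence.
    for caption in description.captions:
        print(caption.text, caption.confidence)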
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = self.describe_image.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if max_candidates is not None:
query_parameters['maxCandidates'] = self._serialize.query("max_candidates", max_candidates, 'str')
if language is not None:
query_parameters['language'] = self._serialize.query("language", language, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ComputerVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImageDescription', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
describe_image.metadata = {'url': '/describe'}

def tag_image(
self, url, language="en", custom_headers=None, raw=False, **operation_config):
"""This operation generates a list of words, or tags, that are relevant to
the content of the supplied image. The Computer Vision API can return
tags based on objects, living beings, scenery or actions found in
images. Unlike categories, tags are not organized according to a
hierarchical classification system, but correspond to image content.
Tags may contain hints to avoid ambiguity or provide context, for
example the tag 'cello' may be accompanied by the hint 'musical
instrument'. All tags are in English.
:param url: Publicly reachable URL of an image
:type url: str
:param language: The desired language for output generation. If this
parameter is not specified, the default value is "en". Supported
languages: en - English (default), es - Spanish, ja - Japanese,
pt - Portuguese, zh - Simplified Chinese. Possible values include:
'en', 'es', 'ja', 'pt', 'zh'
:type language: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: TagResult or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.computervision.models.TagResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ComputerVisionErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException>`
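
Example (a minimal sketch, assuming ``client`` is a ComputerVisionAPI
instance constructed as shown in the class docstring; the image URL is
an illustrative placeholder)::

    tag_result = client.tag_image('https://example.com/photo.jpg')
    for tag in tag_result.tags:
        print(tag.name, tag.confidence)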
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = self.tag_image.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if language is not None:
query_parameters['language'] = self._serialize.query("language", language, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ComputerVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TagResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
tag_image.metadata = {'url': '/tag'}

def analyze_image_by_domain(
self, model, url, language="en", custom_headers=None, raw=False, **operation_config):
"""This operation recognizes content within an image by applying a
domain-specific model. The list of domain-specific models that are
supported by the Computer Vision API can be retrieved using the /models
GET request. Currently, the API only provides a single domain-specific
model: celebrities. Two input methods are supported -- (1) Uploading an
image or (2) specifying an image URL. A successful response will be
returned in JSON. If the request failed, the response will contain an
error code and a message to help understand what went wrong.
:param model: The domain-specific content to recognize.
:type model: str
:param url: Publicly reachable URL of an image
:type url: str
:param language: The desired language for output generation. If this
parameter is not specified, the default value is "en". Supported
languages: en - English (default), es - Spanish, ja - Japanese,
pt - Portuguese, zh - Simplified Chinese. Possible values include:
'en', 'es', 'ja', 'pt', 'zh'
:type language: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DomainModelResults or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.computervision.models.DomainModelResults
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ComputerVisionErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException>`
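
Example (a minimal sketch, assuming ``client`` is a ComputerVisionAPI
instance constructed as shown in the class docstring; the image URL is
an illustrative placeholder and 'celebrities' is the model name
reported by list_models)::

    domain_result = client.analyze_image_by_domain(
        'celebrities', 'https://example.com/photo.jpg')
    # DomainModelResults carries the domain-specific payload as a
    # raw JSON-like object in its result attribute.
    print(domain_result.result)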
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = self.analyze_image_by_domain.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True),
'model': self._serialize.url("model", model, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if language is not None:
query_parameters['language'] = self._serialize.query("language", language, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ComputerVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DomainModelResults', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
analyze_image_by_domain.metadata = {'url': '/models/{model}/analyze'}

def recognize_text(
self, url, mode, custom_headers=None, raw=False, **operation_config):
"""Recognize Text operation. When you use the Recognize Text interface,
the response contains a field called 'Operation-Location'. The
'Operation-Location' field contains the URL that you must use for your
Get Recognize Text Operation Result operation.
:param mode: Type of text to recognize. Possible values include:
'Handwritten', 'Printed'
:type mode: str or
~azure.cognitiveservices.vision.computervision.models.TextRecognitionMode
:param url: Publicly reachable URL of an image
:type url: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ComputerVisionErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException>`
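
Example (a minimal sketch, assuming ``client`` is a ComputerVisionAPI
instance constructed as shown in the class docstring; the image URL is
an illustrative placeholder)::

    raw_result = client.recognize_text(
        'https://example.com/handwriting.jpg', 'Handwritten', raw=True)
    # The operation id is the last segment of the returned
    # 'Operation-Location' header; pass it to get_text_operation_result
    # to retrieve the recognized text once the operation completes.
    operation_id = raw_result.headers['Operation-Location'].split('/')[-1]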
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = self.recognize_text.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['mode'] = self._serialize.query("mode", mode, 'TextRecognitionMode')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.ComputerVisionErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Operation-Location': 'str',
})
return client_raw_response
recognize_text.metadata = {'url': '/recognizeText'}

def get_text_operation_result(
self, operation_id, custom_headers=None, raw=False, **operation_config):
"""This interface is used for getting text operation result. The URL to
this interface should be retrieved from 'Operation-Location' field
returned from Recognize Text interface.
:param operation_id: Id of the text operation returned in the response
of the 'Recognize Text'
:type operation_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: TextOperationResult or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.computervision.models.TextOperationResult
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ComputerVisionErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException>`
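
Example (a minimal sketch, assuming ``client`` is a ComputerVisionAPI
instance constructed as shown in the class docstring and ``operation_id``
was extracted from the 'Operation-Location' header returned by
recognize_text; a production caller would poll until the status reports
completion)::

    import time

    time.sleep(5)
    result = client.get_text_operation_result(operation_id)
    # result is a TextOperationResult; on success the recognized text
    # is available through its recognition_result attribute.
    print(result.status)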
"""
# Construct URL
url = self.get_text_operation_result.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True),
'operationId': self._serialize.url("operation_id", operation_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ComputerVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TextOperationResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_text_operation_result.metadata = {'url': '/textOperations/{operationId}'}

def analyze_image_in_stream(
self, image, visual_features=None, details=None, language="en", custom_headers=None, raw=False, callback=None, **operation_config):
"""This operation extracts a rich set of visual features based on the
image content.
:param image: An image stream.
:type image: Generator
:param visual_features: A string indicating what visual feature types
to return. Multiple values should be comma-separated. Valid visual
feature types include: Categories - categorizes image content according
to a taxonomy defined in documentation. Tags - tags the image with a
detailed list of words related to the image content. Description -
describes the image content with a complete English sentence. Faces -
detects if faces are present. If present, generates coordinates, gender
and age. ImageType - detects if the image is clipart or a line drawing.
Color - determines the accent color, dominant color, and whether an
image is black & white. Adult - detects if the image is pornographic in
nature (depicts nudity or a sex act). Sexually suggestive content is
also detected.
:type visual_features: list[str or
~azure.cognitiveservices.vision.computervision.models.VisualFeatureTypes]
:param details: A string indicating which domain-specific details to
return. Multiple values should be comma-separated. Valid details
include: Celebrities - identifies celebrities if detected in the
image. Possible values include: 'Celebrities', 'Landmarks'
:type details: str
:param language: The desired language for output generation. If this
parameter is not specified, the default value is "en". Supported
languages: en - English (default), es - Spanish, ja - Japanese,
pt - Portuguese, zh - Simplified Chinese. Possible values include:
'en', 'es', 'ja', 'pt', 'zh'
:type language: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param callback: When specified, will be called with each chunk of
data that is streamed. The callback should take two arguments, the
bytes of the current chunk of data and the response object. If the
data is uploading, response will be None.
:type callback: Callable[Bytes, response=None]
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImageAnalysis or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.computervision.models.ImageAnalysis or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ComputerVisionErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException>`
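
Example (a minimal sketch, assuming ``client`` is a ComputerVisionAPI
instance constructed as shown in the class docstring; the local file
name is an illustrative placeholder)::

    with open('local_photo.jpg', 'rb') as image_stream:
        analysis = client.analyze_image_in_stream(
            image_stream, visual_features=['Tags', 'Adult'])
    print(analysis.adult.is_adult_content)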
"""
# Construct URL
url = self.analyze_image_in_stream.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if visual_features is not None:
query_parameters['visualFeatures'] = self._serialize.query("visual_features", visual_features, '[VisualFeatureTypes]', div=',')
if details is not None:
query_parameters['details'] = self._serialize.query("details", details, 'str')
if language is not None:
query_parameters['language'] = self._serialize.query("language", language, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/octet-stream'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._client.stream_upload(image, callback)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ComputerVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImageAnalysis', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
analyze_image_in_stream.metadata = {'url': '/analyze'}

def generate_thumbnail_in_stream(
self, width, height, image, smart_cropping=False, custom_headers=None, raw=False, callback=None, **operation_config):
"""This operation generates a thumbnail image with the user-specified
width and height. By default, the service analyzes the image,
identifies the region of interest (ROI), and generates smart cropping
coordinates based on the ROI. Smart cropping helps when you specify an
aspect ratio that differs from that of the input image. A successful
response contains the thumbnail image binary. If the request failed,
the response contains an error code and a message to help determine
what went wrong.
:param width: Width of the thumbnail. It must be between 1 and 1024.
Recommended minimum of 50.
:type width: int
:param height: Height of the thumbnail. It must be between 1 and 1024.
Recommended minimum of 50.
:type height: int
:param image: An image stream.
:type image: Generator
:param smart_cropping: Boolean flag for enabling smart cropping.
:type smart_cropping: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param callback: When specified, will be called with each chunk of
data that is streamed. The callback should take two arguments, the
bytes of the current chunk of data and the response object. If the
data is uploading, response will be None.
:type callback: Callable[Bytes, response=None]
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: object or ClientRawResponse if raw=true
:rtype: Generator or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.generate_thumbnail_in_stream.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['width'] = self._serialize.query("width", width, 'int', maximum=1023, minimum=1)
query_parameters['height'] = self._serialize.query("height", height, 'int', maximum=1023, minimum=1)
if smart_cropping is not None:
query_parameters['smartCropping'] = self._serialize.query("smart_cropping", smart_cropping, 'bool')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/octet-stream'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._client.stream_upload(image, callback)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=True, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._client.stream_download(response, callback)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
generate_thumbnail_in_stream.metadata = {'url': '/generateThumbnail'}

def recognize_printed_text_in_stream(
self, image, detect_orientation=True, language="unk", custom_headers=None, raw=False, callback=None, **operation_config):
"""Optical Character Recognition (OCR) detects printed text in an image
and extracts the recognized characters into a machine-usable character
stream. Upon success, the OCR results will be returned. Upon failure,
the error code together with an error message will be returned. The
error code can be one of InvalidImageUrl, InvalidImageFormat,
InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or
InternalServerError.
:param detect_orientation: Whether to detect the text orientation in the
image. With detectOrientation=true the OCR service tries to detect the
image orientation and correct it before further processing (e.g. if
it's upside-down).
:type detect_orientation: bool
:param image: An image stream.
:type image: Generator
:param language: The BCP-47 language code of the text to be detected
in the image. The default value is 'unk'. Possible values include:
'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de',
'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv',
'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk'
:type language: str or
~azure.cognitiveservices.vision.computervision.models.OcrLanguages
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param callback: When specified, will be called with each chunk of
data that is streamed. The callback should take two arguments, the
bytes of the current chunk of data and the response object. If the
data is uploading, response will be None.
:type callback: Callable[Bytes, response=None]
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: OcrResult or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.computervision.models.OcrResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ComputerVisionErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException>`
"""
# Construct URL
url = self.recognize_printed_text_in_stream.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if language is not None:
query_parameters['language'] = self._serialize.query("language", language, 'OcrLanguages')
query_parameters['detectOrientation'] = self._serialize.query("detect_orientation", detect_orientation, 'bool')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/octet-stream'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._client.stream_upload(image, callback)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ComputerVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OcrResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
recognize_printed_text_in_stream.metadata = {'url': '/ocr'}

def describe_image_in_stream(
self, image, max_candidates="1", language="en", custom_headers=None, raw=False, callback=None, **operation_config):
"""This operation generates a description of an image in human readable
language with complete sentences. The description is based on a
collection of content tags, which are also returned by the operation.
More than one description can be generated for each image.
Descriptions are ordered by their confidence score. All descriptions
are in English. Two input methods are supported -- (1) Uploading an
image or (2) specifying an image URL. A successful response will be
returned in JSON. If the request failed, the response will contain an
error code and a message to help understand what went wrong.
:param image: An image stream.
:type image: Generator
:param max_candidates: Maximum number of candidate descriptions to be
returned. The default is 1.
:type max_candidates: str
:param language: The desired language for output generation. If this
parameter is not specified, the default value is "en". Supported
languages: en - English (default), es - Spanish, ja - Japanese,
pt - Portuguese, zh - Simplified Chinese. Possible values include:
'en', 'es', 'ja', 'pt', 'zh'
:type language: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param callback: When specified, will be called with each chunk of
data that is streamed. The callback should take two arguments, the
bytes of the current chunk of data and the response object. If the
data is uploading, response will be None.
:type callback: Callable[Bytes, response=None]
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImageDescription or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.computervision.models.ImageDescription
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ComputerVisionErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException>`
"""
# Construct URL
url = self.describe_image_in_stream.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if max_candidates is not None:
query_parameters['maxCandidates'] = self._serialize.query("max_candidates", max_candidates, 'str')
if language is not None:
query_parameters['language'] = self._serialize.query("language", language, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/octet-stream'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._client.stream_upload(image, callback)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ComputerVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImageDescription', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
describe_image_in_stream.metadata = {'url': '/describe'}

def tag_image_in_stream(
self, image, language="en", custom_headers=None, raw=False, callback=None, **operation_config):
"""This operation generates a list of words, or tags, that are relevant to
the content of the supplied image. The Computer Vision API can return
tags based on objects, living beings, scenery or actions found in
images. Unlike categories, tags are not organized according to a
hierarchical classification system, but correspond to image content.
Tags may contain hints to avoid ambiguity or provide context, for
example the tag 'cello' may be accompanied by the hint 'musical
instrument'. All tags are in English.
:param image: An image stream.
:type image: Generator
:param language: The desired language for output generation. If this
parameter is not specified, the default value is "en". Supported
languages: en - English (default), es - Spanish, ja - Japanese,
pt - Portuguese, zh - Simplified Chinese. Possible values include:
'en', 'es', 'ja', 'pt', 'zh'
:type language: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param callback: When specified, will be called with each chunk of
data that is streamed. The callback should take two arguments, the
bytes of the current chunk of data and the response object. If the
data is uploading, response will be None.
:type callback: Callable[Bytes, response=None]
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: TagResult or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.computervision.models.TagResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ComputerVisionErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException>`
"""
# Construct URL
url = self.tag_image_in_stream.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if language is not None:
query_parameters['language'] = self._serialize.query("language", language, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/octet-stream'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._client.stream_upload(image, callback)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ComputerVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TagResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
tag_image_in_stream.metadata = {'url': '/tag'}

def analyze_image_by_domain_in_stream(
self, model, image, language="en", custom_headers=None, raw=False, callback=None, **operation_config):
"""This operation recognizes content within an image by applying a
domain-specific model. The list of domain-specific models that are
supported by the Computer Vision API can be retrieved using the /models
GET request. Currently, the API only provides a single domain-specific
model: celebrities. Two input methods are supported -- (1) Uploading an
image or (2) specifying an image URL. A successful response will be
returned in JSON. If the request failed, the response will contain an
error code and a message to help understand what went wrong.
:param model: The domain-specific content to recognize.
:type model: str
:param image: An image stream.
:type image: Generator
:param language: The desired language for output generation. If this
parameter is not specified, the default value is "en". Supported
languages: en - English (default), es - Spanish, ja - Japanese,
pt - Portuguese, zh - Simplified Chinese. Possible values include:
'en', 'es', 'ja', 'pt', 'zh'
:type language: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param callback: When specified, will be called with each chunk of
data that is streamed. The callback should take two arguments, the
bytes of the current chunk of data and the response object. If the
data is uploading, response will be None.
:type callback: Callable[Bytes, response=None]
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DomainModelResults or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.computervision.models.DomainModelResults
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ComputerVisionErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException>`
"""
# Construct URL
url = self.analyze_image_by_domain_in_stream.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True),
'model': self._serialize.url("model", model, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if language is not None:
query_parameters['language'] = self._serialize.query("language", language, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/octet-stream'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._client.stream_upload(image, callback)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ComputerVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DomainModelResults', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
analyze_image_by_domain_in_stream.metadata = {'url': '/models/{model}/analyze'}

def recognize_text_in_stream(
self, image, mode, custom_headers=None, raw=False, callback=None, **operation_config):
"""Recognize Text operation. When you use the Recognize Text interface,
the response contains a field called 'Operation-Location'. The
'Operation-Location' field contains the URL that you must use for your
Get Recognize Text Operation Result operation.
:param image: An image stream.
:type image: Generator
:param mode: Type of text to recognize. Possible values include:
'Handwritten', 'Printed'
:type mode: str or
~azure.cognitiveservices.vision.computervision.models.TextRecognitionMode
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param callback: When specified, will be called with each chunk of
data that is streamed. The callback should take two arguments, the
bytes of the current chunk of data and the response object. If the
data is uploading, response will be None.
:type callback: Callable[Bytes, response=None]
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ComputerVisionErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException>`
"""
# Construct URL
url = self.recognize_text_in_stream.metadata['url']
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['mode'] = self._serialize.query("mode", mode, 'TextRecognitionMode')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/octet-stream'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._client.stream_upload(image, callback)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.ComputerVisionErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Operation-Location': 'str',
})
return client_raw_response
recognize_text_in_stream.metadata = {'url': '/recognizeText'}