Source code for azure.cognitiveservices.vision.computervision.operations._computer_vision_client_operations

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.pipeline import ClientRawResponse
from msrest.exceptions import HttpOperationError
from .. import models


class ComputerVisionClientOperationsMixin(object):
    def analyze_image(
            self, url, visual_features=None, details=None, language="en",
            description_exclude=None, model_version="latest",
            custom_headers=None, raw=False, **operation_config):
        """This operation extracts a rich set of visual features based on the
        image content.
        Two input methods are supported -- (1) Uploading an image or (2)
        specifying an image URL. Within your request, there is an optional
        parameter to allow you to choose which features to return. By
        default, image categories are returned in the response.
        A successful response will be returned in JSON. If the request
        failed, the response will contain an error code and a message to
        help understand what went wrong.

        :param url: Publicly reachable URL of an image.
        :type url: str
        :param visual_features: A string indicating what visual feature types
         to return. Multiple values should be comma-separated.
         Valid visual feature types include: Categories - categorizes image
         content according to a taxonomy defined in documentation. Tags -
         tags the image with a detailed list of words related to the image
         content. Description - describes the image content with a complete
         English sentence. Faces - detects if faces are present. If present,
         generate coordinates, gender and age. ImageType - detects if image
         is clipart or a line drawing. Color - determines the accent color,
         dominant color, and whether an image is black&white. Adult - detects
         if the image is pornographic in nature (depicts nudity or a sex
         act), or is gory (depicts extreme violence or blood). Sexually
         suggestive content (aka racy content) is also detected. Objects -
         detects various objects within an image, including the approximate
         location. The Objects argument is only available in English. Brands
         - detects various brands within an image, including the approximate
         location. The Brands argument is only available in English.
        :type visual_features: list[str or
         ~azure.cognitiveservices.vision.computervision.models.VisualFeatureTypes]
        :param details: A string indicating which domain-specific details to
         return. Multiple values should be comma-separated. Valid visual
         feature types include: Celebrities - identifies celebrities if
         detected in the image, Landmarks - identifies notable landmarks in
         the image.
        :type details: list[str or
         ~azure.cognitiveservices.vision.computervision.models.Details]
        :param language: The desired language for output generation. If this
         parameter is not specified, the default value is "en". Supported
         languages: en - English, Default. es - Spanish, ja - Japanese, pt -
         Portuguese, zh - Simplified Chinese. Possible values include: 'en',
         'es', 'ja', 'pt', 'zh'
        :type language: str
        :param description_exclude: Turn off specified domain models when
         generating the description.
        :type description_exclude: list[str or
         ~azure.cognitiveservices.vision.computervision.models.DescriptionExclude]
        :param model_version: Optional parameter to specify the version of
         the AI model. Accepted values are: "latest", "2021-04-01". Defaults
         to "latest".
        :type model_version: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ImageAnalysis or ClientRawResponse if raw=true
        :rtype:
         ~azure.cognitiveservices.vision.computervision.models.ImageAnalysis
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>`
        """
        image_url = models.ImageUrl(url=url)

        # Construct URL
        url = self.analyze_image.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        if visual_features is not None:
            query_parameters['visualFeatures'] = self._serialize.query("visual_features", visual_features, '[VisualFeatureTypes]', div=',')
        if details is not None:
            query_parameters['details'] = self._serialize.query("details", details, '[Details]', div=',')
        if language is not None:
            query_parameters['language'] = self._serialize.query("language", language, 'str')
        if description_exclude is not None:
            query_parameters['descriptionExclude'] = self._serialize.query("description_exclude", description_exclude, '[DescriptionExclude]', div=',')
        if model_version is not None:
            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._serialize.body(image_url, 'ImageUrl')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ComputerVisionErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ImageAnalysis', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    analyze_image.metadata = {'url': '/analyze'}
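    # ------------------------------------------------------------------
    # Illustrative usage sketch (not part of the generated module): how a
    # caller might invoke analyze_image through ComputerVisionClient, which
    # mixes in this class. The endpoint, subscription key, and image URL
    # below are placeholder values; substitute your own resource details.
    #
    # from azure.cognitiveservices.vision.computervision import ComputerVisionClient
    # from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
    # from msrest.authentication import CognitiveServicesCredentials
    #
    # client = ComputerVisionClient(
    #     endpoint="https://<your-resource-name>.cognitiveservices.azure.com/",
    #     credentials=CognitiveServicesCredentials("<subscription-key>"),
    # )
    # analysis = client.analyze_image(
    #     "https://example.com/image.jpg",
    #     visual_features=[VisualFeatureTypes.tags, VisualFeatureTypes.description],
    # )
    # for tag in analysis.tags:
    #     print(tag.name, tag.confidence)
    # ------------------------------------------------------------------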
[docs] def describe_image( self, url, max_candidates=1, language="en", description_exclude=None, model_version="latest", custom_headers=None, raw=False, **operation_config): """This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. Descriptions may include results from celebrity and landmark domain models, if applicable. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. :param url: Publicly reachable URL of an image. :type url: str :param max_candidates: Maximum number of candidate descriptions to be returned. The default is 1. :type max_candidates: int :param language: The desired language for output generation. If this parameter is not specified, the default value is &quot;en&quot;.Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' :type language: str :param description_exclude: Turn off specified domain models when generating the description. :type description_exclude: list[str or ~azure.cognitiveservices.vision.computervision.models.DescriptionExclude] :param model_version: Optional parameter to specify the version of the AI model. Accepted values are: "latest", "2021-04-01". Defaults to "latest". :type model_version: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: ImageDescription or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.vision.computervision.models.ImageDescription or ~msrest.pipeline.ClientRawResponse :raises: :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>` """ image_url = models.ImageUrl(url=url) # Construct URL url = self.describe_image.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if max_candidates is not None: query_parameters['maxCandidates'] = self._serialize.query("max_candidates", max_candidates, 'int') if language is not None: query_parameters['language'] = self._serialize.query("language", language, 'str') if description_exclude is not None: query_parameters['descriptionExclude'] = self._serialize.query("description_exclude", description_exclude, '[DescriptionExclude]', div=',') if model_version is not None: query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._serialize.body(image_url, 'ImageUrl') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ComputerVisionErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('ImageDescription', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
describe_image.metadata = {'url': '/describe'}
[docs] def detect_objects( self, url, model_version="latest", custom_headers=None, raw=False, **operation_config): """Performs object detection on the specified image. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. :param url: Publicly reachable URL of an image. :type url: str :param model_version: Optional parameter to specify the version of the AI model. Accepted values are: "latest", "2021-04-01". Defaults to "latest". :type model_version: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: DetectResult or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.vision.computervision.models.DetectResult or ~msrest.pipeline.ClientRawResponse :raises: :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>` """ image_url = models.ImageUrl(url=url) # Construct URL url = self.detect_objects.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if model_version is not None: query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._serialize.body(image_url, 'ImageUrl') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ComputerVisionErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('DetectResult', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
detect_objects.metadata = {'url': '/detect'}
    def list_models(
            self, custom_headers=None, raw=False, **operation_config):
        """This operation returns the list of domain-specific models that are
        supported by the Computer Vision API. Currently, the API supports the
        following domain-specific models: celebrity recognizer, landmark
        recognizer.
        A successful response will be returned in JSON. If the request
        failed, the response will contain an error code and a message to
        help understand what went wrong.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ListModelsResult or ClientRawResponse if raw=true
        :rtype:
         ~azure.cognitiveservices.vision.computervision.models.ListModelsResult
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>`
        """
        # Construct URL
        url = self.list_models.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ComputerVisionErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ListModelsResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    list_models.metadata = {'url': '/models'}
[docs] def analyze_image_by_domain( self, model, url, language="en", model_version="latest", custom_headers=None, raw=False, **operation_config): """This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API provides following domain-specific models: celebrities, landmarks. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. :param model: The domain-specific content to recognize. :type model: str :param url: Publicly reachable URL of an image. :type url: str :param language: The desired language for output generation. If this parameter is not specified, the default value is &quot;en&quot;.Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' :type language: str :param model_version: Optional parameter to specify the version of the AI model. Accepted values are: "latest", "2021-04-01". Defaults to "latest". :type model_version: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: DomainModelResults or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.vision.computervision.models.DomainModelResults or ~msrest.pipeline.ClientRawResponse :raises: :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>` """ image_url = models.ImageUrl(url=url) # Construct URL url = self.analyze_image_by_domain.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), 'model': self._serialize.url("model", model, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if language is not None: query_parameters['language'] = self._serialize.query("language", language, 'str') if model_version is not None: query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._serialize.body(image_url, 'ImageUrl') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ComputerVisionErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('DomainModelResults', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
analyze_image_by_domain.metadata = {'url': '/models/{model}/analyze'}
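    # Illustrative usage sketch (not part of the generated module): applying
    # a domain-specific model via analyze_image_by_domain. DomainModelResults
    # exposes the raw service JSON through its `result` attribute. The image
    # URL is a placeholder; `client` is a ComputerVisionClient built as in
    # the sketch after analyze_image above.
    #
    # landmarks = client.analyze_image_by_domain(
    #     "landmarks", "https://example.com/landmark.jpg")
    # for landmark in landmarks.result["landmarks"]:
    #     print(landmark["name"], landmark["confidence"])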
    def recognize_printed_text(
            self, url, detect_orientation=True, language="unk",
            model_version="latest", custom_headers=None, raw=False,
            **operation_config):
        """Optical Character Recognition (OCR) detects text in an image and
        extracts the recognized characters into a machine-usable character
        stream.
        Upon success, the OCR results will be returned.
        Upon failure, the error code together with an error message will be
        returned. The error code can be one of InvalidImageUrl,
        InvalidImageFormat, InvalidImageSize, NotSupportedImage,
        NotSupportedLanguage, or InternalServerError.

        :param detect_orientation: Whether to detect the text orientation in
         the image. With detectOrientation=true the OCR service tries to
         detect the image orientation and correct it before further
         processing (e.g. if it's upside-down).
        :type detect_orientation: bool
        :param url: Publicly reachable URL of an image.
        :type url: str
        :param language: The BCP-47 language code of the text to be detected
         in the image. The default value is 'unk'. Possible values include:
         'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr',
         'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es',
         'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk'
        :type language: str or
         ~azure.cognitiveservices.vision.computervision.models.OcrLanguages
        :param model_version: Optional parameter to specify the version of
         the AI model. Accepted values are: "latest", "2021-04-01". Defaults
         to "latest".
        :type model_version: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: OcrResult or ClientRawResponse if raw=true
        :rtype:
         ~azure.cognitiveservices.vision.computervision.models.OcrResult or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>`
        """
        image_url = models.ImageUrl(url=url)

        # Construct URL
        url = self.recognize_printed_text.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['detectOrientation'] = self._serialize.query("detect_orientation", detect_orientation, 'bool')
        if language is not None:
            query_parameters['language'] = self._serialize.query("language", language, 'OcrLanguages')
        if model_version is not None:
            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._serialize.body(image_url, 'ImageUrl')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ComputerVisionErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('OcrResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    recognize_printed_text.metadata = {'url': '/ocr'}
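    # Illustrative usage sketch (not part of the generated module): walking
    # the OcrResult returned by recognize_printed_text, which nests regions,
    # lines, and words. The image URL is a placeholder; `client` is a
    # ComputerVisionClient built as in the sketch after analyze_image above.
    #
    # ocr = client.recognize_printed_text(
    #     url="https://example.com/printed-text.png", language="en")
    # for region in ocr.regions:
    #     for line in region.lines:
    #         print(" ".join(word.text for word in line.words))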
[docs] def tag_image( self, url, language="en", model_version="latest", custom_headers=None, raw=False, **operation_config): """This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag "ascomycete" may be accompanied by the hint "fungus". Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. :param url: Publicly reachable URL of an image. :type url: str :param language: The desired language for output generation. If this parameter is not specified, the default value is &quot;en&quot;.Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' :type language: str :param model_version: Optional parameter to specify the version of the AI model. Accepted values are: "latest", "2021-04-01". Defaults to "latest". :type model_version: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: TagResult or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.vision.computervision.models.TagResult or ~msrest.pipeline.ClientRawResponse :raises: :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>` """ image_url = models.ImageUrl(url=url) # Construct URL url = self.tag_image.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if language is not None: query_parameters['language'] = self._serialize.query("language", language, 'str') if model_version is not None: query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._serialize.body(image_url, 'ImageUrl') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ComputerVisionErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('TagResult', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
tag_image.metadata = {'url': '/tag'}
[docs] def generate_thumbnail( self, width, height, url, smart_cropping=False, model_version="latest", custom_headers=None, raw=False, callback=None, **operation_config): """This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. Upon failure, the error code and an error message are returned. The error code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, InvalidThumbnailSize, NotSupportedImage, FailedToProcess, Timeout, or InternalServerError. :param width: Width of the thumbnail, in pixels. It must be between 1 and 1024. Recommended minimum of 50. :type width: int :param height: Height of the thumbnail, in pixels. It must be between 1 and 1024. Recommended minimum of 50. :type height: int :param url: Publicly reachable URL of an image. :type url: str :param smart_cropping: Boolean flag for enabling smart cropping. :type smart_cropping: bool :param model_version: Optional parameter to specify the version of the AI model. Accepted values are: "latest", "2021-04-01". Defaults to "latest". :type model_version: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param callback: When specified, will be called with each chunk of data that is streamed. The callback should take two arguments, the bytes of the current chunk of data and the response object. If the data is uploading, response will be None. :type callback: Callable[Bytes, response=None] :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: object or ClientRawResponse if raw=true :rtype: Generator or ~msrest.pipeline.ClientRawResponse :raises: :class:`HttpOperationError<msrest.exceptions.HttpOperationError>` """ image_url = models.ImageUrl(url=url) # Construct URL url = self.generate_thumbnail.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['width'] = self._serialize.query("width", width, 'int', maximum=1024, minimum=1) query_parameters['height'] = self._serialize.query("height", height, 'int', maximum=1024, minimum=1) if smart_cropping is not None: query_parameters['smartCropping'] = self._serialize.query("smart_cropping", smart_cropping, 'bool') if model_version is not None: query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/octet-stream' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._serialize.body(image_url, 'ImageUrl') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=True, **operation_config) if response.status_code not in [200]: raise HttpOperationError(self._deserialize, response) deserialized = self._client.stream_download(response, callback) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
generate_thumbnail.metadata = {'url': '/generateThumbnail'}
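    # Illustrative usage sketch (not part of the generated module):
    # generate_thumbnail streams the thumbnail bytes back, so the return
    # value is iterated chunk by chunk. The dimensions, image URL, and output
    # file name are placeholders; `client` is a ComputerVisionClient built as
    # in the sketch after analyze_image above.
    #
    # thumbnail = client.generate_thumbnail(
    #     100, 100, "https://example.com/image.jpg", smart_cropping=True)
    # with open("thumbnail.jpg", "wb") as handle:
    #     for chunk in thumbnail:
    #         handle.write(chunk)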
[docs] def get_area_of_interest( self, url, model_version="latest", custom_headers=None, raw=False, **operation_config): """This operation returns a bounding box around the most important area of the image. A successful response will be returned in JSON. If the request failed, the response contains an error code and a message to help determine what went wrong. Upon failure, the error code and an error message are returned. The error code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, FailedToProcess, Timeout, or InternalServerError. :param url: Publicly reachable URL of an image. :type url: str :param model_version: Optional parameter to specify the version of the AI model. Accepted values are: "latest", "2021-04-01". Defaults to "latest". :type model_version: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: AreaOfInterestResult or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.vision.computervision.models.AreaOfInterestResult or ~msrest.pipeline.ClientRawResponse :raises: :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>` """ image_url = models.ImageUrl(url=url) # Construct URL url = self.get_area_of_interest.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if model_version is not None: query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._serialize.body(image_url, 'ImageUrl') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ComputerVisionErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('AreaOfInterestResult', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
get_area_of_interest.metadata = {'url': '/areaOfInterest'}
    def read(
            self, url, language=None, pages=None, model_version="latest",
            reading_order="basic", custom_headers=None, raw=False,
            **operation_config):
        """Use this interface to get the result of a Read operation,
        employing the state-of-the-art Optical Character Recognition (OCR)
        algorithms optimized for text-heavy documents. When you use the Read
        interface, the response contains a field called
        'Operation-Location'. The 'Operation-Location' field contains the URL
        that you must use for your 'GetReadResult' operation to access OCR
        results.

        :param url: Publicly reachable URL of an image.
        :type url: str
        :param language: The BCP-47 language code of the text in the
         document. Read supports auto language identification and
         multi-language documents, so only provide a language code if you
         would like to force the document to be processed in that specific
         language. See https://aka.ms/ocr-languages for list of supported
         languages. Possible values include: 'af', 'ast', 'bi', 'br', 'ca',
         'ceb', 'ch', 'co', 'crh', 'cs', 'csb', 'da', 'de', 'en', 'es', 'et',
         'eu', 'fi', 'fil', 'fj', 'fr', 'fur', 'fy', 'ga', 'gd', 'gil', 'gl',
         'gv', 'hni', 'hsb', 'ht', 'hu', 'ia', 'id', 'it', 'iu', 'ja', 'jv',
         'kaa', 'kac', 'kea', 'kha', 'kl', 'ko', 'ku', 'kw', 'lb', 'ms',
         'mww', 'nap', 'nl', 'no', 'oc', 'pl', 'pt', 'quc', 'rm', 'sco',
         'sl', 'sq', 'sv', 'sw', 'tet', 'tr', 'tt', 'uz', 'vo', 'wae', 'yua',
         'za', 'zh-Hans', 'zh-Hant', 'zu'
        :type language: str or
         ~azure.cognitiveservices.vision.computervision.models.OcrDetectionLanguage
        :param pages: Custom page numbers for multi-page documents
         (PDF/TIFF); specify the numbers of the pages you want OCR results
         for. For a range of pages, use a hyphen. Separate each page or range
         with a comma.
        :type pages: list[str]
        :param model_version: Optional parameter to specify the version of
         the OCR model used for text extraction. Accepted values are:
         "latest", "latest-preview", "2021-04-12". Defaults to "latest".
        :type model_version: str
        :param reading_order: Optional parameter to specify which reading
         order algorithm should be applied when ordering the extracted text
         elements. Can be either 'basic' or 'natural'. Will default to
         'basic' if not specified.
        :type reading_order: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ComputerVisionOcrErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionOcrErrorException>`
        """
        image_url = models.ImageUrl(url=url)

        # Construct URL
        url = self.read.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        if language is not None:
            query_parameters['language'] = self._serialize.query("language", language, 'str')
        if pages is not None:
            query_parameters['pages'] = self._serialize.query("pages", pages, '[str]', div=',')
        if model_version is not None:
            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$')
        if reading_order is not None:
            query_parameters['readingOrder'] = self._serialize.query("reading_order", reading_order, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._serialize.body(image_url, 'ImageUrl')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [202]:
            raise models.ComputerVisionOcrErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            client_raw_response.add_headers({
                'Operation-Location': 'str',
            })
            return client_raw_response
    read.metadata = {'url': '/read/analyze'}
    def get_read_result(
            self, operation_id, custom_headers=None, raw=False,
            **operation_config):
        """This interface is used for getting OCR results of a Read
        operation. The URL to this interface should be retrieved from the
        'Operation-Location' field returned from the Read interface.

        :param operation_id: Id of read operation returned in the response
         of the 'Read' interface.
        :type operation_id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ReadOperationResult or ClientRawResponse if raw=true
        :rtype:
         ~azure.cognitiveservices.vision.computervision.models.ReadOperationResult
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ComputerVisionOcrErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionOcrErrorException>`
        """
        # Construct URL
        url = self.get_read_result.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
            'operationId': self._serialize.url("operation_id", operation_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ComputerVisionOcrErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ReadOperationResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_read_result.metadata = {'url': '/read/analyzeResults/{operationId}'}
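    # Illustrative usage sketch (not part of the generated module): the
    # asynchronous Read flow. read() returns 202 Accepted with an
    # 'Operation-Location' header (exposed when raw=True), whose last path
    # segment is the operation id to poll with get_read_result(). The image
    # URL is a placeholder; `client` is a ComputerVisionClient built as in
    # the sketch after analyze_image above.
    #
    # import time
    # from azure.cognitiveservices.vision.computervision.models import OperationStatusCodes
    #
    # raw_response = client.read("https://example.com/document.jpg", raw=True)
    # operation_id = raw_response.headers["Operation-Location"].split("/")[-1]
    #
    # while True:
    #     result = client.get_read_result(operation_id)
    #     if result.status not in (OperationStatusCodes.running,
    #                              OperationStatusCodes.not_started):
    #         break
    #     time.sleep(1)
    #
    # if result.status == OperationStatusCodes.succeeded:
    #     for page in result.analyze_result.read_results:
    #         for line in page.lines:
    #             print(line.text)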
[docs] def analyze_image_in_stream( self, image, visual_features=None, details=None, language="en", description_exclude=None, model_version="latest", custom_headers=None, raw=False, callback=None, **operation_config): """This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. :param image: An image stream. :type image: Generator :param visual_features: A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include: Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white. Adult - detects if the image is pornographic in nature (depicts nudity or a sex act), or is gory (depicts extreme violence or blood). Sexually suggestive content (aka racy content) is also detected. Objects - detects various objects within an image, including the approximate location. The Objects argument is only available in English. Brands - detects various brands within an image, including the approximate location. The Brands argument is only available in English. :type visual_features: list[str or ~azure.cognitiveservices.vision.computervision.models.VisualFeatureTypes] :param details: A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include: Celebrities - identifies celebrities if detected in the image, Landmarks - identifies notable landmarks in the image. :type details: list[str or ~azure.cognitiveservices.vision.computervision.models.Details] :param language: The desired language for output generation. If this parameter is not specified, the default value is &quot;en&quot;.Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' :type language: str :param description_exclude: Turn off specified domain models when generating the description. :type description_exclude: list[str or ~azure.cognitiveservices.vision.computervision.models.DescriptionExclude] :param model_version: Optional parameter to specify the version of the AI model. Accepted values are: "latest", "2021-04-01". Defaults to "latest". :type model_version: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param callback: When specified, will be called with each chunk of data that is streamed. The callback should take two arguments, the bytes of the current chunk of data and the response object. If the data is uploading, response will be None. 
:type callback: Callable[Bytes, response=None] :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: ImageAnalysis or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.vision.computervision.models.ImageAnalysis or ~msrest.pipeline.ClientRawResponse :raises: :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>` """ # Construct URL url = self.analyze_image_in_stream.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if visual_features is not None: query_parameters['visualFeatures'] = self._serialize.query("visual_features", visual_features, '[VisualFeatureTypes]', div=',') if details is not None: query_parameters['details'] = self._serialize.query("details", details, '[Details]', div=',') if language is not None: query_parameters['language'] = self._serialize.query("language", language, 'str') if description_exclude is not None: query_parameters['descriptionExclude'] = self._serialize.query("description_exclude", description_exclude, '[DescriptionExclude]', div=',') if model_version is not None: query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/octet-stream' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._client.stream_upload(image, callback) # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ComputerVisionErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('ImageAnalysis', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
analyze_image_in_stream.metadata = {'url': '/analyze'}
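    # Illustrative usage sketch (not part of the generated module): the
    # *_in_stream variants accept a binary stream (for example an open local
    # file) instead of a URL. The file name is a placeholder; `client` and
    # VisualFeatureTypes are as in the sketch after analyze_image above.
    #
    # with open("local-image.jpg", "rb") as image_stream:
    #     analysis = client.analyze_image_in_stream(
    #         image_stream,
    #         visual_features=[VisualFeatureTypes.objects])
    # for detected in analysis.objects:
    #     rect = detected.rectangle
    #     print(detected.object_property, rect.x, rect.y, rect.w, rect.h)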
[docs] def get_area_of_interest_in_stream( self, image, model_version="latest", custom_headers=None, raw=False, callback=None, **operation_config): """This operation returns a bounding box around the most important area of the image. A successful response will be returned in JSON. If the request failed, the response contains an error code and a message to help determine what went wrong. Upon failure, the error code and an error message are returned. The error code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, FailedToProcess, Timeout, or InternalServerError. :param image: An image stream. :type image: Generator :param model_version: Optional parameter to specify the version of the AI model. Accepted values are: "latest", "2021-04-01". Defaults to "latest". :type model_version: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param callback: When specified, will be called with each chunk of data that is streamed. The callback should take two arguments, the bytes of the current chunk of data and the response object. If the data is uploading, response will be None. :type callback: Callable[Bytes, response=None] :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: AreaOfInterestResult or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.vision.computervision.models.AreaOfInterestResult or ~msrest.pipeline.ClientRawResponse :raises: :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>` """ # Construct URL url = self.get_area_of_interest_in_stream.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if model_version is not None: query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/octet-stream' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._client.stream_upload(image, callback) # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ComputerVisionErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('AreaOfInterestResult', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
get_area_of_interest_in_stream.metadata = {'url': '/areaOfInterest'}
[docs] def describe_image_in_stream( self, image, max_candidates=1, language="en", description_exclude=None, model_version="latest", custom_headers=None, raw=False, callback=None, **operation_config): """This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. Descriptions may include results from celebrity and landmark domain models, if applicable. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. :param image: An image stream. :type image: Generator :param max_candidates: Maximum number of candidate descriptions to be returned. The default is 1. :type max_candidates: int :param language: The desired language for output generation. If this parameter is not specified, the default value is &quot;en&quot;.Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' :type language: str :param description_exclude: Turn off specified domain models when generating the description. :type description_exclude: list[str or ~azure.cognitiveservices.vision.computervision.models.DescriptionExclude] :param model_version: Optional parameter to specify the version of the AI model. Accepted values are: "latest", "2021-04-01". Defaults to "latest". :type model_version: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param callback: When specified, will be called with each chunk of data that is streamed. The callback should take two arguments, the bytes of the current chunk of data and the response object. If the data is uploading, response will be None. :type callback: Callable[Bytes, response=None] :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: ImageDescription or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.vision.computervision.models.ImageDescription or ~msrest.pipeline.ClientRawResponse :raises: :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>` """ # Construct URL url = self.describe_image_in_stream.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if max_candidates is not None: query_parameters['maxCandidates'] = self._serialize.query("max_candidates", max_candidates, 'int') if language is not None: query_parameters['language'] = self._serialize.query("language", language, 'str') if description_exclude is not None: query_parameters['descriptionExclude'] = self._serialize.query("description_exclude", description_exclude, '[DescriptionExclude]', div=',') if model_version is not None: query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/octet-stream' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._client.stream_upload(image, callback) # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ComputerVisionErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('ImageDescription', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
describe_image_in_stream.metadata = {'url': '/describe'}
[docs] def detect_objects_in_stream( self, image, model_version="latest", custom_headers=None, raw=False, callback=None, **operation_config): """Performs object detection on the specified image. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. :param image: An image stream. :type image: Generator :param model_version: Optional parameter to specify the version of the AI model. Accepted values are: "latest", "2021-04-01". Defaults to "latest". :type model_version: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param callback: When specified, will be called with each chunk of data that is streamed. The callback should take two arguments, the bytes of the current chunk of data and the response object. If the data is uploading, response will be None. :type callback: Callable[Bytes, response=None] :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: DetectResult or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.vision.computervision.models.DetectResult or ~msrest.pipeline.ClientRawResponse :raises: :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>` """ # Construct URL url = self.detect_objects_in_stream.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if model_version is not None: query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/octet-stream' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._client.stream_upload(image, callback) # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ComputerVisionErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('DetectResult', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
detect_objects_in_stream.metadata = {'url': '/detect'}
    def generate_thumbnail_in_stream(
            self, width, height, image, smart_cropping=False, model_version="latest", custom_headers=None, raw=False, callback=None, **operation_config):
        """This operation generates a thumbnail image with the user-specified
        width and height. By default, the service analyzes the image,
        identifies the region of interest (ROI), and generates smart cropping
        coordinates based on the ROI. Smart cropping helps when you specify an
        aspect ratio that differs from that of the input image.
        A successful response contains the thumbnail image binary. If the
        request fails, the response contains an error code and a message to
        help determine what went wrong. The error code could be one of
        InvalidImageUrl, InvalidImageFormat, InvalidImageSize,
        InvalidThumbnailSize, NotSupportedImage, FailedToProcess, Timeout, or
        InternalServerError.

        :param width: Width of the thumbnail, in pixels. It must be between 1
         and 1024. Recommended minimum of 50.
        :type width: int
        :param height: Height of the thumbnail, in pixels. It must be between
         1 and 1024. Recommended minimum of 50.
        :type height: int
        :param image: An image stream.
        :type image: Generator
        :param smart_cropping: Boolean flag for enabling smart cropping.
        :type smart_cropping: bool
        :param model_version: Optional parameter to specify the version of the
         AI model. Accepted values are: "latest", "2021-04-01". Defaults to
         "latest".
        :type model_version: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param callback: When specified, will be called with each chunk of
         data that is streamed. The callback should take two arguments, the
         bytes of the current chunk of data and the response object. If the
         data is uploading, response will be None.
        :type callback: Callable[Bytes, response=None]
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: object or ClientRawResponse if raw=true
        :rtype: Generator or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
        """
        # Construct URL
        url = self.generate_thumbnail_in_stream.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['width'] = self._serialize.query("width", width, 'int', maximum=1024, minimum=1)
        query_parameters['height'] = self._serialize.query("height", height, 'int', maximum=1024, minimum=1)
        if smart_cropping is not None:
            query_parameters['smartCropping'] = self._serialize.query("smart_cropping", smart_cropping, 'bool')
        if model_version is not None:
            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/octet-stream'
        header_parameters['Content-Type'] = 'application/octet-stream'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._client.stream_upload(image, callback)

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=True, **operation_config)

        if response.status_code not in [200]:
            raise HttpOperationError(self._deserialize, response)

        deserialized = self._client.stream_download(response, callback)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    generate_thumbnail_in_stream.metadata = {'url': '/generateThumbnail'}
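    # Usage sketch (illustrative, not part of the generated client). On
    # success the call returns a generator of byte chunks, so the thumbnail
    # is written by iterating over it; endpoint, key, and file names are
    # placeholders.
    #
    #   from msrest.authentication import CognitiveServicesCredentials
    #   from azure.cognitiveservices.vision.computervision import ComputerVisionClient
    #
    #   client = ComputerVisionClient(
    #       "https://<your-resource>.cognitiveservices.azure.com/",
    #       CognitiveServicesCredentials("<subscription-key>"))
    #   with open("photo.jpg", "rb") as image_stream, open("thumbnail.jpg", "wb") as out:
    #       for chunk in client.generate_thumbnail_in_stream(
    #               100, 100, image_stream, smart_cropping=True):
    #           out.write(chunk)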
    def analyze_image_by_domain_in_stream(
            self, model, image, language="en", model_version="latest", custom_headers=None, raw=False, callback=None, **operation_config):
        """This operation recognizes content within an image by applying a
        domain-specific model. The list of domain-specific models that are
        supported by the Computer Vision API can be retrieved using the
        /models GET request. Currently, the API provides the following
        domain-specific models: celebrities, landmarks.
        Two input methods are supported -- (1) Uploading an image or (2)
        specifying an image URL.
        A successful response will be returned in JSON. If the request failed,
        the response will contain an error code and a message to help
        understand what went wrong.

        :param model: The domain-specific content to recognize.
        :type model: str
        :param image: An image stream.
        :type image: Generator
        :param language: The desired language for output generation. If this
         parameter is not specified, the default value is "en". Supported
         languages: en - English (default), es - Spanish, ja - Japanese,
         pt - Portuguese, zh - Simplified Chinese. Possible values include:
         'en', 'es', 'ja', 'pt', 'zh'
        :type language: str
        :param model_version: Optional parameter to specify the version of the
         AI model. Accepted values are: "latest", "2021-04-01". Defaults to
         "latest".
        :type model_version: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param callback: When specified, will be called with each chunk of
         data that is streamed. The callback should take two arguments, the
         bytes of the current chunk of data and the response object. If the
         data is uploading, response will be None.
        :type callback: Callable[Bytes, response=None]
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: DomainModelResults or ClientRawResponse if raw=true
        :rtype:
         ~azure.cognitiveservices.vision.computervision.models.DomainModelResults
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>`
        """
        # Construct URL
        url = self.analyze_image_by_domain_in_stream.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
            'model': self._serialize.url("model", model, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        if language is not None:
            query_parameters['language'] = self._serialize.query("language", language, 'str')
        if model_version is not None:
            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/octet-stream'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._client.stream_upload(image, callback)

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ComputerVisionErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DomainModelResults', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    analyze_image_by_domain_in_stream.metadata = {'url': '/models/{model}/analyze'}
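    # Usage sketch (illustrative, not part of the generated client). The
    # "celebrities" model name comes from the docstring above; the 'result'
    # attribute of DomainModelResults is a raw dict whose exact shape depends
    # on the chosen domain model, so the key lookup below is an assumption.
    #
    #   from msrest.authentication import CognitiveServicesCredentials
    #   from azure.cognitiveservices.vision.computervision import ComputerVisionClient
    #
    #   client = ComputerVisionClient(
    #       "https://<your-resource>.cognitiveservices.azure.com/",
    #       CognitiveServicesCredentials("<subscription-key>"))
    #   with open("red_carpet.jpg", "rb") as image_stream:
    #       analysis = client.analyze_image_by_domain_in_stream("celebrities", image_stream)
    #   for celebrity in (analysis.result or {}).get("celebrities", []):
    #       print(celebrity["name"], celebrity["confidence"])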
    def recognize_printed_text_in_stream(
            self, image, detect_orientation=True, language="unk", model_version="latest", custom_headers=None, raw=False, callback=None, **operation_config):
        """Optical Character Recognition (OCR) detects text in an image and
        extracts the recognized characters into a machine-usable character
        stream.
        Upon success, the OCR results will be returned.
        Upon failure, the error code together with an error message will be
        returned. The error code can be one of InvalidImageUrl,
        InvalidImageFormat, InvalidImageSize, NotSupportedImage,
        NotSupportedLanguage, or InternalServerError.

        :param detect_orientation: Whether to detect the text orientation in
         the image. With detectOrientation=true the OCR service tries to
         detect the image orientation and correct it before further
         processing (e.g. if it's upside-down).
        :type detect_orientation: bool
        :param image: An image stream.
        :type image: Generator
        :param language: The BCP-47 language code of the text to be detected
         in the image. The default value is 'unk'. Possible values include:
         'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr',
         'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es',
         'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk'
        :type language: str or
         ~azure.cognitiveservices.vision.computervision.models.OcrLanguages
        :param model_version: Optional parameter to specify the version of the
         AI model. Accepted values are: "latest", "2021-04-01". Defaults to
         "latest".
        :type model_version: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param callback: When specified, will be called with each chunk of
         data that is streamed. The callback should take two arguments, the
         bytes of the current chunk of data and the response object. If the
         data is uploading, response will be None.
        :type callback: Callable[Bytes, response=None]
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: OcrResult or ClientRawResponse if raw=true
        :rtype:
         ~azure.cognitiveservices.vision.computervision.models.OcrResult or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>`
        """
        # Construct URL
        url = self.recognize_printed_text_in_stream.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['detectOrientation'] = self._serialize.query("detect_orientation", detect_orientation, 'bool')
        if language is not None:
            query_parameters['language'] = self._serialize.query("language", language, 'OcrLanguages')
        if model_version is not None:
            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/octet-stream'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._client.stream_upload(image, callback)

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ComputerVisionErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('OcrResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    recognize_printed_text_in_stream.metadata = {'url': '/ocr'}
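    # Usage sketch (illustrative, not part of the generated client). The
    # regions/lines/words traversal assumes the OcrResult, OcrRegion, OcrLine,
    # and OcrWord model attributes; endpoint, key, and file name are
    # placeholders.
    #
    #   from msrest.authentication import CognitiveServicesCredentials
    #   from azure.cognitiveservices.vision.computervision import ComputerVisionClient
    #
    #   client = ComputerVisionClient(
    #       "https://<your-resource>.cognitiveservices.azure.com/",
    #       CognitiveServicesCredentials("<subscription-key>"))
    #   with open("receipt.png", "rb") as image_stream:
    #       ocr = client.recognize_printed_text_in_stream(
    #           image_stream, detect_orientation=True, language="en")
    #   for region in ocr.regions:
    #       for line in region.lines:
    #           print(" ".join(word.text for word in line.words))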
    def tag_image_in_stream(
            self, image, language="en", model_version="latest", custom_headers=None, raw=False, callback=None, **operation_config):
        """This operation generates a list of words, or tags, that are
        relevant to the content of the supplied image. The Computer Vision API
        can return tags based on objects, living beings, scenery or actions
        found in images. Unlike categories, tags are not organized according
        to a hierarchical classification system, but correspond to image
        content. Tags may contain hints to avoid ambiguity or provide context,
        for example the tag "ascomycete" may be accompanied by the hint
        "fungus".
        Two input methods are supported -- (1) Uploading an image or (2)
        specifying an image URL.
        A successful response will be returned in JSON. If the request failed,
        the response will contain an error code and a message to help
        understand what went wrong.

        :param image: An image stream.
        :type image: Generator
        :param language: The desired language for output generation. If this
         parameter is not specified, the default value is "en". Supported
         languages: en - English (default), es - Spanish, ja - Japanese,
         pt - Portuguese, zh - Simplified Chinese. Possible values include:
         'en', 'es', 'ja', 'pt', 'zh'
        :type language: str
        :param model_version: Optional parameter to specify the version of the
         AI model. Accepted values are: "latest", "2021-04-01". Defaults to
         "latest".
        :type model_version: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param callback: When specified, will be called with each chunk of
         data that is streamed. The callback should take two arguments, the
         bytes of the current chunk of data and the response object. If the
         data is uploading, response will be None.
        :type callback: Callable[Bytes, response=None]
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: TagResult or ClientRawResponse if raw=true
        :rtype:
         ~azure.cognitiveservices.vision.computervision.models.TagResult or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ComputerVisionErrorResponseException<azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorResponseException>`
        """
        # Construct URL
        url = self.tag_image_in_stream.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        if language is not None:
            query_parameters['language'] = self._serialize.query("language", language, 'str')
        if model_version is not None:
            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/octet-stream'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._client.stream_upload(image, callback)

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ComputerVisionErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('TagResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    tag_image_in_stream.metadata = {'url': '/tag'}
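    # Usage sketch (illustrative, not part of the generated client). Tag
    # attribute names are assumed from the TagResult/ImageTag models; the
    # endpoint, key, and file name are placeholders.
    #
    #   from msrest.authentication import CognitiveServicesCredentials
    #   from azure.cognitiveservices.vision.computervision import ComputerVisionClient
    #
    #   client = ComputerVisionClient(
    #       "https://<your-resource>.cognitiveservices.azure.com/",
    #       CognitiveServicesCredentials("<subscription-key>"))
    #   with open("landscape.jpg", "rb") as image_stream:
    #       tag_result = client.tag_image_in_stream(image_stream)
    #   for tag in tag_result.tags:
    #       print("{} ({:.2f})".format(tag.name, tag.confidence))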
    def read_in_stream(
            self, image, language=None, pages=None, model_version="latest", reading_order="basic", custom_headers=None, raw=False, callback=None, **operation_config):
        """Use this interface to get the result of a Read operation, employing
        the state-of-the-art Optical Character Recognition (OCR) algorithms
        optimized for text-heavy documents. When you use the Read interface,
        the response contains a field called 'Operation-Location'. The
        'Operation-Location' field contains the URL that you must use for your
        'GetReadResult' operation to access OCR results.

        :param image: An image stream.
        :type image: Generator
        :param language: The BCP-47 language code of the text in the document.
         Read supports auto language identification and multi-language
         documents, so only provide a language code if you would like to force
         the document to be processed in that specific language. See
         https://aka.ms/ocr-languages for the list of supported languages.
         Possible values include: 'af', 'ast', 'bi', 'br', 'ca', 'ceb', 'ch',
         'co', 'crh', 'cs', 'csb', 'da', 'de', 'en', 'es', 'et', 'eu', 'fi',
         'fil', 'fj', 'fr', 'fur', 'fy', 'ga', 'gd', 'gil', 'gl', 'gv', 'hni',
         'hsb', 'ht', 'hu', 'ia', 'id', 'it', 'iu', 'ja', 'jv', 'kaa', 'kac',
         'kea', 'kha', 'kl', 'ko', 'ku', 'kw', 'lb', 'ms', 'mww', 'nap', 'nl',
         'no', 'oc', 'pl', 'pt', 'quc', 'rm', 'sco', 'sl', 'sq', 'sv', 'sw',
         'tet', 'tr', 'tt', 'uz', 'vo', 'wae', 'yua', 'za', 'zh-Hans',
         'zh-Hant', 'zu'
        :type language: str or
         ~azure.cognitiveservices.vision.computervision.models.OcrDetectionLanguage
        :param pages: Custom page numbers for multi-page documents (PDF/TIFF).
         Input the numbers of the pages for which you want the OCR result. For
         a range of pages, use a hyphen. Separate each page number or range
         with a comma.
        :type pages: list[str]
        :param model_version: Optional parameter to specify the version of the
         OCR model used for text extraction. Accepted values are: "latest",
         "latest-preview", "2021-04-12". Defaults to "latest".
        :type model_version: str
        :param reading_order: Optional parameter to specify which reading
         order algorithm should be applied when ordering the extracted text
         elements. Can be either 'basic' or 'natural'. Will default to 'basic'
         if not specified.
        :type reading_order: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param callback: When specified, will be called with each chunk of
         data that is streamed. The callback should take two arguments, the
         bytes of the current chunk of data and the response object. If the
         data is uploading, response will be None.
        :type callback: Callable[Bytes, response=None]
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ComputerVisionOcrErrorException<azure.cognitiveservices.vision.computervision.models.ComputerVisionOcrErrorException>`
        """
        # Construct URL
        url = self.read_in_stream.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        if language is not None:
            query_parameters['language'] = self._serialize.query("language", language, 'str')
        if pages is not None:
            query_parameters['pages'] = self._serialize.query("pages", pages, '[str]', div=',')
        if model_version is not None:
            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str', pattern=r'^(latest|\d{4}-\d{2}-\d{2})(-preview)?$')
        if reading_order is not None:
            query_parameters['readingOrder'] = self._serialize.query("reading_order", reading_order, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/octet-stream'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._client.stream_upload(image, callback)

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [202]:
            raise models.ComputerVisionOcrErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            client_raw_response.add_headers({
                'Operation-Location': 'str',
            })
            return client_raw_response
    read_in_stream.metadata = {'url': '/read/analyze'}
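    # Usage sketch (illustrative, not part of the generated client). Because
    # read_in_stream only returns the 'Operation-Location' header, the example
    # calls with raw=True and then polls the companion get_read_result
    # operation (part of the same client, defined outside this excerpt).
    # Extracting the operation id from the last URL segment and the
    # analyze_result/read_results attribute names are assumptions here;
    # endpoint, key, and file name are placeholders.
    #
    #   import time
    #   from msrest.authentication import CognitiveServicesCredentials
    #   from azure.cognitiveservices.vision.computervision import ComputerVisionClient
    #   from azure.cognitiveservices.vision.computervision.models import OperationStatusCodes
    #
    #   client = ComputerVisionClient(
    #       "https://<your-resource>.cognitiveservices.azure.com/",
    #       CognitiveServicesCredentials("<subscription-key>"))
    #   with open("contract.pdf", "rb") as document_stream:
    #       raw_response = client.read_in_stream(document_stream, raw=True)
    #   operation_id = raw_response.headers["Operation-Location"].split("/")[-1]
    #
    #   read_result = client.get_read_result(operation_id)
    #   while read_result.status in (OperationStatusCodes.not_started, OperationStatusCodes.running):
    #       time.sleep(1)
    #       read_result = client.get_read_result(operation_id)
    #   if read_result.status == OperationStatusCodes.succeeded:
    #       for page in read_result.analyze_result.read_results:
    #           for line in page.lines:
    #               print(line.text)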