# Source code for azure.cognitiveservices.vision.computervision.models._models_py3

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model
from msrest.exceptions import HttpOperationError


class AdultInfo(Model):
    """Describes whether the image contains adult-oriented, racy, or gory
    content, together with the associated confidence scores.

    :param is_adult_content: A value indicating if the image contains
     adult-oriented content.
    :type is_adult_content: bool
    :param is_racy_content: A value indicating if the image is racy.
    :type is_racy_content: bool
    :param is_gory_content: A value indicating if the image is gory.
    :type is_gory_content: bool
    :param adult_score: Score from 0 to 1 that indicates how much the content
     is considered adult-oriented within the image.
    :type adult_score: float
    :param racy_score: Score from 0 to 1 that indicates how suggestive is the
     image.
    :type racy_score: float
    :param gore_score: Score from 0 to 1 that indicates how gory is the image.
    :type gore_score: float
    """

    _attribute_map = {
        'is_adult_content': {'key': 'isAdultContent', 'type': 'bool'},
        'is_racy_content': {'key': 'isRacyContent', 'type': 'bool'},
        'is_gory_content': {'key': 'isGoryContent', 'type': 'bool'},
        'adult_score': {'key': 'adultScore', 'type': 'float'},
        'racy_score': {'key': 'racyScore', 'type': 'float'},
        'gore_score': {'key': 'goreScore', 'type': 'float'},
    }

    def __init__(self, *, is_adult_content: bool = None,
                 is_racy_content: bool = None, is_gory_content: bool = None,
                 adult_score: float = None, racy_score: float = None,
                 gore_score: float = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.is_adult_content = is_adult_content
        self.is_racy_content = is_racy_content
        self.is_gory_content = is_gory_content
        self.adult_score = adult_score
        self.racy_score = racy_score
        self.gore_score = gore_score
class AnalyzeResults(Model):
    """Analyze batch operation result.

    All required parameters must be populated in order to send to Azure.

    :param version: Required. Version of schema used for this result.
    :type version: str
    :param read_results: Required. Text extracted from the input.
    :type read_results:
     list[~azure.cognitiveservices.vision.computervision.models.ReadResult]
    """

    _validation = {
        'version': {'required': True},
        'read_results': {'required': True},
    }

    _attribute_map = {
        'version': {'key': 'version', 'type': 'str'},
        'read_results': {'key': 'readResults', 'type': '[ReadResult]'},
    }

    def __init__(self, *, version: str, read_results, **kwargs) -> None:
        super().__init__(**kwargs)
        self.version = version
        self.read_results = read_results
class AreaOfInterestResult(Model):
    """Result of AreaOfInterest operation.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar area_of_interest: A bounding box for an area of interest inside an
     image.
    :vartype area_of_interest:
     ~azure.cognitiveservices.vision.computervision.models.BoundingRect
    :param request_id: Id of the REST API request.
    :type request_id: str
    :param metadata:
    :type metadata:
     ~azure.cognitiveservices.vision.computervision.models.ImageMetadata
    """

    _validation = {
        'area_of_interest': {'readonly': True},
    }

    _attribute_map = {
        'area_of_interest': {'key': 'areaOfInterest', 'type': 'BoundingRect'},
        'request_id': {'key': 'requestId', 'type': 'str'},
        'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
    }

    def __init__(self, *, request_id: str = None, metadata=None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        # Server-populated; never sent in a request.
        self.area_of_interest = None
        self.request_id = request_id
        self.metadata = metadata
class BoundingRect(Model):
    """A bounding box for an area inside an image.

    :param x: X-coordinate of the top left point of the area, in pixels.
    :type x: int
    :param y: Y-coordinate of the top left point of the area, in pixels.
    :type y: int
    :param w: Width measured from the top-left point of the area, in pixels.
    :type w: int
    :param h: Height measured from the top-left point of the area, in pixels.
    :type h: int
    """

    _attribute_map = {
        'x': {'key': 'x', 'type': 'int'},
        'y': {'key': 'y', 'type': 'int'},
        'w': {'key': 'w', 'type': 'int'},
        'h': {'key': 'h', 'type': 'int'},
    }

    def __init__(self, *, x: int = None, y: int = None, w: int = None,
                 h: int = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.x = x
        self.y = y
        self.w = w
        self.h = h
class Category(Model):
    """An object describing identified category.

    :param name: Name of the category.
    :type name: str
    :param score: Scoring of the category.
    :type score: float
    :param detail: Details of the identified category.
    :type detail:
     ~azure.cognitiveservices.vision.computervision.models.CategoryDetail
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'score': {'key': 'score', 'type': 'float'},
        'detail': {'key': 'detail', 'type': 'CategoryDetail'},
    }

    def __init__(self, *, name: str = None, score: float = None, detail=None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.name = name
        self.score = score
        self.detail = detail
class CategoryDetail(Model):
    """An object describing additional category details.

    :param celebrities: An array of celebrities if any identified.
    :type celebrities:
     list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel]
    :param landmarks: An array of landmarks if any identified.
    :type landmarks:
     list[~azure.cognitiveservices.vision.computervision.models.LandmarksModel]
    """

    _attribute_map = {
        'celebrities': {'key': 'celebrities', 'type': '[CelebritiesModel]'},
        'landmarks': {'key': 'landmarks', 'type': '[LandmarksModel]'},
    }

    def __init__(self, *, celebrities=None, landmarks=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.celebrities = celebrities
        self.landmarks = landmarks
class CelebritiesModel(Model):
    """An object describing possible celebrity identification.

    :param name: Name of the celebrity.
    :type name: str
    :param confidence: Confidence level for the celebrity recognition as a
     value ranging from 0 to 1.
    :type confidence: float
    :param face_rectangle: Location of the identified face in the image.
    :type face_rectangle:
     ~azure.cognitiveservices.vision.computervision.models.FaceRectangle
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'confidence': {'key': 'confidence', 'type': 'float'},
        'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'},
    }

    def __init__(self, *, name: str = None, confidence: float = None,
                 face_rectangle=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.name = name
        self.confidence = confidence
        self.face_rectangle = face_rectangle
class CelebrityResults(Model):
    """Result of domain-specific classifications for the domain of
    celebrities.

    :param celebrities: List of celebrities recognized in the image.
    :type celebrities:
     list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel]
    :param request_id: Id of the REST API request.
    :type request_id: str
    :param metadata:
    :type metadata:
     ~azure.cognitiveservices.vision.computervision.models.ImageMetadata
    """

    _attribute_map = {
        'celebrities': {'key': 'celebrities', 'type': '[CelebritiesModel]'},
        'request_id': {'key': 'requestId', 'type': 'str'},
        'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
    }

    def __init__(self, *, celebrities=None, request_id: str = None,
                 metadata=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.celebrities = celebrities
        self.request_id = request_id
        self.metadata = metadata
class ColorInfo(Model):
    """An object providing additional metadata describing color attributes.

    :param dominant_color_foreground: Possible dominant foreground color.
    :type dominant_color_foreground: str
    :param dominant_color_background: Possible dominant background color.
    :type dominant_color_background: str
    :param dominant_colors: An array of possible dominant colors.
    :type dominant_colors: list[str]
    :param accent_color: Possible accent color.
    :type accent_color: str
    :param is_bw_img: A value indicating if the image is black and white.
    :type is_bw_img: bool
    """

    _attribute_map = {
        'dominant_color_foreground': {'key': 'dominantColorForeground', 'type': 'str'},
        'dominant_color_background': {'key': 'dominantColorBackground', 'type': 'str'},
        'dominant_colors': {'key': 'dominantColors', 'type': '[str]'},
        'accent_color': {'key': 'accentColor', 'type': 'str'},
        'is_bw_img': {'key': 'isBWImg', 'type': 'bool'},
    }

    def __init__(self, *, dominant_color_foreground: str = None,
                 dominant_color_background: str = None, dominant_colors=None,
                 accent_color: str = None, is_bw_img: bool = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.dominant_color_foreground = dominant_color_foreground
        self.dominant_color_background = dominant_color_background
        self.dominant_colors = dominant_colors
        self.accent_color = accent_color
        self.is_bw_img = is_bw_img
class ComputerVisionError(Model):
    """Details about the API request error.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. The error code.
    :type code: object
    :param message: Required. A message explaining the error reported by the
     service.
    :type message: str
    :param request_id: A unique request identifier.
    :type request_id: str
    """

    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'object'},
        'message': {'key': 'message', 'type': 'str'},
        'request_id': {'key': 'requestId', 'type': 'str'},
    }

    def __init__(self, *, code, message: str, request_id: str = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.code = code
        self.message = message
        self.request_id = request_id
class ComputerVisionErrorException(HttpOperationError):
    """Raised when the server responded with an exception of type:
    'ComputerVisionError'.

    :param deserialize: A deserializer
    :param response: Server response to be deserialized.
    """

    def __init__(self, deserialize, response, *args):
        # The third positional argument tells msrest which model to
        # deserialize the error body into.
        super(ComputerVisionErrorException, self).__init__(
            deserialize, response, 'ComputerVisionError', *args)
class DetectedBrand(Model):
    """A brand detected in an image.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: Label for the brand.
    :vartype name: str
    :ivar confidence: Confidence score of having observed the brand in the
     image, as a value ranging from 0 to 1.
    :vartype confidence: float
    :ivar rectangle: Approximate location of the detected brand.
    :vartype rectangle:
     ~azure.cognitiveservices.vision.computervision.models.BoundingRect
    """

    _validation = {
        'name': {'readonly': True},
        'confidence': {'readonly': True},
        'rectangle': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'confidence': {'key': 'confidence', 'type': 'float'},
        'rectangle': {'key': 'rectangle', 'type': 'BoundingRect'},
    }

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # All attributes are read-only and filled in by the service.
        self.name = None
        self.confidence = None
        self.rectangle = None
class DetectedObject(Model):
    """An object detected in an image.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar rectangle: Approximate location of the detected object.
    :vartype rectangle:
     ~azure.cognitiveservices.vision.computervision.models.BoundingRect
    :param object_property: Label for the object.
    :type object_property: str
    :param confidence: Confidence score of having observed the object in the
     image, as a value ranging from 0 to 1.
    :type confidence: float
    :param parent: The parent object, from a taxonomy perspective. The parent
     object is a more generic form of this object. For example, a 'bulldog'
     would have a parent of 'dog'.
    :type parent:
     ~azure.cognitiveservices.vision.computervision.models.ObjectHierarchy
    """

    _validation = {
        'rectangle': {'readonly': True},
    }

    _attribute_map = {
        'rectangle': {'key': 'rectangle', 'type': 'BoundingRect'},
        'object_property': {'key': 'object', 'type': 'str'},
        'confidence': {'key': 'confidence', 'type': 'float'},
        'parent': {'key': 'parent', 'type': 'ObjectHierarchy'},
    }

    def __init__(self, *, object_property: str = None,
                 confidence: float = None, parent=None, **kwargs) -> None:
        super().__init__(**kwargs)
        # Server-populated; never sent in a request.
        self.rectangle = None
        self.object_property = object_property
        self.confidence = confidence
        self.parent = parent
class DetectResult(Model):
    """Result of a DetectImage call.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar objects: An array of detected objects.
    :vartype objects:
     list[~azure.cognitiveservices.vision.computervision.models.DetectedObject]
    :param request_id: Id of the REST API request.
    :type request_id: str
    :param metadata:
    :type metadata:
     ~azure.cognitiveservices.vision.computervision.models.ImageMetadata
    """

    _validation = {
        'objects': {'readonly': True},
    }

    _attribute_map = {
        'objects': {'key': 'objects', 'type': '[DetectedObject]'},
        'request_id': {'key': 'requestId', 'type': 'str'},
        'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
    }

    def __init__(self, *, request_id: str = None, metadata=None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        # Server-populated; never sent in a request.
        self.objects = None
        self.request_id = request_id
        self.metadata = metadata
class DomainModelResults(Model):
    """Result of image analysis using a specific domain model including
    additional metadata.

    :param result: Model-specific response.
    :type result: object
    :param request_id: Id of the REST API request.
    :type request_id: str
    :param metadata:
    :type metadata:
     ~azure.cognitiveservices.vision.computervision.models.ImageMetadata
    """

    _attribute_map = {
        'result': {'key': 'result', 'type': 'object'},
        'request_id': {'key': 'requestId', 'type': 'str'},
        'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
    }

    def __init__(self, *, result=None, request_id: str = None, metadata=None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.result = result
        self.request_id = request_id
        self.metadata = metadata
class FaceDescription(Model):
    """An object describing a face identified in the image.

    :param age: Possible age of the face.
    :type age: int
    :param gender: Possible gender of the face. Possible values include:
     'Male', 'Female'
    :type gender: str or
     ~azure.cognitiveservices.vision.computervision.models.Gender
    :param face_rectangle: Rectangle in the image containing the identified
     face.
    :type face_rectangle:
     ~azure.cognitiveservices.vision.computervision.models.FaceRectangle
    """

    _attribute_map = {
        'age': {'key': 'age', 'type': 'int'},
        'gender': {'key': 'gender', 'type': 'Gender'},
        'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'},
    }

    def __init__(self, *, age: int = None, gender=None, face_rectangle=None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.age = age
        self.gender = gender
        self.face_rectangle = face_rectangle
class FaceRectangle(Model):
    """An object describing face rectangle.

    :param left: X-coordinate of the top left point of the face, in pixels.
    :type left: int
    :param top: Y-coordinate of the top left point of the face, in pixels.
    :type top: int
    :param width: Width measured from the top-left point of the face, in
     pixels.
    :type width: int
    :param height: Height measured from the top-left point of the face, in
     pixels.
    :type height: int
    """

    _attribute_map = {
        'left': {'key': 'left', 'type': 'int'},
        'top': {'key': 'top', 'type': 'int'},
        'width': {'key': 'width', 'type': 'int'},
        'height': {'key': 'height', 'type': 'int'},
    }

    def __init__(self, *, left: int = None, top: int = None,
                 width: int = None, height: int = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.left = left
        self.top = top
        self.width = width
        self.height = height
class ImageAnalysis(Model):
    """Result of AnalyzeImage operation.

    :param categories: An array indicating identified categories.
    :type categories:
     list[~azure.cognitiveservices.vision.computervision.models.Category]
    :param adult: An object describing whether the image contains
     adult-oriented content and/or is racy.
    :type adult:
     ~azure.cognitiveservices.vision.computervision.models.AdultInfo
    :param color: An object providing additional metadata describing color
     attributes.
    :type color:
     ~azure.cognitiveservices.vision.computervision.models.ColorInfo
    :param image_type: An object providing possible image types and matching
     confidence levels.
    :type image_type:
     ~azure.cognitiveservices.vision.computervision.models.ImageType
    :param tags: A list of tags with confidence level.
    :type tags:
     list[~azure.cognitiveservices.vision.computervision.models.ImageTag]
    :param description: A collection of content tags, along with a list of
     captions sorted by confidence level, and image metadata.
    :type description:
     ~azure.cognitiveservices.vision.computervision.models.ImageDescriptionDetails
    :param faces: An array of possible faces within the image.
    :type faces:
     list[~azure.cognitiveservices.vision.computervision.models.FaceDescription]
    :param objects: Array of objects describing what was detected in the
     image.
    :type objects:
     list[~azure.cognitiveservices.vision.computervision.models.DetectedObject]
    :param brands: Array of brands detected in the image.
    :type brands:
     list[~azure.cognitiveservices.vision.computervision.models.DetectedBrand]
    :param request_id: Id of the REST API request.
    :type request_id: str
    :param metadata:
    :type metadata:
     ~azure.cognitiveservices.vision.computervision.models.ImageMetadata
    """

    _attribute_map = {
        'categories': {'key': 'categories', 'type': '[Category]'},
        'adult': {'key': 'adult', 'type': 'AdultInfo'},
        'color': {'key': 'color', 'type': 'ColorInfo'},
        'image_type': {'key': 'imageType', 'type': 'ImageType'},
        'tags': {'key': 'tags', 'type': '[ImageTag]'},
        'description': {'key': 'description', 'type': 'ImageDescriptionDetails'},
        'faces': {'key': 'faces', 'type': '[FaceDescription]'},
        'objects': {'key': 'objects', 'type': '[DetectedObject]'},
        'brands': {'key': 'brands', 'type': '[DetectedBrand]'},
        'request_id': {'key': 'requestId', 'type': 'str'},
        'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
    }

    def __init__(self, *, categories=None, adult=None, color=None,
                 image_type=None, tags=None, description=None, faces=None,
                 objects=None, brands=None, request_id: str = None,
                 metadata=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.categories = categories
        self.adult = adult
        self.color = color
        self.image_type = image_type
        self.tags = tags
        self.description = description
        self.faces = faces
        self.objects = objects
        self.brands = brands
        self.request_id = request_id
        self.metadata = metadata
class ImageCaption(Model):
    """An image caption, i.e. a brief description of what the image depicts.

    :param text: The text of the caption.
    :type text: str
    :param confidence: The level of confidence the service has in the caption.
    :type confidence: float
    """

    _attribute_map = {
        'text': {'key': 'text', 'type': 'str'},
        'confidence': {'key': 'confidence', 'type': 'float'},
    }

    def __init__(self, *, text: str = None, confidence: float = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.text = text
        self.confidence = confidence
class ImageDescription(Model):
    """A collection of content tags, along with a list of captions sorted by
    confidence level, and image metadata.

    :param tags: A collection of image tags.
    :type tags: list[str]
    :param captions: A list of captions, sorted by confidence level.
    :type captions:
     list[~azure.cognitiveservices.vision.computervision.models.ImageCaption]
    :param request_id: Id of the REST API request.
    :type request_id: str
    :param metadata:
    :type metadata:
     ~azure.cognitiveservices.vision.computervision.models.ImageMetadata
    """

    _attribute_map = {
        # Note the flattened 'description.*' keys: tags and captions live
        # under a nested 'description' object on the wire.
        'tags': {'key': 'description.tags', 'type': '[str]'},
        'captions': {'key': 'description.captions', 'type': '[ImageCaption]'},
        'request_id': {'key': 'requestId', 'type': 'str'},
        'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
    }

    def __init__(self, *, tags=None, captions=None, request_id: str = None,
                 metadata=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.tags = tags
        self.captions = captions
        self.request_id = request_id
        self.metadata = metadata
class ImageDescriptionDetails(Model):
    """A collection of content tags, along with a list of captions sorted by
    confidence level, and image metadata.

    :param tags: A collection of image tags.
    :type tags: list[str]
    :param captions: A list of captions, sorted by confidence level.
    :type captions:
     list[~azure.cognitiveservices.vision.computervision.models.ImageCaption]
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '[str]'},
        'captions': {'key': 'captions', 'type': '[ImageCaption]'},
    }

    def __init__(self, *, tags=None, captions=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.tags = tags
        self.captions = captions
class ImageMetadata(Model):
    """Image metadata.

    :param width: Image width, in pixels.
    :type width: int
    :param height: Image height, in pixels.
    :type height: int
    :param format: Image format.
    :type format: str
    """

    _attribute_map = {
        'width': {'key': 'width', 'type': 'int'},
        'height': {'key': 'height', 'type': 'int'},
        'format': {'key': 'format', 'type': 'str'},
    }

    def __init__(self, *, width: int = None, height: int = None,
                 format: str = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.width = width
        self.height = height
        self.format = format
class ImageTag(Model):
    """An entity observation in the image, along with the confidence score.

    :param name: Name of the entity.
    :type name: str
    :param confidence: The level of confidence that the entity was observed.
    :type confidence: float
    :param hint: Optional hint/details for this tag.
    :type hint: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'confidence': {'key': 'confidence', 'type': 'float'},
        'hint': {'key': 'hint', 'type': 'str'},
    }

    def __init__(self, *, name: str = None, confidence: float = None,
                 hint: str = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.name = name
        self.confidence = confidence
        self.hint = hint
class ImageType(Model):
    """An object providing possible image types and matching confidence
    levels.

    :param clip_art_type: Confidence level that the image is a clip art.
    :type clip_art_type: int
    :param line_drawing_type: Confidence level that the image is a line
     drawing.
    :type line_drawing_type: int
    """

    _attribute_map = {
        'clip_art_type': {'key': 'clipArtType', 'type': 'int'},
        'line_drawing_type': {'key': 'lineDrawingType', 'type': 'int'},
    }

    def __init__(self, *, clip_art_type: int = None,
                 line_drawing_type: int = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.clip_art_type = clip_art_type
        self.line_drawing_type = line_drawing_type
class ImageUrl(Model):
    """ImageUrl.

    All required parameters must be populated in order to send to Azure.

    :param url: Required. Publicly reachable URL of an image.
    :type url: str
    """

    _validation = {
        'url': {'required': True},
    }

    _attribute_map = {
        'url': {'key': 'url', 'type': 'str'},
    }

    def __init__(self, *, url: str, **kwargs) -> None:
        super().__init__(**kwargs)
        self.url = url
class LandmarkResults(Model):
    """Result of domain-specific classifications for the domain of landmarks.

    :param landmarks: List of landmarks recognized in the image.
    :type landmarks:
     list[~azure.cognitiveservices.vision.computervision.models.LandmarksModel]
    :param request_id: Id of the REST API request.
    :type request_id: str
    :param metadata:
    :type metadata:
     ~azure.cognitiveservices.vision.computervision.models.ImageMetadata
    """

    _attribute_map = {
        'landmarks': {'key': 'landmarks', 'type': '[LandmarksModel]'},
        'request_id': {'key': 'requestId', 'type': 'str'},
        'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
    }

    def __init__(self, *, landmarks=None, request_id: str = None,
                 metadata=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.landmarks = landmarks
        self.request_id = request_id
        self.metadata = metadata
class LandmarksModel(Model):
    """A landmark recognized in the image.

    :param name: Name of the landmark.
    :type name: str
    :param confidence: Confidence level for the landmark recognition as a
     value ranging from 0 to 1.
    :type confidence: float
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'confidence': {'key': 'confidence', 'type': 'float'},
    }

    def __init__(self, *, name: str = None, confidence: float = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.name = name
        self.confidence = confidence
class Line(Model):
    """An object representing a recognized text line.

    All required parameters must be populated in order to send to Azure.

    :param language: The BCP-47 language code of the recognized text line.
     Only provided where the language of the line differs from the page's.
    :type language: str
    :param bounding_box: Required. Bounding box of a recognized line.
    :type bounding_box: list[float]
    :param text: Required. The text content of the line.
    :type text: str
    :param words: Required. List of words in the text line.
    :type words:
     list[~azure.cognitiveservices.vision.computervision.models.Word]
    """

    _validation = {
        'bounding_box': {'required': True},
        'text': {'required': True},
        'words': {'required': True},
    }

    _attribute_map = {
        'language': {'key': 'language', 'type': 'str'},
        'bounding_box': {'key': 'boundingBox', 'type': '[float]'},
        'text': {'key': 'text', 'type': 'str'},
        'words': {'key': 'words', 'type': '[Word]'},
    }

    def __init__(self, *, bounding_box, text: str, words,
                 language: str = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.language = language
        self.bounding_box = bounding_box
        self.text = text
        self.words = words
class ListModelsResult(Model):
    """Result of the List Domain Models operation.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar models_property: An array of supported models.
    :vartype models_property:
     list[~azure.cognitiveservices.vision.computervision.models.ModelDescription]
    """

    _validation = {
        'models_property': {'readonly': True},
    }

    _attribute_map = {
        # 'models' clashes with the package name, hence 'models_property'.
        'models_property': {'key': 'models', 'type': '[ModelDescription]'},
    }

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Server-populated; never sent in a request.
        self.models_property = None
class ModelDescription(Model):
    """An object describing supported model by name and categories.

    :param name: The name of the model.
    :type name: str
    :param categories: Categories of the model.
    :type categories: list[str]
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'categories': {'key': 'categories', 'type': '[str]'},
    }

    def __init__(self, *, name: str = None, categories=None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.name = name
        self.categories = categories
class ObjectHierarchy(Model):
    """An object detected inside an image.

    :param object_property: Label for the object.
    :type object_property: str
    :param confidence: Confidence score of having observed the object in the
     image, as a value ranging from 0 to 1.
    :type confidence: float
    :param parent: The parent object, from a taxonomy perspective. The parent
     object is a more generic form of this object. For example, a 'bulldog'
     would have a parent of 'dog'.
    :type parent:
     ~azure.cognitiveservices.vision.computervision.models.ObjectHierarchy
    """

    _attribute_map = {
        # 'object' is a Python builtin, hence 'object_property'.
        'object_property': {'key': 'object', 'type': 'str'},
        'confidence': {'key': 'confidence', 'type': 'float'},
        'parent': {'key': 'parent', 'type': 'ObjectHierarchy'},
    }

    def __init__(self, *, object_property: str = None,
                 confidence: float = None, parent=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.object_property = object_property
        self.confidence = confidence
        self.parent = parent
class OcrLine(Model):
    """An object describing a single recognized line of text.

    :param bounding_box: Bounding box of a recognized line. The four integers
     represent the x-coordinate of the left edge, the y-coordinate of the top
     edge, width, and height of the bounding box, in the coordinate system of
     the input image, after it has been rotated around its center according to
     the detected text angle (see textAngle property), with the origin at the
     top-left corner, and the y-axis pointing down.
    :type bounding_box: str
    :param words: An array of objects, where each object represents a
     recognized word.
    :type words:
     list[~azure.cognitiveservices.vision.computervision.models.OcrWord]
    """

    _attribute_map = {
        'bounding_box': {'key': 'boundingBox', 'type': 'str'},
        'words': {'key': 'words', 'type': '[OcrWord]'},
    }

    def __init__(self, *, bounding_box: str = None, words=None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.bounding_box = bounding_box
        self.words = words
class OcrRegion(Model):
    """A region consists of multiple lines (e.g. a column of text in a
    multi-column document).

    :param bounding_box: Bounding box of a recognized region. The four
     integers represent the x-coordinate of the left edge, the y-coordinate of
     the top edge, width, and height of the bounding box, in the coordinate
     system of the input image, after it has been rotated around its center
     according to the detected text angle (see textAngle property), with the
     origin at the top-left corner, and the y-axis pointing down.
    :type bounding_box: str
    :param lines: An array of recognized lines of text.
    :type lines:
     list[~azure.cognitiveservices.vision.computervision.models.OcrLine]
    """

    _attribute_map = {
        'bounding_box': {'key': 'boundingBox', 'type': 'str'},
        'lines': {'key': 'lines', 'type': '[OcrLine]'},
    }

    def __init__(self, *, bounding_box: str = None, lines=None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.bounding_box = bounding_box
        self.lines = lines
class OcrResult(Model):
    """OcrResult.

    :param language: The BCP-47 language code of the text in the image.
    :type language: str
    :param text_angle: The angle, in radians, of the detected text with
     respect to the closest horizontal or vertical direction. After rotating
     the input image clockwise by this angle, the recognized text lines become
     horizontal or vertical. In combination with the orientation property it
     can be used to overlay recognition results correctly on the original
     image, by rotating either the original image or recognition results by a
     suitable angle around the center of the original image. If the angle
     cannot be confidently detected, this property is not present. If the
     image contains text at different angles, only part of the text will be
     recognized correctly.
    :type text_angle: float
    :param orientation: Orientation of the text recognized in the image, if
     requested. The value (up, down, left, or right) refers to the direction
     that the top of the recognized text is facing, after the image has been
     rotated around its center according to the detected text angle (see
     textAngle property). If detection of the orientation was not requested,
     or no text is detected, the value is 'NotDetected'.
    :type orientation: str
    :param regions: An array of objects, where each object represents a
     region of recognized text.
    :type regions:
     list[~azure.cognitiveservices.vision.computervision.models.OcrRegion]
    """

    _attribute_map = {
        'language': {'key': 'language', 'type': 'str'},
        'text_angle': {'key': 'textAngle', 'type': 'float'},
        'orientation': {'key': 'orientation', 'type': 'str'},
        'regions': {'key': 'regions', 'type': '[OcrRegion]'},
    }

    def __init__(self, *, language: str = None, text_angle: float = None,
                 orientation: str = None, regions=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.language = language
        self.text_angle = text_angle
        self.orientation = orientation
        self.regions = regions
class OcrWord(Model):
    """Details about a single word recognized by OCR.

    :param bounding_box: Bounding box of a recognized word. The four
     integers represent the x-coordinate of the left edge, the y-coordinate
     of the top edge, width, and height of the bounding box, in the
     coordinate system of the input image, after it has been rotated around
     its center according to the detected text angle (see textAngle
     property), with the origin at the top-left corner, and the y-axis
     pointing down.
    :type bounding_box: str
    :param text: String value of a recognized word.
    :type text: str
    """

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        'bounding_box': {'key': 'boundingBox', 'type': 'str'},
        'text': {'key': 'text', 'type': 'str'},
    }

    def __init__(self, *, bounding_box: str = None, text: str = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.bounding_box = bounding_box
        self.text = text
class ReadOperationResult(Model):
    """OCR result of the read operation.

    :param status: Status of the read operation. Possible values include:
     'notStarted', 'running', 'failed', 'succeeded'
    :type status: str or
     ~azure.cognitiveservices.vision.computervision.models.OperationStatusCodes
    :param created_date_time: Get UTC date time the batch operation was
     submitted.
    :type created_date_time: str
    :param last_updated_date_time: Get last updated UTC date time of this
     batch operation.
    :type last_updated_date_time: str
    :param analyze_result: Analyze batch operation result.
    :type analyze_result:
     ~azure.cognitiveservices.vision.computervision.models.AnalyzeResults
    """

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        'status': {'key': 'status', 'type': 'OperationStatusCodes'},
        'created_date_time': {'key': 'createdDateTime', 'type': 'str'},
        'last_updated_date_time': {'key': 'lastUpdatedDateTime', 'type': 'str'},
        'analyze_result': {'key': 'analyzeResult', 'type': 'AnalyzeResults'},
    }

    def __init__(
        self,
        *,
        status=None,
        created_date_time: str = None,
        last_updated_date_time: str = None,
        analyze_result=None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.status = status
        self.created_date_time = created_date_time
        self.last_updated_date_time = last_updated_date_time
        self.analyze_result = analyze_result
class ReadResult(Model):
    """Text extracted from one page of the input document.

    All required parameters must be populated in order to send to Azure.

    :param page: Required. The 1-based page number of the recognition
     result.
    :type page: int
    :param language: The BCP-47 language code of the recognized text page.
    :type language: str
    :param angle: Required. The orientation of the image in degrees in the
     clockwise direction. Range between [-180, 180).
    :type angle: float
    :param width: Required. The width of the image in pixels or the PDF in
     inches.
    :type width: float
    :param height: Required. The height of the image in pixels or the PDF
     in inches.
    :type height: float
    :param unit: Required. The unit used in the Width, Height and
     BoundingBox. For images, the unit is 'pixel'. For PDF, the unit is
     'inch'. Possible values include: 'pixel', 'inch'
    :type unit: str or
     ~azure.cognitiveservices.vision.computervision.models.TextRecognitionResultDimensionUnit
    :param lines: Required. A list of recognized text lines.
    :type lines:
     list[~azure.cognitiveservices.vision.computervision.models.Line]
    """

    # Fields the serializer enforces as present before sending to the
    # service.
    _validation = {
        'page': {'required': True},
        'angle': {'required': True},
        'width': {'required': True},
        'height': {'required': True},
        'unit': {'required': True},
        'lines': {'required': True},
    }

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        'page': {'key': 'page', 'type': 'int'},
        'language': {'key': 'language', 'type': 'str'},
        'angle': {'key': 'angle', 'type': 'float'},
        'width': {'key': 'width', 'type': 'float'},
        'height': {'key': 'height', 'type': 'float'},
        'unit': {'key': 'unit', 'type': 'TextRecognitionResultDimensionUnit'},
        'lines': {'key': 'lines', 'type': '[Line]'},
    }

    def __init__(
        self,
        *,
        page: int,
        angle: float,
        width: float,
        height: float,
        unit,
        lines,
        language: str = None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.page = page
        self.language = language
        self.angle = angle
        self.width = width
        self.height = height
        self.unit = unit
        self.lines = lines
class TagResult(Model):
    """The outcome of an image tagging operation: the detected tags plus
    image metadata.

    :param tags: A list of tags with confidence level.
    :type tags:
     list[~azure.cognitiveservices.vision.computervision.models.ImageTag]
    :param request_id: Id of the REST API request.
    :type request_id: str
    :param metadata:
    :type metadata:
     ~azure.cognitiveservices.vision.computervision.models.ImageMetadata
    """

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '[ImageTag]'},
        'request_id': {'key': 'requestId', 'type': 'str'},
        'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
    }

    def __init__(self, *, tags=None, request_id: str = None, metadata=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.tags = tags
        self.request_id = request_id
        self.metadata = metadata
class Word(Model):
    """A single recognized word.

    All required parameters must be populated in order to send to Azure.

    :param bounding_box: Required. Bounding box of a recognized word.
    :type bounding_box: list[float]
    :param text: Required. The text content of the word.
    :type text: str
    :param confidence: Required. Qualitative confidence measure.
    :type confidence: float
    """

    # Fields the serializer enforces as present before sending to the
    # service.
    _validation = {
        'bounding_box': {'required': True},
        'text': {'required': True},
        'confidence': {'required': True},
    }

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        'bounding_box': {'key': 'boundingBox', 'type': '[float]'},
        'text': {'key': 'text', 'type': 'str'},
        'confidence': {'key': 'confidence', 'type': 'float'},
    }

    def __init__(self, *, bounding_box, text: str, confidence: float, **kwargs) -> None:
        super().__init__(**kwargs)
        self.bounding_box = bounding_box
        self.text = text
        self.confidence = confidence