Source code for azure.media.videoanalyzeredge._generated.models._models_py3

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

import datetime
from typing import List, Optional, Union

import msrest.serialization

from ._azure_video_analyzerfor_edge_enums import *


class CertificateSource(msrest.serialization.Model):
    """Base class for certificate sources.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: PemCertificateList.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CertificateSource, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]


class ProcessorNodeBase(msrest.serialization.Model):
    """Base class for topology processor nodes.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: CognitiveServicesVisionProcessor, ExtensionProcessorBase,
    LineCrossingProcessor, MotionDetectionProcessor, ObjectTrackingProcessor,
    SignalGateProcessor.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.CognitiveServicesVisionProcessor': 'CognitiveServicesVisionProcessor',
                 '#Microsoft.VideoAnalyzer.ExtensionProcessorBase': 'ExtensionProcessorBase',
                 '#Microsoft.VideoAnalyzer.LineCrossingProcessor': 'LineCrossingProcessor',
                 '#Microsoft.VideoAnalyzer.MotionDetectionProcessor': 'MotionDetectionProcessor',
                 '#Microsoft.VideoAnalyzer.ObjectTrackingProcessor': 'ObjectTrackingProcessor',
                 '#Microsoft.VideoAnalyzer.SignalGateProcessor': 'SignalGateProcessor'}
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        **kwargs
    ):
        super(ProcessorNodeBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        self.name = name
        self.inputs = inputs


class CognitiveServicesVisionProcessor(ProcessorNodeBase):
    """A processor that allows the pipeline topology to send video frames to a Cognitive Services
    Vision extension. Inference results are relayed to downstream nodes.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :param endpoint: Required. Endpoint to which this processor should connect.
    :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
    :param image: Describes the parameters of the image that is sent as input to the endpoint.
    :type image: ~azure.media.videoanalyzer.edge.models.ImageProperties
    :param sampling_options: Describes the sampling options to be applied when forwarding samples
     to the extension.
    :type sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions
    :param operation: Required. Describes the Spatial Analysis operation to be used in the
     Cognitive Services Vision processor.
    :type operation: ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationBase
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'endpoint': {'required': True},
        'operation': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
        'image': {'key': 'image', 'type': 'ImageProperties'},
        'sampling_options': {'key': 'samplingOptions', 'type': 'SamplingOptions'},
        'operation': {'key': 'operation', 'type': 'SpatialAnalysisOperationBase'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        endpoint: "EndpointBase",
        operation: "SpatialAnalysisOperationBase",
        image: Optional["ImageProperties"] = None,
        sampling_options: Optional["SamplingOptions"] = None,
        **kwargs
    ):
        super(CognitiveServicesVisionProcessor, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.CognitiveServicesVisionProcessor'  # type: str
        self.endpoint = endpoint
        self.image = image
        self.sampling_options = sampling_options
        self.operation = operation


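# A minimal sketch of wiring this processor into a topology. It assumes an
# upstream RTSP source node named "rtspSource" and uses NodeInput,
# UnsecuredEndpoint and SpatialAnalysisCustomOperation, which are defined
# elsewhere in this module; the node names, URL and operation payload are
# illustrative only, not part of the generated code:
#
#   cv_processor = CognitiveServicesVisionProcessor(
#       name="cvProcessor",
#       inputs=[NodeInput(node_name="rtspSource")],
#       endpoint=UnsecuredEndpoint(url="tcp://spatial-analysis:50051"),
#       operation=SpatialAnalysisCustomOperation(extension_configuration="{}"),
#   )

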
class CredentialsBase(msrest.serialization.Model):
    """Base class for credential objects.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: HttpHeaderCredentials, UsernamePasswordCredentials.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.HttpHeaderCredentials': 'HttpHeaderCredentials',
                 '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CredentialsBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]


class EndpointBase(msrest.serialization.Model):
    """Base class for endpoints.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: TlsEndpoint, UnsecuredEndpoint.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param credentials: Credentials to be presented to the endpoint.
    :type credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
    :param url: Required. The endpoint URL for Video Analyzer to connect to.
    :type url: str
    """

    _validation = {
        'type': {'required': True},
        'url': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
        'url': {'key': 'url', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint',
                 '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'}
    }

    def __init__(
        self,
        *,
        url: str,
        credentials: Optional["CredentialsBase"] = None,
        **kwargs
    ):
        super(EndpointBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        self.credentials = credentials
        self.url = url


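# Endpoints are instantiated through the concrete sub-classes. A minimal
# sketch, assuming the UnsecuredEndpoint and UsernamePasswordCredentials
# sub-classes defined elsewhere in this module; the URL and the "${...}"
# parameter references are illustrative (secrets would normally be
# parameterized rather than inlined):
#
#   rtsp_endpoint = UnsecuredEndpoint(
#       url="rtsp://camera.contoso.com:554/stream1",
#       credentials=UsernamePasswordCredentials(
#           username="${rtspUserName}",
#           password="${rtspPassword}",
#       ),
#   )

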
class ExtensionProcessorBase(ProcessorNodeBase):
    """Base class for pipeline extension processors. Pipeline extensions allow for custom media
    analysis and processing to be plugged into the Video Analyzer pipeline.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: GrpcExtension, HttpExtension.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :param endpoint: Required. Endpoint details of the pipeline extension plugin.
    :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
    :param image: Required. Image transformations and formatting options to be applied to the
     video frame(s) prior to submission to the pipeline extension plugin.
    :type image: ~azure.media.videoanalyzer.edge.models.ImageProperties
    :param sampling_options: Media sampling parameters that define how often media is submitted
     to the extension plugin.
    :type sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'endpoint': {'required': True},
        'image': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
        'image': {'key': 'image', 'type': 'ImageProperties'},
        'sampling_options': {'key': 'samplingOptions', 'type': 'SamplingOptions'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.GrpcExtension': 'GrpcExtension',
                 '#Microsoft.VideoAnalyzer.HttpExtension': 'HttpExtension'}
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        endpoint: "EndpointBase",
        image: "ImageProperties",
        sampling_options: Optional["SamplingOptions"] = None,
        **kwargs
    ):
        super(ExtensionProcessorBase, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.ExtensionProcessorBase'  # type: str
        self.endpoint = endpoint
        self.image = image
        self.sampling_options = sampling_options


class SinkNodeBase(msrest.serialization.Model):
    """Base class for topology sink nodes.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: FileSink, IotHubMessageSink, VideoSink.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.FileSink': 'FileSink',
                 '#Microsoft.VideoAnalyzer.IotHubMessageSink': 'IotHubMessageSink',
                 '#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'}
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        **kwargs
    ):
        super(SinkNodeBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        self.name = name
        self.inputs = inputs


class FileSink(SinkNodeBase):
    """File sink allows for video and audio content to be recorded on the file system on the edge
    device.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :param base_directory_path: Required. Absolute directory path where media files will be
     stored.
    :type base_directory_path: str
    :param file_name_pattern: Required. File name pattern for creating new files when performing
     event-based recording. The pattern must include at least one system variable.
    :type file_name_pattern: str
    :param maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing
     files from this sink. Once this limit is reached, the oldest files from this sink will be
     automatically deleted.
    :type maximum_size_mi_b: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'base_directory_path': {'required': True},
        'file_name_pattern': {'required': True},
        'maximum_size_mi_b': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'base_directory_path': {'key': 'baseDirectoryPath', 'type': 'str'},
        'file_name_pattern': {'key': 'fileNamePattern', 'type': 'str'},
        'maximum_size_mi_b': {'key': 'maximumSizeMiB', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        base_directory_path: str,
        file_name_pattern: str,
        maximum_size_mi_b: str,
        **kwargs
    ):
        super(FileSink, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.FileSink'  # type: str
        self.base_directory_path = base_directory_path
        self.file_name_pattern = file_name_pattern
        self.maximum_size_mi_b = maximum_size_mi_b


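# A sketch of a file sink for event-based recording. The upstream node name,
# path, file name pattern and size limit are illustrative; NodeInput is defined
# later in this module, and "${System.Runtime.DateTime}" is one example of a
# system variable usable in the pattern:
#
#   file_sink = FileSink(
#       name="fileSink",
#       inputs=[NodeInput(node_name="signalGateProcessor")],
#       base_directory_path="/var/media",
#       file_name_pattern="clip-${System.Runtime.DateTime}",
#       maximum_size_mi_b="512",
#   )

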
class GrpcExtension(ExtensionProcessorBase):
    """gRPC extension processor allows pipeline extension plugins to be connected to the pipeline
    over a gRPC channel. Extension plugins must act as a gRPC server. Please see
    https://aka.ms/ava-extension-grpc for details.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :param endpoint: Required. Endpoint details of the pipeline extension plugin.
    :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
    :param image: Required. Image transformations and formatting options to be applied to the
     video frame(s) prior to submission to the pipeline extension plugin.
    :type image: ~azure.media.videoanalyzer.edge.models.ImageProperties
    :param sampling_options: Media sampling parameters that define how often media is submitted
     to the extension plugin.
    :type sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions
    :param data_transfer: Required. Specifies how media is transferred to the extension plugin.
    :type data_transfer: ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransfer
    :param extension_configuration: An optional configuration string that is sent to the extension
     plugin. The configuration string is specific to each custom extension and it is neither
     understood nor validated by Video Analyzer. Please see https://aka.ms/ava-extension-grpc for
     details.
    :type extension_configuration: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'endpoint': {'required': True},
        'image': {'required': True},
        'data_transfer': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
        'image': {'key': 'image', 'type': 'ImageProperties'},
        'sampling_options': {'key': 'samplingOptions', 'type': 'SamplingOptions'},
        'data_transfer': {'key': 'dataTransfer', 'type': 'GrpcExtensionDataTransfer'},
        'extension_configuration': {'key': 'extensionConfiguration', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        endpoint: "EndpointBase",
        image: "ImageProperties",
        data_transfer: "GrpcExtensionDataTransfer",
        sampling_options: Optional["SamplingOptions"] = None,
        extension_configuration: Optional[str] = None,
        **kwargs
    ):
        super(GrpcExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, sampling_options=sampling_options, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.GrpcExtension'  # type: str
        self.data_transfer = data_transfer
        self.extension_configuration = extension_configuration


class GrpcExtensionDataTransfer(msrest.serialization.Model):
    """Describes how media is transferred to the extension plugin.

    All required parameters must be populated in order to send to Azure.

    :param shared_memory_size_mi_b: The shared memory buffer for sample transfers, in mebibytes.
     It can only be used with the 'SharedMemory' transfer mode.
    :type shared_memory_size_mi_b: str
    :param mode: Required. Data transfer mode: embedded or sharedMemory. Possible values include:
     "embedded", "sharedMemory".
    :type mode: str or ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransferMode
    """

    _validation = {
        'mode': {'required': True},
    }

    _attribute_map = {
        'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'},
        'mode': {'key': 'mode', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        mode: Union[str, "GrpcExtensionDataTransferMode"],
        shared_memory_size_mi_b: Optional[str] = None,
        **kwargs
    ):
        super(GrpcExtensionDataTransfer, self).__init__(**kwargs)
        self.shared_memory_size_mi_b = shared_memory_size_mi_b
        self.mode = mode


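# A sketch combining the two classes above: a gRPC extension that transfers
# frames through shared memory. The endpoint URL, node names and buffer size
# are illustrative; NodeInput, UnsecuredEndpoint and ImageProperties are
# defined elsewhere in this module:
#
#   grpc_extension = GrpcExtension(
#       name="grpcExtension",
#       inputs=[NodeInput(node_name="motionDetection")],
#       endpoint=UnsecuredEndpoint(url="tcp://my-extension:44000"),
#       image=ImageProperties(),
#       data_transfer=GrpcExtensionDataTransfer(
#           mode="sharedMemory",
#           shared_memory_size_mi_b="64",
#       ),
#   )

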
class HttpExtension(ExtensionProcessorBase):
    """HTTP extension processor allows pipeline extension plugins to be connected to the pipeline
    over the HTTP protocol. Extension plugins must act as an HTTP server. Please see
    https://aka.ms/ava-extension-http for details.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :param endpoint: Required. Endpoint details of the pipeline extension plugin.
    :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
    :param image: Required. Image transformations and formatting options to be applied to the
     video frame(s) prior to submission to the pipeline extension plugin.
    :type image: ~azure.media.videoanalyzer.edge.models.ImageProperties
    :param sampling_options: Media sampling parameters that define how often media is submitted
     to the extension plugin.
    :type sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'endpoint': {'required': True},
        'image': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
        'image': {'key': 'image', 'type': 'ImageProperties'},
        'sampling_options': {'key': 'samplingOptions', 'type': 'SamplingOptions'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        endpoint: "EndpointBase",
        image: "ImageProperties",
        sampling_options: Optional["SamplingOptions"] = None,
        **kwargs
    ):
        super(HttpExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, sampling_options=sampling_options, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.HttpExtension'  # type: str


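# A sketch of an HTTP extension that posts JPEG frames to an inference server
# at a bounded rate. The URL, node names and values are illustrative;
# ImageProperties, ImageScale, ImageFormatJpeg and SamplingOptions are defined
# elsewhere in this module:
#
#   http_extension = HttpExtension(
#       name="httpExtension",
#       inputs=[NodeInput(node_name="rtspSource")],
#       endpoint=UnsecuredEndpoint(url="http://my-model:8080/score"),
#       image=ImageProperties(
#           scale=ImageScale(mode="pad", width="416", height="416"),
#           format=ImageFormatJpeg(quality="90"),
#       ),
#       sampling_options=SamplingOptions(maximum_samples_per_second="5"),
#   )

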
class HttpHeaderCredentials(CredentialsBase):
    """HTTP header credentials.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param header_name: Required. HTTP header name.
    :type header_name: str
    :param header_value: Required. HTTP header value. It is recommended that this value is
     parameterized as a secret string in order to prevent this value from being returned as part
     of the resource on API requests.
    :type header_value: str
    """

    _validation = {
        'type': {'required': True},
        'header_name': {'required': True},
        'header_value': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'header_name': {'key': 'headerName', 'type': 'str'},
        'header_value': {'key': 'headerValue', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        header_name: str,
        header_value: str,
        **kwargs
    ):
        super(HttpHeaderCredentials, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.HttpHeaderCredentials'  # type: str
        self.header_name = header_name
        self.header_value = header_value


class ImageFormatProperties(msrest.serialization.Model):
    """Base class for image formatting properties.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: ImageFormatBmp, ImageFormatJpeg, ImageFormatPng, ImageFormatRaw.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.ImageFormatBmp': 'ImageFormatBmp',
                 '#Microsoft.VideoAnalyzer.ImageFormatJpeg': 'ImageFormatJpeg',
                 '#Microsoft.VideoAnalyzer.ImageFormatPng': 'ImageFormatPng',
                 '#Microsoft.VideoAnalyzer.ImageFormatRaw': 'ImageFormatRaw'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ImageFormatProperties, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]


class ImageFormatBmp(ImageFormatProperties):
    """BMP image encoding.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ImageFormatBmp, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.ImageFormatBmp'  # type: str


class ImageFormatJpeg(ImageFormatProperties):
    """JPEG image encoding.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param quality: Image quality value between 0 and 100 (best quality).
    :type quality: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'quality': {'key': 'quality', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        quality: Optional[str] = None,
        **kwargs
    ):
        super(ImageFormatJpeg, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.ImageFormatJpeg'  # type: str
        self.quality = quality


class ImageFormatPng(ImageFormatProperties):
    """PNG image encoding.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ImageFormatPng, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.ImageFormatPng'  # type: str


class ImageFormatRaw(ImageFormatProperties):
    """Raw image formatting.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param pixel_format: Required. Pixel format to be applied to the raw image. Possible values
     include: "yuv420p", "rgb565be", "rgb565le", "rgb555be", "rgb555le", "rgb24", "bgr24", "argb",
     "rgba", "abgr", "bgra".
    :type pixel_format: str or ~azure.media.videoanalyzer.edge.models.ImageFormatRawPixelFormat
    """

    _validation = {
        'type': {'required': True},
        'pixel_format': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'pixel_format': {'key': 'pixelFormat', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        pixel_format: Union[str, "ImageFormatRawPixelFormat"],
        **kwargs
    ):
        super(ImageFormatRaw, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.ImageFormatRaw'  # type: str
        self.pixel_format = pixel_format


class ImageProperties(msrest.serialization.Model):
    """Image transformations and formatting options to be applied to the video frame(s).

    :param scale: Image scaling mode.
    :type scale: ~azure.media.videoanalyzer.edge.models.ImageScale
    :param format: Base class for image formatting properties.
    :type format: ~azure.media.videoanalyzer.edge.models.ImageFormatProperties
    """

    _attribute_map = {
        'scale': {'key': 'scale', 'type': 'ImageScale'},
        'format': {'key': 'format', 'type': 'ImageFormatProperties'},
    }

    def __init__(
        self,
        *,
        scale: Optional["ImageScale"] = None,
        format: Optional["ImageFormatProperties"] = None,
        **kwargs
    ):
        super(ImageProperties, self).__init__(**kwargs)
        self.scale = scale
        self.format = format


class ImageScale(msrest.serialization.Model):
    """Image scaling mode.

    :param mode: Describes the image scaling mode to be applied. Default mode is 'pad'. Possible
     values include: "preserveAspectRatio", "pad", "stretch".
    :type mode: str or ~azure.media.videoanalyzer.edge.models.ImageScaleMode
    :param width: The desired output image width.
    :type width: str
    :param height: The desired output image height.
    :type height: str
    """

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'width': {'key': 'width', 'type': 'str'},
        'height': {'key': 'height', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        mode: Optional[Union[str, "ImageScaleMode"]] = None,
        width: Optional[str] = None,
        height: Optional[str] = None,
        **kwargs
    ):
        super(ImageScale, self).__init__(**kwargs)
        self.mode = mode
        self.width = width
        self.height = height


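# A sketch showing how the image classes above compose: stretch frames to a
# fixed size and hand them to an extension as raw BGR24 pixels (all values are
# illustrative):
#
#   raw_image = ImageProperties(
#       scale=ImageScale(mode="stretch", width="300", height="300"),
#       format=ImageFormatRaw(pixel_format="bgr24"),
#   )

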
class IotHubMessageSink(SinkNodeBase):
    """IoT Hub Message sink allows for pipeline messages to be published to the IoT Edge Hub.
    Published messages can then be delivered to the cloud and other modules via routes declared in
    the IoT Edge deployment manifest.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :param hub_output_name: Required. Name of the IoT Edge Hub output to which the messages will
     be published.
    :type hub_output_name: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'hub_output_name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'hub_output_name': {'key': 'hubOutputName', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        hub_output_name: str,
        **kwargs
    ):
        super(IotHubMessageSink, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.IotHubMessageSink'  # type: str
        self.hub_output_name = hub_output_name


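# A sketch of publishing inference events to the IoT Edge hub. The node and
# output names are illustrative; the output name must match a route in the IoT
# Edge deployment manifest for messages to leave the device:
#
#   iot_sink = IotHubMessageSink(
#       name="iotHubSink",
#       inputs=[NodeInput(node_name="httpExtension")],
#       hub_output_name="inferenceOutput",
#   )

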
class SourceNodeBase(msrest.serialization.Model):
    """Base class for topology source nodes.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: IotHubMessageSource, RtspSource.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.IotHubMessageSource': 'IotHubMessageSource',
                 '#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource'}
    }

    def __init__(
        self,
        *,
        name: str,
        **kwargs
    ):
        super(SourceNodeBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        self.name = name


class IotHubMessageSource(SourceNodeBase):
    """IoT Hub Message source allows for the pipeline to consume messages from the IoT Edge Hub.
    Messages can be routed from other IoT modules via routes declared in the IoT Edge deployment
    manifest.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param hub_input_name: Name of the IoT Edge Hub input from which messages will be consumed.
    :type hub_input_name: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'hub_input_name': {'key': 'hubInputName', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        hub_input_name: Optional[str] = None,
        **kwargs
    ):
        super(IotHubMessageSource, self).__init__(name=name, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.IotHubMessageSource'  # type: str
        self.hub_input_name = hub_input_name


class LineCrossingProcessor(ProcessorNodeBase):
    """Line crossing processor allows for the detection of tracked objects moving across one or
    more predefined lines. It must be placed downstream of an object tracker or of an AI extension
    node that generates a sequenceId for objects which are tracked across different frames of the
    video. Inference events are generated every time an object crosses from one side of the line
    to the other.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :param lines: Required. An array of lines used to compute line crossing events.
    :type lines: list[~azure.media.videoanalyzer.edge.models.NamedLineBase]
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'lines': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'lines': {'key': 'lines', 'type': '[NamedLineBase]'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        lines: List["NamedLineBase"],
        **kwargs
    ):
        super(LineCrossingProcessor, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.LineCrossingProcessor'  # type: str
        self.lines = lines


class LivePipeline(msrest.serialization.Model):
    """Live Pipeline represents a unique instance of a pipeline topology which is used for
    real-time content ingestion and analysis.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Live pipeline unique identifier.
    :type name: str
    :param system_data: Read-only system metadata associated with this object.
    :type system_data: ~azure.media.videoanalyzer.edge.models.SystemData
    :param properties: Live pipeline properties.
    :type properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'properties': {'key': 'properties', 'type': 'LivePipelineProperties'},
    }

    def __init__(
        self,
        *,
        name: str,
        system_data: Optional["SystemData"] = None,
        properties: Optional["LivePipelineProperties"] = None,
        **kwargs
    ):
        super(LivePipeline, self).__init__(**kwargs)
        self.name = name
        self.system_data = system_data
        self.properties = properties


class MethodRequest(msrest.serialization.Model):
    """Base class for direct method calls.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: LivePipelineSetRequestBody, MethodRequestEmptyBodyBase,
    PipelineTopologySetRequestBody, LivePipelineListRequest, LivePipelineSetRequest,
    PipelineTopologyListRequest, PipelineTopologySetRequest.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. Default value: "1.0".
    :vartype api_version: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'api_version': {'constant': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
    }

    _subtype_map = {
        'method_name': {'LivePipelineSetRequestBody': 'LivePipelineSetRequestBody',
                        'MethodRequestEmptyBodyBase': 'MethodRequestEmptyBodyBase',
                        'PipelineTopologySetRequestBody': 'PipelineTopologySetRequestBody',
                        'livePipelineList': 'LivePipelineListRequest',
                        'livePipelineSet': 'LivePipelineSetRequest',
                        'pipelineTopologyList': 'PipelineTopologyListRequest',
                        'pipelineTopologySet': 'PipelineTopologySetRequest'}
    }

    api_version = "1.0"

    def __init__(
        self,
        **kwargs
    ):
        super(MethodRequest, self).__init__(**kwargs)
        self.method_name = None  # type: Optional[str]


class MethodRequestEmptyBodyBase(MethodRequest):
    """MethodRequestEmptyBodyBase.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: LivePipelineActivateRequest, LivePipelineDeactivateRequest,
    LivePipelineDeleteRequest, LivePipelineGetRequest, PipelineTopologyDeleteRequest,
    PipelineTopologyGetRequest.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. Default value: "1.0".
    :vartype api_version: str
    :param name: Required. Resource name.
    :type name: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'api_version': {'constant': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    _subtype_map = {
        'method_name': {'livePipelineActivate': 'LivePipelineActivateRequest',
                        'livePipelineDeactivate': 'LivePipelineDeactivateRequest',
                        'livePipelineDelete': 'LivePipelineDeleteRequest',
                        'livePipelineGet': 'LivePipelineGetRequest',
                        'pipelineTopologyDelete': 'PipelineTopologyDeleteRequest',
                        'pipelineTopologyGet': 'PipelineTopologyGetRequest'}
    }

    api_version = "1.0"

    def __init__(
        self,
        *,
        name: str,
        **kwargs
    ):
        super(MethodRequestEmptyBodyBase, self).__init__(**kwargs)
        self.method_name = 'MethodRequestEmptyBodyBase'  # type: str
        self.name = name


class LivePipelineActivateRequest(MethodRequestEmptyBodyBase):
    """Activates an existing live pipeline.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. Default value: "1.0".
    :vartype api_version: str
    :param name: Required. Resource name.
    :type name: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'api_version': {'constant': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    api_version = "1.0"

    def __init__(
        self,
        *,
        name: str,
        **kwargs
    ):
        super(LivePipelineActivateRequest, self).__init__(name=name, **kwargs)
        self.method_name = 'livePipelineActivate'  # type: str


class LivePipelineCollection(msrest.serialization.Model):
    """A collection of live pipelines.

    :param value: List of live pipelines.
    :type value: list[~azure.media.videoanalyzer.edge.models.LivePipeline]
    :param continuation_token: A continuation token to be used in subsequent calls when
     enumerating through the collection. This is returned when the collection results won't fit in
     a single response.
    :type continuation_token: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[LivePipeline]'},
        'continuation_token': {'key': '@continuationToken', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["LivePipeline"]] = None,
        continuation_token: Optional[str] = None,
        **kwargs
    ):
        super(LivePipelineCollection, self).__init__(**kwargs)
        self.value = value
        self.continuation_token = continuation_token


class LivePipelineDeactivateRequest(MethodRequestEmptyBodyBase):
    """Deactivates an existing live pipeline.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. Default value: "1.0".
    :vartype api_version: str
    :param name: Required. Resource name.
    :type name: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'api_version': {'constant': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    api_version = "1.0"

    def __init__(
        self,
        *,
        name: str,
        **kwargs
    ):
        super(LivePipelineDeactivateRequest, self).__init__(name=name, **kwargs)
        self.method_name = 'livePipelineDeactivate'  # type: str


class LivePipelineDeleteRequest(MethodRequestEmptyBodyBase):
    """Deletes an existing live pipeline.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. Default value: "1.0".
    :vartype api_version: str
    :param name: Required. Resource name.
    :type name: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'api_version': {'constant': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    api_version = "1.0"

    def __init__(
        self,
        *,
        name: str,
        **kwargs
    ):
        super(LivePipelineDeleteRequest, self).__init__(name=name, **kwargs)
        self.method_name = 'livePipelineDelete'  # type: str


class LivePipelineGetRequest(MethodRequestEmptyBodyBase):
    """Retrieves an existing live pipeline.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. Default value: "1.0".
    :vartype api_version: str
    :param name: Required. Resource name.
    :type name: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'api_version': {'constant': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    api_version = "1.0"

    def __init__(
        self,
        *,
        name: str,
        **kwargs
    ):
        super(LivePipelineGetRequest, self).__init__(name=name, **kwargs)
        self.method_name = 'livePipelineGet'  # type: str


class LivePipelineListRequest(MethodRequest):
    """Lists all existing live pipelines.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. Default value: "1.0".
    :vartype api_version: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'api_version': {'constant': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
    }

    api_version = "1.0"

    def __init__(
        self,
        **kwargs
    ):
        super(LivePipelineListRequest, self).__init__(**kwargs)
        self.method_name = 'livePipelineList'  # type: str


class LivePipelineProperties(msrest.serialization.Model):
    """Live pipeline properties.

    :param description: An optional description of the live pipeline.
    :type description: str
    :param topology_name: The reference to an existing pipeline topology defined for real-time
     content processing. When activated, this live pipeline will process content according to the
     pipeline topology definition.
    :type topology_name: str
    :param parameters: List of the instance level parameter values for the user-defined topology
     parameters. A pipeline can only define or override parameter values for parameters which have
     been declared in the referenced topology. Topology parameters without a default value must be
     defined. Topology parameters with a default value can optionally be overridden.
    :type parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDefinition]
    :param state: Current pipeline state (read-only). Possible values include: "inactive",
     "activating", "active", "deactivating".
    :type state: str or ~azure.media.videoanalyzer.edge.models.LivePipelineState
    """

    _attribute_map = {
        'description': {'key': 'description', 'type': 'str'},
        'topology_name': {'key': 'topologyName', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': '[ParameterDefinition]'},
        'state': {'key': 'state', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        description: Optional[str] = None,
        topology_name: Optional[str] = None,
        parameters: Optional[List["ParameterDefinition"]] = None,
        state: Optional[Union[str, "LivePipelineState"]] = None,
        **kwargs
    ):
        super(LivePipelineProperties, self).__init__(**kwargs)
        self.description = description
        self.topology_name = topology_name
        self.parameters = parameters
        self.state = state


class LivePipelineSetRequest(MethodRequest):
    """Creates a new live pipeline or updates an existing one.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. Default value: "1.0".
    :vartype api_version: str
    :param live_pipeline: Required. Live Pipeline represents a unique instance of a pipeline
     topology which is used for real-time content ingestion and analysis.
    :type live_pipeline: ~azure.media.videoanalyzer.edge.models.LivePipeline
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'api_version': {'constant': True},
        'live_pipeline': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'live_pipeline': {'key': 'livePipeline', 'type': 'LivePipeline'},
    }

    api_version = "1.0"

    def __init__(
        self,
        *,
        live_pipeline: "LivePipeline",
        **kwargs
    ):
        super(LivePipelineSetRequest, self).__init__(**kwargs)
        self.method_name = 'livePipelineSet'  # type: str
        self.live_pipeline = live_pipeline


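# A sketch of how the request classes above are typically used: build the
# payload for the "livePipelineSet" direct method and serialize it before
# invoking the method on the Video Analyzer edge module. The topology and
# pipeline names are illustrative; ParameterDefinition is defined later in this
# module, and serialize() is inherited from msrest.serialization.Model:
#
#   pipeline = LivePipeline(
#       name="pipeline1",
#       properties=LivePipelineProperties(
#           topology_name="topology1",
#           parameters=[ParameterDefinition(
#               name="rtspUrl",
#               value="rtsp://camera.contoso.com:554/stream1",
#           )],
#       ),
#   )
#   set_request = LivePipelineSetRequest(live_pipeline=pipeline)
#   payload = set_request.serialize()  # dict ready to send as the method payload

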
class LivePipelineSetRequestBody(LivePipeline, MethodRequest):
    """Live pipeline resource representation.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. Default value: "1.0".
    :vartype api_version: str
    :param name: Required. Live pipeline unique identifier.
    :type name: str
    :param system_data: Read-only system metadata associated with this object.
    :type system_data: ~azure.media.videoanalyzer.edge.models.SystemData
    :param properties: Live pipeline properties.
    :type properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'api_version': {'constant': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'properties': {'key': 'properties', 'type': 'LivePipelineProperties'},
    }

    api_version = "1.0"

    def __init__(
        self,
        *,
        name: str,
        system_data: Optional["SystemData"] = None,
        properties: Optional["LivePipelineProperties"] = None,
        **kwargs
    ):
        super(LivePipelineSetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs)
        self.method_name = 'LivePipelineSetRequestBody'  # type: str
        self.name = name
        self.system_data = system_data
        self.properties = properties


class MotionDetectionProcessor(ProcessorNodeBase):
    """Motion detection processor allows for motion detection on the video stream. It generates
    motion events whenever motion is present on the video.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :param sensitivity: Motion detection sensitivity: low, medium, high. Possible values include:
     "low", "medium", "high".
    :type sensitivity: str or ~azure.media.videoanalyzer.edge.models.MotionDetectionSensitivity
    :param output_motion_region: Indicates whether the processor should detect and output the
     regions within the video frame where motion was detected. Default is true.
    :type output_motion_region: bool
    :param event_aggregation_window: Time window duration on which events are aggregated before
     being emitted. Value must be specified in ISO8601 duration format (i.e. "PT2S" equals 2
     seconds). Use 0 seconds for no aggregation. Default is 1 second.
    :type event_aggregation_window: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'sensitivity': {'key': 'sensitivity', 'type': 'str'},
        'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'},
        'event_aggregation_window': {'key': 'eventAggregationWindow', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        sensitivity: Optional[Union[str, "MotionDetectionSensitivity"]] = None,
        output_motion_region: Optional[bool] = None,
        event_aggregation_window: Optional[str] = None,
        **kwargs
    ):
        super(MotionDetectionProcessor, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.MotionDetectionProcessor'  # type: str
        self.sensitivity = sensitivity
        self.output_motion_region = output_motion_region
        self.event_aggregation_window = event_aggregation_window


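# A sketch of a motion detection node that emits events aggregated over a
# two-second window (node names are illustrative):
#
#   motion_detection = MotionDetectionProcessor(
#       name="motionDetection",
#       inputs=[NodeInput(node_name="rtspSource")],
#       sensitivity="medium",
#       event_aggregation_window="PT2S",
#   )

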
class NamedLineBase(msrest.serialization.Model):
    """Base class for named lines.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: NamedLineString.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The Type discriminator for the derived types. Constant filled by
     server.
    :type type: str
    :param name: Required. Line name. Must be unique within the node.
    :type name: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.NamedLineString': 'NamedLineString'}
    }

    def __init__(
        self,
        *,
        name: str,
        **kwargs
    ):
        super(NamedLineBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        self.name = name


class NamedLineString(NamedLineBase):
    """Describes a line configuration.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The Type discriminator for the derived types. Constant filled by
     server.
    :type type: str
    :param name: Required. Line name. Must be unique within the node.
    :type name: str
    :param line: Required. Point coordinates for the line start and end, respectively. Example:
     '[[0.3, 0.2],[0.9, 0.8]]'. Each point is expressed as [LEFT, TOP] coordinate ratios ranging
     from 0.0 to 1.0, where [0, 0] is the upper-left frame corner and [1, 1] is the bottom-right
     frame corner.
    :type line: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'line': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'line': {'key': 'line', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        line: str,
        **kwargs
    ):
        super(NamedLineString, self).__init__(name=name, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.NamedLineString'  # type: str
        self.line = line


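# A sketch tying NamedLineString to the LineCrossingProcessor defined above.
# The coordinates are illustrative ratios of the frame size, and the upstream
# node is assumed to be an object tracker:
#
#   crossing_line = NamedLineString(
#       name="doorLine",
#       line="[[0.3, 0.2],[0.9, 0.8]]",
#   )
#   line_crossing = LineCrossingProcessor(
#       name="lineCrossing",
#       inputs=[NodeInput(node_name="objectTracker")],
#       lines=[crossing_line],
#   )

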
class NamedPolygonBase(msrest.serialization.Model):
    """Describes the named polygon.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: NamedPolygonString.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The Type discriminator for the derived types. Constant filled by
     server.
    :type type: str
    :param name: Required. Polygon name. Must be unique within the node.
    :type name: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.NamedPolygonString': 'NamedPolygonString'}
    }

    def __init__(
        self,
        *,
        name: str,
        **kwargs
    ):
        super(NamedPolygonBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        self.name = name


class NamedPolygonString(NamedPolygonBase):
    """Describes a closed polygon configuration.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The Type discriminator for the derived types. Constant filled by
     server.
    :type type: str
    :param name: Required. Polygon name. Must be unique within the node.
    :type name: str
    :param polygon: Required. Point coordinates for the polygon. Example:
     '[[0.3, 0.2],[0.9, 0.8],[0.7, 0.6]]'. Each point is expressed as [LEFT, TOP] coordinate
     ratios ranging from 0.0 to 1.0, where [0, 0] is the upper-left frame corner and [1, 1] is the
     bottom-right frame corner.
    :type polygon: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'polygon': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'polygon': {'key': 'polygon', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        polygon: str,
        **kwargs
    ):
        super(NamedPolygonString, self).__init__(name=name, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.NamedPolygonString'  # type: str
        self.polygon = polygon


class NodeInput(msrest.serialization.Model):
    """Describes an input signal to be used on a pipeline node.

    All required parameters must be populated in order to send to Azure.

    :param node_name: Required. The name of the upstream node in the pipeline whose output is
     used as input of the current node.
    :type node_name: str
    :param output_selectors: Allows for the selection of specific data streams (e.g. video only)
     from another node.
    :type output_selectors: list[~azure.media.videoanalyzer.edge.models.OutputSelector]
    """

    _validation = {
        'node_name': {'required': True},
    }

    _attribute_map = {
        'node_name': {'key': 'nodeName', 'type': 'str'},
        'output_selectors': {'key': 'outputSelectors', 'type': '[OutputSelector]'},
    }

    def __init__(
        self,
        *,
        node_name: str,
        output_selectors: Optional[List["OutputSelector"]] = None,
        **kwargs
    ):
        super(NodeInput, self).__init__(**kwargs)
        self.node_name = node_name
        self.output_selectors = output_selectors


class ObjectTrackingProcessor(ProcessorNodeBase):
    """Object tracker processor allows for continuous tracking of one or more objects over a
    finite sequence of video frames. It must be used downstream of an object detector extension
    node, thus allowing for the extension to be configured to perform inferences on sparse frames
    through the use of the 'maximumSamplesPerSecond' sampling property. The object tracker node
    will then track the detected objects over the frames in which the detector is not invoked,
    resulting in smoother tracking of detected objects across the continuum of video frames. The
    tracker will stop tracking objects which are not subsequently detected by the upstream
    detector in subsequent detections.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Type discriminator for the derived types. Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :param accuracy: Object tracker accuracy: low, medium, high. Higher accuracy leads to higher
     CPU consumption on average. Possible values include: "low", "medium", "high".
    :type accuracy: str or ~azure.media.videoanalyzer.edge.models.ObjectTrackingAccuracy
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'accuracy': {'key': 'accuracy', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        accuracy: Optional[Union[str, "ObjectTrackingAccuracy"]] = None,
        **kwargs
    ):
        super(ObjectTrackingProcessor, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.ObjectTrackingProcessor'  # type: str
        self.accuracy = accuracy


class OutputSelector(msrest.serialization.Model):
    """Allows for the selection of particular streams from another node.

    :param property: The property of the data stream to be used as the selection criteria.
     Possible values include: "mediaType".
    :type property: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorProperty
    :param operator: The operator to compare properties by. Possible values include: "is",
     "isNot".
    :type operator: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorOperator
    :param value: Value to compare against.
    :type value: str
    """

    _attribute_map = {
        'property': {'key': 'property', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        property: Optional[Union[str, "OutputSelectorProperty"]] = None,
        operator: Optional[Union[str, "OutputSelectorOperator"]] = None,
        value: Optional[str] = None,
        **kwargs
    ):
        super(OutputSelector, self).__init__(**kwargs)
        self.property = property
        self.operator = operator
        self.value = value


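# A sketch combining the two classes above: feed only the video stream of an
# upstream RTSP source into the current node (node name and value are
# illustrative):
#
#   video_only_input = NodeInput(
#       node_name="rtspSource",
#       output_selectors=[
#           OutputSelector(property="mediaType", operator="is", value="video"),
#       ],
#   )

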
class ParameterDeclaration(msrest.serialization.Model):
    """Single topology parameter declaration. Declared parameters must be referenced throughout
    the topology and can optionally have default values to be used when they are not defined in
    the pipeline instances.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Name of the parameter.
    :type name: str
    :param type: Required. Type of the parameter. Possible values include: "string",
     "secretString", "int", "double", "bool".
    :type type: str or ~azure.media.videoanalyzer.edge.models.ParameterType
    :param description: Description of the parameter.
    :type description: str
    :param default: The default value for the parameter to be used if the live pipeline does not
     specify a value.
    :type default: str
    """

    _validation = {
        'name': {'required': True, 'max_length': 64, 'min_length': 0},
        'type': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'default': {'key': 'default', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        type: Union[str, "ParameterType"],
        description: Optional[str] = None,
        default: Optional[str] = None,
        **kwargs
    ):
        super(ParameterDeclaration, self).__init__(**kwargs)
        self.name = name
        self.type = type
        self.description = description
        self.default = default


[docs]class ParameterDefinition(msrest.serialization.Model): """Defines the parameter value of a specific pipeline topology parameter. See pipeline topology parameters for more information. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the parameter declared in the pipeline topology. :type name: str :param value: Parameter value to be applied on this specific live pipeline. :type value: str """ _validation = { 'name': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'value': {'key': 'value', 'type': 'str'}, } def __init__( self, *, name: str, value: Optional[str] = None, **kwargs ): super(ParameterDefinition, self).__init__(**kwargs) self.name = name self.value = value
[docs]class PemCertificateList(CertificateSource): """A list of PEM formatted certificates. All required parameters must be populated in order to send to Azure. :param type: Required. Type discriminator for the derived types. Constant filled by server. :type type: str :param certificates: Required. PEM formatted public certificates. One certificate per entry. :type certificates: list[str] """ _validation = { 'type': {'required': True}, 'certificates': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'certificates': {'key': 'certificates', 'type': '[str]'}, } def __init__( self, *, certificates: List[str], **kwargs ): super(PemCertificateList, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str self.certificates = certificates
[docs]class PipelineTopology(msrest.serialization.Model): """Pipeline topology describes the processing steps to be applied when processing media for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which acquires data from an RTSP camera, processes it with a specific AI model and stores the data in the cloud can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized, thus allowing individual pipelines to refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following: * Parameters: list of user defined parameters that can be referenced across the topology nodes. * Sources: list of one or more data source nodes such as an RTSP source which allows for media to be ingested from cameras. * Processors: list of nodes which perform data analysis or transformations. * Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations. All required parameters must be populated in order to send to Azure. :param name: Required. Pipeline topology unique identifier. :type name: str :param system_data: Read-only system metadata associated with this object. :type system_data: ~azure.media.videoanalyzer.edge.models.SystemData :param properties: Pipeline topology properties. :type properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties """ _validation = { 'name': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'properties': {'key': 'properties', 'type': 'PipelineTopologyProperties'}, } def __init__( self, *, name: str, system_data: Optional["SystemData"] = None, properties: Optional["PipelineTopologyProperties"] = None, **kwargs ): super(PipelineTopology, self).__init__(**kwargs) self.name = name self.system_data = system_data self.properties = properties
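A sketch of a topology shell (not part of the generated source; the name and description are illustrative). Source, processor and sink nodes are built with models defined later in this module and appended to the respective lists.

    topology = PipelineTopology(
        name="MotionRecording",
        properties=PipelineTopologyProperties(
            description="Record RTSP media when motion is detected.",
            parameters=[url_param, password_param],  # from the declaration sketch above
            sources=[],      # e.g. an RtspSource, defined later in this module
            processors=[],   # e.g. a SignalGateProcessor
            sinks=[],        # e.g. a VideoSink
        ),
    )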
[docs]class PipelineTopologyCollection(msrest.serialization.Model): """A collection of pipeline topologies. :param value: List of pipeline topologies. :type value: list[~azure.media.videoanalyzer.edge.models.PipelineTopology] :param continuation_token: A continuation token to be used in subsequent calls when enumerating through the collection. This is returned when the collection results won't fit in a single response. :type continuation_token: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[PipelineTopology]'}, 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, } def __init__( self, *, value: Optional[List["PipelineTopology"]] = None, continuation_token: Optional[str] = None, **kwargs ): super(PipelineTopologyCollection, self).__init__(**kwargs) self.value = value self.continuation_token = continuation_token
[docs]class PipelineTopologyDeleteRequest(MethodRequestEmptyBodyBase): """Deletes an existing pipeline topology. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar method_name: Required. Direct method name. Constant filled by server. :vartype method_name: str :ivar api_version: Video Analyzer API version. Default value: "1.0". :vartype api_version: str :param name: Required. Resource name. :type name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } _attribute_map = { 'method_name': {'key': 'methodName', 'type': 'str'}, 'api_version': {'key': '@apiVersion', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } api_version = "1.0" def __init__( self, *, name: str, **kwargs ): super(PipelineTopologyDeleteRequest, self).__init__(name=name, **kwargs) self.method_name = 'pipelineTopologyDelete' # type: str
[docs]class PipelineTopologyGetRequest(MethodRequestEmptyBodyBase): """Retrieves an existing pipeline topology. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar method_name: Required. Direct method name. Constant filled by server. :vartype method_name: str :ivar api_version: Video Analyzer API version. Default value: "1.0". :vartype api_version: str :param name: Required. Resource name. :type name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } _attribute_map = { 'method_name': {'key': 'methodName', 'type': 'str'}, 'api_version': {'key': '@apiVersion', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } api_version = "1.0" def __init__( self, *, name: str, **kwargs ): super(PipelineTopologyGetRequest, self).__init__(name=name, **kwargs) self.method_name = 'pipelineTopologyGet' # type: str
[docs]class PipelineTopologyListRequest(MethodRequest): """Lists all existing pipeline topologies. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar method_name: Required. Direct method name. Constant filled by server. :vartype method_name: str :ivar api_version: Video Analyzer API version. Default value: "1.0". :vartype api_version: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, } _attribute_map = { 'method_name': {'key': 'methodName', 'type': 'str'}, 'api_version': {'key': '@apiVersion', 'type': 'str'}, } api_version = "1.0" def __init__( self, **kwargs ): super(PipelineTopologyListRequest, self).__init__(**kwargs) self.method_name = 'pipelineTopologyList' # type: str
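A sketch of the three topology management requests (not part of the generated source). msrest models expose serialize(), which produces a JSON-ready dict suitable as a direct method payload; the topology name is illustrative.

    list_request = PipelineTopologyListRequest()
    get_request = PipelineTopologyGetRequest(name="MotionRecording")
    delete_request = PipelineTopologyDeleteRequest(name="MotionRecording")
    payload = get_request.serialize()  # e.g. {"@apiVersion": "1.0", "name": "MotionRecording"}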
[docs]class PipelineTopologyProperties(msrest.serialization.Model): """Pipeline topology properties. :param description: An optional description of the pipeline topology. It is recommended that the expected use of the topology be described here. :type description: str :param parameters: List of the topology parameter declarations. Parameters declared here can be referenced throughout the topology nodes through the use of the "${PARAMETER_NAME}" string pattern. Parameters can have optional default values and can later be defined in individual instances of the pipeline. :type parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDeclaration] :param sources: List of the topology source nodes. Source nodes enable external data to be ingested by the pipeline. :type sources: list[~azure.media.videoanalyzer.edge.models.SourceNodeBase] :param processors: List of the topology processor nodes. Processor nodes enable pipeline data to be analyzed, processed or transformed. :type processors: list[~azure.media.videoanalyzer.edge.models.ProcessorNodeBase] :param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or exported. :type sinks: list[~azure.media.videoanalyzer.edge.models.SinkNodeBase] """ _attribute_map = { 'description': {'key': 'description', 'type': 'str'}, 'parameters': {'key': 'parameters', 'type': '[ParameterDeclaration]'}, 'sources': {'key': 'sources', 'type': '[SourceNodeBase]'}, 'processors': {'key': 'processors', 'type': '[ProcessorNodeBase]'}, 'sinks': {'key': 'sinks', 'type': '[SinkNodeBase]'}, } def __init__( self, *, description: Optional[str] = None, parameters: Optional[List["ParameterDeclaration"]] = None, sources: Optional[List["SourceNodeBase"]] = None, processors: Optional[List["ProcessorNodeBase"]] = None, sinks: Optional[List["SinkNodeBase"]] = None, **kwargs ): super(PipelineTopologyProperties, self).__init__(**kwargs) self.description = description self.parameters = parameters self.sources = sources self.processors = processors self.sinks = sinks
[docs]class PipelineTopologySetRequest(MethodRequest): """Creates a new pipeline topology or updates an existing one. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar method_name: Required. Direct method name. Constant filled by server. :vartype method_name: str :ivar api_version: Video Analyzer API version. Default value: "1.0". :vartype api_version: str :param pipeline_topology: Required. Pipeline topology describes the processing steps to be applied when processing media for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which acquires data from an RTSP camera, processes it with a specific AI model and stores the data in the cloud can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized, thus allowing individual pipelines to refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following: * Parameters: list of user defined parameters that can be referenced across the topology nodes. * Sources: list of one or more data source nodes such as an RTSP source which allows for media to be ingested from cameras. * Processors: list of nodes which perform data analysis or transformations. * Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations. :type pipeline_topology: ~azure.media.videoanalyzer.edge.models.PipelineTopology """ _validation = { 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'pipeline_topology': {'required': True}, } _attribute_map = { 'method_name': {'key': 'methodName', 'type': 'str'}, 'api_version': {'key': '@apiVersion', 'type': 'str'}, 'pipeline_topology': {'key': 'pipelineTopology', 'type': 'PipelineTopology'}, } api_version = "1.0" def __init__( self, *, pipeline_topology: "PipelineTopology", **kwargs ): super(PipelineTopologySetRequest, self).__init__(**kwargs) self.method_name = 'pipelineTopologySet' # type: str self.pipeline_topology = pipeline_topology
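The set request is typically delivered to the edge module as an IoT Hub direct method. A hedged sketch follows, assuming the separate azure-iot-hub package (not part of this SDK) and the conventional "avaedge" module identifier; the connection string and device ID are placeholders.

    from azure.iot.hub import IoTHubRegistryManager
    from azure.iot.hub.models import CloudToDeviceMethod

    set_request = PipelineTopologySetRequest(pipeline_topology=topology)
    registry_manager = IoTHubRegistryManager("<iothub-connection-string>")
    direct_method = CloudToDeviceMethod(
        method_name=set_request.method_name,   # readonly, so not part of serialize()
        payload=set_request.serialize(),
    )
    registry_manager.invoke_device_module_method("<device-id>", "avaedge", direct_method)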
[docs]class PipelineTopologySetRequestBody(PipelineTopology, MethodRequest): """Pipeline topology resource representation. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar method_name: Required. Direct method name. Constant filled by server. :vartype method_name: str :ivar api_version: Video Analyzer API version. Default value: "1.0". :vartype api_version: str :param name: Required. Pipeline topology unique identifier. :type name: str :param system_data: Read-only system metadata associated with this object. :type system_data: ~azure.media.videoanalyzer.edge.models.SystemData :param properties: Pipeline topology properties. :type properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties """ _validation = { 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } _attribute_map = { 'method_name': {'key': 'methodName', 'type': 'str'}, 'api_version': {'key': '@apiVersion', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'properties': {'key': 'properties', 'type': 'PipelineTopologyProperties'}, } api_version = "1.0" def __init__( self, *, name: str, system_data: Optional["SystemData"] = None, properties: Optional["PipelineTopologyProperties"] = None, **kwargs ): super(PipelineTopologySetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs) self.method_name = 'PipelineTopologySetRequestBody' # type: str self.name = name self.system_data = system_data self.properties = properties
[docs]class RtspSource(SourceNodeBase): """RTSP source allows for media from an RTSP camera or generic RTSP server to be ingested into a live pipeline. All required parameters must be populated in order to send to Azure. :param type: Required. Type discriminator for the derived types. Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the RTSP messages are exchanged through long-lived HTTP connections, and the RTP packets are interleaved in the HTTP connections alongside the RTSP messages. Possible values include: "http", "tcp". :type transport: str or ~azure.media.videoanalyzer.edge.models.RtspTransport :param endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This contains the required information for Video Analyzer to connect to RTSP cameras and/or generic RTSP servers. :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'endpoint': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'transport': {'key': 'transport', 'type': 'str'}, 'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'}, } def __init__( self, *, name: str, endpoint: "EndpointBase", transport: Optional[Union[str, "RtspTransport"]] = None, **kwargs ): super(RtspSource, self).__init__(name=name, **kwargs) self.type = '#Microsoft.VideoAnalyzer.RtspSource' # type: str self.transport = transport self.endpoint = endpoint
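A usage sketch continuing the topology example above (not part of the generated source): an RTSP source over TCP with an unsecured endpoint whose URL and credentials come from topology parameters. UnsecuredEndpoint and UsernamePasswordCredentials are defined later in this module; the "${rtspUsername}" parameter name is illustrative.

    rtsp_source = RtspSource(
        name="rtspSource",
        transport="tcp",
        endpoint=UnsecuredEndpoint(
            url="${rtspUrl}",
            credentials=UsernamePasswordCredentials(
                username="${rtspUsername}",
                password="${rtspPassword}",
            ),
        ),
    )
    topology.properties.sources.append(rtsp_source)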
[docs]class SamplingOptions(msrest.serialization.Model): """Defines how often media is submitted to the extension plugin. :param skip_samples_without_annotation: When set to 'true', prevents frames without upstream inference data from being sent to the extension plugin. This is useful to limit the frames sent to the extension to pre-analyzed frames only. For example, when used downstream from a motion detector, this allows only frames in which motion has been detected to be further analyzed. :type skip_samples_without_annotation: str :param maximum_samples_per_second: Maximum rate of samples submitted to the extension. This prevents the extension plugin from being overloaded with data. :type maximum_samples_per_second: str """ _attribute_map = { 'skip_samples_without_annotation': {'key': 'skipSamplesWithoutAnnotation', 'type': 'str'}, 'maximum_samples_per_second': {'key': 'maximumSamplesPerSecond', 'type': 'str'}, } def __init__( self, *, skip_samples_without_annotation: Optional[str] = None, maximum_samples_per_second: Optional[str] = None, **kwargs ): super(SamplingOptions, self).__init__(**kwargs) self.skip_samples_without_annotation = skip_samples_without_annotation self.maximum_samples_per_second = maximum_samples_per_second
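A brief sketch (illustrative values): throttle the extension to five frames per second and skip frames that carry no upstream inference annotation. Note that both fields are strings in this API.

    sampling = SamplingOptions(
        skip_samples_without_annotation="true",
        maximum_samples_per_second="5",
    )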
[docs]class SignalGateProcessor(ProcessorNodeBase): """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. See https://aka.ms/ava-signalgate for more information. All required parameters must be populated in order to send to Azure. :param type: Required. Type discriminator for the derived types. Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] :param activation_evaluation_window: The period of time over which the gate gathers input events before evaluating them. :type activation_evaluation_window: str :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It determines how far behind or ahead of the activation time the signal is let through. A negative offset indicates that data prior to the activation time must be included in the signal that is let through, once the gate is activated. When used upstream of a file or video sink, this allows for scenarios such as recording buffered media prior to an event, for example: record video starting 5 seconds before motion is detected. :type activation_signal_offset: str :param minimum_activation_time: The minimum period for which the gate remains open in the absence of subsequent triggers (events). When used upstream of a file or video sink, it determines the minimum length of the recorded video clip. :type minimum_activation_time: str :param maximum_activation_time: The maximum period for which the gate remains open in the presence of subsequent triggers (events). When used upstream of a file or video sink, it determines the maximum length of the recorded video clip. :type maximum_activation_time: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, } def __init__( self, *, name: str, inputs: List["NodeInput"], activation_evaluation_window: Optional[str] = None, activation_signal_offset: Optional[str] = None, minimum_activation_time: Optional[str] = None, maximum_activation_time: Optional[str] = None, **kwargs ): super(SignalGateProcessor, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.VideoAnalyzer.SignalGateProcessor' # type: str self.activation_evaluation_window = activation_evaluation_window self.activation_signal_offset = activation_signal_offset self.minimum_activation_time = minimum_activation_time self.maximum_activation_time = maximum_activation_time
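A sketch of an event-based recording gate (not part of the generated source; the "motionDetection" node name is illustrative): media from 5 seconds before each motion event is kept, and each recording runs for at least 30 seconds and at most 2 minutes. Durations use ISO8601 format.

    signal_gate = SignalGateProcessor(
        name="signalGate",
        inputs=[
            NodeInput(node_name="motionDetection"),  # events that open the gate
            NodeInput(node_name="rtspSource"),       # media that is gated
        ],
        activation_evaluation_window="PT1S",
        activation_signal_offset="-PT5S",
        minimum_activation_time="PT30S",
        maximum_activation_time="PT2M",
    )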
[docs]class SpatialAnalysisOperationBase(msrest.serialization.Model): """Base class for Azure Cognitive Services Spatial Analysis operations. You probably want to use the sub-classes and not this class directly. Known sub-classes are: SpatialAnalysisCustomOperation, SpatialAnalysisTypedOperationBase. All required parameters must be populated in order to send to Azure. :param type: Required. The Type discriminator for the derived types. Constant filled by server. :type type: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.SpatialAnalysisCustomOperation': 'SpatialAnalysisCustomOperation', 'SpatialAnalysisTypedOperationBase': 'SpatialAnalysisTypedOperationBase'} } def __init__( self, **kwargs ): super(SpatialAnalysisOperationBase, self).__init__(**kwargs) self.type = None # type: Optional[str]
[docs]class SpatialAnalysisCustomOperation(SpatialAnalysisOperationBase): """Defines a Spatial Analysis custom operation. This requires the Azure Cognitive Services Spatial Analysis module to be deployed alongside the Video Analyzer module; please see https://aka.ms/ava-spatial-analysis for more information. All required parameters must be populated in order to send to Azure. :param type: Required. The Type discriminator for the derived types. Constant filled by server. :type type: str :param extension_configuration: Required. Custom configuration to pass to the Azure Cognitive Services Spatial Analysis module. :type extension_configuration: str """ _validation = { 'type': {'required': True}, 'extension_configuration': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'extension_configuration': {'key': 'extensionConfiguration', 'type': 'str'}, } def __init__( self, *, extension_configuration: str, **kwargs ): super(SpatialAnalysisCustomOperation, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisCustomOperation' # type: str self.extension_configuration = extension_configuration
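A sketch (not part of the generated source): the configuration is an opaque string whose content is defined by the Spatial Analysis module, so the keys shown here are purely illustrative.

    import json

    custom_operation = SpatialAnalysisCustomOperation(
        extension_configuration=json.dumps({
            "version": 1,        # illustrative configuration keys only;
            "enabled": "true",   # consult the Spatial Analysis module docs
        })
    )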
[docs]class SpatialAnalysisOperationEventBase(msrest.serialization.Model): """Defines the Azure Cognitive Services Spatial Analysis operation eventing configuration. :param threshold: The event threshold. :type threshold: str :param focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus """ _attribute_map = { 'threshold': {'key': 'threshold', 'type': 'str'}, 'focus': {'key': 'focus', 'type': 'str'}, } def __init__( self, *, threshold: Optional[str] = None, focus: Optional[Union[str, "SpatialAnalysisOperationFocus"]] = None, **kwargs ): super(SpatialAnalysisOperationEventBase, self).__init__(**kwargs) self.threshold = threshold self.focus = focus
[docs]class SpatialAnalysisPersonCountEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person count operation eventing configuration. :param threshold: The event threshold. :type threshold: str :param focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus :param trigger: The event trigger type. Possible values include: "event", "interval". :type trigger: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEventTrigger :param output_frequency: The event or interval output frequency. :type output_frequency: str """ _attribute_map = { 'threshold': {'key': 'threshold', 'type': 'str'}, 'focus': {'key': 'focus', 'type': 'str'}, 'trigger': {'key': 'trigger', 'type': 'str'}, 'output_frequency': {'key': 'outputFrequency', 'type': 'str'}, } def __init__( self, *, threshold: Optional[str] = None, focus: Optional[Union[str, "SpatialAnalysisOperationFocus"]] = None, trigger: Optional[Union[str, "SpatialAnalysisPersonCountEventTrigger"]] = None, output_frequency: Optional[str] = None, **kwargs ): super(SpatialAnalysisPersonCountEvent, self).__init__(threshold=threshold, focus=focus, **kwargs) self.trigger = trigger self.output_frequency = output_frequency
[docs]class SpatialAnalysisTypedOperationBase(SpatialAnalysisOperationBase): """Base class for Azure Cognitive Services Spatial Analysis typed operations. You probably want to use the sub-classes and not this class directly. Known sub-classes are: SpatialAnalysisPersonCountOperation, SpatialAnalysisPersonDistanceOperation, SpatialAnalysisPersonLineCrossingOperation, SpatialAnalysisPersonZoneCrossingOperation. All required parameters must be populated in order to send to Azure. :param type: Required. The Type discriminator for the derived types. Constant filled by server. :type type: str :param debug: If set to 'true', enables debugging mode for this operation. :type debug: str :param camera_configuration: Advanced camera configuration. :type camera_configuration: str :param detector_node_configuration: Advanced detector node configuration. :type detector_node_configuration: str :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. :type enable_face_mask_classifier: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.SpatialAnalysisPersonCountOperation': 'SpatialAnalysisPersonCountOperation', '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonDistanceOperation': 'SpatialAnalysisPersonDistanceOperation', '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonLineCrossingOperation': 'SpatialAnalysisPersonLineCrossingOperation', '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonZoneCrossingOperation': 'SpatialAnalysisPersonZoneCrossingOperation'} } def __init__( self, *, debug: Optional[str] = None, camera_configuration: Optional[str] = None, detector_node_configuration: Optional[str] = None, enable_face_mask_classifier: Optional[str] = None, **kwargs ): super(SpatialAnalysisTypedOperationBase, self).__init__(**kwargs) self.type = 'SpatialAnalysisTypedOperationBase' # type: str self.debug = debug self.camera_configuration = camera_configuration self.detector_node_configuration = detector_node_configuration self.enable_face_mask_classifier = enable_face_mask_classifier
[docs]class SpatialAnalysisPersonCountOperation(SpatialAnalysisTypedOperationBase): """Defines a Spatial Analysis person count operation. This requires the Azure Cognitive Services Spatial Analysis module to be deployed alongside the Video Analyzer module; please see https://aka.ms/ava-spatial-analysis for more information. All required parameters must be populated in order to send to Azure. :param type: Required. The Type discriminator for the derived types. Constant filled by server. :type type: str :param debug: If set to 'true', enables debugging mode for this operation. :type debug: str :param camera_configuration: Advanced camera configuration. :type camera_configuration: str :param detector_node_configuration: Advanced detector node configuration. :type detector_node_configuration: str :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. :type enable_face_mask_classifier: str :param zones: Required. The list of zones and optional events. :type zones: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountZoneEvents] """ _validation = { 'type': {'required': True}, 'zones': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'zones': {'key': 'zones', 'type': '[SpatialAnalysisPersonCountZoneEvents]'}, } def __init__( self, *, zones: List["SpatialAnalysisPersonCountZoneEvents"], debug: Optional[str] = None, camera_configuration: Optional[str] = None, detector_node_configuration: Optional[str] = None, enable_face_mask_classifier: Optional[str] = None, **kwargs ): super(SpatialAnalysisPersonCountOperation, self).__init__(debug=debug, camera_configuration=camera_configuration, detector_node_configuration=detector_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonCountOperation' # type: str self.zones = zones
[docs]class SpatialAnalysisPersonCountZoneEvents(msrest.serialization.Model): """SpatialAnalysisPersonCountZoneEvents. All required parameters must be populated in order to send to Azure. :param zone: Required. The named zone. :type zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase :param events: The event configuration. :type events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEvent] """ _validation = { 'zone': {'required': True}, } _attribute_map = { 'zone': {'key': 'zone', 'type': 'NamedPolygonBase'}, 'events': {'key': 'events', 'type': '[SpatialAnalysisPersonCountEvent]'}, } def __init__( self, *, zone: "NamedPolygonBase", events: Optional[List["SpatialAnalysisPersonCountEvent"]] = None, **kwargs ): super(SpatialAnalysisPersonCountZoneEvents, self).__init__(**kwargs) self.zone = zone self.events = events
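A sketch combining the two classes above (not part of the generated source): count people inside one polygonal zone and emit a count every second. It assumes the NamedPolygonString polygon type declared elsewhere in this module; the zone name and its normalized 0..1 coordinates are illustrative.

    person_count = SpatialAnalysisPersonCountOperation(
        zones=[
            SpatialAnalysisPersonCountZoneEvents(
                zone=NamedPolygonString(
                    name="entrance",
                    points="[[0.3,0.3],[0.7,0.3],[0.7,0.7],[0.3,0.7]]",
                ),
                events=[
                    SpatialAnalysisPersonCountEvent(
                        trigger="interval",
                        output_frequency="1",  # one count event per second
                    )
                ],
            )
        ]
    )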
[docs]class SpatialAnalysisPersonDistanceEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person distance operation eventing configuration. :param threshold: The event threshold. :type threshold: str :param focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus :param trigger: The event trigger type. Possible values include: "event", "interval". :type trigger: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEventTrigger :param output_frequency: The event or interval output frequency. :type output_frequency: str :param minimum_distance_threshold: The minimum distance threshold. :type minimum_distance_threshold: str :param maximum_distance_threshold: The maximum distance threshold. :type maximum_distance_threshold: str """ _attribute_map = { 'threshold': {'key': 'threshold', 'type': 'str'}, 'focus': {'key': 'focus', 'type': 'str'}, 'trigger': {'key': 'trigger', 'type': 'str'}, 'output_frequency': {'key': 'outputFrequency', 'type': 'str'}, 'minimum_distance_threshold': {'key': 'minimumDistanceThreshold', 'type': 'str'}, 'maximum_distance_threshold': {'key': 'maximumDistanceThreshold', 'type': 'str'}, } def __init__( self, *, threshold: Optional[str] = None, focus: Optional[Union[str, "SpatialAnalysisOperationFocus"]] = None, trigger: Optional[Union[str, "SpatialAnalysisPersonDistanceEventTrigger"]] = None, output_frequency: Optional[str] = None, minimum_distance_threshold: Optional[str] = None, maximum_distance_threshold: Optional[str] = None, **kwargs ): super(SpatialAnalysisPersonDistanceEvent, self).__init__(threshold=threshold, focus=focus, **kwargs) self.trigger = trigger self.output_frequency = output_frequency self.minimum_distance_threshold = minimum_distance_threshold self.maximum_distance_threshold = maximum_distance_threshold
[docs]class SpatialAnalysisPersonDistanceOperation(SpatialAnalysisTypedOperationBase): """Defines a Spatial Analysis person distance operation. This requires the Azure Cognitive Services Spatial Analysis module to be deployed alongside the Video Analyzer module; please see https://aka.ms/ava-spatial-analysis for more information. All required parameters must be populated in order to send to Azure. :param type: Required. The Type discriminator for the derived types. Constant filled by server. :type type: str :param debug: If set to 'true', enables debugging mode for this operation. :type debug: str :param camera_configuration: Advanced camera configuration. :type camera_configuration: str :param detector_node_configuration: Advanced detector node configuration. :type detector_node_configuration: str :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. :type enable_face_mask_classifier: str :param zones: Required. The list of zones with optional events. :type zones: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceZoneEvents] """ _validation = { 'type': {'required': True}, 'zones': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'zones': {'key': 'zones', 'type': '[SpatialAnalysisPersonDistanceZoneEvents]'}, } def __init__( self, *, zones: List["SpatialAnalysisPersonDistanceZoneEvents"], debug: Optional[str] = None, camera_configuration: Optional[str] = None, detector_node_configuration: Optional[str] = None, enable_face_mask_classifier: Optional[str] = None, **kwargs ): super(SpatialAnalysisPersonDistanceOperation, self).__init__(debug=debug, camera_configuration=camera_configuration, detector_node_configuration=detector_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonDistanceOperation' # type: str self.zones = zones
[docs]class SpatialAnalysisPersonDistanceZoneEvents(msrest.serialization.Model): """SpatialAnalysisPersonDistanceZoneEvents. All required parameters must be populated in order to send to Azure. :param zone: Required. The named zone. :type zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase :param events: The event configuration. :type events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEvent] """ _validation = { 'zone': {'required': True}, } _attribute_map = { 'zone': {'key': 'zone', 'type': 'NamedPolygonBase'}, 'events': {'key': 'events', 'type': '[SpatialAnalysisPersonDistanceEvent]'}, } def __init__( self, *, zone: "NamedPolygonBase", events: Optional[List["SpatialAnalysisPersonDistanceEvent"]] = None, **kwargs ): super(SpatialAnalysisPersonDistanceZoneEvents, self).__init__(**kwargs) self.zone = zone self.events = events
[docs]class SpatialAnalysisPersonLineCrossingEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person line crossing operation eventing configuration. :param threshold: The event threshold. :type threshold: str :param focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus """ _attribute_map = { 'threshold': {'key': 'threshold', 'type': 'str'}, 'focus': {'key': 'focus', 'type': 'str'}, } def __init__( self, *, threshold: Optional[str] = None, focus: Optional[Union[str, "SpatialAnalysisOperationFocus"]] = None, **kwargs ): super(SpatialAnalysisPersonLineCrossingEvent, self).__init__(threshold=threshold, focus=focus, **kwargs)
[docs]class SpatialAnalysisPersonLineCrossingLineEvents(msrest.serialization.Model): """SpatialAnalysisPersonLineCrossingLineEvents. All required parameters must be populated in order to send to Azure. :param line: Required. The named line. :type line: ~azure.media.videoanalyzer.edge.models.NamedLineBase :param events: The event configuration. :type events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonLineCrossingEvent] """ _validation = { 'line': {'required': True}, } _attribute_map = { 'line': {'key': 'line', 'type': 'NamedLineBase'}, 'events': {'key': 'events', 'type': '[SpatialAnalysisPersonLineCrossingEvent]'}, } def __init__( self, *, line: "NamedLineBase", events: Optional[List["SpatialAnalysisPersonLineCrossingEvent"]] = None, **kwargs ): super(SpatialAnalysisPersonLineCrossingLineEvents, self).__init__(**kwargs) self.line = line self.events = events
[docs]class SpatialAnalysisPersonLineCrossingOperation(SpatialAnalysisTypedOperationBase): """Defines a Spatial Analysis person line crossing operation. This requires the Azure Cognitive Services Spatial Analysis module to be deployed alongside the Video Analyzer module; please see https://aka.ms/ava-spatial-analysis for more information. All required parameters must be populated in order to send to Azure. :param type: Required. The Type discriminator for the derived types. Constant filled by server. :type type: str :param debug: If set to 'true', enables debugging mode for this operation. :type debug: str :param camera_configuration: Advanced camera configuration. :type camera_configuration: str :param detector_node_configuration: Advanced detector node configuration. :type detector_node_configuration: str :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. :type enable_face_mask_classifier: str :param lines: Required. The list of lines with optional events. :type lines: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonLineCrossingLineEvents] """ _validation = { 'type': {'required': True}, 'lines': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'lines': {'key': 'lines', 'type': '[SpatialAnalysisPersonLineCrossingLineEvents]'}, } def __init__( self, *, lines: List["SpatialAnalysisPersonLineCrossingLineEvents"], debug: Optional[str] = None, camera_configuration: Optional[str] = None, detector_node_configuration: Optional[str] = None, enable_face_mask_classifier: Optional[str] = None, **kwargs ): super(SpatialAnalysisPersonLineCrossingOperation, self).__init__(debug=debug, camera_configuration=camera_configuration, detector_node_configuration=detector_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonLineCrossingOperation' # type: str self.lines = lines
[docs]class SpatialAnalysisPersonZoneCrossingEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person zone crossing operation eventing configuration. :param threshold: The event threshold. :type threshold: str :param focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus :param event_type: The event type. Possible values include: "zoneCrossing", "zoneDwellTime". :type event_type: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingEventType """ _attribute_map = { 'threshold': {'key': 'threshold', 'type': 'str'}, 'focus': {'key': 'focus', 'type': 'str'}, 'event_type': {'key': 'eventType', 'type': 'str'}, } def __init__( self, *, threshold: Optional[str] = None, focus: Optional[Union[str, "SpatialAnalysisOperationFocus"]] = None, event_type: Optional[Union[str, "SpatialAnalysisPersonZoneCrossingEventType"]] = None, **kwargs ): super(SpatialAnalysisPersonZoneCrossingEvent, self).__init__(threshold=threshold, focus=focus, **kwargs) self.event_type = event_type
[docs]class SpatialAnalysisPersonZoneCrossingOperation(SpatialAnalysisTypedOperationBase): """Defines a Spatial Analysis person zone crossing operation. This requires the Azure Cognitive Services Spatial Analysis module to be deployed alongside the Video Analyzer module; please see https://aka.ms/ava-spatial-analysis for more information. All required parameters must be populated in order to send to Azure. :param type: Required. The Type discriminator for the derived types. Constant filled by server. :type type: str :param debug: If set to 'true', enables debugging mode for this operation. :type debug: str :param camera_configuration: Advanced camera configuration. :type camera_configuration: str :param detector_node_configuration: Advanced detector node configuration. :type detector_node_configuration: str :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. :type enable_face_mask_classifier: str :param zones: Required. The list of zones with optional events. :type zones: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingZoneEvents] """ _validation = { 'type': {'required': True}, 'zones': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'zones': {'key': 'zones', 'type': '[SpatialAnalysisPersonZoneCrossingZoneEvents]'}, } def __init__( self, *, zones: List["SpatialAnalysisPersonZoneCrossingZoneEvents"], debug: Optional[str] = None, camera_configuration: Optional[str] = None, detector_node_configuration: Optional[str] = None, enable_face_mask_classifier: Optional[str] = None, **kwargs ): super(SpatialAnalysisPersonZoneCrossingOperation, self).__init__(debug=debug, camera_configuration=camera_configuration, detector_node_configuration=detector_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonZoneCrossingOperation' # type: str self.zones = zones
[docs]class SpatialAnalysisPersonZoneCrossingZoneEvents(msrest.serialization.Model): """SpatialAnalysisPersonZoneCrossingZoneEvents. All required parameters must be populated in order to send to Azure. :param zone: Required. The named zone. :type zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase :param events: The event configuration. :type events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingEvent] """ _validation = { 'zone': {'required': True}, } _attribute_map = { 'zone': {'key': 'zone', 'type': 'NamedPolygonBase'}, 'events': {'key': 'events', 'type': '[SpatialAnalysisPersonZoneCrossingEvent]'}, } def __init__( self, *, zone: "NamedPolygonBase", events: Optional[List["SpatialAnalysisPersonZoneCrossingEvent"]] = None, **kwargs ): super(SpatialAnalysisPersonZoneCrossingZoneEvents, self).__init__(**kwargs) self.zone = zone self.events = events
[docs]class SystemData(msrest.serialization.Model): """Read-only system metadata associated with a resource. :param created_at: Date and time when this resource was first created. Value is represented in UTC according to the ISO8601 date format. :type created_at: ~datetime.datetime :param last_modified_at: Date and time when this resource was last modified. Value is represented in UTC according to the ISO8601 date format. :type last_modified_at: ~datetime.datetime """ _attribute_map = { 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, } def __init__( self, *, created_at: Optional[datetime.datetime] = None, last_modified_at: Optional[datetime.datetime] = None, **kwargs ): super(SystemData, self).__init__(**kwargs) self.created_at = created_at self.last_modified_at = last_modified_at
[docs]class TlsEndpoint(EndpointBase): """TLS endpoint describes an endpoint that the pipeline can connect to over TLS transport (data is encrypted in transit). All required parameters must be populated in order to send to Azure. :param type: Required. Type discriminator for the derived types. Constant filled by server. :type type: str :param credentials: Credentials to be presented to the endpoint. :type credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase :param url: Required. The endpoint URL for Video Analyzer to connect to. :type url: str :param trusted_certificates: List of trusted certificate authorities when authenticating a TLS connection. A null list designates that Azure Video Analyzer's list of trusted authorities should be used. :type trusted_certificates: ~azure.media.videoanalyzer.edge.models.CertificateSource :param validation_options: Validation options to use when authenticating a TLS connection. By default, strict validation is used. :type validation_options: ~azure.media.videoanalyzer.edge.models.TlsValidationOptions """ _validation = { 'type': {'required': True}, 'url': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'credentials': {'key': 'credentials', 'type': 'CredentialsBase'}, 'url': {'key': 'url', 'type': 'str'}, 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'}, 'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'}, } def __init__( self, *, url: str, credentials: Optional["CredentialsBase"] = None, trusted_certificates: Optional["CertificateSource"] = None, validation_options: Optional["TlsValidationOptions"] = None, **kwargs ): super(TlsEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str self.trusted_certificates = trusted_certificates self.validation_options = validation_options
[docs]class TlsValidationOptions(msrest.serialization.Model): """Options for controlling the validation of TLS endpoints. :param ignore_hostname: When set to 'true', certificate subject name validation is skipped. Default is 'false'. :type ignore_hostname: str :param ignore_signature: When set to 'true', certificate chain trust validation is skipped. Default is 'false'. :type ignore_signature: str """ _attribute_map = { 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, } def __init__( self, *, ignore_hostname: Optional[str] = None, ignore_signature: Optional[str] = None, **kwargs ): super(TlsValidationOptions, self).__init__(**kwargs) self.ignore_hostname = ignore_hostname self.ignore_signature = ignore_signature
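A sketch combining TlsEndpoint, PemCertificateList and TlsValidationOptions (not part of the generated source): a TLS endpoint that trusts a self-signed camera certificate while skipping hostname validation. The URL and PEM content are illustrative placeholders.

    tls_endpoint = TlsEndpoint(
        url="rtsps://camera.example:322/stream",
        trusted_certificates=PemCertificateList(
            certificates=[
                "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
            ],
        ),
        validation_options=TlsValidationOptions(
            ignore_hostname="true",   # self-signed cert does not match the hostname
            ignore_signature="false", # still require a trusted chain
        ),
    )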
[docs]class UnsecuredEndpoint(EndpointBase): """Unsecured endpoint describes an endpoint that the pipeline can connect to over clear transport (no encryption in transit). All required parameters must be populated in order to send to Azure. :param type: Required. Type discriminator for the derived types. Constant filled by server. :type type: str :param credentials: Credentials to be presented to the endpoint. :type credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase :param url: Required. The endpoint URL for Video Analyzer to connect to. :type url: str """ _validation = { 'type': {'required': True}, 'url': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'credentials': {'key': 'credentials', 'type': 'CredentialsBase'}, 'url': {'key': 'url', 'type': 'str'}, } def __init__( self, *, url: str, credentials: Optional["CredentialsBase"] = None, **kwargs ): super(UnsecuredEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str
[docs]class UsernamePasswordCredentials(CredentialsBase): """Username and password credentials. All required parameters must be populated in order to send to Azure. :param type: Required. Type discriminator for the derived types. Constant filled by server. :type type: str :param username: Required. Username to be presented as part of the credentials. :type username: str :param password: Required. Password to be presented as part of the credentials. It is recommended that this value be parameterized as a secret string in order to prevent it from being returned as part of the resource on API requests. :type password: str """ _validation = { 'type': {'required': True}, 'username': {'required': True}, 'password': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'username': {'key': 'username', 'type': 'str'}, 'password': {'key': 'password', 'type': 'str'}, } def __init__( self, *, username: str, password: str, **kwargs ): super(UsernamePasswordCredentials, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str self.username = username self.password = password
[docs]class VideoCreationProperties(msrest.serialization.Model): """Optional video properties to be used in case a new video resource needs to be created on the service. These will not take effect if the video already exists. :param title: Optional video title provided by the user. Value can be up to 256 characters long. :type title: str :param description: Optional video description provided by the user. Value can be up to 2048 characters long. :type description: str :param segment_length: Video segment length indicates the length of individual video files (segments) which are persisted to storage. Smaller segments provide lower archive playback latency but generate a larger volume of storage transactions. Larger segments reduce the amount of storage transactions while increasing the archive playback latency. Value must be specified in ISO8601 duration format (e.g. "PT30S" equals 30 seconds) and can vary from 30 seconds to 5 minutes, in 30-second increments. Changing this value after the video is initially created can lead to errors when uploading media to the archive. Default value is 30 seconds. :type segment_length: str """ _attribute_map = { 'title': {'key': 'title', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'segment_length': {'key': 'segmentLength', 'type': 'str'}, } def __init__( self, *, title: Optional[str] = None, description: Optional[str] = None, segment_length: Optional[str] = None, **kwargs ): super(VideoCreationProperties, self).__init__(**kwargs) self.title = title self.description = description self.segment_length = segment_length
[docs]class VideoSink(SinkNodeBase): """Video sink allows for video and audio to be recorded to the Video Analyzer service. The recorded video can be played from anywhere and further managed from the cloud. For security reasons, a given Video Analyzer edge module instance can only record content to new video entries, or existing video entries previously recorded by the same module. Any attempt to record content to an existing video which has not been created by the same module instance will result in failure to record. All required parameters must be populated in order to send to Azure. :param type: Required. Type discriminator for the derived types. Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] :param video_name: Required. Name of a new or existing Video Analyzer video resource used for the media recording. :type video_name: str :param video_creation_properties: Optional video properties to be used in case a new video resource needs to be created on the service. :type video_creation_properties: ~azure.media.videoanalyzer.edge.models.VideoCreationProperties :param local_media_cache_path: Required. Path to a local file system directory for caching of temporary media files. This will also be used to store content which cannot be immediately uploaded to Azure due to Internet connectivity issues. :type local_media_cache_path: str :param local_media_cache_maximum_size_mi_b: Required. Maximum amount of disk space that can be used for caching of temporary media files. Once this limit is reached, the oldest segments of the media archive will be continuously deleted in order to make space for new media, thus leading to gaps in the cloud recorded content. :type local_media_cache_maximum_size_mi_b: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, 'video_name': {'required': True}, 'local_media_cache_path': {'required': True}, 'local_media_cache_maximum_size_mi_b': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'video_name': {'key': 'videoName', 'type': 'str'}, 'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'}, 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, } def __init__( self, *, name: str, inputs: List["NodeInput"], video_name: str, local_media_cache_path: str, local_media_cache_maximum_size_mi_b: str, video_creation_properties: Optional["VideoCreationProperties"] = None, **kwargs ): super(VideoSink, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSink' # type: str self.video_name = video_name self.video_creation_properties = video_creation_properties self.local_media_cache_path = local_media_cache_path self.local_media_cache_maximum_size_mi_b = local_media_cache_maximum_size_mi_b
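A closing sketch for the topology example above (not part of the generated source): record the gated media to a video resource with 30-second segments. The video name, cache path and cache size are illustrative.

    video_sink = VideoSink(
        name="videoSink",
        inputs=[NodeInput(node_name="signalGate")],  # record only gated media
        video_name="sample-camera-001",
        video_creation_properties=VideoCreationProperties(
            title="Sample camera",
            segment_length="PT30S",
        ),
        local_media_cache_path="/var/lib/videoanalyzer/tmp/",
        local_media_cache_maximum_size_mi_b="2048",
    )
    topology.properties.sinks.append(video_sink)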