Source code for azure.media.videoanalyzeredge._generated.models._models_py3

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

import datetime
from typing import Any, List, Optional, Union

import msrest.serialization

from ._azure_video_analyzerfor_edge_enums import *


class CertificateSource(msrest.serialization.Model):
    """Base class for certificate sources.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: PemCertificateList.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'}
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(CertificateSource, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
class ProcessorNodeBase(msrest.serialization.Model):
    """Base class for topology processor nodes.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: CognitiveServicesVisionProcessor, ExtensionProcessorBase,
    LineCrossingProcessor, MotionDetectionProcessor, ObjectTrackingProcessor,
    SignalGateProcessor.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar name: Required. Node name. Must be unique within the topology.
    :vartype name: str
    :ivar inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.CognitiveServicesVisionProcessor': 'CognitiveServicesVisionProcessor', '#Microsoft.VideoAnalyzer.ExtensionProcessorBase': 'ExtensionProcessorBase', '#Microsoft.VideoAnalyzer.LineCrossingProcessor': 'LineCrossingProcessor', '#Microsoft.VideoAnalyzer.MotionDetectionProcessor': 'MotionDetectionProcessor', '#Microsoft.VideoAnalyzer.ObjectTrackingProcessor': 'ObjectTrackingProcessor', '#Microsoft.VideoAnalyzer.SignalGateProcessor': 'SignalGateProcessor'}
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        **kwargs
    ):
        """
        :keyword name: Required. Node name. Must be unique within the topology.
        :paramtype name: str
        :keyword inputs: Required. An array of upstream node references within the topology to be
         used as inputs for this node.
        :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
        """
        super(ProcessorNodeBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        self.name = name
        self.inputs = inputs
class CognitiveServicesVisionProcessor(ProcessorNodeBase):
    """A processor that allows the pipeline topology to send video frames to a Cognitive Services
    Vision extension. Inference results are relayed to downstream nodes.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar name: Required. Node name. Must be unique within the topology.
    :vartype name: str
    :ivar inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :ivar endpoint: Required. Endpoint to which this processor should connect.
    :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
    :ivar image: Describes the parameters of the image that is sent as input to the endpoint.
    :vartype image: ~azure.media.videoanalyzer.edge.models.ImageProperties
    :ivar sampling_options: Describes the sampling options to be applied when forwarding samples
     to the extension.
    :vartype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions
    :ivar operation: Required. Describes the Spatial Analysis operation to be used in the
     Cognitive Services Vision processor.
    :vartype operation: ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationBase
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'endpoint': {'required': True},
        'operation': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
        'image': {'key': 'image', 'type': 'ImageProperties'},
        'sampling_options': {'key': 'samplingOptions', 'type': 'SamplingOptions'},
        'operation': {'key': 'operation', 'type': 'SpatialAnalysisOperationBase'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        endpoint: "EndpointBase",
        operation: "SpatialAnalysisOperationBase",
        image: Optional["ImageProperties"] = None,
        sampling_options: Optional["SamplingOptions"] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Node name. Must be unique within the topology.
        :paramtype name: str
        :keyword inputs: Required. An array of upstream node references within the topology to be
         used as inputs for this node.
        :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
        :keyword endpoint: Required. Endpoint to which this processor should connect.
        :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
        :keyword image: Describes the parameters of the image that is sent as input to the
         endpoint.
        :paramtype image: ~azure.media.videoanalyzer.edge.models.ImageProperties
        :keyword sampling_options: Describes the sampling options to be applied when forwarding
         samples to the extension.
        :paramtype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions
        :keyword operation: Required. Describes the Spatial Analysis operation to be used in the
         Cognitive Services Vision processor.
        :paramtype operation: ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationBase
        """
        super(CognitiveServicesVisionProcessor, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.CognitiveServicesVisionProcessor'  # type: str
        self.endpoint = endpoint
        self.image = image
        self.sampling_options = sampling_options
        self.operation = operation
class CredentialsBase(msrest.serialization.Model):
    """Base class for credential objects.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: HttpHeaderCredentials, SymmetricKeyCredentials,
    UsernamePasswordCredentials.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.HttpHeaderCredentials': 'HttpHeaderCredentials', '#Microsoft.VideoAnalyzer.SymmetricKeyCredentials': 'SymmetricKeyCredentials', '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'}
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(CredentialsBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
class DiscoveredOnvifDevice(msrest.serialization.Model):
    """The discovered properties of the ONVIF device that are returned during the discovery.

    :ivar service_identifier: The unique identifier of the ONVIF device that was discovered in
     the same subnet as the IoT Edge device.
    :vartype service_identifier: str
    :ivar remote_ip_address: The IP address of the ONVIF device that was discovered in the same
     subnet as the IoT Edge device.
    :vartype remote_ip_address: str
    :ivar scopes: An array of hostnames for the ONVIF discovered devices that are in the same
     subnet as the IoT Edge device.
    :vartype scopes: list[str]
    :ivar endpoints: An array of media profile endpoints that the ONVIF discovered device
     supports.
    :vartype endpoints: list[str]
    """

    _attribute_map = {
        'service_identifier': {'key': 'serviceIdentifier', 'type': 'str'},
        'remote_ip_address': {'key': 'remoteIPAddress', 'type': 'str'},
        'scopes': {'key': 'scopes', 'type': '[str]'},
        'endpoints': {'key': 'endpoints', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        service_identifier: Optional[str] = None,
        remote_ip_address: Optional[str] = None,
        scopes: Optional[List[str]] = None,
        endpoints: Optional[List[str]] = None,
        **kwargs
    ):
        """
        :keyword service_identifier: The unique identifier of the ONVIF device that was discovered
         in the same subnet as the IoT Edge device.
        :paramtype service_identifier: str
        :keyword remote_ip_address: The IP address of the ONVIF device that was discovered in the
         same subnet as the IoT Edge device.
        :paramtype remote_ip_address: str
        :keyword scopes: An array of hostnames for the ONVIF discovered devices that are in the
         same subnet as the IoT Edge device.
        :paramtype scopes: list[str]
        :keyword endpoints: An array of media profile endpoints that the ONVIF discovered device
         supports.
        :paramtype endpoints: list[str]
        """
        super(DiscoveredOnvifDevice, self).__init__(**kwargs)
        self.service_identifier = service_identifier
        self.remote_ip_address = remote_ip_address
        self.scopes = scopes
        self.endpoints = endpoints
class DiscoveredOnvifDeviceCollection(msrest.serialization.Model):
    """A list of ONVIF devices that were discovered in the same subnet as the IoT Edge device.

    :ivar value: An array of ONVIF devices that have been discovered in the same subnet as the
     IoT Edge device.
    :vartype value: list[~azure.media.videoanalyzer.edge.models.DiscoveredOnvifDevice]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[DiscoveredOnvifDevice]'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["DiscoveredOnvifDevice"]] = None,
        **kwargs
    ):
        """
        :keyword value: An array of ONVIF devices that have been discovered in the same subnet as
         the IoT Edge device.
        :paramtype value: list[~azure.media.videoanalyzer.edge.models.DiscoveredOnvifDevice]
        """
        super(DiscoveredOnvifDeviceCollection, self).__init__(**kwargs)
        self.value = value
class EndpointBase(msrest.serialization.Model):
    """Base class for endpoints.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: TlsEndpoint, UnsecuredEndpoint.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar credentials: Credentials to be presented to the endpoint.
    :vartype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
    :ivar url: Required. The endpoint URL for Video Analyzer to connect to.
    :vartype url: str
    """

    _validation = {
        'type': {'required': True},
        'url': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
        'url': {'key': 'url', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'}
    }

    def __init__(
        self,
        *,
        url: str,
        credentials: Optional["CredentialsBase"] = None,
        **kwargs
    ):
        """
        :keyword credentials: Credentials to be presented to the endpoint.
        :paramtype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
        :keyword url: Required. The endpoint URL for Video Analyzer to connect to.
        :paramtype url: str
        """
        super(EndpointBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        self.credentials = credentials
        self.url = url
class ExtensionProcessorBase(ProcessorNodeBase):
    """Base class for pipeline extension processors. Pipeline extensions allow for custom media
    analysis and processing to be plugged into the Video Analyzer pipeline.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: GrpcExtension, HttpExtension.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar name: Required. Node name. Must be unique within the topology.
    :vartype name: str
    :ivar inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :ivar endpoint: Required. Endpoint details of the pipeline extension plugin.
    :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
    :ivar image: Required. Image transformations and formatting options to be applied to the
     video frame(s) prior to submission to the pipeline extension plugin.
    :vartype image: ~azure.media.videoanalyzer.edge.models.ImageProperties
    :ivar sampling_options: Media sampling parameters that define how often media is submitted
     to the extension plugin.
    :vartype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'endpoint': {'required': True},
        'image': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
        'image': {'key': 'image', 'type': 'ImageProperties'},
        'sampling_options': {'key': 'samplingOptions', 'type': 'SamplingOptions'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.GrpcExtension': 'GrpcExtension', '#Microsoft.VideoAnalyzer.HttpExtension': 'HttpExtension'}
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        endpoint: "EndpointBase",
        image: "ImageProperties",
        sampling_options: Optional["SamplingOptions"] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Node name. Must be unique within the topology.
        :paramtype name: str
        :keyword inputs: Required. An array of upstream node references within the topology to be
         used as inputs for this node.
        :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
        :keyword endpoint: Required. Endpoint details of the pipeline extension plugin.
        :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
        :keyword image: Required. Image transformations and formatting options to be applied to
         the video frame(s) prior to submission to the pipeline extension plugin.
        :paramtype image: ~azure.media.videoanalyzer.edge.models.ImageProperties
        :keyword sampling_options: Media sampling parameters that define how often media is
         submitted to the extension plugin.
        :paramtype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions
        """
        super(ExtensionProcessorBase, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.ExtensionProcessorBase'  # type: str
        self.endpoint = endpoint
        self.image = image
        self.sampling_options = sampling_options
class SinkNodeBase(msrest.serialization.Model):
    """Base class for topology sink nodes.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: FileSink, IotHubMessageSink, VideoSink.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar name: Required. Node name. Must be unique within the topology.
    :vartype name: str
    :ivar inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.FileSink': 'FileSink', '#Microsoft.VideoAnalyzer.IotHubMessageSink': 'IotHubMessageSink', '#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'}
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        **kwargs
    ):
        """
        :keyword name: Required. Node name. Must be unique within the topology.
        :paramtype name: str
        :keyword inputs: Required. An array of upstream node references within the topology to be
         used as inputs for this node.
        :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
        """
        super(SinkNodeBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        self.name = name
        self.inputs = inputs
class FileSink(SinkNodeBase):
    """File sink allows for video and audio content to be recorded on the file system on the edge
    device.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar name: Required. Node name. Must be unique within the topology.
    :vartype name: str
    :ivar inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :ivar base_directory_path: Required. Absolute directory path where media files will be
     stored.
    :vartype base_directory_path: str
    :ivar file_name_pattern: Required. File name pattern for creating new files when performing
     event based recording. The pattern must include at least one system variable.
    :vartype file_name_pattern: str
    :ivar maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing
     files from this sink. Once this limit is reached, the oldest files from this sink will be
     automatically deleted.
    :vartype maximum_size_mi_b: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'base_directory_path': {'required': True},
        'file_name_pattern': {'required': True},
        'maximum_size_mi_b': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'base_directory_path': {'key': 'baseDirectoryPath', 'type': 'str'},
        'file_name_pattern': {'key': 'fileNamePattern', 'type': 'str'},
        'maximum_size_mi_b': {'key': 'maximumSizeMiB', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        base_directory_path: str,
        file_name_pattern: str,
        maximum_size_mi_b: str,
        **kwargs
    ):
        """
        :keyword name: Required. Node name. Must be unique within the topology.
        :paramtype name: str
        :keyword inputs: Required. An array of upstream node references within the topology to be
         used as inputs for this node.
        :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
        :keyword base_directory_path: Required. Absolute directory path where media files will be
         stored.
        :paramtype base_directory_path: str
        :keyword file_name_pattern: Required. File name pattern for creating new files when
         performing event based recording. The pattern must include at least one system variable.
        :paramtype file_name_pattern: str
        :keyword maximum_size_mi_b: Required. Maximum amount of disk space that can be used for
         storing files from this sink. Once this limit is reached, the oldest files from this sink
         will be automatically deleted.
        :paramtype maximum_size_mi_b: str
        """
        super(FileSink, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.FileSink'  # type: str
        self.base_directory_path = base_directory_path
        self.file_name_pattern = file_name_pattern
        self.maximum_size_mi_b = maximum_size_mi_b
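# Example (illustrative, not part of the generated code): a minimal sketch of
# constructing a FileSink node. It assumes a NodeInput model (defined elsewhere in
# this module) that accepts a ``node_name`` keyword; the path and pattern values
# are placeholders, with ``${dateTime}`` standing in for a system variable.
def _example_file_sink():
    return FileSink(
        name="fileSink",
        inputs=[NodeInput(node_name="motionDetection")],  # assumed NodeInput signature
        base_directory_path="/var/media",                 # hypothetical edge-local path
        file_name_pattern="clip-${dateTime}",             # must contain a system variable
        maximum_size_mi_b="512",                          # note: sizes are strings in this API
    )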
class GrpcExtension(ExtensionProcessorBase):
    """GRPC extension processor allows pipeline extension plugins to be connected to the pipeline
    over a gRPC channel. Extension plugins must act as a gRPC server. Please see
    https://aka.ms/ava-extension-grpc for details.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar name: Required. Node name. Must be unique within the topology.
    :vartype name: str
    :ivar inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :ivar endpoint: Required. Endpoint details of the pipeline extension plugin.
    :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
    :ivar image: Required. Image transformations and formatting options to be applied to the
     video frame(s) prior to submission to the pipeline extension plugin.
    :vartype image: ~azure.media.videoanalyzer.edge.models.ImageProperties
    :ivar sampling_options: Media sampling parameters that define how often media is submitted
     to the extension plugin.
    :vartype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions
    :ivar data_transfer: Required. Specifies how media is transferred to the extension plugin.
    :vartype data_transfer: ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransfer
    :ivar extension_configuration: An optional configuration string that is sent to the extension
     plugin. The configuration string is specific to each custom extension and it is neither
     understood nor validated by Video Analyzer. Please see https://aka.ms/ava-extension-grpc for
     details.
    :vartype extension_configuration: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'endpoint': {'required': True},
        'image': {'required': True},
        'data_transfer': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
        'image': {'key': 'image', 'type': 'ImageProperties'},
        'sampling_options': {'key': 'samplingOptions', 'type': 'SamplingOptions'},
        'data_transfer': {'key': 'dataTransfer', 'type': 'GrpcExtensionDataTransfer'},
        'extension_configuration': {'key': 'extensionConfiguration', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        endpoint: "EndpointBase",
        image: "ImageProperties",
        data_transfer: "GrpcExtensionDataTransfer",
        sampling_options: Optional["SamplingOptions"] = None,
        extension_configuration: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Node name. Must be unique within the topology.
        :paramtype name: str
        :keyword inputs: Required. An array of upstream node references within the topology to be
         used as inputs for this node.
        :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
        :keyword endpoint: Required. Endpoint details of the pipeline extension plugin.
        :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
        :keyword image: Required. Image transformations and formatting options to be applied to
         the video frame(s) prior to submission to the pipeline extension plugin.
        :paramtype image: ~azure.media.videoanalyzer.edge.models.ImageProperties
        :keyword sampling_options: Media sampling parameters that define how often media is
         submitted to the extension plugin.
        :paramtype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions
        :keyword data_transfer: Required. Specifies how media is transferred to the extension
         plugin.
        :paramtype data_transfer: ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransfer
        :keyword extension_configuration: An optional configuration string that is sent to the
         extension plugin. The configuration string is specific to each custom extension and it is
         neither understood nor validated by Video Analyzer. Please see
         https://aka.ms/ava-extension-grpc for details.
        :paramtype extension_configuration: str
        """
        super(GrpcExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, sampling_options=sampling_options, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.GrpcExtension'  # type: str
        self.data_transfer = data_transfer
        self.extension_configuration = extension_configuration
class GrpcExtensionDataTransfer(msrest.serialization.Model):
    """Describes how media is transferred to the extension plugin.

    All required parameters must be populated in order to send to Azure.

    :ivar shared_memory_size_mi_b: The shared memory buffer for sample transfers, in mebibytes.
     It can only be used with the 'SharedMemory' transfer mode.
    :vartype shared_memory_size_mi_b: str
    :ivar mode: Required. Data transfer mode: embedded or sharedMemory. Possible values include:
     "embedded", "sharedMemory".
    :vartype mode: str or ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransferMode
    """

    _validation = {
        'mode': {'required': True},
    }

    _attribute_map = {
        'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'},
        'mode': {'key': 'mode', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        mode: Union[str, "GrpcExtensionDataTransferMode"],
        shared_memory_size_mi_b: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword shared_memory_size_mi_b: The shared memory buffer for sample transfers, in
         mebibytes. It can only be used with the 'SharedMemory' transfer mode.
        :paramtype shared_memory_size_mi_b: str
        :keyword mode: Required. Data transfer mode: embedded or sharedMemory. Possible values
         include: "embedded", "sharedMemory".
        :paramtype mode: str or ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransferMode
        """
        super(GrpcExtensionDataTransfer, self).__init__(**kwargs)
        self.shared_memory_size_mi_b = shared_memory_size_mi_b
        self.mode = mode
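# Example (illustrative): wiring a GrpcExtension that ships raw frames to an
# inference server over shared memory. UnsecuredEndpoint and NodeInput are defined
# elsewhere in this module; the URL, node name, and buffer size are placeholders.
def _example_grpc_extension():
    return GrpcExtension(
        name="grpcExtension",
        inputs=[NodeInput(node_name="rtspSource")],       # assumed NodeInput signature
        endpoint=UnsecuredEndpoint(url="tcp://my-extension:5001"),  # hypothetical address
        image=ImageProperties(
            scale=ImageScale(mode="pad", width="416", height="416"),
            format=ImageFormatRaw(pixel_format="bgr24"),
        ),
        data_transfer=GrpcExtensionDataTransfer(
            mode="sharedMemory",
            shared_memory_size_mi_b="64",
        ),
    )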
class H264Configuration(msrest.serialization.Model):
    """Class representing the H264 Configuration.

    :ivar gov_length: Group of video frames (GOV) length.
    :vartype gov_length: float
    :ivar profile: The H264 Profile. Possible values include: "Baseline", "Main", "Extended",
     "High".
    :vartype profile: str or ~azure.media.videoanalyzer.edge.models.H264Profile
    """

    _attribute_map = {
        'gov_length': {'key': 'govLength', 'type': 'float'},
        'profile': {'key': 'profile', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        gov_length: Optional[float] = None,
        profile: Optional[Union[str, "H264Profile"]] = None,
        **kwargs
    ):
        """
        :keyword gov_length: Group of video frames (GOV) length.
        :paramtype gov_length: float
        :keyword profile: The H264 Profile. Possible values include: "Baseline", "Main",
         "Extended", "High".
        :paramtype profile: str or ~azure.media.videoanalyzer.edge.models.H264Profile
        """
        super(H264Configuration, self).__init__(**kwargs)
        self.gov_length = gov_length
        self.profile = profile
class HttpExtension(ExtensionProcessorBase):
    """HTTP extension processor allows pipeline extension plugins to be connected to the pipeline
    over the HTTP protocol. Extension plugins must act as an HTTP server. Please see
    https://aka.ms/ava-extension-http for details.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar name: Required. Node name. Must be unique within the topology.
    :vartype name: str
    :ivar inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :ivar endpoint: Required. Endpoint details of the pipeline extension plugin.
    :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
    :ivar image: Required. Image transformations and formatting options to be applied to the
     video frame(s) prior to submission to the pipeline extension plugin.
    :vartype image: ~azure.media.videoanalyzer.edge.models.ImageProperties
    :ivar sampling_options: Media sampling parameters that define how often media is submitted
     to the extension plugin.
    :vartype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'endpoint': {'required': True},
        'image': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
        'image': {'key': 'image', 'type': 'ImageProperties'},
        'sampling_options': {'key': 'samplingOptions', 'type': 'SamplingOptions'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        endpoint: "EndpointBase",
        image: "ImageProperties",
        sampling_options: Optional["SamplingOptions"] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Node name. Must be unique within the topology.
        :paramtype name: str
        :keyword inputs: Required. An array of upstream node references within the topology to be
         used as inputs for this node.
        :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
        :keyword endpoint: Required. Endpoint details of the pipeline extension plugin.
        :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
        :keyword image: Required. Image transformations and formatting options to be applied to
         the video frame(s) prior to submission to the pipeline extension plugin.
        :paramtype image: ~azure.media.videoanalyzer.edge.models.ImageProperties
        :keyword sampling_options: Media sampling parameters that define how often media is
         submitted to the extension plugin.
        :paramtype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions
        """
        super(HttpExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, sampling_options=sampling_options, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.HttpExtension'  # type: str
class HttpHeaderCredentials(CredentialsBase):
    """HTTP header credentials.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar header_name: Required. HTTP header name.
    :vartype header_name: str
    :ivar header_value: Required. HTTP header value. It is recommended that this value is
     parameterized as a secret string in order to prevent this value from being returned as part
     of the resource on API requests.
    :vartype header_value: str
    """

    _validation = {
        'type': {'required': True},
        'header_name': {'required': True},
        'header_value': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'header_name': {'key': 'headerName', 'type': 'str'},
        'header_value': {'key': 'headerValue', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        header_name: str,
        header_value: str,
        **kwargs
    ):
        """
        :keyword header_name: Required. HTTP header name.
        :paramtype header_name: str
        :keyword header_value: Required. HTTP header value. It is recommended that this value is
         parameterized as a secret string in order to prevent this value from being returned as
         part of the resource on API requests.
        :paramtype header_value: str
        """
        super(HttpHeaderCredentials, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.HttpHeaderCredentials'  # type: str
        self.header_name = header_name
        self.header_value = header_value
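# Example (illustrative): an HttpExtension whose endpoint authenticates with a
# custom HTTP header. UnsecuredEndpoint and NodeInput are defined elsewhere in this
# module; the URL is a placeholder and the header value uses a hypothetical
# parameterized secret rather than a literal, as the docstring above recommends.
def _example_http_extension():
    credentials = HttpHeaderCredentials(
        header_name="Authorization",
        header_value="${inferenceApiKey}",  # hypothetical topology parameter
    )
    return HttpExtension(
        name="httpExtension",
        inputs=[NodeInput(node_name="rtspSource")],  # assumed NodeInput signature
        endpoint=UnsecuredEndpoint(url="http://my-inference:8080/score", credentials=credentials),
        image=ImageProperties(format=ImageFormatJpeg(quality="90")),
    )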
class ImageFormatProperties(msrest.serialization.Model):
    """Base class for image formatting properties.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: ImageFormatBmp, ImageFormatJpeg, ImageFormatPng, ImageFormatRaw.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.ImageFormatBmp': 'ImageFormatBmp', '#Microsoft.VideoAnalyzer.ImageFormatJpeg': 'ImageFormatJpeg', '#Microsoft.VideoAnalyzer.ImageFormatPng': 'ImageFormatPng', '#Microsoft.VideoAnalyzer.ImageFormatRaw': 'ImageFormatRaw'}
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(ImageFormatProperties, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
class ImageFormatBmp(ImageFormatProperties):
    """BMP image encoding.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(ImageFormatBmp, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.ImageFormatBmp'  # type: str
class ImageFormatJpeg(ImageFormatProperties):
    """JPEG image encoding.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar quality: Image quality value from 0 to 100 (best quality).
    :vartype quality: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'quality': {'key': 'quality', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        quality: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword quality: Image quality value from 0 to 100 (best quality).
        :paramtype quality: str
        """
        super(ImageFormatJpeg, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.ImageFormatJpeg'  # type: str
        self.quality = quality
class ImageFormatPng(ImageFormatProperties):
    """PNG image encoding.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(ImageFormatPng, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.ImageFormatPng'  # type: str
class ImageFormatRaw(ImageFormatProperties):
    """Raw image formatting.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar pixel_format: Required. Pixel format to be applied to the raw image. Possible values
     include: "yuv420p", "rgb565be", "rgb565le", "rgb555be", "rgb555le", "rgb24", "bgr24", "argb",
     "rgba", "abgr", "bgra".
    :vartype pixel_format: str or ~azure.media.videoanalyzer.edge.models.ImageFormatRawPixelFormat
    """

    _validation = {
        'type': {'required': True},
        'pixel_format': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'pixel_format': {'key': 'pixelFormat', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        pixel_format: Union[str, "ImageFormatRawPixelFormat"],
        **kwargs
    ):
        """
        :keyword pixel_format: Required. Pixel format to be applied to the raw image. Possible
         values include: "yuv420p", "rgb565be", "rgb565le", "rgb555be", "rgb555le", "rgb24",
         "bgr24", "argb", "rgba", "abgr", "bgra".
        :paramtype pixel_format: str or
         ~azure.media.videoanalyzer.edge.models.ImageFormatRawPixelFormat
        """
        super(ImageFormatRaw, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.ImageFormatRaw'  # type: str
        self.pixel_format = pixel_format
class ImageProperties(msrest.serialization.Model):
    """Image transformations and formatting options to be applied to the video frame(s).

    :ivar scale: Image scaling mode.
    :vartype scale: ~azure.media.videoanalyzer.edge.models.ImageScale
    :ivar format: Base class for image formatting properties.
    :vartype format: ~azure.media.videoanalyzer.edge.models.ImageFormatProperties
    """

    _attribute_map = {
        'scale': {'key': 'scale', 'type': 'ImageScale'},
        'format': {'key': 'format', 'type': 'ImageFormatProperties'},
    }

    def __init__(
        self,
        *,
        scale: Optional["ImageScale"] = None,
        format: Optional["ImageFormatProperties"] = None,
        **kwargs
    ):
        """
        :keyword scale: Image scaling mode.
        :paramtype scale: ~azure.media.videoanalyzer.edge.models.ImageScale
        :keyword format: Base class for image formatting properties.
        :paramtype format: ~azure.media.videoanalyzer.edge.models.ImageFormatProperties
        """
        super(ImageProperties, self).__init__(**kwargs)
        self.scale = scale
        self.format = format
class ImageScale(msrest.serialization.Model):
    """Image scaling mode.

    :ivar mode: Describes the image scaling mode to be applied. Default mode is 'pad'. Possible
     values include: "preserveAspectRatio", "pad", "stretch".
    :vartype mode: str or ~azure.media.videoanalyzer.edge.models.ImageScaleMode
    :ivar width: The desired output image width.
    :vartype width: str
    :ivar height: The desired output image height.
    :vartype height: str
    """

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'width': {'key': 'width', 'type': 'str'},
        'height': {'key': 'height', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        mode: Optional[Union[str, "ImageScaleMode"]] = None,
        width: Optional[str] = None,
        height: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword mode: Describes the image scaling mode to be applied. Default mode is 'pad'.
         Possible values include: "preserveAspectRatio", "pad", "stretch".
        :paramtype mode: str or ~azure.media.videoanalyzer.edge.models.ImageScaleMode
        :keyword width: The desired output image width.
        :paramtype width: str
        :keyword height: The desired output image height.
        :paramtype height: str
        """
        super(ImageScale, self).__init__(**kwargs)
        self.mode = mode
        self.width = width
        self.height = height
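# Example (illustrative): composing the image pre-processing options that extension
# processors consume. Note that width, height, and quality are strings in this API
# surface, not integers.
def _example_image_properties():
    return ImageProperties(
        scale=ImageScale(mode="preserveAspectRatio", width="640", height="360"),
        format=ImageFormatJpeg(quality="85"),
    )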
class IotHubDeviceConnection(msrest.serialization.Model):
    """Information that enables communication between the IoT Hub and the IoT device, allowing this
    edge module to act as a transparent gateway between the two.

    All required parameters must be populated in order to send to Azure.

    :ivar device_id: Required. The name of the IoT device configured and managed in IoT Hub
     (case-sensitive).
    :vartype device_id: str
    :ivar credentials: IoT device connection credentials. Currently IoT device symmetric key
     credentials are supported.
    :vartype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
    """

    _validation = {
        'device_id': {'required': True},
    }

    _attribute_map = {
        'device_id': {'key': 'deviceId', 'type': 'str'},
        'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
    }

    def __init__(
        self,
        *,
        device_id: str,
        credentials: Optional["CredentialsBase"] = None,
        **kwargs
    ):
        """
        :keyword device_id: Required. The name of the IoT device configured and managed in IoT Hub
         (case-sensitive).
        :paramtype device_id: str
        :keyword credentials: IoT device connection credentials. Currently IoT device symmetric
         key credentials are supported.
        :paramtype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
        """
        super(IotHubDeviceConnection, self).__init__(**kwargs)
        self.device_id = device_id
        self.credentials = credentials
class IotHubMessageSink(SinkNodeBase):
    """IoT Hub Message sink allows for pipeline messages to be published into the IoT Edge Hub.
    Published messages can then be delivered to the cloud and other modules via routes declared in
    the IoT Edge deployment manifest.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar name: Required. Node name. Must be unique within the topology.
    :vartype name: str
    :ivar inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :ivar hub_output_name: Required. Name of the IoT Edge Hub output to which the messages will
     be published.
    :vartype hub_output_name: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'hub_output_name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'hub_output_name': {'key': 'hubOutputName', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        hub_output_name: str,
        **kwargs
    ):
        """
        :keyword name: Required. Node name. Must be unique within the topology.
        :paramtype name: str
        :keyword inputs: Required. An array of upstream node references within the topology to be
         used as inputs for this node.
        :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
        :keyword hub_output_name: Required. Name of the IoT Edge Hub output to which the messages
         will be published.
        :paramtype hub_output_name: str
        """
        super(IotHubMessageSink, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.IotHubMessageSink'  # type: str
        self.hub_output_name = hub_output_name
class SourceNodeBase(msrest.serialization.Model):
    """Base class for topology source nodes.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: IotHubMessageSource, RtspSource.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar name: Required. Node name. Must be unique within the topology.
    :vartype name: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.IotHubMessageSource': 'IotHubMessageSource', '#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource'}
    }

    def __init__(
        self,
        *,
        name: str,
        **kwargs
    ):
        """
        :keyword name: Required. Node name. Must be unique within the topology.
        :paramtype name: str
        """
        super(SourceNodeBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        self.name = name
class IotHubMessageSource(SourceNodeBase):
    """IoT Hub Message source allows for the pipeline to consume messages from the IoT Edge Hub.
    Messages can be routed from other IoT modules via routes declared in the IoT Edge deployment
    manifest.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar name: Required. Node name. Must be unique within the topology.
    :vartype name: str
    :ivar hub_input_name: Name of the IoT Edge Hub input from which messages will be consumed.
    :vartype hub_input_name: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'hub_input_name': {'key': 'hubInputName', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        hub_input_name: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Node name. Must be unique within the topology.
        :paramtype name: str
        :keyword hub_input_name: Name of the IoT Edge Hub input from which messages will be
         consumed.
        :paramtype hub_input_name: str
        """
        super(IotHubMessageSource, self).__init__(name=name, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.IotHubMessageSource'  # type: str
        self.hub_input_name = hub_input_name
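# Example (illustrative): a hub message source/sink pair that lets a pipeline
# consume messages from the IoT Edge hub and publish inference events back to it.
# The input/output names are placeholders to be matched by the routes in the IoT
# Edge deployment manifest; NodeInput is defined elsewhere in this module.
def _example_hub_message_nodes():
    source = IotHubMessageSource(name="hubSource", hub_input_name="pipelineInput")
    sink = IotHubMessageSink(
        name="hubSink",
        inputs=[NodeInput(node_name="grpcExtension")],  # assumed NodeInput signature
        hub_output_name="inferenceOutput",
    )
    return source, sink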
class LineCrossingProcessor(ProcessorNodeBase):
    """Line crossing processor allows for the detection of tracked objects moving across one or
    more predefined lines. It must be downstream of an object tracker or of an AI extension node
    that generates sequenceId for objects which are tracked across different frames of the video.
    Inference events are generated every time an object crosses from one side of the line to the
    other.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar name: Required. Node name. Must be unique within the topology.
    :vartype name: str
    :ivar inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :ivar lines: Required. An array of lines used to compute line crossing events.
    :vartype lines: list[~azure.media.videoanalyzer.edge.models.NamedLineBase]
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'lines': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'lines': {'key': 'lines', 'type': '[NamedLineBase]'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        lines: List["NamedLineBase"],
        **kwargs
    ):
        """
        :keyword name: Required. Node name. Must be unique within the topology.
        :paramtype name: str
        :keyword inputs: Required. An array of upstream node references within the topology to be
         used as inputs for this node.
        :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
        :keyword lines: Required. An array of lines used to compute line crossing events.
        :paramtype lines: list[~azure.media.videoanalyzer.edge.models.NamedLineBase]
        """
        super(LineCrossingProcessor, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.LineCrossingProcessor'  # type: str
        self.lines = lines
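# Example (illustrative): a line crossing processor placed downstream of an object
# tracker node. NamedLineString is assumed to be the concrete NamedLineBase
# sub-class defined elsewhere in this module, taking a name and a coordinate
# string; the node name, line name, and coordinates are placeholders.
def _example_line_crossing():
    return LineCrossingProcessor(
        name="lineCrossing",
        inputs=[NodeInput(node_name="objectTracker")],  # assumed NodeInput signature
        lines=[NamedLineString(name="doorway", line="[[0.3,0.2],[0.9,0.8]]")],  # assumed signature
    )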
class LivePipeline(msrest.serialization.Model):
    """Live Pipeline represents a unique instance of a pipeline topology which is used for
    real-time content ingestion and analysis.

    All required parameters must be populated in order to send to Azure.

    :ivar name: Required. Live pipeline unique identifier.
    :vartype name: str
    :ivar system_data: Read-only system metadata associated with this object.
    :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData
    :ivar properties: Live pipeline properties.
    :vartype properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'properties': {'key': 'properties', 'type': 'LivePipelineProperties'},
    }

    def __init__(
        self,
        *,
        name: str,
        system_data: Optional["SystemData"] = None,
        properties: Optional["LivePipelineProperties"] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Live pipeline unique identifier.
        :paramtype name: str
        :keyword system_data: Read-only system metadata associated with this object.
        :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData
        :keyword properties: Live pipeline properties.
        :paramtype properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties
        """
        super(LivePipeline, self).__init__(**kwargs)
        self.name = name
        self.system_data = system_data
        self.properties = properties
class MethodRequest(msrest.serialization.Model):
    """Base class for direct method calls.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: LivePipelineSetRequestBody, MethodRequestEmptyBodyBase,
    PipelineTopologySetRequestBody, RemoteDeviceAdapterSetRequestBody,
    LivePipelineListRequest, LivePipelineSetRequest, OnvifDeviceDiscoverRequest,
    OnvifDeviceGetRequest, PipelineTopologyListRequest, PipelineTopologySetRequest,
    RemoteDeviceAdapterListRequest, RemoteDeviceAdapterSetRequest.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are
     None and "1.1". The default value is "1.1".
    :vartype api_version: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
    }

    _subtype_map = {
        'method_name': {'LivePipelineSetRequestBody': 'LivePipelineSetRequestBody', 'MethodRequestEmptyBodyBase': 'MethodRequestEmptyBodyBase', 'PipelineTopologySetRequestBody': 'PipelineTopologySetRequestBody', 'RemoteDeviceAdapterSetRequestBody': 'RemoteDeviceAdapterSetRequestBody', 'livePipelineList': 'LivePipelineListRequest', 'livePipelineSet': 'LivePipelineSetRequest', 'onvifDeviceDiscover': 'OnvifDeviceDiscoverRequest', 'onvifDeviceGet': 'OnvifDeviceGetRequest', 'pipelineTopologyList': 'PipelineTopologyListRequest', 'pipelineTopologySet': 'PipelineTopologySetRequest', 'remoteDeviceAdapterList': 'RemoteDeviceAdapterListRequest', 'remoteDeviceAdapterSet': 'RemoteDeviceAdapterSetRequest'}
    }

    def __init__(
        self,
        *,
        api_version: Optional[str] = "1.1",
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        """
        super(MethodRequest, self).__init__(**kwargs)
        self.method_name = None  # type: Optional[str]
        self.api_version = api_version
class MethodRequestEmptyBodyBase(MethodRequest):
    """MethodRequestEmptyBodyBase.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: LivePipelineActivateRequest, LivePipelineDeactivateRequest,
    LivePipelineDeleteRequest, LivePipelineGetRequest, PipelineTopologyDeleteRequest,
    PipelineTopologyGetRequest, RemoteDeviceAdapterDeleteRequest,
    RemoteDeviceAdapterGetRequest.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are
     None and "1.1". The default value is "1.1".
    :vartype api_version: str
    :ivar name: Required. Resource name.
    :vartype name: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    _subtype_map = {
        'method_name': {'livePipelineActivate': 'LivePipelineActivateRequest', 'livePipelineDeactivate': 'LivePipelineDeactivateRequest', 'livePipelineDelete': 'LivePipelineDeleteRequest', 'livePipelineGet': 'LivePipelineGetRequest', 'pipelineTopologyDelete': 'PipelineTopologyDeleteRequest', 'pipelineTopologyGet': 'PipelineTopologyGetRequest', 'remoteDeviceAdapterDelete': 'RemoteDeviceAdapterDeleteRequest', 'remoteDeviceAdapterGet': 'RemoteDeviceAdapterGetRequest'}
    }

    def __init__(
        self,
        *,
        name: str,
        api_version: Optional[str] = "1.1",
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        :keyword name: Required. Resource name.
        :paramtype name: str
        """
        super(MethodRequestEmptyBodyBase, self).__init__(api_version=api_version, **kwargs)
        self.method_name = 'MethodRequestEmptyBodyBase'  # type: str
        self.name = name
class LivePipelineActivateRequest(MethodRequestEmptyBodyBase):
    """Activates an existing live pipeline.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are
     None and "1.1". The default value is "1.1".
    :vartype api_version: str
    :ivar name: Required. Resource name.
    :vartype name: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        api_version: Optional[str] = "1.1",
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        :keyword name: Required. Resource name.
        :paramtype name: str
        """
        super(LivePipelineActivateRequest, self).__init__(api_version=api_version, name=name, **kwargs)
        self.method_name = 'livePipelineActivate'  # type: str
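# Example (illustrative): these request models serialize into the JSON payload of
# the corresponding IoT Hub direct method call. ``serialize()`` is inherited from
# msrest.serialization.Model and, by default, omits read-only fields such as
# ``methodName``, which is instead carried separately as the direct method name.
# Actually invoking the method on the edge module is done with the IoT Hub SDK and
# is out of scope here.
def _example_activate_payload():
    request = LivePipelineActivateRequest(name="pipeline1")  # hypothetical pipeline name
    # request.method_name -> 'livePipelineActivate'
    # request.serialize() -> {'@apiVersion': '1.1', 'name': 'pipeline1'}
    return request.method_name, request.serialize()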
class LivePipelineCollection(msrest.serialization.Model):
    """A collection of live pipelines.

    :ivar value: List of live pipelines.
    :vartype value: list[~azure.media.videoanalyzer.edge.models.LivePipeline]
    :ivar continuation_token: A continuation token to be used in subsequent calls when
     enumerating through the collection. This is returned when the collection results won't fit in
     a single response.
    :vartype continuation_token: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[LivePipeline]'},
        'continuation_token': {'key': '@continuationToken', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["LivePipeline"]] = None,
        continuation_token: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword value: List of live pipelines.
        :paramtype value: list[~azure.media.videoanalyzer.edge.models.LivePipeline]
        :keyword continuation_token: A continuation token to be used in subsequent calls when
         enumerating through the collection. This is returned when the collection results won't
         fit in a single response.
        :paramtype continuation_token: str
        """
        super(LivePipelineCollection, self).__init__(**kwargs)
        self.value = value
        self.continuation_token = continuation_token
class LivePipelineDeactivateRequest(MethodRequestEmptyBodyBase):
    """Deactivates an existing live pipeline.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are
     None and "1.1". The default value is "1.1".
    :vartype api_version: str
    :ivar name: Required. Resource name.
    :vartype name: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        api_version: Optional[str] = "1.1",
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        :keyword name: Required. Resource name.
        :paramtype name: str
        """
        super(LivePipelineDeactivateRequest, self).__init__(api_version=api_version, name=name, **kwargs)
        self.method_name = 'livePipelineDeactivate'  # type: str
class LivePipelineDeleteRequest(MethodRequestEmptyBodyBase):
    """Deletes an existing live pipeline.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are
     None and "1.1". The default value is "1.1".
    :vartype api_version: str
    :ivar name: Required. Resource name.
    :vartype name: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        api_version: Optional[str] = "1.1",
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        :keyword name: Required. Resource name.
        :paramtype name: str
        """
        super(LivePipelineDeleteRequest, self).__init__(api_version=api_version, name=name, **kwargs)
        self.method_name = 'livePipelineDelete'  # type: str
class LivePipelineGetRequest(MethodRequestEmptyBodyBase):
    """Retrieves an existing live pipeline.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are
     None and "1.1". The default value is "1.1".
    :vartype api_version: str
    :ivar name: Required. Resource name.
    :vartype name: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        api_version: Optional[str] = "1.1",
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        :keyword name: Required. Resource name.
        :paramtype name: str
        """
        super(LivePipelineGetRequest, self).__init__(api_version=api_version, name=name, **kwargs)
        self.method_name = 'livePipelineGet'  # type: str
[docs]class LivePipelineListRequest(MethodRequest):
    """Lists all existing live pipelines.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
     and "1.1". The default value is "1.1".
    :vartype api_version: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        api_version: Optional[str] = "1.1",
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        """
        super(LivePipelineListRequest, self).__init__(api_version=api_version, **kwargs)
        self.method_name = 'livePipelineList'  # type: str
[docs]class LivePipelineProperties(msrest.serialization.Model):
    """Live pipeline properties.

    :ivar description: An optional description of the live pipeline.
    :vartype description: str
    :ivar topology_name: The reference to an existing pipeline topology defined for real-time
     content processing. When activated, this live pipeline will process content according to the
     pipeline topology definition.
    :vartype topology_name: str
    :ivar parameters: List of the instance level parameter values for the user-defined topology
     parameters. A pipeline can only define or override parameter values for parameters which
     have been declared in the referenced topology. Topology parameters without a default value
     must be defined. Topology parameters with a default value can optionally be overridden.
    :vartype parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDefinition]
    :ivar state: Current pipeline state (read-only). Possible values include: "inactive",
     "activating", "active", "deactivating".
    :vartype state: str or ~azure.media.videoanalyzer.edge.models.LivePipelineState
    """

    _attribute_map = {
        'description': {'key': 'description', 'type': 'str'},
        'topology_name': {'key': 'topologyName', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': '[ParameterDefinition]'},
        'state': {'key': 'state', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        description: Optional[str] = None,
        topology_name: Optional[str] = None,
        parameters: Optional[List["ParameterDefinition"]] = None,
        state: Optional[Union[str, "LivePipelineState"]] = None,
        **kwargs
    ):
        """
        :keyword description: An optional description of the live pipeline.
        :paramtype description: str
        :keyword topology_name: The reference to an existing pipeline topology defined for
         real-time content processing. When activated, this live pipeline will process content
         according to the pipeline topology definition.
        :paramtype topology_name: str
        :keyword parameters: List of the instance level parameter values for the user-defined
         topology parameters. A pipeline can only define or override parameter values for
         parameters which have been declared in the referenced topology. Topology parameters
         without a default value must be defined. Topology parameters with a default value can
         optionally be overridden.
        :paramtype parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDefinition]
        :keyword state: Current pipeline state (read-only). Possible values include: "inactive",
         "activating", "active", "deactivating".
        :paramtype state: str or ~azure.media.videoanalyzer.edge.models.LivePipelineState
        """
        super(LivePipelineProperties, self).__init__(**kwargs)
        self.description = description
        self.topology_name = topology_name
        self.parameters = parameters
        self.state = state
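# Illustrative sketch (not generated code): live pipeline properties referencing a topology and
# overriding one of its declared parameters. The topology name, parameter name, and URL are
# assumptions for the example.
def _example_live_pipeline_properties():
    return LivePipelineProperties(
        description="Camera 1 motion detection",
        topology_name="MotionDetectionTopology",
        parameters=[
            # Overrides the 'rtspUrl' parameter declared by the referenced topology.
            ParameterDefinition(name="rtspUrl", value="rtsp://camera1.example.com:554/stream"),
        ],
    )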
[docs]class LivePipelineSetRequest(MethodRequest):
    """Creates a new live pipeline or updates an existing one.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
     and "1.1". The default value is "1.1".
    :vartype api_version: str
    :ivar live_pipeline: Required. Live Pipeline represents a unique instance of a pipeline
     topology which is used for real-time content ingestion and analysis.
    :vartype live_pipeline: ~azure.media.videoanalyzer.edge.models.LivePipeline
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'live_pipeline': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'live_pipeline': {'key': 'livePipeline', 'type': 'LivePipeline'},
    }

    def __init__(
        self,
        *,
        live_pipeline: "LivePipeline",
        api_version: Optional[str] = "1.1",
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        :keyword live_pipeline: Required. Live Pipeline represents a unique instance of a
         pipeline topology which is used for real-time content ingestion and analysis.
        :paramtype live_pipeline: ~azure.media.videoanalyzer.edge.models.LivePipeline
        """
        super(LivePipelineSetRequest, self).__init__(api_version=api_version, **kwargs)
        self.method_name = 'livePipelineSet'  # type: str
        self.live_pipeline = live_pipeline
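# Illustrative sketch (not generated code): creating (or updating) a live pipeline via the
# livePipelineSet direct method. LivePipeline is defined earlier in this module; the pipeline and
# topology names are assumptions.
def _example_live_pipeline_set_request():
    live_pipeline = LivePipeline(
        name="pipeline1",
        properties=LivePipelineProperties(
            topology_name="MotionDetectionTopology",
            parameters=[
                ParameterDefinition(name="rtspUrl", value="rtsp://camera1.example.com:554/stream"),
            ],
        ),
    )
    request = LivePipelineSetRequest(live_pipeline=live_pipeline)
    # The serialized payload nests the pipeline under the 'livePipeline' key.
    return request.method_name, request.serialize()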
[docs]class LivePipelineSetRequestBody(LivePipeline, MethodRequest):
    """Live Pipeline represents a unique instance of a pipeline topology which is used for
    real-time content ingestion and analysis.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
     and "1.1". The default value is "1.1".
    :vartype api_version: str
    :ivar name: Required. Live pipeline unique identifier.
    :vartype name: str
    :ivar system_data: Read-only system metadata associated with this object.
    :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData
    :ivar properties: Live pipeline properties.
    :vartype properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'properties': {'key': 'properties', 'type': 'LivePipelineProperties'},
    }

    def __init__(
        self,
        *,
        name: str,
        api_version: Optional[str] = "1.1",
        system_data: Optional["SystemData"] = None,
        properties: Optional["LivePipelineProperties"] = None,
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        :keyword name: Required. Live pipeline unique identifier.
        :paramtype name: str
        :keyword system_data: Read-only system metadata associated with this object.
        :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData
        :keyword properties: Live pipeline properties.
        :paramtype properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties
        """
        super(LivePipelineSetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, api_version=api_version, **kwargs)
        self.method_name = 'LivePipelineSetRequestBody'  # type: str
        self.api_version = api_version
        self.name = name
        self.system_data = system_data
        self.properties = properties
[docs]class MediaProfile(msrest.serialization.Model):
    """Class representing the ONVIF MediaProfiles.

    :ivar name: The name of the Media Profile.
    :vartype name: str
    :ivar media_uri: Object representing the URI that will be used to request media streaming.
    :vartype media_uri: any
    :ivar video_encoder_configuration: The Video encoder configuration.
    :vartype video_encoder_configuration:
     ~azure.media.videoanalyzer.edge.models.VideoEncoderConfiguration
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'media_uri': {'key': 'mediaUri', 'type': 'object'},
        'video_encoder_configuration': {'key': 'videoEncoderConfiguration', 'type': 'VideoEncoderConfiguration'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        media_uri: Optional[Any] = None,
        video_encoder_configuration: Optional["VideoEncoderConfiguration"] = None,
        **kwargs
    ):
        """
        :keyword name: The name of the Media Profile.
        :paramtype name: str
        :keyword media_uri: Object representing the URI that will be used to request media
         streaming.
        :paramtype media_uri: any
        :keyword video_encoder_configuration: The Video encoder configuration.
        :paramtype video_encoder_configuration:
         ~azure.media.videoanalyzer.edge.models.VideoEncoderConfiguration
        """
        super(MediaProfile, self).__init__(**kwargs)
        self.name = name
        self.media_uri = media_uri
        self.video_encoder_configuration = video_encoder_configuration
[docs]class MediaUri(msrest.serialization.Model):
    """Object representing the URI that will be used to request media streaming.

    :ivar uri: URI that can be used for media streaming.
    :vartype uri: str
    """

    _attribute_map = {
        'uri': {'key': 'uri', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        uri: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword uri: URI that can be used for media streaming.
        :paramtype uri: str
        """
        super(MediaUri, self).__init__(**kwargs)
        self.uri = uri
[docs]class MotionDetectionProcessor(ProcessorNodeBase):
    """Motion detection processor allows for motion detection on the video stream. It generates
    motion events whenever motion is present on the video.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar name: Required. Node name. Must be unique within the topology.
    :vartype name: str
    :ivar inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :ivar sensitivity: Motion detection sensitivity: low, medium, high. Possible values include:
     "low", "medium", "high".
    :vartype sensitivity: str or
     ~azure.media.videoanalyzer.edge.models.MotionDetectionSensitivity
    :ivar output_motion_region: Indicates whether the processor should detect and output the
     regions within the video frame where motion was detected. Default is true.
    :vartype output_motion_region: bool
    :ivar event_aggregation_window: Time window duration on which events are aggregated before
     being emitted. Value must be specified in ISO8601 duration format (e.g. "PT2S" equals 2
     seconds). Use 0 seconds for no aggregation. Default is 1 second.
    :vartype event_aggregation_window: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'sensitivity': {'key': 'sensitivity', 'type': 'str'},
        'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'},
        'event_aggregation_window': {'key': 'eventAggregationWindow', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        sensitivity: Optional[Union[str, "MotionDetectionSensitivity"]] = None,
        output_motion_region: Optional[bool] = None,
        event_aggregation_window: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Node name. Must be unique within the topology.
        :paramtype name: str
        :keyword inputs: Required. An array of upstream node references within the topology to be
         used as inputs for this node.
        :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
        :keyword sensitivity: Motion detection sensitivity: low, medium, high. Possible values
         include: "low", "medium", "high".
        :paramtype sensitivity: str or
         ~azure.media.videoanalyzer.edge.models.MotionDetectionSensitivity
        :keyword output_motion_region: Indicates whether the processor should detect and output
         the regions within the video frame where motion was detected. Default is true.
        :paramtype output_motion_region: bool
        :keyword event_aggregation_window: Time window duration on which events are aggregated
         before being emitted. Value must be specified in ISO8601 duration format (e.g. "PT2S"
         equals 2 seconds). Use 0 seconds for no aggregation. Default is 1 second.
        :paramtype event_aggregation_window: str
        """
        super(MotionDetectionProcessor, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.MotionDetectionProcessor'  # type: str
        self.sensitivity = sensitivity
        self.output_motion_region = output_motion_region
        self.event_aggregation_window = event_aggregation_window
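# Illustrative sketch (not generated code): a motion detection processor consuming frames from an
# upstream RTSP source node named 'rtspSource' (the node name is an assumption for the example).
def _example_motion_detection_processor():
    return MotionDetectionProcessor(
        name="motionDetection",
        inputs=[NodeInput(node_name="rtspSource")],
        sensitivity="medium",
        # Aggregate motion events over 1-second windows (ISO8601 duration).
        event_aggregation_window="PT1S",
    )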
[docs]class MPEG4Configuration(msrest.serialization.Model):
    """Class representing the MPEG4 Configuration.

    :ivar gov_length: Group of video frames (GOV) length.
    :vartype gov_length: float
    :ivar profile: The MPEG4 Profile. Possible values include: "SP", "ASP".
    :vartype profile: str or ~azure.media.videoanalyzer.edge.models.MPEG4Profile
    """

    _attribute_map = {
        'gov_length': {'key': 'govLength', 'type': 'float'},
        'profile': {'key': 'profile', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        gov_length: Optional[float] = None,
        profile: Optional[Union[str, "MPEG4Profile"]] = None,
        **kwargs
    ):
        """
        :keyword gov_length: Group of video frames (GOV) length.
        :paramtype gov_length: float
        :keyword profile: The MPEG4 Profile. Possible values include: "SP", "ASP".
        :paramtype profile: str or ~azure.media.videoanalyzer.edge.models.MPEG4Profile
        """
        super(MPEG4Configuration, self).__init__(**kwargs)
        self.gov_length = gov_length
        self.profile = profile
[docs]class NamedLineBase(msrest.serialization.Model):
    """Base class for named lines.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    NamedLineString.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. The type discriminator for the derived types. Constant filled by
     server.
    :vartype type: str
    :ivar name: Required. Line name. Must be unique within the node.
    :vartype name: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.NamedLineString': 'NamedLineString'}
    }

    def __init__(
        self,
        *,
        name: str,
        **kwargs
    ):
        """
        :keyword name: Required. Line name. Must be unique within the node.
        :paramtype name: str
        """
        super(NamedLineBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        self.name = name
[docs]class NamedLineString(NamedLineBase):
    """Describes a line configuration.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. The type discriminator for the derived types. Constant filled by
     server.
    :vartype type: str
    :ivar name: Required. Line name. Must be unique within the node.
    :vartype name: str
    :ivar line: Required. Point coordinates for the line start and end, respectively. Example:
     '[[0.3, 0.2],[0.9, 0.8]]'. Each point is expressed as [LEFT, TOP] coordinate ratios ranging
     from 0.0 to 1.0, where [0,0] is the upper-left frame corner and [1, 1] is the bottom-right
     frame corner.
    :vartype line: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'line': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'line': {'key': 'line', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        line: str,
        **kwargs
    ):
        """
        :keyword name: Required. Line name. Must be unique within the node.
        :paramtype name: str
        :keyword line: Required. Point coordinates for the line start and end, respectively.
         Example: '[[0.3, 0.2],[0.9, 0.8]]'. Each point is expressed as [LEFT, TOP] coordinate
         ratios ranging from 0.0 to 1.0, where [0,0] is the upper-left frame corner and [1, 1] is
         the bottom-right frame corner.
        :paramtype line: str
        """
        super(NamedLineString, self).__init__(name=name, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.NamedLineString'  # type: str
        self.line = line
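# Illustrative sketch (not generated code): a named line fed to a LineCrossingProcessor (one of
# the processor subtypes declared in this module). Coordinates are [LEFT, TOP] ratios; the node
# names, and the assumption that LineCrossingProcessor takes a 'lines' parameter of named lines,
# should be checked against your version of the model.
def _example_line_crossing():
    door_line = NamedLineString(name="doorLine", line='[[0.3, 0.2],[0.9, 0.8]]')
    return LineCrossingProcessor(
        name="lineCrossing",
        inputs=[NodeInput(node_name="objectTracker")],
        lines=[door_line],
    )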
[docs]class NamedPolygonBase(msrest.serialization.Model):
    """Describes the named polygon.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    NamedPolygonString.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. The type discriminator for the derived types. Constant filled by
     server.
    :vartype type: str
    :ivar name: Required. Polygon name. Must be unique within the node.
    :vartype name: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.NamedPolygonString': 'NamedPolygonString'}
    }

    def __init__(
        self,
        *,
        name: str,
        **kwargs
    ):
        """
        :keyword name: Required. Polygon name. Must be unique within the node.
        :paramtype name: str
        """
        super(NamedPolygonBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        self.name = name
[docs]class NamedPolygonString(NamedPolygonBase):
    """Describes a closed polygon configuration.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. The type discriminator for the derived types. Constant filled by
     server.
    :vartype type: str
    :ivar name: Required. Polygon name. Must be unique within the node.
    :vartype name: str
    :ivar polygon: Required. Point coordinates for the polygon. Example: '[[0.3, 0.2],[0.9,
     0.8],[0.7, 0.6]]'. Each point is expressed as [LEFT, TOP] coordinate ratios ranging from 0.0
     to 1.0, where [0,0] is the upper-left frame corner and [1, 1] is the bottom-right frame
     corner.
    :vartype polygon: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'polygon': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'polygon': {'key': 'polygon', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        polygon: str,
        **kwargs
    ):
        """
        :keyword name: Required. Polygon name. Must be unique within the node.
        :paramtype name: str
        :keyword polygon: Required. Point coordinates for the polygon. Example: '[[0.3, 0.2],[0.9,
         0.8],[0.7, 0.6]]'. Each point is expressed as [LEFT, TOP] coordinate ratios ranging from
         0.0 to 1.0, where [0,0] is the upper-left frame corner and [1, 1] is the bottom-right
         frame corner.
        :paramtype polygon: str
        """
        super(NamedPolygonString, self).__init__(name=name, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.NamedPolygonString'  # type: str
        self.polygon = polygon
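# Illustrative sketch (not generated code): a closed polygon covering roughly the left half of
# the frame, expressed as [LEFT, TOP] coordinate ratios. The zone name is an assumption.
def _example_named_polygon():
    return NamedPolygonString(
        name="entranceZone",
        polygon='[[0.0, 0.0],[0.5, 0.0],[0.5, 1.0],[0.0, 1.0]]',
    )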
[docs]class NodeInput(msrest.serialization.Model):
    """Describes an input signal to be used on a pipeline node.

    All required parameters must be populated in order to send to Azure.

    :ivar node_name: Required. The name of the upstream node in the pipeline whose output is used
     as input to the current node.
    :vartype node_name: str
    :ivar output_selectors: Allows for the selection of specific data streams (e.g. video only)
     from another node.
    :vartype output_selectors: list[~azure.media.videoanalyzer.edge.models.OutputSelector]
    """

    _validation = {
        'node_name': {'required': True},
    }

    _attribute_map = {
        'node_name': {'key': 'nodeName', 'type': 'str'},
        'output_selectors': {'key': 'outputSelectors', 'type': '[OutputSelector]'},
    }

    def __init__(
        self,
        *,
        node_name: str,
        output_selectors: Optional[List["OutputSelector"]] = None,
        **kwargs
    ):
        """
        :keyword node_name: Required. The name of the upstream node in the pipeline whose output
         is used as input to the current node.
        :paramtype node_name: str
        :keyword output_selectors: Allows for the selection of specific data streams (e.g. video
         only) from another node.
        :paramtype output_selectors: list[~azure.media.videoanalyzer.edge.models.OutputSelector]
        """
        super(NodeInput, self).__init__(**kwargs)
        self.node_name = node_name
        self.output_selectors = output_selectors
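# Illustrative sketch (not generated code): referencing an upstream node and selecting only its
# video stream. OutputSelector is defined later in this module; the node name is an assumption.
def _example_video_only_input():
    return NodeInput(
        node_name="rtspSource",
        output_selectors=[
            OutputSelector(property="mediaType", operator="is", value="video"),
        ],
    )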
[docs]class ObjectTrackingProcessor(ProcessorNodeBase):
    """Object tracker processor allows for continuous tracking of one or more objects over a
    finite sequence of video frames. It must be used downstream of an object detector extension
    node, thus allowing for the extension to be configured to perform inferences on sparse frames
    through the use of the 'maximumSamplesPerSecond' sampling property. The object tracker node
    will then track the detected objects over the frames in which the detector is not invoked,
    resulting in smoother tracking of detected objects across the continuum of video frames. The
    tracker will stop tracking objects which are not subsequently detected by the upstream
    detector.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar name: Required. Node name. Must be unique within the topology.
    :vartype name: str
    :ivar inputs: Required. An array of upstream node references within the topology to be used
     as inputs for this node.
    :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :ivar accuracy: Object tracker accuracy: low, medium, high. Higher accuracy leads to higher
     CPU consumption on average. Possible values include: "low", "medium", "high".
    :vartype accuracy: str or ~azure.media.videoanalyzer.edge.models.ObjectTrackingAccuracy
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'accuracy': {'key': 'accuracy', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        accuracy: Optional[Union[str, "ObjectTrackingAccuracy"]] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Node name. Must be unique within the topology.
        :paramtype name: str
        :keyword inputs: Required. An array of upstream node references within the topology to be
         used as inputs for this node.
        :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
        :keyword accuracy: Object tracker accuracy: low, medium, high. Higher accuracy leads to
         higher CPU consumption on average. Possible values include: "low", "medium", "high".
        :paramtype accuracy: str or
         ~azure.media.videoanalyzer.edge.models.ObjectTrackingAccuracy
        """
        super(ObjectTrackingProcessor, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.ObjectTrackingProcessor'  # type: str
        self.accuracy = accuracy
[docs]class OnvifDevice(msrest.serialization.Model):
    """The ONVIF device properties.

    :ivar hostname: The hostname of the ONVIF device.
    :vartype hostname: ~azure.media.videoanalyzer.edge.models.OnvifHostName
    :ivar system_date_time: The system date and time of the ONVIF device.
    :vartype system_date_time: ~azure.media.videoanalyzer.edge.models.OnvifSystemDateTime
    :ivar dns: The ONVIF device DNS properties.
    :vartype dns: ~azure.media.videoanalyzer.edge.models.OnvifDns
    :ivar media_profiles: An array of ONVIF media profiles supported by the ONVIF device.
    :vartype media_profiles: list[~azure.media.videoanalyzer.edge.models.MediaProfile]
    """

    _attribute_map = {
        'hostname': {'key': 'hostname', 'type': 'OnvifHostName'},
        'system_date_time': {'key': 'systemDateTime', 'type': 'OnvifSystemDateTime'},
        'dns': {'key': 'dns', 'type': 'OnvifDns'},
        'media_profiles': {'key': 'mediaProfiles', 'type': '[MediaProfile]'},
    }

    def __init__(
        self,
        *,
        hostname: Optional["OnvifHostName"] = None,
        system_date_time: Optional["OnvifSystemDateTime"] = None,
        dns: Optional["OnvifDns"] = None,
        media_profiles: Optional[List["MediaProfile"]] = None,
        **kwargs
    ):
        """
        :keyword hostname: The hostname of the ONVIF device.
        :paramtype hostname: ~azure.media.videoanalyzer.edge.models.OnvifHostName
        :keyword system_date_time: The system date and time of the ONVIF device.
        :paramtype system_date_time: ~azure.media.videoanalyzer.edge.models.OnvifSystemDateTime
        :keyword dns: The ONVIF device DNS properties.
        :paramtype dns: ~azure.media.videoanalyzer.edge.models.OnvifDns
        :keyword media_profiles: An array of ONVIF media profiles supported by the ONVIF device.
        :paramtype media_profiles: list[~azure.media.videoanalyzer.edge.models.MediaProfile]
        """
        super(OnvifDevice, self).__init__(**kwargs)
        self.hostname = hostname
        self.system_date_time = system_date_time
        self.dns = dns
        self.media_profiles = media_profiles
[docs]class OnvifDeviceDiscoverRequest(MethodRequest):
    """Lists all the discoverable ONVIF devices on the same subnet as the Edge Module.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
     and "1.1". The default value is "1.1".
    :vartype api_version: str
    :ivar discovery_duration: The amount of time that the ONVIF device discovery will wait for
     supported device responses.
    :vartype discovery_duration: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'discovery_duration': {'key': 'discoveryDuration', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        api_version: Optional[str] = "1.1",
        discovery_duration: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        :keyword discovery_duration: The amount of time that the ONVIF device discovery will wait
         for supported device responses.
        :paramtype discovery_duration: str
        """
        super(OnvifDeviceDiscoverRequest, self).__init__(api_version=api_version, **kwargs)
        self.method_name = 'onvifDeviceDiscover'  # type: str
        self.discovery_duration = discovery_duration
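# Illustrative sketch (not generated code): asking the edge module to discover ONVIF devices on
# its subnet, waiting up to 10 seconds (ISO8601 duration) for responses.
def _example_onvif_discover_request():
    request = OnvifDeviceDiscoverRequest(discovery_duration="PT10S")
    return request.method_name, request.serialize()  # ('onvifDeviceDiscover', {...})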
[docs]class OnvifDeviceGetRequest(MethodRequest):
    """Retrieves properties and media profiles of an ONVIF device.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
     and "1.1". The default value is "1.1".
    :vartype api_version: str
    :ivar endpoint: Required. The endpoint of the ONVIF device to be queried.
    :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'endpoint': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
    }

    def __init__(
        self,
        *,
        endpoint: "EndpointBase",
        api_version: Optional[str] = "1.1",
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        :keyword endpoint: Required. The endpoint of the ONVIF device to be queried.
        :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
        """
        super(OnvifDeviceGetRequest, self).__init__(api_version=api_version, **kwargs)
        self.method_name = 'onvifDeviceGet'  # type: str
        self.endpoint = endpoint
[docs]class OnvifDns(msrest.serialization.Model):
    """The ONVIF device DNS properties.

    :ivar from_dhcp: Result value showing if the ONVIF device is configured to use DHCP.
    :vartype from_dhcp: bool
    :ivar ipv4_address: An array of IPv4 addresses for the discovered ONVIF device.
    :vartype ipv4_address: list[str]
    :ivar ipv6_address: An array of IPv6 addresses for the discovered ONVIF device.
    :vartype ipv6_address: list[str]
    """

    _attribute_map = {
        'from_dhcp': {'key': 'fromDhcp', 'type': 'bool'},
        'ipv4_address': {'key': 'ipv4Address', 'type': '[str]'},
        'ipv6_address': {'key': 'ipv6Address', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        from_dhcp: Optional[bool] = None,
        ipv4_address: Optional[List[str]] = None,
        ipv6_address: Optional[List[str]] = None,
        **kwargs
    ):
        """
        :keyword from_dhcp: Result value showing if the ONVIF device is configured to use DHCP.
        :paramtype from_dhcp: bool
        :keyword ipv4_address: An array of IPv4 addresses for the discovered ONVIF device.
        :paramtype ipv4_address: list[str]
        :keyword ipv6_address: An array of IPv6 addresses for the discovered ONVIF device.
        :paramtype ipv6_address: list[str]
        """
        super(OnvifDns, self).__init__(**kwargs)
        self.from_dhcp = from_dhcp
        self.ipv4_address = ipv4_address
        self.ipv6_address = ipv6_address
[docs]class OnvifHostName(msrest.serialization.Model):
    """The ONVIF device hostname properties.

    :ivar from_dhcp: Result value showing if the ONVIF device is configured to use DHCP.
    :vartype from_dhcp: bool
    :ivar hostname: The hostname of the ONVIF device.
    :vartype hostname: str
    """

    _attribute_map = {
        'from_dhcp': {'key': 'fromDhcp', 'type': 'bool'},
        'hostname': {'key': 'hostname', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        from_dhcp: Optional[bool] = None,
        hostname: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword from_dhcp: Result value showing if the ONVIF device is configured to use DHCP.
        :paramtype from_dhcp: bool
        :keyword hostname: The hostname of the ONVIF device.
        :paramtype hostname: str
        """
        super(OnvifHostName, self).__init__(**kwargs)
        self.from_dhcp = from_dhcp
        self.hostname = hostname
[docs]class OnvifSystemDateTime(msrest.serialization.Model):
    """The ONVIF device system date and time properties.

    :ivar type: An enum value determining whether the date time was configured using NTP or
     manually. Possible values include: "Ntp", "Manual".
    :vartype type: str or ~azure.media.videoanalyzer.edge.models.OnvifSystemDateTimeType
    :ivar time: The device datetime returned when calling the request.
    :vartype time: str
    :ivar time_zone: The timezone of the ONVIF device datetime.
    :vartype time_zone: str
    """

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'time': {'key': 'time', 'type': 'str'},
        'time_zone': {'key': 'timeZone', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        type: Optional[Union[str, "OnvifSystemDateTimeType"]] = None,
        time: Optional[str] = None,
        time_zone: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword type: An enum value determining whether the date time was configured using NTP
         or manually. Possible values include: "Ntp", "Manual".
        :paramtype type: str or ~azure.media.videoanalyzer.edge.models.OnvifSystemDateTimeType
        :keyword time: The device datetime returned when calling the request.
        :paramtype time: str
        :keyword time_zone: The timezone of the ONVIF device datetime.
        :paramtype time_zone: str
        """
        super(OnvifSystemDateTime, self).__init__(**kwargs)
        self.type = type
        self.time = time
        self.time_zone = time_zone
[docs]class OutputSelector(msrest.serialization.Model):
    """Allows for the selection of particular streams from another node.

    :ivar property: The property of the data stream to be used as the selection criteria.
     Possible values include: "mediaType".
    :vartype property: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorProperty
    :ivar operator: The operator to compare properties by. Possible values include: "is",
     "isNot".
    :vartype operator: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorOperator
    :ivar value: Value to compare against.
    :vartype value: str
    """

    _attribute_map = {
        'property': {'key': 'property', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        property: Optional[Union[str, "OutputSelectorProperty"]] = None,
        operator: Optional[Union[str, "OutputSelectorOperator"]] = None,
        value: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword property: The property of the data stream to be used as the selection criteria.
         Possible values include: "mediaType".
        :paramtype property: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorProperty
        :keyword operator: The operator to compare properties by. Possible values include: "is",
         "isNot".
        :paramtype operator: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorOperator
        :keyword value: Value to compare against.
        :paramtype value: str
        """
        super(OutputSelector, self).__init__(**kwargs)
        self.property = property
        self.operator = operator
        self.value = value
[docs]class ParameterDeclaration(msrest.serialization.Model):
    """Single topology parameter declaration. Declared parameters can be referenced throughout
    the topology and can optionally have default values to be used when they are not defined in
    the pipeline instances.

    All required parameters must be populated in order to send to Azure.

    :ivar name: Required. Name of the parameter.
    :vartype name: str
    :ivar type: Required. Type of the parameter. Possible values include: "string",
     "secretString", "int", "double", "bool".
    :vartype type: str or ~azure.media.videoanalyzer.edge.models.ParameterType
    :ivar description: Description of the parameter.
    :vartype description: str
    :ivar default: The default value for the parameter to be used if the live pipeline does not
     specify a value.
    :vartype default: str
    """

    _validation = {
        'name': {'required': True, 'max_length': 64, 'min_length': 0},
        'type': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'default': {'key': 'default', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        type: Union[str, "ParameterType"],
        description: Optional[str] = None,
        default: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Name of the parameter.
        :paramtype name: str
        :keyword type: Required. Type of the parameter. Possible values include: "string",
         "secretString", "int", "double", "bool".
        :paramtype type: str or ~azure.media.videoanalyzer.edge.models.ParameterType
        :keyword description: Description of the parameter.
        :paramtype description: str
        :keyword default: The default value for the parameter to be used if the live pipeline
         does not specify a value.
        :paramtype default: str
        """
        super(ParameterDeclaration, self).__init__(**kwargs)
        self.name = name
        self.type = type
        self.description = description
        self.default = default
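# Illustrative sketch (not generated code): declaring a topology parameter and referencing it
# with the "${PARAMETER_NAME}" pattern inside a node property. UnsecuredEndpoint is another model
# in this module; its use here as the source endpoint type is an assumption for the example.
def _example_parameterized_endpoint():
    declaration = ParameterDeclaration(
        name="rtspUrl",
        type="string",
        description="RTSP URL of the camera",
    )
    # "${rtspUrl}" is resolved per live pipeline when the pipeline is activated.
    endpoint = UnsecuredEndpoint(url="${rtspUrl}")
    return declaration, endpoint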
[docs]class ParameterDefinition(msrest.serialization.Model):
    """Defines the parameter value of a specific pipeline topology parameter. See pipeline
    topology parameters for more information.

    All required parameters must be populated in order to send to Azure.

    :ivar name: Required. Name of the parameter declared in the pipeline topology.
    :vartype name: str
    :ivar value: Parameter value to be applied on this specific live pipeline.
    :vartype value: str
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        value: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Name of the parameter declared in the pipeline topology.
        :paramtype name: str
        :keyword value: Parameter value to be applied on this specific live pipeline.
        :paramtype value: str
        """
        super(ParameterDefinition, self).__init__(**kwargs)
        self.name = name
        self.value = value
[docs]class PemCertificateList(CertificateSource):
    """A list of PEM formatted certificates.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar certificates: Required. PEM formatted public certificates. One certificate per entry.
    :vartype certificates: list[str]
    """

    _validation = {
        'type': {'required': True},
        'certificates': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'certificates': {'key': 'certificates', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        certificates: List[str],
        **kwargs
    ):
        """
        :keyword certificates: Required. PEM formatted public certificates. One certificate per
         entry.
        :paramtype certificates: list[str]
        """
        super(PemCertificateList, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.PemCertificateList'  # type: str
        self.certificates = certificates
[docs]class PipelineTopology(msrest.serialization.Model):
    """Pipeline topology describes the processing steps to be applied when processing media for a
    particular outcome. The topology should be defined according to the scenario to be achieved
    and can be reused across many pipeline instances which share the same processing
    characteristics. For instance, a pipeline topology which acquires data from an RTSP camera,
    processes it with a specific AI model and stores the data in the cloud can be reused across
    many different cameras, as long as the same processing is to be applied across all the
    cameras. Individual instance properties can be defined through the use of user-defined
    parameters, which allow for a topology to be parameterized, thus allowing individual
    pipelines to refer to different values, such as individual cameras' RTSP endpoints and
    credentials. Overall a topology is composed of the following:


    * Parameters: list of user defined parameters that can be referenced across the topology
      nodes.
    * Sources: list of one or more data source nodes such as an RTSP source which allows for
      media to be ingested from cameras.
    * Processors: list of nodes which perform data analysis or transformations.
    * Sinks: list of one or more data sinks which allow for data to be stored or exported to
      other destinations.

    All required parameters must be populated in order to send to Azure.

    :ivar name: Required. Pipeline topology unique identifier.
    :vartype name: str
    :ivar system_data: Read-only system metadata associated with this object.
    :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData
    :ivar properties: Pipeline topology properties.
    :vartype properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'properties': {'key': 'properties', 'type': 'PipelineTopologyProperties'},
    }

    def __init__(
        self,
        *,
        name: str,
        system_data: Optional["SystemData"] = None,
        properties: Optional["PipelineTopologyProperties"] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Pipeline topology unique identifier.
        :paramtype name: str
        :keyword system_data: Read-only system metadata associated with this object.
        :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData
        :keyword properties: Pipeline topology properties.
        :paramtype properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties
        """
        super(PipelineTopology, self).__init__(**kwargs)
        self.name = name
        self.system_data = system_data
        self.properties = properties
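# Illustrative sketch (not generated code): a minimal topology that records video from a
# parameterized RTSP source. RtspSource, UnsecuredEndpoint, and VideoSink are other models in
# this module; the specific sink settings shown are assumptions for the example.
def _example_pipeline_topology():
    return PipelineTopology(
        name="RecordingTopology",
        properties=PipelineTopologyProperties(
            description="Continuous recording from an RTSP camera",
            parameters=[
                ParameterDeclaration(name="rtspUrl", type="string"),
            ],
            sources=[
                RtspSource(name="rtspSource", endpoint=UnsecuredEndpoint(url="${rtspUrl}")),
            ],
            sinks=[
                VideoSink(
                    name="videoSink",
                    inputs=[NodeInput(node_name="rtspSource")],
                    video_name="camera-recording",
                    local_media_cache_path="/var/lib/videoanalyzer/tmp/",
                    local_media_cache_maximum_size_mi_b="1024",
                ),
            ],
        ),
    )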
[docs]class PipelineTopologyCollection(msrest.serialization.Model):
    """A collection of pipeline topologies.

    :ivar value: List of pipeline topologies.
    :vartype value: list[~azure.media.videoanalyzer.edge.models.PipelineTopology]
    :ivar continuation_token: A continuation token to be used in subsequent calls when
     enumerating through the collection. This is returned when the collection results won't fit
     in a single response.
    :vartype continuation_token: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PipelineTopology]'},
        'continuation_token': {'key': '@continuationToken', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["PipelineTopology"]] = None,
        continuation_token: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword value: List of pipeline topologies.
        :paramtype value: list[~azure.media.videoanalyzer.edge.models.PipelineTopology]
        :keyword continuation_token: A continuation token to be used in subsequent calls when
         enumerating through the collection. This is returned when the collection results won't
         fit in a single response.
        :paramtype continuation_token: str
        """
        super(PipelineTopologyCollection, self).__init__(**kwargs)
        self.value = value
        self.continuation_token = continuation_token
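# Illustrative sketch (not generated code): deserializing a pipelineTopologyList direct method
# response payload into the collection model, using msrest's Model.deserialize.
def _example_parse_topology_list(response_payload):
    collection = PipelineTopologyCollection.deserialize(response_payload)
    names = [topology.name for topology in collection.value or []]
    # A non-None continuation_token means another call is needed to enumerate the rest.
    return names, collection.continuation_token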
[docs]class PipelineTopologyDeleteRequest(MethodRequestEmptyBodyBase):
    """Deletes an existing pipeline topology.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
     and "1.1". The default value is "1.1".
    :vartype api_version: str
    :ivar name: Required. Resource name.
    :vartype name: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        api_version: Optional[str] = "1.1",
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        :keyword name: Required. Resource name.
        :paramtype name: str
        """
        super(PipelineTopologyDeleteRequest, self).__init__(api_version=api_version, name=name, **kwargs)
        self.method_name = 'pipelineTopologyDelete'  # type: str
[docs]class PipelineTopologyGetRequest(MethodRequestEmptyBodyBase):
    """Retrieves an existing pipeline topology.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
     and "1.1". The default value is "1.1".
    :vartype api_version: str
    :ivar name: Required. Resource name.
    :vartype name: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        api_version: Optional[str] = "1.1",
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        :keyword name: Required. Resource name.
        :paramtype name: str
        """
        super(PipelineTopologyGetRequest, self).__init__(api_version=api_version, name=name, **kwargs)
        self.method_name = 'pipelineTopologyGet'  # type: str
[docs]class PipelineTopologyListRequest(MethodRequest):
    """Lists all existing pipeline topologies.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
     and "1.1". The default value is "1.1".
    :vartype api_version: str
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        api_version: Optional[str] = "1.1",
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        """
        super(PipelineTopologyListRequest, self).__init__(api_version=api_version, **kwargs)
        self.method_name = 'pipelineTopologyList'  # type: str
[docs]class PipelineTopologyProperties(msrest.serialization.Model):
    """Pipeline topology properties.

    :ivar description: An optional description of the pipeline topology. It is recommended that
     the expected use of the topology be described here.
    :vartype description: str
    :ivar parameters: List of the topology parameter declarations. Parameters declared here can
     be referenced throughout the topology nodes through the use of the "${PARAMETER_NAME}"
     string pattern. Parameters can have optional default values and can later be defined in
     individual instances of the pipeline.
    :vartype parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDeclaration]
    :ivar sources: List of the topology source nodes. Source nodes enable external data to be
     ingested by the pipeline.
    :vartype sources: list[~azure.media.videoanalyzer.edge.models.SourceNodeBase]
    :ivar processors: List of the topology processor nodes. Processor nodes enable pipeline data
     to be analyzed, processed or transformed.
    :vartype processors: list[~azure.media.videoanalyzer.edge.models.ProcessorNodeBase]
    :ivar sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or
     exported.
    :vartype sinks: list[~azure.media.videoanalyzer.edge.models.SinkNodeBase]
    """

    _attribute_map = {
        'description': {'key': 'description', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': '[ParameterDeclaration]'},
        'sources': {'key': 'sources', 'type': '[SourceNodeBase]'},
        'processors': {'key': 'processors', 'type': '[ProcessorNodeBase]'},
        'sinks': {'key': 'sinks', 'type': '[SinkNodeBase]'},
    }

    def __init__(
        self,
        *,
        description: Optional[str] = None,
        parameters: Optional[List["ParameterDeclaration"]] = None,
        sources: Optional[List["SourceNodeBase"]] = None,
        processors: Optional[List["ProcessorNodeBase"]] = None,
        sinks: Optional[List["SinkNodeBase"]] = None,
        **kwargs
    ):
        """
        :keyword description: An optional description of the pipeline topology. It is recommended
         that the expected use of the topology be described here.
        :paramtype description: str
        :keyword parameters: List of the topology parameter declarations. Parameters declared
         here can be referenced throughout the topology nodes through the use of the
         "${PARAMETER_NAME}" string pattern. Parameters can have optional default values and can
         later be defined in individual instances of the pipeline.
        :paramtype parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDeclaration]
        :keyword sources: List of the topology source nodes. Source nodes enable external data to
         be ingested by the pipeline.
        :paramtype sources: list[~azure.media.videoanalyzer.edge.models.SourceNodeBase]
        :keyword processors: List of the topology processor nodes. Processor nodes enable
         pipeline data to be analyzed, processed or transformed.
        :paramtype processors: list[~azure.media.videoanalyzer.edge.models.ProcessorNodeBase]
        :keyword sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be
         stored or exported.
        :paramtype sinks: list[~azure.media.videoanalyzer.edge.models.SinkNodeBase]
        """
        super(PipelineTopologyProperties, self).__init__(**kwargs)
        self.description = description
        self.parameters = parameters
        self.sources = sources
        self.processors = processors
        self.sinks = sinks
[docs]class PipelineTopologySetRequest(MethodRequest):
    """Creates a new pipeline topology or updates an existing one.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
     and "1.1". The default value is "1.1".
    :vartype api_version: str
    :ivar pipeline_topology: Required. Pipeline topology describes the processing steps to be
     applied when processing media for a particular outcome. The topology should be defined
     according to the scenario to be achieved and can be reused across many pipeline instances
     which share the same processing characteristics. For instance, a pipeline topology which
     acquires data from an RTSP camera, processes it with a specific AI model and stores the data
     in the cloud can be reused across many different cameras, as long as the same processing is
     to be applied across all the cameras. Individual instance properties can be defined through
     the use of user-defined parameters, which allow for a topology to be parameterized, thus
     allowing individual pipelines to refer to different values, such as individual cameras' RTSP
     endpoints and credentials. Overall a topology is composed of the following:


     * Parameters: list of user defined parameters that can be referenced across the topology
       nodes.
     * Sources: list of one or more data source nodes such as an RTSP source which allows for
       media to be ingested from cameras.
     * Processors: list of nodes which perform data analysis or transformations.
     * Sinks: list of one or more data sinks which allow for data to be stored or exported to
       other destinations.
    :vartype pipeline_topology: ~azure.media.videoanalyzer.edge.models.PipelineTopology
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'pipeline_topology': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'pipeline_topology': {'key': 'pipelineTopology', 'type': 'PipelineTopology'},
    }

    def __init__(
        self,
        *,
        pipeline_topology: "PipelineTopology",
        api_version: Optional[str] = "1.1",
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        :keyword pipeline_topology: Required. Pipeline topology describes the processing steps to
         be applied when processing media for a particular outcome. The topology should be
         defined according to the scenario to be achieved and can be reused across many pipeline
         instances which share the same processing characteristics. For instance, a pipeline
         topology which acquires data from an RTSP camera, processes it with a specific AI model
         and stores the data in the cloud can be reused across many different cameras, as long as
         the same processing is to be applied across all the cameras. Individual instance
         properties can be defined through the use of user-defined parameters, which allow for a
         topology to be parameterized, thus allowing individual pipelines to refer to different
         values, such as individual cameras' RTSP endpoints and credentials. Overall a topology
         is composed of the following:


         * Parameters: list of user defined parameters that can be referenced across the topology
           nodes.
         * Sources: list of one or more data source nodes such as an RTSP source which allows for
           media to be ingested from cameras.
         * Processors: list of nodes which perform data analysis or transformations.
         * Sinks: list of one or more data sinks which allow for data to be stored or exported to
           other destinations.
        :paramtype pipeline_topology: ~azure.media.videoanalyzer.edge.models.PipelineTopology
        """
        super(PipelineTopologySetRequest, self).__init__(api_version=api_version, **kwargs)
        self.method_name = 'pipelineTopologySet'  # type: str
        self.pipeline_topology = pipeline_topology
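# Illustrative sketch (not generated code): wrapping a topology in a pipelineTopologySet direct
# method request; _example_pipeline_topology is the sketch defined earlier in this file.
def _example_pipeline_topology_set_request():
    request = PipelineTopologySetRequest(pipeline_topology=_example_pipeline_topology())
    # The serialized payload nests the topology under the 'pipelineTopology' key.
    return request.method_name, request.serialize()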
[docs]class PipelineTopologySetRequestBody(PipelineTopology, MethodRequest):
    """Pipeline topology describes the processing steps to be applied when processing media for a
    particular outcome. The topology should be defined according to the scenario to be achieved
    and can be reused across many pipeline instances which share the same processing
    characteristics. For instance, a pipeline topology which acquires data from an RTSP camera,
    processes it with a specific AI model and stores the data in the cloud can be reused across
    many different cameras, as long as the same processing is to be applied across all the
    cameras. Individual instance properties can be defined through the use of user-defined
    parameters, which allow for a topology to be parameterized, thus allowing individual
    pipelines to refer to different values, such as individual cameras' RTSP endpoints and
    credentials. Overall a topology is composed of the following:


    * Parameters: list of user defined parameters that can be referenced across the topology
      nodes.
    * Sources: list of one or more data source nodes such as an RTSP source which allows for
      media to be ingested from cameras.
    * Processors: list of nodes which perform data analysis or transformations.
    * Sinks: list of one or more data sinks which allow for data to be stored or exported to
      other destinations.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: Required. Direct method name. Constant filled by server.
    :vartype method_name: str
    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
     and "1.1". The default value is "1.1".
    :vartype api_version: str
    :ivar name: Required. Pipeline topology unique identifier.
    :vartype name: str
    :ivar system_data: Read-only system metadata associated with this object.
    :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData
    :ivar properties: Pipeline topology properties.
    :vartype properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties
    """

    _validation = {
        'method_name': {'required': True, 'readonly': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'properties': {'key': 'properties', 'type': 'PipelineTopologyProperties'},
    }

    def __init__(
        self,
        *,
        name: str,
        api_version: Optional[str] = "1.1",
        system_data: Optional["SystemData"] = None,
        properties: Optional["PipelineTopologyProperties"] = None,
        **kwargs
    ):
        """
        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in
         are None and "1.1". The default value is "1.1".
        :paramtype api_version: str
        :keyword name: Required. Pipeline topology unique identifier.
        :paramtype name: str
        :keyword system_data: Read-only system metadata associated with this object.
        :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData
        :keyword properties: Pipeline topology properties.
        :paramtype properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties
        """
        super(PipelineTopologySetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, api_version=api_version, **kwargs)
        self.method_name = 'PipelineTopologySetRequestBody'  # type: str
        self.api_version = api_version
        self.name = name
        self.system_data = system_data
        self.properties = properties
[docs]class RateControl(msrest.serialization.Model):
    """Class representing the video's rate control.

    :ivar bit_rate_limit: The maximum output bitrate in kbps.
    :vartype bit_rate_limit: float
    :ivar encoding_interval: Interval at which images are encoded and transmitted.
    :vartype encoding_interval: float
    :ivar frame_rate_limit: Maximum output framerate in fps.
    :vartype frame_rate_limit: float
    :ivar guaranteed_frame_rate: A value of true indicates that frame rate is a fixed value
     rather than an upper limit, and that the video encoder shall prioritize frame rate over all
     other adaptable configuration values such as bitrate.
    :vartype guaranteed_frame_rate: bool
    """

    _attribute_map = {
        'bit_rate_limit': {'key': 'bitRateLimit', 'type': 'float'},
        'encoding_interval': {'key': 'encodingInterval', 'type': 'float'},
        'frame_rate_limit': {'key': 'frameRateLimit', 'type': 'float'},
        'guaranteed_frame_rate': {'key': 'guaranteedFrameRate', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        bit_rate_limit: Optional[float] = None,
        encoding_interval: Optional[float] = None,
        frame_rate_limit: Optional[float] = None,
        guaranteed_frame_rate: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword bit_rate_limit: The maximum output bitrate in kbps.
        :paramtype bit_rate_limit: float
        :keyword encoding_interval: Interval at which images are encoded and transmitted.
        :paramtype encoding_interval: float
        :keyword frame_rate_limit: Maximum output framerate in fps.
        :paramtype frame_rate_limit: float
        :keyword guaranteed_frame_rate: A value of true indicates that frame rate is a fixed
         value rather than an upper limit, and that the video encoder shall prioritize frame rate
         over all other adaptable configuration values such as bitrate.
        :paramtype guaranteed_frame_rate: bool
        """
        super(RateControl, self).__init__(**kwargs)
        self.bit_rate_limit = bit_rate_limit
        self.encoding_interval = encoding_interval
        self.frame_rate_limit = frame_rate_limit
        self.guaranteed_frame_rate = guaranteed_frame_rate
[docs]class RemoteDeviceAdapter(msrest.serialization.Model): """The Video Analyzer edge module can act as a transparent gateway for video, enabling IoT devices to send video to the cloud from behind a firewall. A remote device adapter should be created for each such IoT device. Communication between the cloud and IoT device would then flow via the Video Analyzer edge module. All required parameters must be populated in order to send to Azure. :ivar name: Required. The unique identifier for the remote device adapter. :vartype name: str :ivar system_data: Read-only system metadata associated with this object. :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData :ivar properties: Properties of the remote device adapter. :vartype properties: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterProperties """ _validation = { 'name': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'properties': {'key': 'properties', 'type': 'RemoteDeviceAdapterProperties'}, } def __init__( self, *, name: str, system_data: Optional["SystemData"] = None, properties: Optional["RemoteDeviceAdapterProperties"] = None, **kwargs ): """ :keyword name: Required. The unique identifier for the remote device adapter. :paramtype name: str :keyword system_data: Read-only system metadata associated with this object. :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData :keyword properties: Properties of the remote device adapter. :paramtype properties: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterProperties """ super(RemoteDeviceAdapter, self).__init__(**kwargs) self.name = name self.system_data = system_data self.properties = properties
[docs]class RemoteDeviceAdapterCollection(msrest.serialization.Model): """A list of remote device adapters. :ivar value: An array of remote device adapters. :vartype value: list[~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapter] :ivar continuation_token: A continuation token to use in subsequent calls to enumerate through the remote device adapter collection. This is used when the collection contains too many results to return in one response. :vartype continuation_token: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[RemoteDeviceAdapter]'}, 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, } def __init__( self, *, value: Optional[List["RemoteDeviceAdapter"]] = None, continuation_token: Optional[str] = None, **kwargs ): """ :keyword value: An array of remote device adapters. :paramtype value: list[~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapter] :keyword continuation_token: A continuation token to use in subsequent calls to enumerate through the remote device adapter collection. This is used when the collection contains too many results to return in one response. :paramtype continuation_token: str """ super(RemoteDeviceAdapterCollection, self).__init__(**kwargs) self.value = value self.continuation_token = continuation_token
[docs]class RemoteDeviceAdapterDeleteRequest(MethodRequestEmptyBodyBase): """Deletes an existing remote device adapter. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar method_name: Required. Direct method name.Constant filled by server. :vartype method_name: str :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None and "1.1". The default value is "1.1". :vartype api_version: str :ivar name: Required. Resource name. :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, 'name': {'required': True}, } _attribute_map = { 'method_name': {'key': 'methodName', 'type': 'str'}, 'api_version': {'key': '@apiVersion', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, *, name: str, api_version: Optional[str] = "1.1", **kwargs ): """ :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are None and "1.1". The default value is "1.1". :paramtype api_version: str :keyword name: Required. Resource name. :paramtype name: str """ super(RemoteDeviceAdapterDeleteRequest, self).__init__(api_version=api_version, name=name, **kwargs) self.method_name = 'remoteDeviceAdapterDelete'  # type: str
[docs]class RemoteDeviceAdapterGetRequest(MethodRequestEmptyBodyBase): """Retrieves an existing remote device adapter. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar method_name: Required. Direct method name.Constant filled by server. :vartype method_name: str :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None and "1.1". The default value is "1.1". :vartype api_version: str :ivar name: Required. Resource name. :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, 'name': {'required': True}, } _attribute_map = { 'method_name': {'key': 'methodName', 'type': 'str'}, 'api_version': {'key': '@apiVersion', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, *, name: str, api_version: Optional[str] = "1.1", **kwargs ): """ :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are None and "1.1". The default value is "1.1". :paramtype api_version: str :keyword name: Required. Resource name. :paramtype name: str """ super(RemoteDeviceAdapterGetRequest, self).__init__(api_version=api_version, name=name, **kwargs) self.method_name = 'remoteDeviceAdapterGet'  # type: str
[docs]class RemoteDeviceAdapterListRequest(MethodRequest): """List all existing remote device adapters. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar method_name: Required. Direct method name.Constant filled by server. :vartype method_name: str :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None and "1.1". The default value is "1.1". :vartype api_version: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, } _attribute_map = { 'method_name': {'key': 'methodName', 'type': 'str'}, 'api_version': {'key': '@apiVersion', 'type': 'str'}, } def __init__( self, *, api_version: Optional[str] = "1.1", **kwargs ): """ :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are None and "1.1". The default value is "1.1". :paramtype api_version: str """ super(RemoteDeviceAdapterListRequest, self).__init__(api_version=api_version, **kwargs) self.method_name = 'remoteDeviceAdapterList'  # type: str
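These request models carry no transport of their own; they are serialized into IoT Hub direct method calls. Below is a hedged sketch of listing remote device adapters with the azure-iot-hub client; the connection string, device id, and module name "avaedge" are assumptions.

from azure.iot.hub import IoTHubRegistryManager
from azure.iot.hub.models import CloudToDeviceMethod
from azure.media.videoanalyzeredge import (
    RemoteDeviceAdapterCollection,
    RemoteDeviceAdapterListRequest,
)

registry = IoTHubRegistryManager("<iothub-connection-string>")
request = RemoteDeviceAdapterListRequest()

# The method name is the read-only constant on the model; the serialized
# model (which carries @apiVersion) becomes the method payload.
method = CloudToDeviceMethod(
    method_name="remoteDeviceAdapterList",
    payload=request.serialize(),
)
response = registry.invoke_device_module_method("<device-id>", "avaedge", method)

adapters = RemoteDeviceAdapterCollection.deserialize(response.payload)
for adapter in adapters.value or []:
    print(adapter.name)
# A non-empty adapters.continuation_token signals that more results remain.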
[docs]class RemoteDeviceAdapterProperties(msrest.serialization.Model): """Remote device adapter properties. All required parameters must be populated in order to send to Azure. :ivar description: An optional description for the remote device adapter. :vartype description: str :ivar target: Required. The IoT device to which this remote device will connect. :vartype target: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterTarget :ivar iot_hub_device_connection: Required. Information that enables communication between the IoT Hub and the IoT device - allowing this edge module to act as a transparent gateway between the two. :vartype iot_hub_device_connection: ~azure.media.videoanalyzer.edge.models.IotHubDeviceConnection """ _validation = { 'target': {'required': True}, 'iot_hub_device_connection': {'required': True}, } _attribute_map = { 'description': {'key': 'description', 'type': 'str'}, 'target': {'key': 'target', 'type': 'RemoteDeviceAdapterTarget'}, 'iot_hub_device_connection': {'key': 'iotHubDeviceConnection', 'type': 'IotHubDeviceConnection'}, } def __init__( self, *, target: "RemoteDeviceAdapterTarget", iot_hub_device_connection: "IotHubDeviceConnection", description: Optional[str] = None, **kwargs ): """ :keyword description: An optional description for the remote device adapter. :paramtype description: str :keyword target: Required. The IoT device to which this remote device will connect. :paramtype target: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterTarget :keyword iot_hub_device_connection: Required. Information that enables communication between the IoT Hub and the IoT device - allowing this edge module to act as a transparent gateway between the two. :paramtype iot_hub_device_connection: ~azure.media.videoanalyzer.edge.models.IotHubDeviceConnection """ super(RemoteDeviceAdapterProperties, self).__init__(**kwargs) self.description = description self.target = target self.iot_hub_device_connection = iot_hub_device_connection
[docs]class RemoteDeviceAdapterSetRequest(MethodRequest): """Creates a new remote device adapter or updates an existing one. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar method_name: Required. Direct method name.Constant filled by server. :vartype method_name: str :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None and "1.1". The default value is "1.1". :vartype api_version: str :ivar remote_device_adapter: Required. The Video Analyzer edge module can act as a transparent gateway for video, enabling IoT devices to send video to the cloud from behind a firewall. A remote device adapter should be created for each such IoT device. Communication between the cloud and IoT device would then flow via the Video Analyzer edge module. :vartype remote_device_adapter: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapter """ _validation = { 'method_name': {'required': True, 'readonly': True}, 'remote_device_adapter': {'required': True}, } _attribute_map = { 'method_name': {'key': 'methodName', 'type': 'str'}, 'api_version': {'key': '@apiVersion', 'type': 'str'}, 'remote_device_adapter': {'key': 'remoteDeviceAdapter', 'type': 'RemoteDeviceAdapter'}, } def __init__( self, *, remote_device_adapter: "RemoteDeviceAdapter", api_version: Optional[str] = "1.1", **kwargs ): """ :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are None and "1.1". The default value is "1.1". :paramtype api_version: str :keyword remote_device_adapter: Required. The Video Analyzer edge module can act as a transparent gateway for video, enabling IoT devices to send video to the cloud from behind a firewall. A remote device adapter should be created for each such IoT device. Communication between the cloud and IoT device would then flow via the Video Analyzer edge module. :paramtype remote_device_adapter: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapter """ super(RemoteDeviceAdapterSetRequest, self).__init__(api_version=api_version, **kwargs) self.method_name = 'remoteDeviceAdapterSet'  # type: str self.remote_device_adapter = remote_device_adapter
[docs]class RemoteDeviceAdapterSetRequestBody(RemoteDeviceAdapter, MethodRequest): """The Video Analyzer edge module can act as a transparent gateway for video, enabling IoT devices to send video to the cloud from behind a firewall. A remote device adapter should be created for each such IoT device. Communication between the cloud and IoT device would then flow via the Video Analyzer edge module. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar method_name: Required. Direct method name.Constant filled by server. :vartype method_name: str :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None and "1.1". The default value is "1.1". :vartype api_version: str :ivar name: Required. The unique identifier for the remote device adapter. :vartype name: str :ivar system_data: Read-only system metadata associated with this object. :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData :ivar properties: Properties of the remote device adapter. :vartype properties: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterProperties """ _validation = { 'method_name': {'required': True, 'readonly': True}, 'name': {'required': True}, } _attribute_map = { 'method_name': {'key': 'methodName', 'type': 'str'}, 'api_version': {'key': '@apiVersion', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'properties': {'key': 'properties', 'type': 'RemoteDeviceAdapterProperties'}, } def __init__( self, *, name: str, api_version: Optional[str] = "1.1", system_data: Optional["SystemData"] = None, properties: Optional["RemoteDeviceAdapterProperties"] = None, **kwargs ): """ :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are None and "1.1". The default value is "1.1". :paramtype api_version: str :keyword name: Required. The unique identifier for the remote device adapter. :paramtype name: str :keyword system_data: Read-only system metadata associated with this object. :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData :keyword properties: Properties of the remote device adapter. :paramtype properties: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterProperties """ super(RemoteDeviceAdapterSetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, api_version=api_version, **kwargs) self.method_name = 'RemoteDeviceAdapterSetRequestBody'  # type: str self.api_version = api_version self.name = name self.system_data = system_data self.properties = properties
[docs]class RemoteDeviceAdapterTarget(msrest.serialization.Model): """Properties of the remote device adapter target. All required parameters must be populated in order to send to Azure. :ivar host: Required. Hostname or IP address of the remote device. :vartype host: str """ _validation = { 'host': {'required': True}, } _attribute_map = { 'host': {'key': 'host', 'type': 'str'}, } def __init__( self, *, host: str, **kwargs ): """ :keyword host: Required. Hostname or IP address of the remote device. :paramtype host: str """ super(RemoteDeviceAdapterTarget, self).__init__(**kwargs) self.host = host
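A hedged sketch tying the adapter models together for a camera registered in IoT Hub as "camera001". IotHubDeviceConnection is defined elsewhere in this module and SymmetricKeyCredentials further below; the host name and key are placeholders.

from azure.media.videoanalyzeredge import (
    IotHubDeviceConnection,
    RemoteDeviceAdapter,
    RemoteDeviceAdapterProperties,
    RemoteDeviceAdapterTarget,
    SymmetricKeyCredentials,
)

adapter = RemoteDeviceAdapter(
    name="camera001-adapter",
    properties=RemoteDeviceAdapterProperties(
        # The on-premises device the edge module will reach out to.
        target=RemoteDeviceAdapterTarget(host="camera001.internal.contoso.com"),
        # The IoT Hub identity (and its key) the traffic is attributed to.
        iot_hub_device_connection=IotHubDeviceConnection(
            device_id="camera001",
            credentials=SymmetricKeyCredentials(key="<device-primary-key>"),
        ),
    ),
)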
[docs]class RtspSource(SourceNodeBase): """RTSP source allows for media from an RTSP camera or generic RTSP server to be ingested into a live pipeline. All required parameters must be populated in order to send to Azure. :ivar type: Required. Type discriminator for the derived types.Constant filled by server. :vartype type: str :ivar name: Required. Node name. Must be unique within the topology. :vartype name: str :ivar transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the RTSP messages are exchanged through long-lived HTTP connections, and the RTP packets are interleaved in the HTTP connections alongside the RTSP messages. Possible values include: "http", "tcp". :vartype transport: str or ~azure.media.videoanalyzer.edge.models.RtspTransport :ivar endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This contains the required information for Video Analyzer to connect to RTSP cameras and/or generic RTSP servers. :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'endpoint': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'transport': {'key': 'transport', 'type': 'str'}, 'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'}, } def __init__( self, *, name: str, endpoint: "EndpointBase", transport: Optional[Union[str, "RtspTransport"]] = None, **kwargs ): """ :keyword name: Required. Node name. Must be unique within the topology. :paramtype name: str :keyword transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the RTSP messages are exchanged through long-lived HTTP connections, and the RTP packets are interleaved in the HTTP connections alongside the RTSP messages. Possible values include: "http", "tcp". :paramtype transport: str or ~azure.media.videoanalyzer.edge.models.RtspTransport :keyword endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This contains the required information for Video Analyzer to connect to RTSP cameras and/or generic RTSP servers. :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase """ super(RtspSource, self).__init__(name=name, **kwargs) self.type = '#Microsoft.VideoAnalyzer.RtspSource'  # type: str self.transport = transport self.endpoint = endpoint
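A hedged example of an RTSP source node using TCP transport. UnsecuredEndpoint and UsernamePasswordCredentials are defined elsewhere in this module; the URL and the ${...} topology-parameter references are placeholders.

from azure.media.videoanalyzeredge import (
    RtspSource,
    UnsecuredEndpoint,
    UsernamePasswordCredentials,
)

rtsp_source = RtspSource(
    name="rtspSource",
    transport="tcp",
    endpoint=UnsecuredEndpoint(
        url="rtsp://camera001.internal.contoso.com:554/media",
        # ${...} values resolve against user-defined topology parameters
        # when a live pipeline is activated.
        credentials=UsernamePasswordCredentials(
            username="${rtspUserName}",
            password="${rtspPassword}",
        ),
    ),
)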
[docs]class SamplingOptions(msrest.serialization.Model): """Defines how often media is submitted to the extension plugin. :ivar skip_samples_without_annotation: When set to 'true', prevents frames without upstream inference data from being sent to the extension plugin. This is useful to limit the frames sent to the extension to pre-analyzed frames only. For example, when used downstream from a motion detector, this ensures that only frames in which motion has been detected are analyzed further. :vartype skip_samples_without_annotation: str :ivar maximum_samples_per_second: Maximum rate of samples submitted to the extension. This prevents an extension plugin from being overloaded with data. :vartype maximum_samples_per_second: str """ _attribute_map = { 'skip_samples_without_annotation': {'key': 'skipSamplesWithoutAnnotation', 'type': 'str'}, 'maximum_samples_per_second': {'key': 'maximumSamplesPerSecond', 'type': 'str'}, } def __init__( self, *, skip_samples_without_annotation: Optional[str] = None, maximum_samples_per_second: Optional[str] = None, **kwargs ): """ :keyword skip_samples_without_annotation: When set to 'true', prevents frames without upstream inference data from being sent to the extension plugin. This is useful to limit the frames sent to the extension to pre-analyzed frames only. For example, when used downstream from a motion detector, this ensures that only frames in which motion has been detected are analyzed further. :paramtype skip_samples_without_annotation: str :keyword maximum_samples_per_second: Maximum rate of samples submitted to the extension. This prevents an extension plugin from being overloaded with data. :paramtype maximum_samples_per_second: str """ super(SamplingOptions, self).__init__(**kwargs) self.skip_samples_without_annotation = skip_samples_without_annotation self.maximum_samples_per_second = maximum_samples_per_second
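A short hedged example; note that both options are strings in this model, not numbers or booleans.

from azure.media.videoanalyzeredge import SamplingOptions

# Forward at most five frames per second, and only frames that already
# carry upstream inference annotations.
sampling = SamplingOptions(
    skip_samples_without_annotation="true",
    maximum_samples_per_second="5",
)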
[docs]class SignalGateProcessor(ProcessorNodeBase): """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. See https://aka.ms/ava-signalgate for more information. All required parameters must be populated in order to send to Azure. :ivar type: Required. Type discriminator for the derived types.Constant filled by server. :vartype type: str :ivar name: Required. Node name. Must be unique within the topology. :vartype name: str :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] :ivar activation_evaluation_window: The period of time over which the gate gathers input events before evaluating them. :vartype activation_evaluation_window: str :ivar activation_signal_offset: Signal offset once the gate is activated (can be negative). It determines how far behind or ahead of the activation time the signal is let through. A negative offset indicates that data prior to the activation time must be included in the signal that is let through once the gate is activated. When used upstream of a file or video sink, this allows for scenarios such as recording buffered media prior to an event, for example: record video starting 5 seconds before motion is detected. :vartype activation_signal_offset: str :ivar minimum_activation_time: The minimum period for which the gate remains open in the absence of subsequent triggers (events). When used upstream of a file or video sink, it determines the minimum length of the recorded video clip. :vartype minimum_activation_time: str :ivar maximum_activation_time: The maximum period for which the gate remains open in the presence of subsequent triggers (events). When used upstream of a file or video sink, it determines the maximum length of the recorded video clip. :vartype maximum_activation_time: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, } def __init__( self, *, name: str, inputs: List["NodeInput"], activation_evaluation_window: Optional[str] = None, activation_signal_offset: Optional[str] = None, minimum_activation_time: Optional[str] = None, maximum_activation_time: Optional[str] = None, **kwargs ): """ :keyword name: Required. Node name. Must be unique within the topology. :paramtype name: str :keyword inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] :keyword activation_evaluation_window: The period of time over which the gate gathers input events before evaluating them. :paramtype activation_evaluation_window: str :keyword activation_signal_offset: Signal offset once the gate is activated (can be negative). It determines how far behind or ahead of the activation time the signal is let through. A negative offset indicates that data prior to the activation time must be included in the signal that is let through once the gate is activated. When used upstream of a file or video sink, this allows for scenarios such as recording buffered media prior to an event, for example: record video starting 5 seconds before motion is detected. :paramtype activation_signal_offset: str :keyword minimum_activation_time: The minimum period for which the gate remains open in the absence of subsequent triggers (events). When used upstream of a file or video sink, it determines the minimum length of the recorded video clip. :paramtype minimum_activation_time: str :keyword maximum_activation_time: The maximum period for which the gate remains open in the presence of subsequent triggers (events). When used upstream of a file or video sink, it determines the maximum length of the recorded video clip. :paramtype maximum_activation_time: str """ super(SignalGateProcessor, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.VideoAnalyzer.SignalGateProcessor'  # type: str self.activation_evaluation_window = activation_evaluation_window self.activation_signal_offset = activation_signal_offset self.minimum_activation_time = minimum_activation_time self.maximum_activation_time = maximum_activation_time
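A hedged sketch of event-based recording: the gate opens on events from an upstream node (assumed here to be named "motionDetection") and replays buffered media from 5 seconds before the triggering event.

from azure.media.videoanalyzeredge import NodeInput, SignalGateProcessor

# Durations are ISO 8601 strings. The negative offset includes media
# from 5 seconds before the event that opened the gate.
signal_gate = SignalGateProcessor(
    name="signalGate",
    inputs=[NodeInput(node_name="motionDetection")],
    activation_evaluation_window="PT1S",
    activation_signal_offset="-PT5S",
    minimum_activation_time="PT30S",  # shortest resulting clip
    maximum_activation_time="PT2M",   # longest resulting clip
)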
[docs]class SpatialAnalysisOperationBase(msrest.serialization.Model): """Base class for Azure Cognitive Services Spatial Analysis operations. You probably want to use the sub-classes and not this class directly. Known sub-classes are: SpatialAnalysisCustomOperation, SpatialAnalysisTypedOperationBase. All required parameters must be populated in order to send to Azure. :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. :vartype type: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.SpatialAnalysisCustomOperation': 'SpatialAnalysisCustomOperation', 'SpatialAnalysisTypedOperationBase': 'SpatialAnalysisTypedOperationBase'} } def __init__( self, **kwargs ): """ """ super(SpatialAnalysisOperationBase, self).__init__(**kwargs) self.type = None # type: Optional[str]
[docs]class SpatialAnalysisCustomOperation(SpatialAnalysisOperationBase): """Defines a Spatial Analysis custom operation. This requires the Azure Cognitive Services Spatial analysis module to be deployed alongside the Video Analyzer module, please see https://aka.ms/ava-spatial-analysis for more information. All required parameters must be populated in order to send to Azure. :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. :vartype type: str :ivar extension_configuration: Required. Custom configuration to pass to the Azure Cognitive Services Spatial Analysis module. :vartype extension_configuration: str """ _validation = { 'type': {'required': True}, 'extension_configuration': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'extension_configuration': {'key': 'extensionConfiguration', 'type': 'str'}, } def __init__( self, *, extension_configuration: str, **kwargs ): """ :keyword extension_configuration: Required. Custom configuration to pass to the Azure Cognitive Services Spatial Analysis module. :paramtype extension_configuration: str """ super(SpatialAnalysisCustomOperation, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisCustomOperation' # type: str self.extension_configuration = extension_configuration
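The extension configuration is an opaque string passed through to the Spatial Analysis module, so it is typically produced with json.dumps; the configuration keys below are illustrative assumptions, not a documented schema.

import json

from azure.media.videoanalyzeredge import SpatialAnalysisCustomOperation

operation = SpatialAnalysisCustomOperation(
    extension_configuration=json.dumps(
        {
            # Hypothetical Spatial Analysis settings for illustration only.
            "version": 1,
            "enabled": "true",
            "operationId": "custom-person-analytics",
        }
    )
)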
[docs]class SpatialAnalysisOperationEventBase(msrest.serialization.Model): """Defines the Azure Cognitive Services Spatial Analysis operation eventing configuration. :ivar threshold: The event threshold. :vartype threshold: str :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus """ _attribute_map = { 'threshold': {'key': 'threshold', 'type': 'str'}, 'focus': {'key': 'focus', 'type': 'str'}, } def __init__( self, *, threshold: Optional[str] = None, focus: Optional[Union[str, "SpatialAnalysisOperationFocus"]] = None, **kwargs ): """ :keyword threshold: The event threshold. :paramtype threshold: str :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus """ super(SpatialAnalysisOperationEventBase, self).__init__(**kwargs) self.threshold = threshold self.focus = focus
[docs]class SpatialAnalysisPersonCountEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person count operation eventing configuration. :ivar threshold: The event threshold. :vartype threshold: str :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus :ivar trigger: The event trigger type. Possible values include: "event", "interval". :vartype trigger: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEventTrigger :ivar output_frequency: The event or interval output frequency. :vartype output_frequency: str """ _attribute_map = { 'threshold': {'key': 'threshold', 'type': 'str'}, 'focus': {'key': 'focus', 'type': 'str'}, 'trigger': {'key': 'trigger', 'type': 'str'}, 'output_frequency': {'key': 'outputFrequency', 'type': 'str'}, } def __init__( self, *, threshold: Optional[str] = None, focus: Optional[Union[str, "SpatialAnalysisOperationFocus"]] = None, trigger: Optional[Union[str, "SpatialAnalysisPersonCountEventTrigger"]] = None, output_frequency: Optional[str] = None, **kwargs ): """ :keyword threshold: The event threshold. :paramtype threshold: str :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus :keyword trigger: The event trigger type. Possible values include: "event", "interval". :paramtype trigger: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEventTrigger :keyword output_frequency: The event or interval output frequency. :paramtype output_frequency: str """ super(SpatialAnalysisPersonCountEvent, self).__init__(threshold=threshold, focus=focus, **kwargs) self.trigger = trigger self.output_frequency = output_frequency
[docs]class SpatialAnalysisTypedOperationBase(SpatialAnalysisOperationBase): """Base class for Azure Cognitive Services Spatial Analysis typed operations. You probably want to use the sub-classes and not this class directly. Known sub-classes are: SpatialAnalysisPersonCountOperation, SpatialAnalysisPersonDistanceOperation, SpatialAnalysisPersonLineCrossingOperation, SpatialAnalysisPersonZoneCrossingOperation. All required parameters must be populated in order to send to Azure. :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. :vartype type: str :ivar debug: If set to 'true', enables debugging mode for this operation. :vartype debug: str :ivar calibration_configuration: Advanced calibration configuration. :vartype calibration_configuration: str :ivar camera_configuration: Advanced camera configuration. :vartype camera_configuration: str :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration. :vartype camera_calibrator_node_configuration: str :ivar detector_node_configuration: Advanced detector node configuration. :vartype detector_node_configuration: str :ivar tracker_node_configuration: Advanced tracker node configuration. :vartype tracker_node_configuration: str :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. :vartype enable_face_mask_classifier: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, 'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, 'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, 'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.SpatialAnalysisPersonCountOperation': 'SpatialAnalysisPersonCountOperation', '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonDistanceOperation': 'SpatialAnalysisPersonDistanceOperation', '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonLineCrossingOperation': 'SpatialAnalysisPersonLineCrossingOperation', '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonZoneCrossingOperation': 'SpatialAnalysisPersonZoneCrossingOperation'} } def __init__( self, *, debug: Optional[str] = None, calibration_configuration: Optional[str] = None, camera_configuration: Optional[str] = None, camera_calibrator_node_configuration: Optional[str] = None, detector_node_configuration: Optional[str] = None, tracker_node_configuration: Optional[str] = None, enable_face_mask_classifier: Optional[str] = None, **kwargs ): """ :keyword debug: If set to 'true', enables debugging mode for this operation. :paramtype debug: str :keyword calibration_configuration: Advanced calibration configuration. :paramtype calibration_configuration: str :keyword camera_configuration: Advanced camera configuration. :paramtype camera_configuration: str :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration. :paramtype camera_calibrator_node_configuration: str :keyword detector_node_configuration: Advanced detector node configuration. :paramtype detector_node_configuration: str :keyword tracker_node_configuration: Advanced tracker node configuration. 
:paramtype tracker_node_configuration: str :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. :paramtype enable_face_mask_classifier: str """ super(SpatialAnalysisTypedOperationBase, self).__init__(**kwargs) self.type = 'SpatialAnalysisTypedOperationBase' # type: str self.debug = debug self.calibration_configuration = calibration_configuration self.camera_configuration = camera_configuration self.camera_calibrator_node_configuration = camera_calibrator_node_configuration self.detector_node_configuration = detector_node_configuration self.tracker_node_configuration = tracker_node_configuration self.enable_face_mask_classifier = enable_face_mask_classifier
[docs]class SpatialAnalysisPersonCountOperation(SpatialAnalysisTypedOperationBase): """Defines a Spatial Analysis person count operation. This requires the Azure Cognitive Services Spatial analysis module to be deployed alongside the Video Analyzer module, please see https://aka.ms/ava-spatial-analysis for more information. All required parameters must be populated in order to send to Azure. :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. :vartype type: str :ivar debug: If set to 'true', enables debugging mode for this operation. :vartype debug: str :ivar calibration_configuration: Advanced calibration configuration. :vartype calibration_configuration: str :ivar camera_configuration: Advanced camera configuration. :vartype camera_configuration: str :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration. :vartype camera_calibrator_node_configuration: str :ivar detector_node_configuration: Advanced detector node configuration. :vartype detector_node_configuration: str :ivar tracker_node_configuration: Advanced tracker node configuration. :vartype tracker_node_configuration: str :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. :vartype enable_face_mask_classifier: str :ivar zones: Required. The list of zones and optional events. :vartype zones: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountZoneEvents] """ _validation = { 'type': {'required': True}, 'zones': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, 'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, 'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, 'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'zones': {'key': 'zones', 'type': '[SpatialAnalysisPersonCountZoneEvents]'}, } def __init__( self, *, zones: List["SpatialAnalysisPersonCountZoneEvents"], debug: Optional[str] = None, calibration_configuration: Optional[str] = None, camera_configuration: Optional[str] = None, camera_calibrator_node_configuration: Optional[str] = None, detector_node_configuration: Optional[str] = None, tracker_node_configuration: Optional[str] = None, enable_face_mask_classifier: Optional[str] = None, **kwargs ): """ :keyword debug: If set to 'true', enables debugging mode for this operation. :paramtype debug: str :keyword calibration_configuration: Advanced calibration configuration. :paramtype calibration_configuration: str :keyword camera_configuration: Advanced camera configuration. :paramtype camera_configuration: str :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration. :paramtype camera_calibrator_node_configuration: str :keyword detector_node_configuration: Advanced detector node configuration. :paramtype detector_node_configuration: str :keyword tracker_node_configuration: Advanced tracker node configuration. :paramtype tracker_node_configuration: str :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. :paramtype enable_face_mask_classifier: str :keyword zones: Required. 
The list of zones and optional events. :paramtype zones: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountZoneEvents] """ super(SpatialAnalysisPersonCountOperation, self).__init__(debug=debug, calibration_configuration=calibration_configuration, camera_configuration=camera_configuration, camera_calibrator_node_configuration=camera_calibrator_node_configuration, detector_node_configuration=detector_node_configuration, tracker_node_configuration=tracker_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonCountOperation' # type: str self.zones = zones
[docs]class SpatialAnalysisPersonCountZoneEvents(msrest.serialization.Model): """SpatialAnalysisPersonCountZoneEvents. All required parameters must be populated in order to send to Azure. :ivar zone: Required. The named zone. :vartype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase :ivar events: The event configuration. :vartype events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEvent] """ _validation = { 'zone': {'required': True}, } _attribute_map = { 'zone': {'key': 'zone', 'type': 'NamedPolygonBase'}, 'events': {'key': 'events', 'type': '[SpatialAnalysisPersonCountEvent]'}, } def __init__( self, *, zone: "NamedPolygonBase", events: Optional[List["SpatialAnalysisPersonCountEvent"]] = None, **kwargs ): """ :keyword zone: Required. The named zone. :paramtype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase :keyword events: The event configuration. :paramtype events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEvent] """ super(SpatialAnalysisPersonCountZoneEvents, self).__init__(**kwargs) self.zone = zone self.events = events
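A hedged sketch combining the two models above into a complete person-count operation. NamedPolygonString is defined elsewhere in this module; its points value is a JSON-encoded list of normalized [x, y] image coordinates, and the interval value is an assumption.

from azure.media.videoanalyzeredge import (
    NamedPolygonString,
    SpatialAnalysisPersonCountEvent,
    SpatialAnalysisPersonCountOperation,
    SpatialAnalysisPersonCountZoneEvents,
)

person_count = SpatialAnalysisPersonCountOperation(
    zones=[
        SpatialAnalysisPersonCountZoneEvents(
            zone=NamedPolygonString(
                name="doorway",
                points="[[0.3,0.3],[0.7,0.3],[0.7,0.7],[0.3,0.7]]",
            ),
            # Emit a count on a fixed interval rather than per event.
            events=[
                SpatialAnalysisPersonCountEvent(
                    trigger="interval",
                    output_frequency="1",
                )
            ],
        )
    ],
)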
[docs]class SpatialAnalysisPersonDistanceEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person distance operation eventing configuration. :ivar threshold: The event threshold. :vartype threshold: str :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus :ivar trigger: The event trigger type. Possible values include: "event", "interval". :vartype trigger: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEventTrigger :ivar output_frequency: The event or interval output frequency. :vartype output_frequency: str :ivar minimum_distance_threshold: The minimum distance threshold. :vartype minimum_distance_threshold: str :ivar maximum_distance_threshold: The maximum distance threshold. :vartype maximum_distance_threshold: str """ _attribute_map = { 'threshold': {'key': 'threshold', 'type': 'str'}, 'focus': {'key': 'focus', 'type': 'str'}, 'trigger': {'key': 'trigger', 'type': 'str'}, 'output_frequency': {'key': 'outputFrequency', 'type': 'str'}, 'minimum_distance_threshold': {'key': 'minimumDistanceThreshold', 'type': 'str'}, 'maximum_distance_threshold': {'key': 'maximumDistanceThreshold', 'type': 'str'}, } def __init__( self, *, threshold: Optional[str] = None, focus: Optional[Union[str, "SpatialAnalysisOperationFocus"]] = None, trigger: Optional[Union[str, "SpatialAnalysisPersonDistanceEventTrigger"]] = None, output_frequency: Optional[str] = None, minimum_distance_threshold: Optional[str] = None, maximum_distance_threshold: Optional[str] = None, **kwargs ): """ :keyword threshold: The event threshold. :paramtype threshold: str :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus :keyword trigger: The event trigger type. Possible values include: "event", "interval". :paramtype trigger: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEventTrigger :keyword output_frequency: The event or interval output frequency. :paramtype output_frequency: str :keyword minimum_distance_threshold: The minimum distance threshold. :paramtype minimum_distance_threshold: str :keyword maximum_distance_threshold: The maximum distance threshold. :paramtype maximum_distance_threshold: str """ super(SpatialAnalysisPersonDistanceEvent, self).__init__(threshold=threshold, focus=focus, **kwargs) self.trigger = trigger self.output_frequency = output_frequency self.minimum_distance_threshold = minimum_distance_threshold self.maximum_distance_threshold = maximum_distance_threshold
[docs]class SpatialAnalysisPersonDistanceOperation(SpatialAnalysisTypedOperationBase): """Defines a Spatial Analysis person distance operation. This requires the Azure Cognitive Services Spatial analysis module to be deployed alongside the Video Analyzer module, please see https://aka.ms/ava-spatial-analysis for more information. All required parameters must be populated in order to send to Azure. :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. :vartype type: str :ivar debug: If set to 'true', enables debugging mode for this operation. :vartype debug: str :ivar calibration_configuration: Advanced calibration configuration. :vartype calibration_configuration: str :ivar camera_configuration: Advanced camera configuration. :vartype camera_configuration: str :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration. :vartype camera_calibrator_node_configuration: str :ivar detector_node_configuration: Advanced detector node configuration. :vartype detector_node_configuration: str :ivar tracker_node_configuration: Advanced tracker node configuration. :vartype tracker_node_configuration: str :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. :vartype enable_face_mask_classifier: str :ivar zones: Required. The list of zones with optional events. :vartype zones: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceZoneEvents] """ _validation = { 'type': {'required': True}, 'zones': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, 'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, 'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, 'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'zones': {'key': 'zones', 'type': '[SpatialAnalysisPersonDistanceZoneEvents]'}, } def __init__( self, *, zones: List["SpatialAnalysisPersonDistanceZoneEvents"], debug: Optional[str] = None, calibration_configuration: Optional[str] = None, camera_configuration: Optional[str] = None, camera_calibrator_node_configuration: Optional[str] = None, detector_node_configuration: Optional[str] = None, tracker_node_configuration: Optional[str] = None, enable_face_mask_classifier: Optional[str] = None, **kwargs ): """ :keyword debug: If set to 'true', enables debugging mode for this operation. :paramtype debug: str :keyword calibration_configuration: Advanced calibration configuration. :paramtype calibration_configuration: str :keyword camera_configuration: Advanced camera configuration. :paramtype camera_configuration: str :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration. :paramtype camera_calibrator_node_configuration: str :keyword detector_node_configuration: Advanced detector node configuration. :paramtype detector_node_configuration: str :keyword tracker_node_configuration: Advanced tracker node configuration. :paramtype tracker_node_configuration: str :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. :paramtype enable_face_mask_classifier: str :keyword zones: Required. 
The list of zones with optional events. :paramtype zones: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceZoneEvents] """ super(SpatialAnalysisPersonDistanceOperation, self).__init__(debug=debug, calibration_configuration=calibration_configuration, camera_configuration=camera_configuration, camera_calibrator_node_configuration=camera_calibrator_node_configuration, detector_node_configuration=detector_node_configuration, tracker_node_configuration=tracker_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonDistanceOperation' # type: str self.zones = zones
[docs]class SpatialAnalysisPersonDistanceZoneEvents(msrest.serialization.Model): """SpatialAnalysisPersonDistanceZoneEvents. All required parameters must be populated in order to send to Azure. :ivar zone: Required. The named zone. :vartype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase :ivar events: The event configuration. :vartype events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEvent] """ _validation = { 'zone': {'required': True}, } _attribute_map = { 'zone': {'key': 'zone', 'type': 'NamedPolygonBase'}, 'events': {'key': 'events', 'type': '[SpatialAnalysisPersonDistanceEvent]'}, } def __init__( self, *, zone: "NamedPolygonBase", events: Optional[List["SpatialAnalysisPersonDistanceEvent"]] = None, **kwargs ): """ :keyword zone: Required. The named zone. :paramtype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase :keyword events: The event configuration. :paramtype events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEvent] """ super(SpatialAnalysisPersonDistanceZoneEvents, self).__init__(**kwargs) self.zone = zone self.events = events
[docs]class SpatialAnalysisPersonLineCrossingEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person line crossing operation eventing configuration. :ivar threshold: The event threshold. :vartype threshold: str :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus """ _attribute_map = { 'threshold': {'key': 'threshold', 'type': 'str'}, 'focus': {'key': 'focus', 'type': 'str'}, } def __init__( self, *, threshold: Optional[str] = None, focus: Optional[Union[str, "SpatialAnalysisOperationFocus"]] = None, **kwargs ): """ :keyword threshold: The event threshold. :paramtype threshold: str :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus """ super(SpatialAnalysisPersonLineCrossingEvent, self).__init__(threshold=threshold, focus=focus, **kwargs)
[docs]class SpatialAnalysisPersonLineCrossingLineEvents(msrest.serialization.Model): """SpatialAnalysisPersonLineCrossingLineEvents. All required parameters must be populated in order to send to Azure. :ivar line: Required. The named line. :vartype line: ~azure.media.videoanalyzer.edge.models.NamedLineBase :ivar events: The event configuration. :vartype events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonLineCrossingEvent] """ _validation = { 'line': {'required': True}, } _attribute_map = { 'line': {'key': 'line', 'type': 'NamedLineBase'}, 'events': {'key': 'events', 'type': '[SpatialAnalysisPersonLineCrossingEvent]'}, } def __init__( self, *, line: "NamedLineBase", events: Optional[List["SpatialAnalysisPersonLineCrossingEvent"]] = None, **kwargs ): """ :keyword line: Required. The named line. :paramtype line: ~azure.media.videoanalyzer.edge.models.NamedLineBase :keyword events: The event configuration. :paramtype events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonLineCrossingEvent] """ super(SpatialAnalysisPersonLineCrossingLineEvents, self).__init__(**kwargs) self.line = line self.events = events
[docs]class SpatialAnalysisPersonLineCrossingOperation(SpatialAnalysisTypedOperationBase): """Defines a Spatial Analysis person line crossing operation. This requires the Azure Cognitive Services Spatial analysis module to be deployed alongside the Video Analyzer module, please see https://aka.ms/ava-spatial-analysis for more information. All required parameters must be populated in order to send to Azure. :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. :vartype type: str :ivar debug: If set to 'true', enables debugging mode for this operation. :vartype debug: str :ivar calibration_configuration: Advanced calibration configuration. :vartype calibration_configuration: str :ivar camera_configuration: Advanced camera configuration. :vartype camera_configuration: str :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration. :vartype camera_calibrator_node_configuration: str :ivar detector_node_configuration: Advanced detector node configuration. :vartype detector_node_configuration: str :ivar tracker_node_configuration: Advanced tracker node configuration. :vartype tracker_node_configuration: str :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. :vartype enable_face_mask_classifier: str :ivar lines: Required. The list of lines with optional events. :vartype lines: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonLineCrossingLineEvents] """ _validation = { 'type': {'required': True}, 'lines': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, 'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, 'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, 'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'lines': {'key': 'lines', 'type': '[SpatialAnalysisPersonLineCrossingLineEvents]'}, } def __init__( self, *, lines: List["SpatialAnalysisPersonLineCrossingLineEvents"], debug: Optional[str] = None, calibration_configuration: Optional[str] = None, camera_configuration: Optional[str] = None, camera_calibrator_node_configuration: Optional[str] = None, detector_node_configuration: Optional[str] = None, tracker_node_configuration: Optional[str] = None, enable_face_mask_classifier: Optional[str] = None, **kwargs ): """ :keyword debug: If set to 'true', enables debugging mode for this operation. :paramtype debug: str :keyword calibration_configuration: Advanced calibration configuration. :paramtype calibration_configuration: str :keyword camera_configuration: Advanced camera configuration. :paramtype camera_configuration: str :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration. :paramtype camera_calibrator_node_configuration: str :keyword detector_node_configuration: Advanced detector node configuration. :paramtype detector_node_configuration: str :keyword tracker_node_configuration: Advanced tracker node configuration. :paramtype tracker_node_configuration: str :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. 
:paramtype enable_face_mask_classifier: str :keyword lines: Required. The list of lines with optional events. :paramtype lines: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonLineCrossingLineEvents] """ super(SpatialAnalysisPersonLineCrossingOperation, self).__init__(debug=debug, calibration_configuration=calibration_configuration, camera_configuration=camera_configuration, camera_calibrator_node_configuration=camera_calibrator_node_configuration, detector_node_configuration=detector_node_configuration, tracker_node_configuration=tracker_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonLineCrossingOperation' # type: str self.lines = lines
[docs]class SpatialAnalysisPersonZoneCrossingEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person crossing zone operation eventing configuration. :ivar threshold: The event threshold. :vartype threshold: str :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus :ivar event_type: The event type. Possible values include: "zoneCrossing", "zoneDwellTime". :vartype event_type: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingEventType """ _attribute_map = { 'threshold': {'key': 'threshold', 'type': 'str'}, 'focus': {'key': 'focus', 'type': 'str'}, 'event_type': {'key': 'eventType', 'type': 'str'}, } def __init__( self, *, threshold: Optional[str] = None, focus: Optional[Union[str, "SpatialAnalysisOperationFocus"]] = None, event_type: Optional[Union[str, "SpatialAnalysisPersonZoneCrossingEventType"]] = None, **kwargs ): """ :keyword threshold: The event threshold. :paramtype threshold: str :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus :keyword event_type: The event type. Possible values include: "zoneCrossing", "zoneDwellTime". :paramtype event_type: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingEventType """ super(SpatialAnalysisPersonZoneCrossingEvent, self).__init__(threshold=threshold, focus=focus, **kwargs) self.event_type = event_type
[docs]class SpatialAnalysisPersonZoneCrossingOperation(SpatialAnalysisTypedOperationBase):
    """Defines a Spatial Analysis person zone crossing operation. This requires the Azure Cognitive
    Services Spatial Analysis module to be deployed alongside the Video Analyzer module; see
    https://aka.ms/ava-spatial-analysis for more information.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar debug: If set to 'true', enables debugging mode for this operation.
    :vartype debug: str
    :ivar calibration_configuration: Advanced calibration configuration.
    :vartype calibration_configuration: str
    :ivar camera_configuration: Advanced camera configuration.
    :vartype camera_configuration: str
    :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration.
    :vartype camera_calibrator_node_configuration: str
    :ivar detector_node_configuration: Advanced detector node configuration.
    :vartype detector_node_configuration: str
    :ivar tracker_node_configuration: Advanced tracker node configuration.
    :vartype tracker_node_configuration: str
    :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this
     operation.
    :vartype enable_face_mask_classifier: str
    :ivar zones: Required. The list of zones with optional events.
    :vartype zones:
     list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingZoneEvents]
    """

    _validation = {
        'type': {'required': True},
        'zones': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'debug': {'key': 'debug', 'type': 'str'},
        'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'},
        'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'},
        'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'},
        'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'},
        'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'},
        'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'},
        'zones': {'key': 'zones', 'type': '[SpatialAnalysisPersonZoneCrossingZoneEvents]'},
    }

    def __init__(
        self,
        *,
        zones: List["SpatialAnalysisPersonZoneCrossingZoneEvents"],
        debug: Optional[str] = None,
        calibration_configuration: Optional[str] = None,
        camera_configuration: Optional[str] = None,
        camera_calibrator_node_configuration: Optional[str] = None,
        detector_node_configuration: Optional[str] = None,
        tracker_node_configuration: Optional[str] = None,
        enable_face_mask_classifier: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword debug: If set to 'true', enables debugging mode for this operation.
        :paramtype debug: str
        :keyword calibration_configuration: Advanced calibration configuration.
        :paramtype calibration_configuration: str
        :keyword camera_configuration: Advanced camera configuration.
        :paramtype camera_configuration: str
        :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration.
        :paramtype camera_calibrator_node_configuration: str
        :keyword detector_node_configuration: Advanced detector node configuration.
        :paramtype detector_node_configuration: str
        :keyword tracker_node_configuration: Advanced tracker node configuration.
        :paramtype tracker_node_configuration: str
        :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for
         this operation.
        :paramtype enable_face_mask_classifier: str
        :keyword zones: Required. The list of zones with optional events.
        :paramtype zones:
         list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingZoneEvents]
        """
        super(SpatialAnalysisPersonZoneCrossingOperation, self).__init__(debug=debug, calibration_configuration=calibration_configuration, camera_configuration=camera_configuration, camera_calibrator_node_configuration=camera_calibrator_node_configuration, detector_node_configuration=detector_node_configuration, tracker_node_configuration=tracker_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonZoneCrossingOperation'  # type: str
        self.zones = zones
[docs]class SpatialAnalysisPersonZoneCrossingZoneEvents(msrest.serialization.Model):
    """Describes a zone and the optional events to be generated for it by a Spatial Analysis person
    zone crossing operation.

    All required parameters must be populated in order to send to Azure.

    :ivar zone: Required. The named zone.
    :vartype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase
    :ivar events: The event configuration.
    :vartype events:
     list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingEvent]
    """

    _validation = {
        'zone': {'required': True},
    }

    _attribute_map = {
        'zone': {'key': 'zone', 'type': 'NamedPolygonBase'},
        'events': {'key': 'events', 'type': '[SpatialAnalysisPersonZoneCrossingEvent]'},
    }

    def __init__(
        self,
        *,
        zone: "NamedPolygonBase",
        events: Optional[List["SpatialAnalysisPersonZoneCrossingEvent"]] = None,
        **kwargs
    ):
        """
        :keyword zone: Required. The named zone.
        :paramtype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase
        :keyword events: The event configuration.
        :paramtype events:
         list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingEvent]
        """
        super(SpatialAnalysisPersonZoneCrossingZoneEvents, self).__init__(**kwargs)
        self.zone = zone
        self.events = events
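# A minimal usage sketch (not part of the generated code) showing how the three zone
# crossing models above compose: each zone pairs a named polygon with its optional
# events, and the operation takes the list of zone/event pairs. NamedPolygonString is
# the NamedPolygonBase subclass from this models module; the zone name and the
# normalized polygon coordinates below are hypothetical.
def _example_zone_crossing_operation():
    zone_events = SpatialAnalysisPersonZoneCrossingZoneEvents(
        zone=NamedPolygonString(name="lobby", polygon="[[0.3,0.8],[0.8,0.8],[0.8,0.4],[0.3,0.4]]"),
        events=[SpatialAnalysisPersonZoneCrossingEvent(event_type="zoneCrossing", focus="footprint")],
    )
    return SpatialAnalysisPersonZoneCrossingOperation(zones=[zone_events])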
[docs]class SymmetricKeyCredentials(CredentialsBase):
    """Symmetric key credential.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar key: Required. Symmetric key credential.
    :vartype key: str
    """

    _validation = {
        'type': {'required': True},
        'key': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'key': {'key': 'key', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        key: str,
        **kwargs
    ):
        """
        :keyword key: Required. Symmetric key credential.
        :paramtype key: str
        """
        super(SymmetricKeyCredentials, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.SymmetricKeyCredentials'  # type: str
        self.key = key
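# A minimal usage sketch (not part of the generated code): a symmetric key credential.
# Keys are typically supplied through a topology parameter rather than hard-coded;
# "${iotHubKey}" below is a hypothetical parameter reference.
def _example_symmetric_key_credentials():
    return SymmetricKeyCredentials(key="${iotHubKey}")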
[docs]class SystemData(msrest.serialization.Model):
    """Read-only system metadata associated with a resource.

    :ivar created_at: Date and time when this resource was first created. Value is represented in
     UTC according to the ISO8601 date format.
    :vartype created_at: ~datetime.datetime
    :ivar last_modified_at: Date and time when this resource was last modified. Value is
     represented in UTC according to the ISO8601 date format.
    :vartype last_modified_at: ~datetime.datetime
    """

    _attribute_map = {
        'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
        'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        *,
        created_at: Optional[datetime.datetime] = None,
        last_modified_at: Optional[datetime.datetime] = None,
        **kwargs
    ):
        """
        :keyword created_at: Date and time when this resource was first created. Value is
         represented in UTC according to the ISO8601 date format.
        :paramtype created_at: ~datetime.datetime
        :keyword last_modified_at: Date and time when this resource was last modified. Value is
         represented in UTC according to the ISO8601 date format.
        :paramtype last_modified_at: ~datetime.datetime
        """
        super(SystemData, self).__init__(**kwargs)
        self.created_at = created_at
        self.last_modified_at = last_modified_at
[docs]class TlsEndpoint(EndpointBase):
    """TLS endpoint describes an endpoint that the pipeline can connect to over TLS transport (data
    is encrypted in transit).

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar credentials: Credentials to be presented to the endpoint.
    :vartype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
    :ivar url: Required. The endpoint URL for Video Analyzer to connect to.
    :vartype url: str
    :ivar trusted_certificates: List of trusted certificate authorities when authenticating a TLS
     connection. A null list designates that Azure Video Analyzer's list of trusted authorities
     should be used.
    :vartype trusted_certificates: ~azure.media.videoanalyzer.edge.models.CertificateSource
    :ivar validation_options: Validation options to use when authenticating a TLS connection. By
     default, strict validation is used.
    :vartype validation_options: ~azure.media.videoanalyzer.edge.models.TlsValidationOptions
    """

    _validation = {
        'type': {'required': True},
        'url': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
        'url': {'key': 'url', 'type': 'str'},
        'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'},
        'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'},
    }

    def __init__(
        self,
        *,
        url: str,
        credentials: Optional["CredentialsBase"] = None,
        trusted_certificates: Optional["CertificateSource"] = None,
        validation_options: Optional["TlsValidationOptions"] = None,
        **kwargs
    ):
        """
        :keyword credentials: Credentials to be presented to the endpoint.
        :paramtype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
        :keyword url: Required. The endpoint URL for Video Analyzer to connect to.
        :paramtype url: str
        :keyword trusted_certificates: List of trusted certificate authorities when authenticating
         a TLS connection. A null list designates that Azure Video Analyzer's list of trusted
         authorities should be used.
        :paramtype trusted_certificates: ~azure.media.videoanalyzer.edge.models.CertificateSource
        :keyword validation_options: Validation options to use when authenticating a TLS
         connection. By default, strict validation is used.
        :paramtype validation_options: ~azure.media.videoanalyzer.edge.models.TlsValidationOptions
        """
        super(TlsEndpoint, self).__init__(credentials=credentials, url=url, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint'  # type: str
        self.trusted_certificates = trusted_certificates
        self.validation_options = validation_options
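# A minimal usage sketch (not part of the generated code): a TLS endpoint for an RTSPS
# camera. PemCertificateList is the CertificateSource subclass named in this module's
# subtype map; the URL, credential values, and certificate contents are hypothetical.
# UsernamePasswordCredentials is defined later in this module, which is fine here
# because the name is only resolved when the function runs.
def _example_tls_endpoint():
    return TlsEndpoint(
        url="rtsps://camera.contoso.example:322/stream",
        credentials=UsernamePasswordCredentials(username="viewer", password="${rtspPassword}"),
        trusted_certificates=PemCertificateList(certificates=["-----BEGIN CERTIFICATE-----..."]),
        validation_options=TlsValidationOptions(ignore_hostname="true", ignore_signature="false"),
    )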
[docs]class TlsValidationOptions(msrest.serialization.Model):
    """Options for controlling the validation of TLS endpoints.

    :ivar ignore_hostname: When set to 'true', the certificate subject name validation is skipped.
     Default is 'false'.
    :vartype ignore_hostname: str
    :ivar ignore_signature: When set to 'true', the certificate chain trust validation is skipped.
     Default is 'false'.
    :vartype ignore_signature: str
    """

    _attribute_map = {
        'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'},
        'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        ignore_hostname: Optional[str] = None,
        ignore_signature: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword ignore_hostname: When set to 'true', the certificate subject name validation is
         skipped. Default is 'false'.
        :paramtype ignore_hostname: str
        :keyword ignore_signature: When set to 'true', the certificate chain trust validation is
         skipped. Default is 'false'.
        :paramtype ignore_signature: str
        """
        super(TlsValidationOptions, self).__init__(**kwargs)
        self.ignore_hostname = ignore_hostname
        self.ignore_signature = ignore_signature
[docs]class UnsecuredEndpoint(EndpointBase):
    """Unsecured endpoint describes an endpoint that the pipeline can connect to over clear
    transport (no encryption in transit).

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar credentials: Credentials to be presented to the endpoint.
    :vartype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
    :ivar url: Required. The endpoint URL for Video Analyzer to connect to.
    :vartype url: str
    """

    _validation = {
        'type': {'required': True},
        'url': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
        'url': {'key': 'url', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        url: str,
        credentials: Optional["CredentialsBase"] = None,
        **kwargs
    ):
        """
        :keyword credentials: Credentials to be presented to the endpoint.
        :paramtype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
        :keyword url: Required. The endpoint URL for Video Analyzer to connect to.
        :paramtype url: str
        """
        super(UnsecuredEndpoint, self).__init__(credentials=credentials, url=url, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint'  # type: str
[docs]class UsernamePasswordCredentials(CredentialsBase):
    """Username and password credentials.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar username: Required. Username to be presented as part of the credentials.
    :vartype username: str
    :ivar password: Required. Password to be presented as part of the credentials. It is
     recommended that this value is parameterized as a secret string to prevent it from being
     returned as part of the resource on API requests.
    :vartype password: str
    """

    _validation = {
        'type': {'required': True},
        'username': {'required': True},
        'password': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'username': {'key': 'username', 'type': 'str'},
        'password': {'key': 'password', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        username: str,
        password: str,
        **kwargs
    ):
        """
        :keyword username: Required. Username to be presented as part of the credentials.
        :paramtype username: str
        :keyword password: Required. Password to be presented as part of the credentials. It is
         recommended that this value is parameterized as a secret string to prevent it from being
         returned as part of the resource on API requests.
        :paramtype password: str
        """
        super(UsernamePasswordCredentials, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials'  # type: str
        self.username = username
        self.password = password
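# A minimal usage sketch (not part of the generated code): an unsecured RTSP endpoint
# with username/password credentials. The URL and username are hypothetical, and the
# password is referenced as a topology parameter so the secret is not embedded in the
# topology document itself.
def _example_unsecured_endpoint():
    return UnsecuredEndpoint(
        url="rtsp://camera.contoso.example:554/stream",
        credentials=UsernamePasswordCredentials(username="viewer", password="${rtspPassword}"),
    )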
[docs]class VideoCreationProperties(msrest.serialization.Model):
    """Optional video properties to be used in case a new video resource needs to be created on the
    service. These will not take effect if the video already exists.

    :ivar title: Optional video title provided by the user. Value can be up to 256 characters
     long.
    :vartype title: str
    :ivar description: Optional video description provided by the user. Value can be up to 2048
     characters long.
    :vartype description: str
    :ivar segment_length: Video segment length indicates the length of individual video files
     (segments) which are persisted to storage. Smaller segments provide lower archive playback
     latency but generate a larger volume of storage transactions. Larger segments reduce the
     number of storage transactions while increasing the archive playback latency. Value must be
     specified in ISO8601 duration format (e.g. "PT30S" equals 30 seconds) and can range from 30
     seconds to 5 minutes, in 30-second increments. Changing this value after the video is
     initially created can lead to errors when uploading media to the archive. Default value is 30
     seconds.
    :vartype segment_length: str
    :ivar retention_period: Video retention period indicates how long the video is kept in
     storage, and must be a multiple of 1 day. For example, if this is set to 30 days, then content
     older than 30 days will be deleted.
    :vartype retention_period: str
    """

    _attribute_map = {
        'title': {'key': 'title', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'segment_length': {'key': 'segmentLength', 'type': 'str'},
        'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        title: Optional[str] = None,
        description: Optional[str] = None,
        segment_length: Optional[str] = None,
        retention_period: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword title: Optional video title provided by the user. Value can be up to 256
         characters long.
        :paramtype title: str
        :keyword description: Optional video description provided by the user. Value can be up to
         2048 characters long.
        :paramtype description: str
        :keyword segment_length: Video segment length indicates the length of individual video
         files (segments) which are persisted to storage. Smaller segments provide lower archive
         playback latency but generate a larger volume of storage transactions. Larger segments
         reduce the number of storage transactions while increasing the archive playback latency.
         Value must be specified in ISO8601 duration format (e.g. "PT30S" equals 30 seconds) and
         can range from 30 seconds to 5 minutes, in 30-second increments. Changing this value after
         the video is initially created can lead to errors when uploading media to the archive.
         Default value is 30 seconds.
        :paramtype segment_length: str
        :keyword retention_period: Video retention period indicates how long the video is kept in
         storage, and must be a multiple of 1 day. For example, if this is set to 30 days, then
         content older than 30 days will be deleted.
        :paramtype retention_period: str
        """
        super(VideoCreationProperties, self).__init__(**kwargs)
        self.title = title
        self.description = description
        self.segment_length = segment_length
        self.retention_period = retention_period
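# A minimal usage sketch (not part of the generated code): video creation properties
# using ISO8601 durations. "PT30S" requests 30-second segments (the documented default)
# and "P30D" retains recorded content for 30 days; the title and description values are
# hypothetical.
def _example_video_creation_properties():
    return VideoCreationProperties(
        title="Lobby camera",
        description="Continuous recording of the lobby entrance",
        segment_length="PT30S",
        retention_period="P30D",
    )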
[docs]class VideoEncoderConfiguration(msrest.serialization.Model):
    """Class representing the video encoder configuration.

    :ivar encoding: The video codec used by the Media Profile. Possible values include: "JPEG",
     "H264", "MPEG4".
    :vartype encoding: str or ~azure.media.videoanalyzer.edge.models.VideoEncoding
    :ivar quality: Relative value representing the quality of the video.
    :vartype quality: float
    :ivar resolution: The Video Resolution.
    :vartype resolution: ~azure.media.videoanalyzer.edge.models.VideoResolution
    :ivar rate_control: The Video's rate control.
    :vartype rate_control: ~azure.media.videoanalyzer.edge.models.RateControl
    :ivar h264: The H264 Configuration.
    :vartype h264: ~azure.media.videoanalyzer.edge.models.H264Configuration
    :ivar mpeg4: The MPEG4 Configuration.
    :vartype mpeg4: ~azure.media.videoanalyzer.edge.models.MPEG4Configuration
    """

    _attribute_map = {
        'encoding': {'key': 'encoding', 'type': 'str'},
        'quality': {'key': 'quality', 'type': 'float'},
        'resolution': {'key': 'resolution', 'type': 'VideoResolution'},
        'rate_control': {'key': 'rateControl', 'type': 'RateControl'},
        'h264': {'key': 'h264', 'type': 'H264Configuration'},
        'mpeg4': {'key': 'mpeg4', 'type': 'MPEG4Configuration'},
    }

    def __init__(
        self,
        *,
        encoding: Optional[Union[str, "VideoEncoding"]] = None,
        quality: Optional[float] = None,
        resolution: Optional["VideoResolution"] = None,
        rate_control: Optional["RateControl"] = None,
        h264: Optional["H264Configuration"] = None,
        mpeg4: Optional["MPEG4Configuration"] = None,
        **kwargs
    ):
        """
        :keyword encoding: The video codec used by the Media Profile. Possible values include:
         "JPEG", "H264", "MPEG4".
        :paramtype encoding: str or ~azure.media.videoanalyzer.edge.models.VideoEncoding
        :keyword quality: Relative value representing the quality of the video.
        :paramtype quality: float
        :keyword resolution: The Video Resolution.
        :paramtype resolution: ~azure.media.videoanalyzer.edge.models.VideoResolution
        :keyword rate_control: The Video's rate control.
        :paramtype rate_control: ~azure.media.videoanalyzer.edge.models.RateControl
        :keyword h264: The H264 Configuration.
        :paramtype h264: ~azure.media.videoanalyzer.edge.models.H264Configuration
        :keyword mpeg4: The MPEG4 Configuration.
        :paramtype mpeg4: ~azure.media.videoanalyzer.edge.models.MPEG4Configuration
        """
        super(VideoEncoderConfiguration, self).__init__(**kwargs)
        self.encoding = encoding
        self.quality = quality
        self.resolution = resolution
        self.rate_control = rate_control
        self.h264 = h264
        self.mpeg4 = mpeg4
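# A minimal usage sketch (not part of the generated code): an H.264 encoder
# configuration at 1080p. Only fields defined in this module are set; the quality value
# is a hypothetical relative number, and VideoResolution is defined later in this
# module (the name is resolved when the function runs).
def _example_video_encoder_configuration():
    return VideoEncoderConfiguration(
        encoding="H264",
        quality=5,
        resolution=VideoResolution(width=1920, height=1080),
    )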
[docs]class VideoPublishingOptions(msrest.serialization.Model):
    """Options for changing video publishing behavior on the video sink and output video.

    :ivar enable_video_preview_image: When set to 'true', the video will publish preview images.
     Default is 'false'.
    :vartype enable_video_preview_image: str
    """

    _attribute_map = {
        'enable_video_preview_image': {'key': 'enableVideoPreviewImage', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        enable_video_preview_image: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword enable_video_preview_image: When set to 'true', the video will publish preview
         images. Default is 'false'.
        :paramtype enable_video_preview_image: str
        """
        super(VideoPublishingOptions, self).__init__(**kwargs)
        self.enable_video_preview_image = enable_video_preview_image
[docs]class VideoResolution(msrest.serialization.Model):
    """The Video resolution.

    :ivar width: The number of columns of the Video image.
    :vartype width: float
    :ivar height: The number of lines of the Video image.
    :vartype height: float
    """

    _attribute_map = {
        'width': {'key': 'width', 'type': 'float'},
        'height': {'key': 'height', 'type': 'float'},
    }

    def __init__(
        self,
        *,
        width: Optional[float] = None,
        height: Optional[float] = None,
        **kwargs
    ):
        """
        :keyword width: The number of columns of the Video image.
        :paramtype width: float
        :keyword height: The number of lines of the Video image.
        :paramtype height: float
        """
        super(VideoResolution, self).__init__(**kwargs)
        self.width = width
        self.height = height
[docs]class VideoSink(SinkNodeBase):
    """Video sink allows for video and audio to be recorded to the Video Analyzer service. The
    recorded video can be played from anywhere and further managed from the cloud. For security
    reasons, a given Video Analyzer edge module instance can only record content to new video
    entries, or to existing video entries previously recorded by the same module. Any attempt to
    record content to an existing video which was not created by the same module instance will
    result in failure to record.

    All required parameters must be populated in order to send to Azure.

    :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
    :vartype type: str
    :ivar name: Required. Node name. Must be unique within the topology.
    :vartype name: str
    :ivar inputs: Required. An array of upstream node references within the topology to be used as
     inputs for this node.
    :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
    :ivar video_name: Required. Name of a new or existing Video Analyzer video resource used for
     the media recording.
    :vartype video_name: str
    :ivar video_creation_properties: Optional video properties to be used in case a new video
     resource needs to be created on the service.
    :vartype video_creation_properties:
     ~azure.media.videoanalyzer.edge.models.VideoCreationProperties
    :ivar video_publishing_options: Optional video publishing options to be used for changing
     publishing behavior of the output video.
    :vartype video_publishing_options:
     ~azure.media.videoanalyzer.edge.models.VideoPublishingOptions
    :ivar local_media_cache_path: Required. Path to a local file system directory for caching of
     temporary media files. This will also be used to store content which cannot be immediately
     uploaded to Azure due to Internet connectivity issues.
    :vartype local_media_cache_path: str
    :ivar local_media_cache_maximum_size_mi_b: Required. Maximum amount of disk space that can be
     used for caching of temporary media files. Once this limit is reached, the oldest segments of
     the media archive will be continuously deleted in order to make space for new media, thus
     leading to gaps in the cloud recorded content.
    :vartype local_media_cache_maximum_size_mi_b: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'video_name': {'required': True},
        'local_media_cache_path': {'required': True},
        'local_media_cache_maximum_size_mi_b': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'video_name': {'key': 'videoName', 'type': 'str'},
        'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'},
        'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'},
        'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'},
        'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        inputs: List["NodeInput"],
        video_name: str,
        local_media_cache_path: str,
        local_media_cache_maximum_size_mi_b: str,
        video_creation_properties: Optional["VideoCreationProperties"] = None,
        video_publishing_options: Optional["VideoPublishingOptions"] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Node name. Must be unique within the topology.
        :paramtype name: str
        :keyword inputs: Required. An array of upstream node references within the topology to be
         used as inputs for this node.
        :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
        :keyword video_name: Required. Name of a new or existing Video Analyzer video resource
         used for the media recording.
        :paramtype video_name: str
        :keyword video_creation_properties: Optional video properties to be used in case a new
         video resource needs to be created on the service.
        :paramtype video_creation_properties:
         ~azure.media.videoanalyzer.edge.models.VideoCreationProperties
        :keyword video_publishing_options: Optional video publishing options to be used for
         changing publishing behavior of the output video.
        :paramtype video_publishing_options:
         ~azure.media.videoanalyzer.edge.models.VideoPublishingOptions
        :keyword local_media_cache_path: Required. Path to a local file system directory for
         caching of temporary media files. This will also be used to store content which cannot be
         immediately uploaded to Azure due to Internet connectivity issues.
        :paramtype local_media_cache_path: str
        :keyword local_media_cache_maximum_size_mi_b: Required. Maximum amount of disk space that
         can be used for caching of temporary media files. Once this limit is reached, the oldest
         segments of the media archive will be continuously deleted in order to make space for new
         media, thus leading to gaps in the cloud recorded content.
        :paramtype local_media_cache_maximum_size_mi_b: str
        """
        super(VideoSink, self).__init__(name=name, inputs=inputs, **kwargs)
        self.type = '#Microsoft.VideoAnalyzer.VideoSink'  # type: str
        self.video_name = video_name
        self.video_creation_properties = video_creation_properties
        self.video_publishing_options = video_publishing_options
        self.local_media_cache_path = local_media_cache_path
        self.local_media_cache_maximum_size_mi_b = local_media_cache_maximum_size_mi_b
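# A minimal usage sketch (not part of the generated code): a video sink recording the
# output of an upstream RTSP source node, with preview images enabled. The node name,
# video name, and cache settings are hypothetical; NodeInput is defined elsewhere in
# this module.
def _example_video_sink():
    return VideoSink(
        name="videoSink",
        inputs=[NodeInput(node_name="rtspSource")],
        video_name="lobby-camera-001",
        video_creation_properties=VideoCreationProperties(title="Lobby camera", segment_length="PT30S"),
        video_publishing_options=VideoPublishingOptions(enable_video_preview_image="true"),
        local_media_cache_path="/var/lib/videoanalyzer/tmp/",
        local_media_cache_maximum_size_mi_b="2048",
    )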