# Source code for azure.media.videoanalyzeredge._generated.models._azure_video_analyzerfor_edge_enums

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from enum import Enum, EnumMeta
from six import with_metaclass

class _CaseInsensitiveEnumMeta(EnumMeta):
    def __getitem__(self, name):
        return super().__getitem__(name.upper())

    def __getattr__(cls, name):
        """Return the enum member matching `name`
        We use __getattr__ instead of descriptors or inserting into the enum
        class' __dict__ in order to support `name` and `value` being both
        properties for enum members (which live in the class' __dict__) and
        enum members themselves.
        """
        try:
            return cls._member_map_[name.upper()]
        except KeyError:
            raise AttributeError(name)


class GrpcExtensionDataTransferMode(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    """Data transfer mode: embedded or sharedMemory."""

    #: Media samples are embedded into the gRPC messages. This mode is less efficient but it
    #: requires a simpler implementation and can be used with plugins which are not on the same
    #: node as the Video Analyzer module.
    EMBEDDED = "embedded"
    #: Media samples are made available through shared memory. This mode enables efficient data
    #: transfers but it requires that the extension plugin be co-located on the same node and
    #: sharing the same shared memory space.
    SHARED_MEMORY = "sharedMemory"
class ImageFormatRawPixelFormat(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    """Pixel format to be applied to the raw image."""

    #: Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples).
    YUV420_P = "yuv420p"
    #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian.
    RGB565_BE = "rgb565be"
    #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian.
    RGB565_LE = "rgb565le"
    #: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian, X=unused/undefined.
    RGB555_BE = "rgb555be"
    #: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined.
    RGB555_LE = "rgb555le"
    #: Packed RGB 8:8:8, 24bpp, RGBRGB.
    RGB24 = "rgb24"
    #: Packed RGB 8:8:8, 24bpp, BGRBGR.
    BGR24 = "bgr24"
    #: Packed ARGB 8:8:8:8, 32bpp, ARGBARGB.
    ARGB = "argb"
    #: Packed RGBA 8:8:8:8, 32bpp, RGBARGBA.
    RGBA = "rgba"
    #: Packed ABGR 8:8:8:8, 32bpp, ABGRABGR.
    ABGR = "abgr"
    #: Packed BGRA 8:8:8:8, 32bpp, BGRABGRA.
    BGRA = "bgra"
class ImageScaleMode(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    """Describes the image scaling mode to be applied. Default mode is 'pad'."""

    #: Preserves the same aspect ratio as the input image. If only one image dimension is
    #: provided, the second dimension is calculated based on the input image aspect ratio. When 2
    #: dimensions are provided, the image is resized to fit the most constraining dimension,
    #: considering the input image size and aspect ratio.
    PRESERVE_ASPECT_RATIO = "preserveAspectRatio"
    #: Pads the image with black horizontal stripes (letterbox) or black vertical stripes
    #: (pillar-box) so the image is resized to the specified dimensions while not altering the
    #: content aspect ratio.
    PAD = "pad"
    #: Stretches the original image so it is resized to the specified dimensions.
    STRETCH = "stretch"
class LivePipelineState(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    """Current pipeline state (read-only)."""

    #: The live pipeline is idle and not processing media.
    INACTIVE = "inactive"
    #: The live pipeline is transitioning into the active state.
    ACTIVATING = "activating"
    #: The live pipeline is active and able to process media. If your data source is not
    #: available, for instance, if your RTSP camera is powered off or unreachable, the pipeline
    #: will still be active and periodically retrying the connection. Your Azure subscription will
    #: be billed for the duration in which the live pipeline is in the active state.
    ACTIVE = "active"
    #: The live pipeline is transitioning into the inactive state.
    DEACTIVATING = "deactivating"
class MotionDetectionSensitivity(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    """Motion detection sensitivity: low, medium, high."""

    #: Low sensitivity.
    LOW = "low"
    #: Medium sensitivity.
    MEDIUM = "medium"
    #: High sensitivity.
    HIGH = "high"
class ObjectTrackingAccuracy(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    """Object tracker accuracy: low, medium, high. Higher accuracy leads to higher CPU consumption
    in average.
    """

    #: Low accuracy.
    LOW = "low"
    #: Medium accuracy.
    MEDIUM = "medium"
    #: High accuracy.
    HIGH = "high"
class OutputSelectorOperator(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    """The operator to compare properties by."""

    #: The property is of the type defined by value.
    #: (Member is named IS_ENUM because "is" is a Python keyword.)
    IS_ENUM = "is"
    #: The property is not of the type defined by value.
    IS_NOT = "isNot"
class OutputSelectorProperty(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    """The property of the data stream to be used as the selection criteria."""

    #: The stream's MIME type or subtype: audio, video or application.
    MEDIA_TYPE = "mediaType"
class ParameterType(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    """Type of the parameter."""

    #: The parameter's value is a string.
    STRING = "string"
    #: The parameter's value is a string that holds sensitive information.
    SECRET_STRING = "secretString"
    #: The parameter's value is a 32-bit signed integer.
    INT = "int"
    #: The parameter's value is a 64-bit double-precision floating point.
    DOUBLE = "double"
    #: The parameter's value is a boolean value that is either true or false.
    BOOL = "bool"
class RtspTransport(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    """Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When using TCP, the
    RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the RTSP messages are
    exchanged through long lived HTTP connections, and the RTP packages are interleaved in the
    HTTP connections alongside the RTSP messages.
    """

    #: HTTP transport. RTSP messages are exchanged over long running HTTP requests and RTP packets
    #: are interleaved within the HTTP channel.
    HTTP = "http"
    #: TCP transport. RTSP is used directly over TCP and RTP packets are interleaved within the
    #: TCP channel.
    TCP = "tcp"
class SpatialAnalysisOperationFocus(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    """The operation focus type."""

    #: The center of the object.
    CENTER = "center"
    #: The bottom center of the object.
    BOTTOM_CENTER = "bottomCenter"
    #: The footprint.
    FOOTPRINT = "footprint"
class SpatialAnalysisPersonCountEventTrigger(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    """The event trigger type."""

    #: Event trigger.
    EVENT = "event"
    #: Interval trigger.
    INTERVAL = "interval"
class SpatialAnalysisPersonDistanceEventTrigger(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    """The event trigger type."""

    #: Event trigger.
    EVENT = "event"
    #: Interval trigger.
    INTERVAL = "interval"
class SpatialAnalysisPersonZoneCrossingEventType(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    """The event type."""

    #: Zone crossing event type.
    ZONE_CROSSING = "zoneCrossing"
    #: Zone dwell time event type.
    ZONE_DWELL_TIME = "zoneDwellTime"