Source code for azure.developer.loadtesting.aio.operations._operations

# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
import sys
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload

from azure.core.exceptions import (
    ClientAuthenticationError,
    HttpResponseError,
    ResourceExistsError,
    ResourceNotFoundError,
    map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict

from ...operations._operations import (
    build_app_component_create_or_update_app_components_request,
    build_app_component_delete_app_components_request,
    build_app_component_get_app_component_by_name_request,
    build_app_component_get_app_component_request,
    build_server_metrics_create_or_update_server_metrics_config_request,
    build_server_metrics_delete_server_metrics_config_request,
    build_server_metrics_get_server_default_metrics_config_request,
    build_server_metrics_get_server_metrics_config_by_name_request,
    build_server_metrics_get_server_metrics_config_request,
    build_server_metrics_list_supported_resource_types_request,
    build_test_create_or_update_test_request,
    build_test_delete_load_test_request,
    build_test_delete_test_file_request,
    build_test_get_load_test_request,
    build_test_get_test_file_request,
    build_test_list_load_test_search_request,
    build_test_list_test_files_request,
    build_test_run_create_or_update_test_request,
    build_test_run_delete_test_run_request,
    build_test_run_get_test_run_client_metrics_filters_request,
    build_test_run_get_test_run_client_metrics_request,
    build_test_run_get_test_run_file_request,
    build_test_run_get_test_run_request,
    build_test_run_list_test_runs_request,
    build_test_run_stop_test_run_request,
)
from .._vendor import raise_if_not_implemented

if sys.version_info >= (3, 9):
    from collections.abc import MutableMapping
else:
    from typing import MutableMapping  # type: ignore  # pylint: disable=ungrouped-imports
JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]


class AppComponentOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.developer.loadtesting.aio.LoadTestingClient`'s
        :attr:`app_component` attribute.
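
    A minimal access sketch (the endpoint and credential values below are placeholders):

    .. code-block:: python

        from azure.developer.loadtesting.aio import LoadTestingClient
        from azure.identity.aio import DefaultAzureCredential

        client = LoadTestingClient(
            endpoint="<your-load-testing-endpoint>",
            credential=DefaultAzureCredential(),
        )
        app_component_operations = client.app_component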
    """

    def __init__(self, *args, **kwargs) -> None:
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @overload
    async def create_or_update_app_components(
        self, name: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any
    ) -> JSON:
        """Associate an App Component (Azure resource) to a test or test run.

        Associate an App Component (Azure resource) to a test or test run.

        :param name: Unique name of the App Component; must contain only URL-safe characters
         matching ^[a-z0-9_-]*$. Required.
        :type name: str
        :param body: App Component model. Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/merge-patch+json".
        :paramtype content_type: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                body = {
                    "value": {
                        "str": {
                            "resourceId": "str",  # Fully qualified resource Id e.g
                              subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}.
                              Required.
                            "resourceName": "str",  # Azure resource name. Required.
                            "resourceType": "str",  # Azure resource type. Required.
                            "displayName": "str",  # Optional. Azure resource display
                              name.
                            "kind": "str",  # Optional. Kind of Azure resource type.
                            "resourceGroup": "str",  # Optional. Resource group name of
                              the Azure resource.
                            "subscriptionId": "str"  # Optional. Subscription Id of the
                              Azure resource.
                        }
                    },
                    "name": "str",  # Optional. AppComponent name.
                    "resourceId": "str",  # Optional. Azure Load Testing resource Id.
                    "testId": "str",  # Optional. [Required, if testRunId is not given] Load test
                      unique identifier.
                    "testRunId": "str"  # Optional. [Required if testId is not given] Load test
                      run unique identifier.
                }

                # response body for status code(s): 200, 201
                response == {
                    "value": {
                        "str": {
                            "resourceId": "str",  # Fully qualified resource Id e.g
                              subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}.
                              Required.
                            "resourceName": "str",  # Azure resource name. Required.
                            "resourceType": "str",  # Azure resource type. Required.
                            "displayName": "str",  # Optional. Azure resource display
                              name.
                            "kind": "str",  # Optional. Kind of Azure resource type.
                            "resourceGroup": "str",  # Optional. Resource group name of
                              the Azure resource.
                            "subscriptionId": "str"  # Optional. Subscription Id of the
                              Azure resource.
                        }
                    },
                    "name": "str",  # Optional. AppComponent name.
                    "resourceId": "str",  # Optional. Azure Load Testing resource Id.
                    "testId": "str",  # Optional. [Required, if testRunId is not given] Load test
                      unique identifier.
                    "testRunId": "str"  # Optional. [Required if testId is not given] Load test
                      run unique identifier.
                }
        """

    @overload
    async def create_or_update_app_components(
        self, name: str, body: IO, *, content_type: str = "application/merge-patch+json", **kwargs: Any
    ) -> JSON:
        """Associate an App Component (Azure resource) to a test or test run.

        Associate an App Component (Azure resource) to a test or test run.

        :param name: Unique name of the App Component; must contain only URL-safe characters
         matching ^[a-z0-9_-]*$. Required.
        :type name: str
        :param body: App Component model. Required.
        :type body: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/merge-patch+json".
        :paramtype content_type: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200, 201
                response == {
                    "value": {
                        "str": {
                            "resourceId": "str",  # Fully qualified resource Id e.g
                              subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}.
                              Required.
                            "resourceName": "str",  # Azure resource name. Required.
                            "resourceType": "str",  # Azure resource type. Required.
                            "displayName": "str",  # Optional. Azure resource display
                              name.
                            "kind": "str",  # Optional. Kind of Azure resource type.
                            "resourceGroup": "str",  # Optional. Resource group name of
                              the Azure resource.
                            "subscriptionId": "str"  # Optional. Subscription Id of the
                              Azure resource.
                        }
                    },
                    "name": "str",  # Optional. AppComponent name.
                    "resourceId": "str",  # Optional. Azure Load Testing resource Id.
                    "testId": "str",  # Optional. [Required, if testRunId is not given] Load test
                      unique identifier.
                    "testRunId": "str"  # Optional. [Required if testId is not given] Load test
                      run unique identifier.
                }
        """

    @distributed_trace_async
    async def create_or_update_app_components(self, name: str, body: Union[JSON, IO], **kwargs: Any) -> JSON:
        """Associate an App Component (Azure resource) to a test or test run.

        Associate an App Component (Azure resource) to a test or test run.

        :param name: Unique name of the App Component; must contain only URL-safe characters
         matching ^[a-z0-9_-]*$. Required.
        :type name: str
        :param body: App Component model. Is either a model type or an IO type. Required.
        :type body: JSON or IO
        :keyword content_type: Body Parameter content-type. Known values are:
         'application/merge-patch+json'. Default value is None.
        :paramtype content_type: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200, 201
                response == {
                    "value": {
                        "str": {
                            "resourceId": "str",  # Fully qualified resource Id e.g
                              subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}.
                              Required.
                            "resourceName": "str",  # Azure resource name. Required.
                            "resourceType": "str",  # Azure resource type. Required.
                            "displayName": "str",  # Optional. Azure resource display
                              name.
                            "kind": "str",  # Optional. Kind of Azure resource type.
                            "resourceGroup": "str",  # Optional. Resource group name of
                              the Azure resource.
                            "subscriptionId": "str"  # Optional. Subscription Id of the
                              Azure resource.
                        }
                    },
                    "name": "str",  # Optional. AppComponent name.
                    "resourceId": "str",  # Optional. Azure Load Testing resource Id.
                    "testId": "str",  # Optional. [Required, if testRunId is not given] Load test
                      unique identifier.
                    "testRunId": "str"  # Optional. [Required if testId is not given] Load test
                      run unique identifier.
                }
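
                # Usage sketch with illustrative values: the endpoint, credential, resource
                # IDs, and names below are placeholders you would replace.
                from azure.developer.loadtesting.aio import LoadTestingClient
                from azure.identity.aio import DefaultAzureCredential

                async def create_app_component_sample():
                    client = LoadTestingClient(
                        endpoint="<your-load-testing-endpoint>",
                        credential=DefaultAzureCredential(),
                    )
                    result = await client.app_component.create_or_update_app_components(
                        "my-app-component",
                        {
                            "testId": "my-load-test",
                            "value": {
                                "<azure-resource-id>": {
                                    "resourceId": "<azure-resource-id>",
                                    "resourceName": "my-app-service",
                                    "resourceType": "Microsoft.Web/sites",
                                }
                            },
                        },
                    )
                    await client.close()
                    return result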
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        content_type = content_type or "application/merge-patch+json"
        _json = None
        _content = None
        if isinstance(body, (IO, bytes)):
            _content = body
        else:
            _json = body

        request = build_app_component_create_or_update_app_components_request(
            name=name,
            content_type=content_type,
            api_version=self._config.api_version,
            json=_json,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.status_code == 200:
            if response.content:
                deserialized = response.json()
            else:
                deserialized = None

        if response.status_code == 201:
            if response.content:
                deserialized = response.json()
            else:
                deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)

    @distributed_trace_async
    async def delete_app_components(  # pylint: disable=inconsistent-return-statements
        self, name: str, **kwargs: Any
    ) -> None:
        """Delete an App Component.

        Delete an App Component.

        :param name: Unique name of the App Component; must contain only URL-safe characters
         matching ^[a-z0-9_-]*$. Required.
        :type name: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
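
        Example:
            .. code-block:: python

                # Usage sketch: "client" is assumed to be an authenticated
                # azure.developer.loadtesting.aio.LoadTestingClient, and the component name
                # below is illustrative.
                await client.app_component.delete_app_components("my-app-component")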
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[None]

        request = build_app_component_delete_app_components_request(
            name=name,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            return cls(pipeline_response, None, {})

    @distributed_trace_async
    async def get_app_component_by_name(self, name: str, **kwargs: Any) -> JSON:
        """Get App Component details by App Component name.

        Get App Component details by App Component name.

        :param name: Unique name of the App Component; must contain only URL-safe characters
         matching ^[a-z0-9_-]*$. Required.
        :type name: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "value": {
                        "str": {
                            "resourceId": "str",  # Fully qualified resource Id e.g
                              subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}.
                              Required.
                            "resourceName": "str",  # Azure resource name. Required.
                            "resourceType": "str",  # Azure resource type. Required.
                            "displayName": "str",  # Optional. Azure resource display
                              name.
                            "kind": "str",  # Optional. Kind of Azure resource type.
                            "resourceGroup": "str",  # Optional. Resource group name of
                              the Azure resource.
                            "subscriptionId": "str"  # Optional. Subscription Id of the
                              Azure resource.
                        }
                    },
                    "name": "str",  # Optional. AppComponent name.
                    "resourceId": "str",  # Optional. Azure Load Testing resource Id.
                    "testId": "str",  # Optional. [Required, if testRunId is not given] Load test
                      unique identifier.
                    "testRunId": "str"  # Optional. [Required if testId is not given] Load test
                      run unique identifier.
                }
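
                # Usage sketch: "client" is assumed to be an authenticated
                # azure.developer.loadtesting.aio.LoadTestingClient; the component name is
                # illustrative.
                app_component = await client.app_component.get_app_component_by_name(
                    "my-app-component"
                )
                resources = app_component["value"]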
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        request = build_app_component_get_app_component_by_name_request(
            name=name,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)

    @distributed_trace_async
    async def get_app_component(
        self, *, test_run_id: Optional[str] = None, test_id: Optional[str] = None, **kwargs: Any
    ) -> JSON:
        """Get App Components for a test or a test run by its name.

        Get App Components for a test or a test run by its name.

        :keyword test_run_id: Test run Id. Required if test_id is not provided. Default value is
         None.
        :paramtype test_run_id: str
        :keyword test_id: Unique name of the load test; must contain only URL-safe characters
         matching ^[a-z0-9_-]*$. Required if test_run_id is not provided. Default value is None.
        :paramtype test_id: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "value": {
                        "str": {
                            "resourceId": "str",  # Fully qualified resource Id e.g
                              subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}.
                              Required.
                            "resourceName": "str",  # Azure resource name. Required.
                            "resourceType": "str",  # Azure resource type. Required.
                            "displayName": "str",  # Optional. Azure resource display
                              name.
                            "kind": "str",  # Optional. Kind of Azure resource type.
                            "resourceGroup": "str",  # Optional. Resource group name of
                              the Azure resource.
                            "subscriptionId": "str"  # Optional. Subscription Id of the
                              Azure resource.
                        }
                    },
                    "name": "str",  # Optional. AppComponent name.
                    "resourceId": "str",  # Optional. Azure Load Testing resource Id.
                    "testId": "str",  # Optional. [Required, if testRunId is not given] Load test
                      unique identifier.
                    "testRunId": "str"  # Optional. [Required if testId is not given] Load test
                      run unique identifier.
                }
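
                # Usage sketch: "client" is assumed to be an authenticated
                # azure.developer.loadtesting.aio.LoadTestingClient; pass either
                # test_run_id or test_id (both values here are illustrative).
                components_for_run = await client.app_component.get_app_component(
                    test_run_id="my-test-run-id"
                )
                components_for_test = await client.app_component.get_app_component(
                    test_id="my-load-test"
                )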
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        request = build_app_component_get_app_component_request(
            test_run_id=test_run_id,
            test_id=test_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)


class ServerMetricsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.developer.loadtesting.aio.LoadTestingClient`'s
        :attr:`server_metrics` attribute.
    """

    def __init__(self, *args, **kwargs) -> None:
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @overload
    async def create_or_update_server_metrics_config(
        self, name: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any
    ) -> JSON:
        """Configure server metrics for a test or test run.

        Configure server metrics for a test or test run.

        :param name: Unique name for server metrics; must contain only URL-safe characters
         matching ^[a-z0-9_-]*$. Required.
        :type name: str
        :param body: Server metrics configuration model. Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/merge-patch+json".
        :paramtype content_type: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                body = {
                    "metrics": {
                        "str": {
                            "aggregation": "str",  # Metric aggregation. Required.
                            "metricnamespace": "str",  # Metric name space. Required.
                            "name": {
                                "localizedValue": "str",  # Metric localized name. Required.
                                "value": "str"  # Metric name value. Required.
                            },
                            "resourceId": "str",  # Azure resource Id. Required.
                            "resourceType": "str",  # Azure resource type. Required.
                            "displayDescription": "str",  # Optional. Metric description.
                            "id": "str",  # Optional. Unique identifier for metric.
                            "unit": "str"  # Optional. Metric unit.
                        }
                    },
                    "name": "str",  # Optional. Server metrics config name.
                    "testId": "str",  # Optional. [Required, if testRunId is not given] Load test
                      unique identifier.
                    "testRunId": "str"  # Optional. [Required, if testId is not given] Load test
                      run unique identifier.
                }

                # response body for status code(s): 200, 201
                response == {
                    "metrics": {
                        "str": {
                            "aggregation": "str",  # Metric aggregation. Required.
                            "metricnamespace": "str",  # Metric name space. Required.
                            "name": {
                                "localizedValue": "str",  # Metric localized name. Required.
                                "value": "str"  # Metric name value. Required.
                            },
                            "resourceId": "str",  # Azure resource Id. Required.
                            "resourceType": "str",  # Azure resource type. Required.
                            "displayDescription": "str",  # Optional. Metric description.
                            "id": "str",  # Optional. Unique identifier for metric.
                            "unit": "str"  # Optional. Metric unit.
                        }
                    },
                    "name": "str",  # Optional. Server metrics config name.
                    "testId": "str",  # Optional. [Required, if testRunId is not given] Load test
                      unique identifier.
                    "testRunId": "str"  # Optional. [Required, if testId is not given] Load test
                      run unique identifier.
                }
        """

    @overload
    async def create_or_update_server_metrics_config(
        self, name: str, body: IO, *, content_type: str = "application/merge-patch+json", **kwargs: Any
    ) -> JSON:
        """Configure server metrics for a test or test run.

        Configure server metrics for a test or test run.

        :param name: Unique name for server metrics; must contain only URL-safe characters
         matching ^[a-z0-9_-]*$. Required.
        :type name: str
        :param body: Server metrics configuration model. Required.
        :type body: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/merge-patch+json".
        :paramtype content_type: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200, 201
                response == {
                    "metrics": {
                        "str": {
                            "aggregation": "str",  # Metric aggregation. Required.
                            "metricnamespace": "str",  # Metric name space. Required.
                            "name": {
                                "localizedValue": "str",  # Metric localized name. Required.
                                "value": "str"  # Metric name value. Required.
                            },
                            "resourceId": "str",  # Azure resource Id. Required.
                            "resourceType": "str",  # Azure resource type. Required.
                            "displayDescription": "str",  # Optional. Metric description.
                            "id": "str",  # Optional. Unique identifier for metric.
                            "unit": "str"  # Optional. Metric unit.
                        }
                    },
                    "name": "str",  # Optional. Server metrics config name.
                    "testId": "str",  # Optional. [Required, if testRunId is not given] Load test
                      unique identifier.
                    "testRunId": "str"  # Optional. [Required, if testId is not given] Load test
                      run unique identifier.
                }
        """

    @distributed_trace_async
    async def create_or_update_server_metrics_config(self, name: str, body: Union[JSON, IO], **kwargs: Any) -> JSON:
        """Configure server metrics for a test or test run.

        Configure server metrics for a test or test run.

        :param name: Unique name for server metrics; must contain only URL-safe characters
         matching ^[a-z0-9_-]*$. Required.
        :type name: str
        :param body: Server metrics configuration model. Is either a model type or an IO type.
         Required.
        :type body: JSON or IO
        :keyword content_type: Body Parameter content-type. Known values are:
         'application/merge-patch+json'. Default value is None.
        :paramtype content_type: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200, 201
                response == {
                    "metrics": {
                        "str": {
                            "aggregation": "str",  # Metric aggregation. Required.
                            "metricnamespace": "str",  # Metric name space. Required.
                            "name": {
                                "localizedValue": "str",  # Metric localized name. Required.
                                "value": "str"  # Metric name value. Required.
                            },
                            "resourceId": "str",  # Azure resource Id. Required.
                            "resourceType": "str",  # Azure resource type. Required.
                            "displayDescription": "str",  # Optional. Metric description.
                            "id": "str",  # Optional. Unique identifier for metric.
                            "unit": "str"  # Optional. Metric unit.
                        }
                    },
                    "name": "str",  # Optional. Server metrics config name.
                    "testId": "str",  # Optional. [Required, if testRunId is not given] Load test
                      unique identifier.
                    "testRunId": "str"  # Optional. [Required, if testId is not given] Load test
                      run unique identifier.
                }
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        content_type = content_type or "application/merge-patch+json"
        _json = None
        _content = None
        if isinstance(body, (IO, bytes)):
            _content = body
        else:
            _json = body

        request = build_server_metrics_create_or_update_server_metrics_config_request(
            name=name,
            content_type=content_type,
            api_version=self._config.api_version,
            json=_json,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.status_code == 200:
            if response.content:
                deserialized = response.json()
            else:
                deserialized = None

        if response.status_code == 201:
            if response.content:
                deserialized = response.json()
            else:
                deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)

    @distributed_trace_async
    async def get_server_metrics_config_by_name(self, name: str, **kwargs: Any) -> JSON:
        """Get server metrics configuration by its name.

        Get server metrics configuration by its name.

        :param name: Unique name for server metrics; must contain only URL-safe characters
         matching ^[a-z0-9_-]*$. Required.
        :type name: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "metrics": {
                        "str": {
                            "aggregation": "str",  # Metric aggregation. Required.
                            "metricnamespace": "str",  # Metric name space. Required.
                            "name": {
                                "localizedValue": "str",  # Metric localized name. Required.
                                "value": "str"  # Metric name value. Required.
                            },
                            "resourceId": "str",  # Azure resource Id. Required.
                            "resourceType": "str",  # Azure resource type. Required.
                            "displayDescription": "str",  # Optional. Metric description.
                            "id": "str",  # Optional. Unique identifier for metric.
                            "unit": "str"  # Optional. Metric unit.
                        }
                    },
                    "name": "str",  # Optional. Server metrics config name.
                    "testId": "str",  # Optional. [Required, if testRunId is not given] Load test
                      unique identifier.
                    "testRunId": "str"  # Optional. [Required, if testId is not given] Load test
                      run unique identifier.
                }
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        request = build_server_metrics_get_server_metrics_config_by_name_request(
            name=name,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)

    @distributed_trace_async
    async def delete_server_metrics_config(  # pylint: disable=inconsistent-return-statements
        self, name: str, **kwargs: Any
    ) -> None:
        """Delete server metrics configuration by its name.

        Delete server metrics configuration by its name.

        :param name: Unique name for server metrics; must contain only URL-safe characters
         matching ^[a-z0-9_-]*$. Required.
        :type name: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[None]

        request = build_server_metrics_delete_server_metrics_config_request(
            name=name,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            return cls(pipeline_response, None, {})

    @distributed_trace_async
    async def get_server_metrics_config(
        self, *, test_run_id: Optional[str] = None, test_id: Optional[str] = None, **kwargs: Any
    ) -> JSON:
        """Get the server metrics configuration for a test or a test run.

        Get the server metrics configuration for a test or a test run.

        :keyword test_run_id: Test run Id. Required if test_id is not provided. Default value is
         None.
        :paramtype test_run_id: str
        :keyword test_id: Unique name of the load test; must contain only URL-safe characters
         matching ^[a-z0-9_-]*$. Required if test_run_id is not provided. Default value is None.
        :paramtype test_id: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "metrics": {
                        "str": {
                            "aggregation": "str",  # Metric aggregation. Required.
                            "metricnamespace": "str",  # Metric name space. Required.
                            "name": {
                                "localizedValue": "str",  # Metric localized name. Required.
                                "value": "str"  # Metric name value. Required.
                            },
                            "resourceId": "str",  # Azure resource Id. Required.
                            "resourceType": "str",  # Azure resource type. Required.
                            "displayDescription": "str",  # Optional. Metric description.
                            "id": "str",  # Optional. Unique identifier for metric.
                            "unit": "str"  # Optional. Metric unit.
                        }
                    },
                    "name": "str",  # Optional. Server metrics config name.
                    "testId": "str",  # Optional. [Required, if testRunId is not given] Load test
                      unique identifier.
                    "testRunId": "str"  # Optional. [Required, if testId is not given] Load test
                      run unique identifier.
                }
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        request = build_server_metrics_get_server_metrics_config_request(
            test_run_id=test_run_id,
            test_id=test_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)

    @distributed_trace_async
    async def get_server_default_metrics_config(self, **kwargs: Any) -> JSON:
        """Get all default server metrics configuration for supported resource types.

        Get all default server metrics configuration for supported resource types.

        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "defaultMetrics": {
                        "str": [
                            {
                                "aggregation": "str",  # Optional. Default metrics map
                                  {resourceType : list of metrics config} (Refer for metrics structure:
                                  https://docs.microsoft.com/en-us/rest/api/monitor/metric-definitions/list#metricdefinition).
                                "displayDescription": "str",  # Optional. Default metrics map
                                  {resourceType : list of metrics config} (Refer for metrics structure:
                                  https://docs.microsoft.com/en-us/rest/api/monitor/metric-definitions/list#metricdefinition).
                                "metricnamespace": "str",  # Optional. Default metrics map
                                  {resourceType : list of metrics config} (Refer for metrics structure:
                                  https://docs.microsoft.com/en-us/rest/api/monitor/metric-definitions/list#metricdefinition).
                                "name": {
                                    "localizedValue": "str",  # Optional. Default metrics map
                                      {resourceType : list of metrics config} (Refer for metrics structure:
                                      https://docs.microsoft.com/en-us/rest/api/monitor/metric-definitions/list#metricdefinition).
                                    "value": "str"  # Optional. Default metrics map
                                      {resourceType : list of metrics config} (Refer for metrics structure:
                                      https://docs.microsoft.com/en-us/rest/api/monitor/metric-definitions/list#metricdefinition).
                                },
                                "unit": "str"  # Optional. Default metrics map
                                  {resourceType : list of metrics config} (Refer for metrics structure:
                                  https://docs.microsoft.com/en-us/rest/api/monitor/metric-definitions/list#metricdefinition).
                            }
                        ]
                    }
                }
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        request = build_server_metrics_get_server_default_metrics_config_request(
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)

    @distributed_trace_async
    async def list_supported_resource_types(self, **kwargs: Any) -> JSON:
        """Get all supported resource types for App Components (Azure resource types).

        Get all supported resource types for App Components (Azure resource types).

        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "value": [
                        "str"  # Optional.
                    ]
                }
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        request = build_server_metrics_list_supported_resource_types_request(
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)
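
    # Usage sketch for the server_metrics operation group ("client" is assumed to be an
    # authenticated azure.developer.loadtesting.aio.LoadTestingClient; names, resource IDs,
    # and metric values are illustrative):
    #
    #     config = await client.server_metrics.create_or_update_server_metrics_config(
    #         "my-metrics-config",
    #         {
    #             "testId": "my-load-test",
    #             "metrics": {
    #                 "metric-1": {
    #                     "resourceId": "<azure-resource-id>",
    #                     "resourceType": "Microsoft.Web/sites",
    #                     "metricnamespace": "Microsoft.Web/sites",
    #                     "name": {"value": "Requests", "localizedValue": "Requests"},
    #                     "aggregation": "Total",
    #                 }
    #             },
    #         },
    #     )
    #     supported = await client.server_metrics.list_supported_resource_types()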
class TestOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.developer.loadtesting.aio.LoadTestingClient`'s :attr:`test` attribute. """ def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client = input_args.pop(0) if input_args else kwargs.pop("client") self._config = input_args.pop(0) if input_args else kwargs.pop("config") self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") raise_if_not_implemented( self.__class__, [ "upload_test_file", ], ) @overload async def create_or_update_test( self, test_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any ) -> JSON: """Create a new test or Update an existing test. Create a new test or Update an existing test. :param test_id: Unique name for load test, must be a valid URL character ^[a-z0-9_-]*$. Required. :type test_id: str :param body: Load test model. Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/merge-patch+json". :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python # JSON input template you can fill out and use as your body input. body = { "createdBy": "str", # Optional. The user that created the test model. "createdDateTime": "2020-02-20 00:00:00", # Optional. The created DateTime(ISO 8601 literal format) of the test model. "description": "str", # Optional. The test description. "displayName": "str", # Optional. Display name of a test. "environmentVariables": { "str": "str" # Optional. Environment variables which are defined as a set of <name,value> pairs. }, "inputArtifacts": { "additionalUrls": [ { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } ], "configUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "inputArtifactsZipFileurl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "testScriptUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. 
Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "userPropUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } }, "keyvaultReferenceIdentityId": "str", # Optional. Resource Id of the managed identity referencing the Key vault. "keyvaultReferenceIdentityType": "str", # Optional. Type of the managed identity referencing the Key vault. "lastModifiedBy": "str", # Optional. The user that last modified the test model. "lastModifiedDateTime": "2020-02-20 00:00:00", # Optional. The last Modified DateTime(ISO 8601 literal format) of the test model. "loadTestConfig": { "engineInstances": 0, # Optional. The number of engine instances to execute load test. Supported values are in range of 1-45. Required for creating a new test. "splitAllCSVs": bool # Optional. Whether all the input CSV files should be split evenly across all engines. }, "passFailCriteria": { "passFailMetrics": { "str": { "action": "str", # Optional. Either "u2018stop"u2019 or "u2018continue"u2019 after the threshold is met. Default is "u2018continue"u2019. "actualValue": 0.0, # Optional. The actual value of the client metric for the test run. "aggregate": "str", # Optional. The aggregation function to be applied on the client metric. Allowed functions - "u2018percentage"u2019 - for error metric ,"u2018avg"u2019, "u2018p50"u2019, "u2018p90"u2019, "u2018p95"u2019, "u2018p99"u2019, "u2018min"u2019, "u2018max"u2019 - for response_time_ms and latency metric, "u2018avg"u2019 - for requests_per_sec, "u2018count"u2019 - for requests. "clientmetric": "str", # Optional. The client metric on which the criteria should be applied. Allowed values - "u2018response_time_ms"u2019 , "u2018latency"u2019, "u2018error"u2019, "u2018requests"u2019, "u2018requests_per_sec"u2019. "condition": "str", # Optional. The comparison operator. Supported types "u2018>"u2019. "requestName": "str", # Optional. Request name for which the Pass fail criteria has to be applied. "result": "str", # Optional. Outcome of the test run. possible outcome - "u2018passed"u2019 , "u2018failed"u2019 , "u2018undetermined"u2019. "value": 0.0 # Optional. The value to compare with the client metric. Allowed values - "u2018error : [0.0 , 100.0] unit- % "u2019, response_time_ms and latency : any integer value unit- ms. } } }, "resourceId": "str", # Optional. Fully qualified resource Id e.g /subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}. "secrets": { "str": { "type": "str", # Optional. Type of secret. eg. AKV_SECRET_URI/SECRET_VALUE. "value": "str" # Optional. The value of the secret, of type AKV_SECRET_URI or SECRET_VALUE. } }, "subnetId": "str", # Optional. Subnet ID on which the load test instances should run. "testId": "str" # Optional. Unique test name as identifier. } # response body for status code(s): 200, 201 response == { "createdBy": "str", # Optional. The user that created the test model. 
"createdDateTime": "2020-02-20 00:00:00", # Optional. The created DateTime(ISO 8601 literal format) of the test model. "description": "str", # Optional. The test description. "displayName": "str", # Optional. Display name of a test. "environmentVariables": { "str": "str" # Optional. Environment variables which are defined as a set of <name,value> pairs. }, "inputArtifacts": { "additionalUrls": [ { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } ], "configUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "inputArtifactsZipFileurl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "testScriptUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "userPropUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } }, "keyvaultReferenceIdentityId": "str", # Optional. Resource Id of the managed identity referencing the Key vault. "keyvaultReferenceIdentityType": "str", # Optional. Type of the managed identity referencing the Key vault. "lastModifiedBy": "str", # Optional. The user that last modified the test model. "lastModifiedDateTime": "2020-02-20 00:00:00", # Optional. The last Modified DateTime(ISO 8601 literal format) of the test model. "loadTestConfig": { "engineInstances": 0, # Optional. The number of engine instances to execute load test. Supported values are in range of 1-45. Required for creating a new test. "splitAllCSVs": bool # Optional. Whether all the input CSV files should be split evenly across all engines. }, "passFailCriteria": { "passFailMetrics": { "str": { "action": "str", # Optional. 
Either "u2018stop"u2019 or "u2018continue"u2019 after the threshold is met. Default is "u2018continue"u2019. "actualValue": 0.0, # Optional. The actual value of the client metric for the test run. "aggregate": "str", # Optional. The aggregation function to be applied on the client metric. Allowed functions - "u2018percentage"u2019 - for error metric ,"u2018avg"u2019, "u2018p50"u2019, "u2018p90"u2019, "u2018p95"u2019, "u2018p99"u2019, "u2018min"u2019, "u2018max"u2019 - for response_time_ms and latency metric, "u2018avg"u2019 - for requests_per_sec, "u2018count"u2019 - for requests. "clientmetric": "str", # Optional. The client metric on which the criteria should be applied. Allowed values - "u2018response_time_ms"u2019 , "u2018latency"u2019, "u2018error"u2019, "u2018requests"u2019, "u2018requests_per_sec"u2019. "condition": "str", # Optional. The comparison operator. Supported types "u2018>"u2019. "requestName": "str", # Optional. Request name for which the Pass fail criteria has to be applied. "result": "str", # Optional. Outcome of the test run. possible outcome - "u2018passed"u2019 , "u2018failed"u2019 , "u2018undetermined"u2019. "value": 0.0 # Optional. The value to compare with the client metric. Allowed values - "u2018error : [0.0 , 100.0] unit- % "u2019, response_time_ms and latency : any integer value unit- ms. } } }, "resourceId": "str", # Optional. Fully qualified resource Id e.g /subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}. "secrets": { "str": { "type": "str", # Optional. Type of secret. eg. AKV_SECRET_URI/SECRET_VALUE. "value": "str" # Optional. The value of the secret, of type AKV_SECRET_URI or SECRET_VALUE. } }, "subnetId": "str", # Optional. Subnet ID on which the load test instances should run. "testId": "str" # Optional. Unique test name as identifier. } """ @overload async def create_or_update_test( self, test_id: str, body: IO, *, content_type: str = "application/merge-patch+json", **kwargs: Any ) -> JSON: """Create a new test or Update an existing test. Create a new test or Update an existing test. :param test_id: Unique name for load test, must be a valid URL character ^[a-z0-9_-]*$. Required. :type test_id: str :param body: Load test model. Required. :type body: IO :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/merge-patch+json". :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python # response body for status code(s): 200, 201 response == { "createdBy": "str", # Optional. The user that created the test model. "createdDateTime": "2020-02-20 00:00:00", # Optional. The created DateTime(ISO 8601 literal format) of the test model. "description": "str", # Optional. The test description. "displayName": "str", # Optional. Display name of a test. "environmentVariables": { "str": "str" # Optional. Environment variables which are defined as a set of <name,value> pairs. }, "inputArtifacts": { "additionalUrls": [ { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. 
} ], "configUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "inputArtifactsZipFileurl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "testScriptUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "userPropUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } }, "keyvaultReferenceIdentityId": "str", # Optional. Resource Id of the managed identity referencing the Key vault. "keyvaultReferenceIdentityType": "str", # Optional. Type of the managed identity referencing the Key vault. "lastModifiedBy": "str", # Optional. The user that last modified the test model. "lastModifiedDateTime": "2020-02-20 00:00:00", # Optional. The last Modified DateTime(ISO 8601 literal format) of the test model. "loadTestConfig": { "engineInstances": 0, # Optional. The number of engine instances to execute load test. Supported values are in range of 1-45. Required for creating a new test. "splitAllCSVs": bool # Optional. Whether all the input CSV files should be split evenly across all engines. }, "passFailCriteria": { "passFailMetrics": { "str": { "action": "str", # Optional. Either "u2018stop"u2019 or "u2018continue"u2019 after the threshold is met. Default is "u2018continue"u2019. "actualValue": 0.0, # Optional. The actual value of the client metric for the test run. "aggregate": "str", # Optional. The aggregation function to be applied on the client metric. Allowed functions - "u2018percentage"u2019 - for error metric ,"u2018avg"u2019, "u2018p50"u2019, "u2018p90"u2019, "u2018p95"u2019, "u2018p99"u2019, "u2018min"u2019, "u2018max"u2019 - for response_time_ms and latency metric, "u2018avg"u2019 - for requests_per_sec, "u2018count"u2019 - for requests. "clientmetric": "str", # Optional. The client metric on which the criteria should be applied. Allowed values - "u2018response_time_ms"u2019 , "u2018latency"u2019, "u2018error"u2019, "u2018requests"u2019, "u2018requests_per_sec"u2019. "condition": "str", # Optional. The comparison operator. 
Supported types "u2018>"u2019. "requestName": "str", # Optional. Request name for which the Pass fail criteria has to be applied. "result": "str", # Optional. Outcome of the test run. possible outcome - "u2018passed"u2019 , "u2018failed"u2019 , "u2018undetermined"u2019. "value": 0.0 # Optional. The value to compare with the client metric. Allowed values - "u2018error : [0.0 , 100.0] unit- % "u2019, response_time_ms and latency : any integer value unit- ms. } } }, "resourceId": "str", # Optional. Fully qualified resource Id e.g /subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}. "secrets": { "str": { "type": "str", # Optional. Type of secret. eg. AKV_SECRET_URI/SECRET_VALUE. "value": "str" # Optional. The value of the secret, of type AKV_SECRET_URI or SECRET_VALUE. } }, "subnetId": "str", # Optional. Subnet ID on which the load test instances should run. "testId": "str" # Optional. Unique test name as identifier. } """
[docs] @distributed_trace_async async def create_or_update_test(self, test_id: str, body: Union[JSON, IO], **kwargs: Any) -> JSON: """Create a new test or Update an existing test. Create a new test or Update an existing test. :param test_id: Unique name for load test, must be a valid URL character ^[a-z0-9_-]*$. Required. :type test_id: str :param body: Load test model. Is either a model type or a IO type. Required. :type body: JSON or IO :keyword content_type: Body Parameter content-type. Known values are: 'application/merge-patch+json'. Default value is None. :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python # response body for status code(s): 200, 201 response == { "createdBy": "str", # Optional. The user that created the test model. "createdDateTime": "2020-02-20 00:00:00", # Optional. The created DateTime(ISO 8601 literal format) of the test model. "description": "str", # Optional. The test description. "displayName": "str", # Optional. Display name of a test. "environmentVariables": { "str": "str" # Optional. Environment variables which are defined as a set of <name,value> pairs. }, "inputArtifacts": { "additionalUrls": [ { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } ], "configUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "inputArtifactsZipFileurl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "testScriptUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "userPropUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. 
} }, "keyvaultReferenceIdentityId": "str", # Optional. Resource Id of the managed identity referencing the Key vault. "keyvaultReferenceIdentityType": "str", # Optional. Type of the managed identity referencing the Key vault. "lastModifiedBy": "str", # Optional. The user that last modified the test model. "lastModifiedDateTime": "2020-02-20 00:00:00", # Optional. The last Modified DateTime(ISO 8601 literal format) of the test model. "loadTestConfig": { "engineInstances": 0, # Optional. The number of engine instances to execute load test. Supported values are in range of 1-45. Required for creating a new test. "splitAllCSVs": bool # Optional. Whether all the input CSV files should be split evenly across all engines. }, "passFailCriteria": { "passFailMetrics": { "str": { "action": "str", # Optional. Either "u2018stop"u2019 or "u2018continue"u2019 after the threshold is met. Default is "u2018continue"u2019. "actualValue": 0.0, # Optional. The actual value of the client metric for the test run. "aggregate": "str", # Optional. The aggregation function to be applied on the client metric. Allowed functions - "u2018percentage"u2019 - for error metric ,"u2018avg"u2019, "u2018p50"u2019, "u2018p90"u2019, "u2018p95"u2019, "u2018p99"u2019, "u2018min"u2019, "u2018max"u2019 - for response_time_ms and latency metric, "u2018avg"u2019 - for requests_per_sec, "u2018count"u2019 - for requests. "clientmetric": "str", # Optional. The client metric on which the criteria should be applied. Allowed values - "u2018response_time_ms"u2019 , "u2018latency"u2019, "u2018error"u2019, "u2018requests"u2019, "u2018requests_per_sec"u2019. "condition": "str", # Optional. The comparison operator. Supported types "u2018>"u2019. "requestName": "str", # Optional. Request name for which the Pass fail criteria has to be applied. "result": "str", # Optional. Outcome of the test run. possible outcome - "u2018passed"u2019 , "u2018failed"u2019 , "u2018undetermined"u2019. "value": 0.0 # Optional. The value to compare with the client metric. Allowed values - "u2018error : [0.0 , 100.0] unit- % "u2019, response_time_ms and latency : any integer value unit- ms. } } }, "resourceId": "str", # Optional. Fully qualified resource Id e.g /subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}. "secrets": { "str": { "type": "str", # Optional. Type of secret. eg. AKV_SECRET_URI/SECRET_VALUE. "value": "str" # Optional. The value of the secret, of type AKV_SECRET_URI or SECRET_VALUE. } }, "subnetId": "str", # Optional. Subnet ID on which the load test instances should run. "testId": "str" # Optional. Unique test name as identifier. 
} """ error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop("error_map", {}) or {}) _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] cls = kwargs.pop("cls", None) # type: ClsType[JSON] content_type = content_type or "application/merge-patch+json" _json = None _content = None if isinstance(body, (IO, bytes)): _content = body else: _json = body request = build_test_create_or_update_test_request( test_id=test_id, content_type=content_type, api_version=self._config.api_version, json=_json, content=_content, headers=_headers, params=_params, ) path_format_arguments = { "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), } request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) if response.status_code == 200: if response.content: deserialized = response.json() else: deserialized = None if response.status_code == 201: if response.content: deserialized = response.json() else: deserialized = None if cls: return cls(pipeline_response, cast(JSON, deserialized), {}) return cast(JSON, deserialized)
    @distributed_trace_async
    async def delete_load_test(  # pylint: disable=inconsistent-return-statements
        self, test_id: str, **kwargs: Any
    ) -> None:
        """Delete a test by its name.

        Delete a test by its name.

        :param test_id: Unique name for load test, must be a valid URL character ^[a-z0-9_-]*$.
         Required.
        :type test_id: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[None]

        request = build_test_delete_load_test_request(
            test_id=test_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            return cls(pipeline_response, None, {})
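    # Usage sketch (illustrative only): removing a test by name with delete_load_test; the
    # ``client.test`` attribute name is the same inferred assumption as above.
    #
    #     await client.test.delete_load_test("my-load-test")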
[docs] @distributed_trace_async async def get_load_test(self, test_id: str, **kwargs: Any) -> JSON: """Get load test details by test name. Get load test details by test name. :param test_id: Unique name for load test, must be a valid URL character ^[a-z0-9_-]*$. Required. :type test_id: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python # response body for status code(s): 200 response == { "createdBy": "str", # Optional. The user that created the test model. "createdDateTime": "2020-02-20 00:00:00", # Optional. The created DateTime(ISO 8601 literal format) of the test model. "description": "str", # Optional. The test description. "displayName": "str", # Optional. Display name of a test. "environmentVariables": { "str": "str" # Optional. Environment variables which are defined as a set of <name,value> pairs. }, "inputArtifacts": { "additionalUrls": [ { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } ], "configUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "inputArtifactsZipFileurl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "testScriptUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "userPropUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } }, "keyvaultReferenceIdentityId": "str", # Optional. Resource Id of the managed identity referencing the Key vault. "keyvaultReferenceIdentityType": "str", # Optional. Type of the managed identity referencing the Key vault. "lastModifiedBy": "str", # Optional. The user that last modified the test model. 
"lastModifiedDateTime": "2020-02-20 00:00:00", # Optional. The last Modified DateTime(ISO 8601 literal format) of the test model. "loadTestConfig": { "engineInstances": 0, # Optional. The number of engine instances to execute load test. Supported values are in range of 1-45. Required for creating a new test. "splitAllCSVs": bool # Optional. Whether all the input CSV files should be split evenly across all engines. }, "passFailCriteria": { "passFailMetrics": { "str": { "action": "str", # Optional. Either "u2018stop"u2019 or "u2018continue"u2019 after the threshold is met. Default is "u2018continue"u2019. "actualValue": 0.0, # Optional. The actual value of the client metric for the test run. "aggregate": "str", # Optional. The aggregation function to be applied on the client metric. Allowed functions - "u2018percentage"u2019 - for error metric ,"u2018avg"u2019, "u2018p50"u2019, "u2018p90"u2019, "u2018p95"u2019, "u2018p99"u2019, "u2018min"u2019, "u2018max"u2019 - for response_time_ms and latency metric, "u2018avg"u2019 - for requests_per_sec, "u2018count"u2019 - for requests. "clientmetric": "str", # Optional. The client metric on which the criteria should be applied. Allowed values - "u2018response_time_ms"u2019 , "u2018latency"u2019, "u2018error"u2019, "u2018requests"u2019, "u2018requests_per_sec"u2019. "condition": "str", # Optional. The comparison operator. Supported types "u2018>"u2019. "requestName": "str", # Optional. Request name for which the Pass fail criteria has to be applied. "result": "str", # Optional. Outcome of the test run. possible outcome - "u2018passed"u2019 , "u2018failed"u2019 , "u2018undetermined"u2019. "value": 0.0 # Optional. The value to compare with the client metric. Allowed values - "u2018error : [0.0 , 100.0] unit- % "u2019, response_time_ms and latency : any integer value unit- ms. } } }, "resourceId": "str", # Optional. Fully qualified resource Id e.g /subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}. "secrets": { "str": { "type": "str", # Optional. Type of secret. eg. AKV_SECRET_URI/SECRET_VALUE. "value": "str" # Optional. The value of the secret, of type AKV_SECRET_URI or SECRET_VALUE. } }, "subnetId": "str", # Optional. Subnet ID on which the load test instances should run. "testId": "str" # Optional. Unique test name as identifier. } """ error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls = kwargs.pop("cls", None) # type: ClsType[JSON] request = build_test_get_load_test_request( test_id=test_id, api_version=self._config.api_version, headers=_headers, params=_params, ) path_format_arguments = { "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), } request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) if response.content: deserialized = response.json() else: deserialized = None if cls: return cls(pipeline_response, cast(JSON, deserialized), {}) return cast(JSON, deserialized)
    @distributed_trace_async
    async def get_test_file(self, test_id: str, file_id: str, **kwargs: Any) -> JSON:
        """Get test file by the file name.

        Get test file by the file name.

        :param test_id: Unique name for load test, must be a valid URL character ^[a-z0-9_-]*$.
         Required.
        :type test_id: str
        :param file_id: Unique identifier for test file, must be a valid URL character
         ^[a-z0-9_-]*$. Required.
        :type file_id: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "expireTime": "2020-02-20 00:00:00",  # Optional. Expiry time of the file.
                    "fileId": "str",  # Optional. File unique identifier.
                    "fileType": 0,  # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2.
                    "filename": "str",  # Optional. Name of the file.
                    "url": "str",  # Optional. File URL.
                    "validationStatus": "str"  # Optional. Validation status of the file.
                }
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        request = build_test_get_test_file_request(
            test_id=test_id,
            file_id=file_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)
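    # Usage sketch (illustrative only): retrieving metadata for a previously uploaded test file
    # with get_test_file; the file id "my-jmx-file" is a placeholder.
    #
    #     file_info = await client.test.get_test_file("my-load-test", "my-jmx-file")
    #     print(file_info.get("filename"), file_info.get("validationStatus"))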
    @distributed_trace_async
    async def delete_test_file(  # pylint: disable=inconsistent-return-statements
        self, test_id: str, file_id: str, **kwargs: Any
    ) -> None:
        """Delete file by the file name for a test.

        Delete file by the file name for a test.

        :param test_id: Unique name for load test, must be a valid URL character ^[a-z0-9_-]*$.
         Required.
        :type test_id: str
        :param file_id: Unique identifier for test file, must be a valid URL character
         ^[a-z0-9_-]*$. Required.
        :type file_id: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[None]

        request = build_test_delete_test_file_request(
            test_id=test_id,
            file_id=file_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            return cls(pipeline_response, None, {})
    @distributed_trace_async
    async def list_test_files(
        self, test_id: str, *, continuation_token_parameter: Optional[str] = None, **kwargs: Any
    ) -> JSON:
        """Get all test files.

        Get all test files.

        :param test_id: Unique name for load test, must be a valid URL character ^[a-z0-9_-]*$.
         Required.
        :type test_id: str
        :keyword continuation_token_parameter: Continuation token to get the next page of response.
         Default value is None.
        :paramtype continuation_token_parameter: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "value": [
                        {
                            "expireTime": "2020-02-20 00:00:00",  # Optional. Expiry time of the file.
                            "fileId": "str",  # Optional. File unique identifier.
                            "fileType": 0,  # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2.
                            "filename": "str",  # Optional. Name of the file.
                            "url": "str",  # Optional. File URL.
                            "validationStatus": "str"  # Optional. Validation status of the file.
                        }
                    ],
                    "nextLink": "str"  # Optional. Link for the next list of file URLs, if applicable.
                }
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        request = build_test_list_test_files_request(
            test_id=test_id,
            continuation_token_parameter=continuation_token_parameter,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)
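    # Usage sketch (illustrative only): listing test files with list_test_files. ``client.test``
    # is an inferred attribute name, and only a single page is read here because whether
    # ``nextLink`` can be fed back directly as the continuation token is not shown in this source.
    #
    #     page = await client.test.list_test_files("my-load-test")
    #     for file_info in page.get("value", []):
    #         print(file_info.get("filename"), file_info.get("fileType"))
    #     # page.get("nextLink") indicates whether more results are available; pass a continuation
    #     # token via ``continuation_token_parameter`` to fetch the next page.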
class TestRunOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.developer.loadtesting.aio.LoadTestingClient`'s
        :attr:`test_run` attribute.
    """

    def __init__(self, *args, **kwargs) -> None:
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace_async
    async def delete_test_run(  # pylint: disable=inconsistent-return-statements
        self, test_run_id: str, **kwargs: Any
    ) -> None:
        """Delete a test run by its name.

        Delete a test run by its name.

        :param test_run_id: Unique name of the load test run, must be a valid URL character
         ^[a-z0-9_-]*$. Required.
        :type test_run_id: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[None]

        request = build_test_run_delete_test_run_request(
            test_run_id=test_run_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            return cls(pipeline_response, None, {})
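    # Usage sketch (illustrative only): removing a finished run with delete_test_run, accessed
    # through the :attr:`test_run` attribute documented on this class; the run name is a placeholder.
    #
    #     await client.test_run.delete_test_run("my-test-run")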
@overload async def create_or_update_test( self, test_run_id: str, body: JSON, *, old_test_run_id: Optional[str] = None, content_type: str = "application/merge-patch+json", **kwargs: Any ) -> JSON: """Create and start a new test run with the given name. Create and start a new test run with the given name. :param test_run_id: Unique name of the load test run, must be a valid URL character ^[a-z0-9_-]*$. Required. :type test_run_id: str :param body: Load test run model. Required. :type body: JSON :keyword old_test_run_id: Existing test run Id that should be rerun. Default value is None. :paramtype old_test_run_id: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/merge-patch+json". :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python # JSON input template you can fill out and use as your body input. body = { "createdBy": "str", # Optional. The user that created the test run. "createdDateTime": "2020-02-20 00:00:00", # Optional. The created DateTime(ISO 8601 literal format) of the test run. "description": "str", # Optional. The test run description. "displayName": "str", # Optional. Display name of a test run. "duration": 0, # Optional. Test run duration in milliseconds. "endDateTime": "2020-02-20 00:00:00", # Optional. The test run end DateTime(ISO 8601 literal format). "environmentVariables": { "str": "str" # Optional. Environment variables which are defined as a set of <name,value> pairs. }, "executedDateTime": "2020-02-20 00:00:00", # Optional. Test run initiated time. "lastModifiedBy": "str", # Optional. The user that updated the test run. "lastModifiedDateTime": "2020-02-20 00:00:00", # Optional. The last updated DateTime(ISO 8601 literal format) of the test run. "loadTestConfig": { "engineInstances": 0, # Optional. The number of engine instances to execute load test. Supported values are in range of 1-45. Required for creating a new test. "splitAllCSVs": bool # Optional. Whether all the input CSV files should be split evenly across all engines. }, "passFailCriteria": { "passFailMetrics": { "str": { "action": "str", # Optional. Either "u2018stop"u2019 or "u2018continue"u2019 after the threshold is met. Default is "u2018continue"u2019. "actualValue": 0.0, # Optional. The actual value of the client metric for the test run. "aggregate": "str", # Optional. The aggregation function to be applied on the client metric. Allowed functions - "u2018percentage"u2019 - for error metric ,"u2018avg"u2019, "u2018p50"u2019, "u2018p90"u2019, "u2018p95"u2019, "u2018p99"u2019, "u2018min"u2019, "u2018max"u2019 - for response_time_ms and latency metric, "u2018avg"u2019 - for requests_per_sec, "u2018count"u2019 - for requests. "clientmetric": "str", # Optional. The client metric on which the criteria should be applied. Allowed values - "u2018response_time_ms"u2019 , "u2018latency"u2019, "u2018error"u2019, "u2018requests"u2019, "u2018requests_per_sec"u2019. "condition": "str", # Optional. The comparison operator. Supported types "u2018>"u2019. "requestName": "str", # Optional. Request name for which the Pass fail criteria has to be applied. "result": "str", # Optional. Outcome of the test run. possible outcome - "u2018passed"u2019 , "u2018failed"u2019 , "u2018undetermined"u2019. "value": 0.0 # Optional. The value to compare with the client metric. 
Allowed values - "u2018error : [0.0 , 100.0] unit- % "u2019, response_time_ms and latency : any integer value unit- ms. } } }, "portalUrl": "str", # Optional. Portal url. "resourceId": "str", # Optional. Load test resource Id. "secrets": { "str": { "type": "str", # Optional. Type of secret. eg. AKV_SECRET_URI/SECRET_VALUE. "value": "str" # Optional. The value of the secret, of type AKV_SECRET_URI or SECRET_VALUE. } }, "startDateTime": "2020-02-20 00:00:00", # Optional. The test run start DateTime(ISO 8601 literal format). "status": "str", # Optional. The test run status. "subnetId": "str", # Optional. Subnet ID on which the load test instances should run. "testArtifacts": { "inputArtifacts": { "additionalUrls": [ { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } ], "configUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "inputArtifactsZipFileurl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "testScriptUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "userPropUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } }, "outputArtifacts": { "logsUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. 
}, "resultUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } } }, "testId": "str", # Optional. Associated test Id. "testResult": "str", # Optional. Test result for pass/Fail criteria used during the test run. possible outcome - "u2018Passed"u2019 , "u2018Failed"u2019 , "u2018Not Applicable"u2019. "testRunId": "str", # Optional. Unique test run name as identifier. "testRunStatistics": { "str": { "errorCount": 0.0, # Optional. Error count. "errorPct": 0.0, # Optional. Error percentage. "maxResTime": 0.0, # Optional. Max response time. "meanResTime": 0.0, # Optional. Mean response time. "medianResTime": 0.0, # Optional. Median response time. "minResTime": 0.0, # Optional. Minimum response time. "pct1ResTime": 0.0, # Optional. 90 percentile response time. "pct2ResTime": 0.0, # Optional. 95 percentile response time. "pct3ResTime": 0.0, # Optional. 99 percentile response time. "receivedKBytesPerSec": 0.0, # Optional. Received network bytes. "sampleCount": 0.0, # Optional. Sampler count. "sentKBytesPerSec": 0.0, # Optional. Sent network bytes. "throughput": 0.0, # Optional. Throughput. "transaction": "str" # Optional. Transaction name. } }, "vusers": 0 # Optional. Number of virtual users, for which test has been run. } # response body for status code(s): 200 response == { "createdBy": "str", # Optional. The user that created the test run. "createdDateTime": "2020-02-20 00:00:00", # Optional. The created DateTime(ISO 8601 literal format) of the test run. "description": "str", # Optional. The test run description. "displayName": "str", # Optional. Display name of a test run. "duration": 0, # Optional. Test run duration in milliseconds. "endDateTime": "2020-02-20 00:00:00", # Optional. The test run end DateTime(ISO 8601 literal format). "environmentVariables": { "str": "str" # Optional. Environment variables which are defined as a set of <name,value> pairs. }, "executedDateTime": "2020-02-20 00:00:00", # Optional. Test run initiated time. "lastModifiedBy": "str", # Optional. The user that updated the test run. "lastModifiedDateTime": "2020-02-20 00:00:00", # Optional. The last updated DateTime(ISO 8601 literal format) of the test run. "loadTestConfig": { "engineInstances": 0, # Optional. The number of engine instances to execute load test. Supported values are in range of 1-45. Required for creating a new test. "splitAllCSVs": bool # Optional. Whether all the input CSV files should be split evenly across all engines. }, "passFailCriteria": { "passFailMetrics": { "str": { "action": "str", # Optional. Either "u2018stop"u2019 or "u2018continue"u2019 after the threshold is met. Default is "u2018continue"u2019. "actualValue": 0.0, # Optional. The actual value of the client metric for the test run. "aggregate": "str", # Optional. The aggregation function to be applied on the client metric. Allowed functions - "u2018percentage"u2019 - for error metric ,"u2018avg"u2019, "u2018p50"u2019, "u2018p90"u2019, "u2018p95"u2019, "u2018p99"u2019, "u2018min"u2019, "u2018max"u2019 - for response_time_ms and latency metric, "u2018avg"u2019 - for requests_per_sec, "u2018count"u2019 - for requests. 
"clientmetric": "str", # Optional. The client metric on which the criteria should be applied. Allowed values - "u2018response_time_ms"u2019 , "u2018latency"u2019, "u2018error"u2019, "u2018requests"u2019, "u2018requests_per_sec"u2019. "condition": "str", # Optional. The comparison operator. Supported types "u2018>"u2019. "requestName": "str", # Optional. Request name for which the Pass fail criteria has to be applied. "result": "str", # Optional. Outcome of the test run. possible outcome - "u2018passed"u2019 , "u2018failed"u2019 , "u2018undetermined"u2019. "value": 0.0 # Optional. The value to compare with the client metric. Allowed values - "u2018error : [0.0 , 100.0] unit- % "u2019, response_time_ms and latency : any integer value unit- ms. } } }, "portalUrl": "str", # Optional. Portal url. "resourceId": "str", # Optional. Load test resource Id. "secrets": { "str": { "type": "str", # Optional. Type of secret. eg. AKV_SECRET_URI/SECRET_VALUE. "value": "str" # Optional. The value of the secret, of type AKV_SECRET_URI or SECRET_VALUE. } }, "startDateTime": "2020-02-20 00:00:00", # Optional. The test run start DateTime(ISO 8601 literal format). "status": "str", # Optional. The test run status. "subnetId": "str", # Optional. Subnet ID on which the load test instances should run. "testArtifacts": { "inputArtifacts": { "additionalUrls": [ { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } ], "configUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "inputArtifactsZipFileurl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "testScriptUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "userPropUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. 
Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } }, "outputArtifacts": { "logsUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "resultUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } } }, "testId": "str", # Optional. Associated test Id. "testResult": "str", # Optional. Test result for pass/Fail criteria used during the test run. possible outcome - "u2018Passed"u2019 , "u2018Failed"u2019 , "u2018Not Applicable"u2019. "testRunId": "str", # Optional. Unique test run name as identifier. "testRunStatistics": { "str": { "errorCount": 0.0, # Optional. Error count. "errorPct": 0.0, # Optional. Error percentage. "maxResTime": 0.0, # Optional. Max response time. "meanResTime": 0.0, # Optional. Mean response time. "medianResTime": 0.0, # Optional. Median response time. "minResTime": 0.0, # Optional. Minimum response time. "pct1ResTime": 0.0, # Optional. 90 percentile response time. "pct2ResTime": 0.0, # Optional. 95 percentile response time. "pct3ResTime": 0.0, # Optional. 99 percentile response time. "receivedKBytesPerSec": 0.0, # Optional. Received network bytes. "sampleCount": 0.0, # Optional. Sampler count. "sentKBytesPerSec": 0.0, # Optional. Sent network bytes. "throughput": 0.0, # Optional. Throughput. "transaction": "str" # Optional. Transaction name. } }, "vusers": 0 # Optional. Number of virtual users, for which test has been run. } """ @overload async def create_or_update_test( self, test_run_id: str, body: IO, *, old_test_run_id: Optional[str] = None, content_type: str = "application/merge-patch+json", **kwargs: Any ) -> JSON: """Create and start a new test run with the given name. Create and start a new test run with the given name. :param test_run_id: Unique name of the load test run, must be a valid URL character ^[a-z0-9_-]*$. Required. :type test_run_id: str :param body: Load test run model. Required. :type body: IO :keyword old_test_run_id: Existing test run Id that should be rerun. Default value is None. :paramtype old_test_run_id: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/merge-patch+json". :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python # response body for status code(s): 200 response == { "createdBy": "str", # Optional. The user that created the test run. "createdDateTime": "2020-02-20 00:00:00", # Optional. The created DateTime(ISO 8601 literal format) of the test run. "description": "str", # Optional. The test run description. "displayName": "str", # Optional. Display name of a test run. "duration": 0, # Optional. 
Test run duration in milliseconds. "endDateTime": "2020-02-20 00:00:00", # Optional. The test run end DateTime(ISO 8601 literal format). "environmentVariables": { "str": "str" # Optional. Environment variables which are defined as a set of <name,value> pairs. }, "executedDateTime": "2020-02-20 00:00:00", # Optional. Test run initiated time. "lastModifiedBy": "str", # Optional. The user that updated the test run. "lastModifiedDateTime": "2020-02-20 00:00:00", # Optional. The last updated DateTime(ISO 8601 literal format) of the test run. "loadTestConfig": { "engineInstances": 0, # Optional. The number of engine instances to execute load test. Supported values are in range of 1-45. Required for creating a new test. "splitAllCSVs": bool # Optional. Whether all the input CSV files should be split evenly across all engines. }, "passFailCriteria": { "passFailMetrics": { "str": { "action": "str", # Optional. Either "u2018stop"u2019 or "u2018continue"u2019 after the threshold is met. Default is "u2018continue"u2019. "actualValue": 0.0, # Optional. The actual value of the client metric for the test run. "aggregate": "str", # Optional. The aggregation function to be applied on the client metric. Allowed functions - "u2018percentage"u2019 - for error metric ,"u2018avg"u2019, "u2018p50"u2019, "u2018p90"u2019, "u2018p95"u2019, "u2018p99"u2019, "u2018min"u2019, "u2018max"u2019 - for response_time_ms and latency metric, "u2018avg"u2019 - for requests_per_sec, "u2018count"u2019 - for requests. "clientmetric": "str", # Optional. The client metric on which the criteria should be applied. Allowed values - "u2018response_time_ms"u2019 , "u2018latency"u2019, "u2018error"u2019, "u2018requests"u2019, "u2018requests_per_sec"u2019. "condition": "str", # Optional. The comparison operator. Supported types "u2018>"u2019. "requestName": "str", # Optional. Request name for which the Pass fail criteria has to be applied. "result": "str", # Optional. Outcome of the test run. possible outcome - "u2018passed"u2019 , "u2018failed"u2019 , "u2018undetermined"u2019. "value": 0.0 # Optional. The value to compare with the client metric. Allowed values - "u2018error : [0.0 , 100.0] unit- % "u2019, response_time_ms and latency : any integer value unit- ms. } } }, "portalUrl": "str", # Optional. Portal url. "resourceId": "str", # Optional. Load test resource Id. "secrets": { "str": { "type": "str", # Optional. Type of secret. eg. AKV_SECRET_URI/SECRET_VALUE. "value": "str" # Optional. The value of the secret, of type AKV_SECRET_URI or SECRET_VALUE. } }, "startDateTime": "2020-02-20 00:00:00", # Optional. The test run start DateTime(ISO 8601 literal format). "status": "str", # Optional. The test run status. "subnetId": "str", # Optional. Subnet ID on which the load test instances should run. "testArtifacts": { "inputArtifacts": { "additionalUrls": [ { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } ], "configUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. 
Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "inputArtifactsZipFileurl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "testScriptUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "userPropUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } }, "outputArtifacts": { "logsUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. }, "resultUrl": { "expireTime": "2020-02-20 00:00:00", # Optional. Expiry time of the file. "fileId": "str", # Optional. File unique identifier. "fileType": 0, # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2. "filename": "str", # Optional. Name of the file. "url": "str", # Optional. File URL. "validationStatus": "str" # Optional. Validation status of the file. } } }, "testId": "str", # Optional. Associated test Id. "testResult": "str", # Optional. Test result for pass/Fail criteria used during the test run. possible outcome - "u2018Passed"u2019 , "u2018Failed"u2019 , "u2018Not Applicable"u2019. "testRunId": "str", # Optional. Unique test run name as identifier. "testRunStatistics": { "str": { "errorCount": 0.0, # Optional. Error count. "errorPct": 0.0, # Optional. Error percentage. "maxResTime": 0.0, # Optional. Max response time. "meanResTime": 0.0, # Optional. Mean response time. "medianResTime": 0.0, # Optional. Median response time. "minResTime": 0.0, # Optional. Minimum response time. "pct1ResTime": 0.0, # Optional. 90 percentile response time. "pct2ResTime": 0.0, # Optional. 95 percentile response time. "pct3ResTime": 0.0, # Optional. 99 percentile response time. "receivedKBytesPerSec": 0.0, # Optional. Received network bytes. "sampleCount": 0.0, # Optional. 
Sampler count. "sentKBytesPerSec": 0.0, # Optional. Sent network bytes. "throughput": 0.0, # Optional. Throughput. "transaction": "str" # Optional. Transaction name. } }, "vusers": 0 # Optional. Number of virtual users, for which test has been run. } """
    @distributed_trace_async
    async def create_or_update_test(
        self, test_run_id: str, body: Union[JSON, IO], *, old_test_run_id: Optional[str] = None, **kwargs: Any
    ) -> JSON:
        """Create and start a new test run with the given name.

        Create and start a new test run with the given name.

        :param test_run_id: Unique name of the load test run, must be a valid URL character
         ^[a-z0-9_-]*$. Required.
        :type test_run_id: str
        :param body: Load test run model. Is either a JSON model type or an IO type. Required.
        :type body: JSON or IO
        :keyword old_test_run_id: Existing test run Id that should be rerun. Default value is None.
        :paramtype old_test_run_id: str
        :keyword content_type: Body Parameter content-type. Known values are:
         'application/merge-patch+json'. Default value is None.
        :paramtype content_type: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "createdBy": "str",  # Optional. The user that created the test run.
                    "createdDateTime": "2020-02-20 00:00:00",  # Optional. The created DateTime(ISO 8601 literal format) of the test run.
                    "description": "str",  # Optional. The test run description.
                    "displayName": "str",  # Optional. Display name of a test run.
                    "duration": 0,  # Optional. Test run duration in milliseconds.
                    "endDateTime": "2020-02-20 00:00:00",  # Optional. The test run end DateTime(ISO 8601 literal format).
                    "environmentVariables": {
                        "str": "str"  # Optional. Environment variables which are defined as a set of <name,value> pairs.
                    },
                    "executedDateTime": "2020-02-20 00:00:00",  # Optional. Test run initiated time.
                    "lastModifiedBy": "str",  # Optional. The user that updated the test run.
                    "lastModifiedDateTime": "2020-02-20 00:00:00",  # Optional. The last updated DateTime(ISO 8601 literal format) of the test run.
                    "loadTestConfig": {
                        "engineInstances": 0,  # Optional. The number of engine instances to execute the load test. Supported values are in the range of 1-45. Required for creating a new test.
                        "splitAllCSVs": bool  # Optional. Whether all the input CSV files should be split evenly across all engines.
                    },
                    "passFailCriteria": {
                        "passFailMetrics": {
                            "str": {
                                "action": "str",  # Optional. Either 'stop' or 'continue' after the threshold is met. Default is 'continue'.
                                "actualValue": 0.0,  # Optional. The actual value of the client metric for the test run.
                                "aggregate": "str",  # Optional. The aggregation function to be applied on the client metric. Allowed functions - 'percentage' for the error metric; 'avg', 'p50', 'p90', 'p95', 'p99', 'min', 'max' for the response_time_ms and latency metrics; 'avg' for requests_per_sec; 'count' for requests.
                                "clientmetric": "str",  # Optional. The client metric on which the criteria should be applied. Allowed values - 'response_time_ms', 'latency', 'error', 'requests', 'requests_per_sec'.
                                "condition": "str",  # Optional. The comparison operator. Supported types '>'.
                                "requestName": "str",  # Optional. Request name for which the pass/fail criteria has to be applied.
                                "result": "str",  # Optional. Outcome of the test run. Possible outcomes - 'passed', 'failed', 'undetermined'.
                                "value": 0.0  # Optional. The value to compare with the client metric. Allowed values - error: [0.0, 100.0], unit %; response_time_ms and latency: any integer value, unit ms.
                            }
                        }
                    },
                    "portalUrl": "str",  # Optional. Portal url.
                    "resourceId": "str",  # Optional. Load test resource Id.
                    "secrets": {
                        "str": {
                            "type": "str",  # Optional. Type of secret. e.g. AKV_SECRET_URI/SECRET_VALUE.
                            "value": "str"  # Optional. The value of the secret, of type AKV_SECRET_URI or SECRET_VALUE.
                        }
                    },
                    "startDateTime": "2020-02-20 00:00:00",  # Optional. The test run start DateTime(ISO 8601 literal format).
                    "status": "str",  # Optional. The test run status.
                    "subnetId": "str",  # Optional. Subnet ID on which the load test instances should run.
                    "testArtifacts": {
                        "inputArtifacts": {
                            "additionalUrls": [
                                {
                                    "expireTime": "2020-02-20 00:00:00",  # Optional. Expiry time of the file.
                                    "fileId": "str",  # Optional. File unique identifier.
                                    "fileType": 0,  # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2.
                                    "filename": "str",  # Optional. Name of the file.
                                    "url": "str",  # Optional. File URL.
                                    "validationStatus": "str"  # Optional. Validation status of the file.
                                }
                            ],
                            "configUrl": {},  # Optional. File info; same fields as the additionalUrls entries above.
                            "inputArtifactsZipFileurl": {},  # Optional. File info; same fields as the additionalUrls entries above.
                            "testScriptUrl": {},  # Optional. File info; same fields as the additionalUrls entries above.
                            "userPropUrl": {}  # Optional. File info; same fields as the additionalUrls entries above.
                        },
                        "outputArtifacts": {
                            "logsUrl": {},  # Optional. File info; same fields as the additionalUrls entries above.
                            "resultUrl": {}  # Optional. File info; same fields as the additionalUrls entries above.
                        }
                    },
                    "testId": "str",  # Optional. Associated test Id.
                    "testResult": "str",  # Optional. Test result for pass/fail criteria used during the test run. Possible outcomes - 'Passed', 'Failed', 'Not Applicable'.
                    "testRunId": "str",  # Optional. Unique test run name as identifier.
                    "testRunStatistics": {
                        "str": {
                            "errorCount": 0.0,  # Optional. Error count.
                            "errorPct": 0.0,  # Optional. Error percentage.
                            "maxResTime": 0.0,  # Optional. Max response time.
                            "meanResTime": 0.0,  # Optional. Mean response time.
                            "medianResTime": 0.0,  # Optional. Median response time.
                            "minResTime": 0.0,  # Optional. Minimum response time.
                            "pct1ResTime": 0.0,  # Optional. 90th percentile response time.
                            "pct2ResTime": 0.0,  # Optional. 95th percentile response time.
                            "pct3ResTime": 0.0,  # Optional. 99th percentile response time.
                            "receivedKBytesPerSec": 0.0,  # Optional. Received network bytes.
                            "sampleCount": 0.0,  # Optional. Sampler count.
                            "sentKBytesPerSec": 0.0,  # Optional. Sent network bytes.
                            "throughput": 0.0,  # Optional. Throughput.
                            "transaction": "str"  # Optional. Transaction name.
                        }
                    },
                    "vusers": 0  # Optional. Number of virtual users, for which test has been run.
                }
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        content_type = content_type or "application/merge-patch+json"
        _json = None
        _content = None
        if isinstance(body, (IO, bytes)):
            _content = body
        else:
            _json = body

        request = build_test_run_create_or_update_test_request(
            test_run_id=test_run_id,
            old_test_run_id=old_test_run_id,
            content_type=content_type,
            api_version=self._config.api_version,
            json=_json,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)
    @distributed_trace_async
    async def get_test_run(self, test_run_id: str, **kwargs: Any) -> JSON:
        """Get test run details by name.

        Get test run details by name.

        The response body is the same test run object shown in the example for
        ``create_or_update_test`` above.

        :param test_run_id: Unique name of load test run, must be a valid URL character
         ^[a-z0-9_-]*$. Required.
        :type test_run_id: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        request = build_test_run_get_test_run_request(
            test_run_id=test_run_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)
    @distributed_trace_async
    async def get_test_run_file(self, test_run_id: str, file_id: str, **kwargs: Any) -> JSON:
        """Get test run file by file name.

        Get test run file by file name.

        :param test_run_id: Unique name of load test run, must be a valid URL character
         ^[a-z0-9_-]*$. Required.
        :type test_run_id: str
        :param file_id: Unique identifier for test run file, must be a valid URL character
         ^[a-z0-9_-]*$. Required.
        :type file_id: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "expireTime": "2020-02-20 00:00:00",  # Optional. Expiry time of the file.
                    "fileId": "str",  # Optional. File unique identifier.
                    "fileType": 0,  # Optional. Integer representation of the file type (0 = JMX_FILE, 1 = USER_PROPERTIES, 2 = ADDITIONAL_ARTIFACTS). Known values are: 0, 1, and 2.
                    "filename": "str",  # Optional. Name of the file.
                    "url": "str",  # Optional. File URL.
                    "validationStatus": "str"  # Optional. Validation status of the file.
                }
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        request = build_test_run_get_test_run_file_request(
            test_run_id=test_run_id,
            file_id=file_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)
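    # Usage sketch (illustrative only): fetching metadata for a file attached to a test run and
    # downloading it over plain HTTP. The file id "results" is a placeholder; real ids come from
    # the testArtifacts section of the test run object. Using aiohttp here is an assumption -
    # the operation only returns the file URL, so any HTTP client works.
    #
    #   import aiohttp
    #
    #   file_info = await client.test_run.get_test_run_file("my-test-run", "results")
    #   async with aiohttp.ClientSession() as session:
    #       async with session.get(file_info["url"]) as resp:
    #           data = await resp.read()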
    @distributed_trace_async
    async def list_test_runs(
        self,
        *,
        order_by: Optional[str] = None,
        continuation_token_parameter: Optional[str] = None,
        search: Optional[str] = None,
        execution_from: Optional[datetime.datetime] = None,
        execution_to: Optional[datetime.datetime] = None,
        status: Optional[str] = None,
        max_page_size: int = 50,
        test_id: Optional[str] = None,
        **kwargs: Any
    ) -> JSON:
        """Get all test runs with given filters.

        Get all test runs with given filters.

        :keyword order_by: Sort on one of the fields - status, displayName, executedDateTime - in
         "(field asc/desc)" format, e.g. "displayName asc". Default value is None.
        :paramtype order_by: str
        :keyword continuation_token_parameter: Continuation token to get the next page of response.
         Default value is None.
        :paramtype continuation_token_parameter: str
        :keyword search: Filter search based on searchable fields - description, executedUser.
         Default value is None.
        :paramtype search: str
        :keyword execution_from: The start DateTime(ISO 8601 literal format) of the test-run
         execution time filter range. Default value is None.
        :paramtype execution_from: ~datetime.datetime
        :keyword execution_to: The end DateTime(ISO 8601 literal format) of the test-run execution
         time filter range. Default value is None.
        :paramtype execution_to: ~datetime.datetime
        :keyword status: Comma separated list of test run statuses; values can be "ACCEPTED",
         "NOTSTARTED", "PROVISIONING", "PROVISIONED", "CONFIGURING", "CONFIGURED", "EXECUTING",
         "EXECUTED", "DEPROVISIONING", "DEPROVISIONED", "DONE", "CANCELLED", "FAILED". Default
         value is None.
        :paramtype status: str
        :keyword max_page_size: Number of results in response. Default value is 50.
        :paramtype max_page_size: int
        :keyword test_id: Unique name for load test, must be a valid URL character ^[a-z0-9_-]*$.
         Default value is None.
        :paramtype test_id: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "value": [
                        # Test run objects with the same shape as the response shown in the
                        # example for create_or_update_test above.
                    ],
                    "nextLink": "str"  # Optional. Link for the next list of resources in case of paginated results, if applicable.
                }
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        request = build_test_run_list_test_runs_request(
            order_by=order_by,
            continuation_token_parameter=continuation_token_parameter,
            search=search,
            execution_from=execution_from,
            execution_to=execution_to,
            status=status,
            max_page_size=max_page_size,
            test_id=test_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)
    @distributed_trace_async
    async def stop_test_run(self, test_run_id: str, **kwargs: Any) -> JSON:
        """Stop test run by name.

        Stop test run by name.

        The response body is the same test run object shown in the example for
        ``create_or_update_test`` above.

        :param test_run_id: Unique name of the load test run, must be a valid URL character
         ^[a-z0-9_-]*$. Required.
        :type test_run_id: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        request = build_test_run_stop_test_run_request(
            test_run_id=test_run_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)
    @overload
    async def get_test_run_client_metrics(
        self, test_run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> JSON:
        """Get all client metrics for a load test run.

        Get all client metrics for a load test run.

        The response body is the client metrics object shown in the example for
        ``get_test_run_client_metrics`` below.

        :param test_run_id: Unique name of the load test run, must be a valid URL character
         ^[a-z0-9_-]*$. Required.
        :type test_run_id: str
        :param body: Client metrics request model. Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                body = {
                    "endTime": "2020-02-20 00:00:00",  # End time. Required.
                    "startTime": "2020-02-20 00:00:00",  # Start time. Required.
                    "errors": [
                        "str"  # Optional. List of errors; a maximum of 20 errors is supported per query. If empty, metrics are returned for at most 20 errors by default.
                    ],
                    "groupByInterval": "str",  # Optional. For a test duration of less than 10 minutes, the group-by time interval can be one of 5s, 10s, 1m, 5m. For a test duration greater than 10 minutes, it can be one of 1m, 5m, 1h. Default value is 1m.
                    "percentiles": [
                        "str"  # Optional. List of percentile values for response time; supported values are 50, 90, 95, 99. Default value is the 50th percentile.
                    ],
                    "requestSamplers": [
                        "str"  # Optional. List of request samplers; a maximum of 20 samplers is supported per query. If empty, metrics are returned for at most 20 samplers.
                    ]
                }
        """

    @overload
    async def get_test_run_client_metrics(
        self, test_run_id: str, body: IO, *, content_type: str = "application/json", **kwargs: Any
    ) -> JSON:
        """Get all client metrics for a load test run.

        Get all client metrics for a load test run.

        The response body is the client metrics object shown in the example for
        ``get_test_run_client_metrics`` below.

        :param test_run_id: Unique name of the load test run, must be a valid URL character
         ^[a-z0-9_-]*$. Required.
        :type test_run_id: str
        :param body: Client metrics request model. Required.
        :type body: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace_async
    async def get_test_run_client_metrics(self, test_run_id: str, body: Union[JSON, IO], **kwargs: Any) -> JSON:
        """Get all client metrics for a load test run.

        Get all client metrics for a load test run.

        :param test_run_id: Unique name of the load test run, must be a valid URL character
         ^[a-z0-9_-]*$. Required.
        :type test_run_id: str
        :param body: Client metrics request model. Is either a JSON model type or an IO type.
         Required.
        :type body: JSON or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "testRunId": "str",  # Optional. Test run name for which client metrics results are required.
                    "timeSeries": {
                        "activeUsers": {
                            "str": [
                                {
                                    "timestamp": "2020-02-20 00:00:00",  # Optional. Timestamp(ISO 8601 literal format).
                                    "value": 0.0  # Optional. Value at timestamp.
                                }
                            ]
                        },
                        "errors": {
                            "str": []  # Same {"timestamp", "value"} entries as activeUsers above.
                        },
                        "responseTime": {
                            "str": []  # Same {"timestamp", "value"} entries as activeUsers above.
                        },
                        "throughput": {
                            "str": []  # Same {"timestamp", "value"} entries as activeUsers above.
                        }
                    }
                }
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        content_type = content_type or "application/json"
        _json = None
        _content = None
        if isinstance(body, (IO, bytes)):
            _content = body
        else:
            _json = body

        request = build_test_run_get_test_run_client_metrics_request(
            test_run_id=test_run_id,
            content_type=content_type,
            api_version=self._config.api_version,
            json=_json,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)
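    # Usage sketch (illustrative only): requesting client metrics for the time window reported by
    # get_test_run_client_metrics_filters (documented below). The body fields follow the JSON
    # input template on the overload above; assumes `client.test_run` as in the earlier sketches.
    #
    #   filters = await client.test_run.get_test_run_client_metrics_filters("my-test-run")
    #   metrics = await client.test_run.get_test_run_client_metrics(
    #       "my-test-run",
    #       {
    #           "startTime": filters["timeRange"]["startTime"],
    #           "endTime": filters["timeRange"]["endTime"],
    #           "percentiles": ["90"],
    #           "groupByInterval": "1m",
    #       },
    #   )
    #   print(list(metrics["timeSeries"]))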
    @distributed_trace_async
    async def get_test_run_client_metrics_filters(self, test_run_id: str, **kwargs: Any) -> JSON:
        """Get all filters that are supported for client metrics for a given load test run.

        Get all filters that are supported for client metrics for a given load test run.

        :param test_run_id: Unique name for load test run, must be a valid URL character
         ^[a-z0-9_-]*$. Required.
        :type test_run_id: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "filters": {
                        "errorFiltersValues": [
                            "str"  # Optional. List of errors that occurred for the test run, for which client metrics can be filtered.
                        ],
                        "requestSamplerValues": [
                            "str"  # Optional. List of request samplers for the test run, for which client metrics can be filtered.
                        ]
                    },
                    "testRunId": "str",  # Optional. Test run name for which client metrics filters are required.
                    "timeRange": {
                        "endTime": "2020-02-20 00:00:00",  # Optional. End DateTime(ISO 8601 literal format) for the requested client metrics filter.
                        "startTime": "2020-02-20 00:00:00"  # Optional. Start DateTime(ISO 8601 literal format) for the requested client metrics filter.
                    }
                }
        """
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

        request = build_test_run_get_test_run_client_metrics_filters_request(
            test_run_id=test_run_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, cast(JSON, deserialized), {})

        return cast(JSON, deserialized)
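    # Usage sketch (illustrative only): listing the error and request-sampler filter values that
    # can be passed in the body of get_test_run_client_metrics above. Assumes `client.test_run`
    # as in the earlier sketches.
    #
    #   filters = await client.test_run.get_test_run_client_metrics_filters("my-test-run")
    #   print(filters["filters"].get("errorFiltersValues"))
    #   print(filters["filters"].get("requestSamplerValues"))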