Source code for azure.ai.personalizer._patch

# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Customize generated code here.

Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
# pylint: disable=too-many-lines
import datetime
import sys
from typing import List, Union, Any, IO, Iterator, Iterable, Optional
from azure.core.credentials import AzureKeyCredential, TokenCredential
from azure.core.pipeline.policies import AzureKeyCredentialPolicy, BearerTokenCredentialPolicy
from azure.core.tracing.decorator import distributed_trace
from azure.core.rest import HttpRequest, HttpResponse

from ._client import PersonalizerClient as PersonalizerClientGenerated

if sys.version_info >= (3, 9):
    from collections.abc import MutableMapping
else:
    from typing import MutableMapping  # type: ignore  # pylint: disable=ungrouped-imports
JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object

__all__: List[str] = [
    "PersonalizerClient",
    "PersonalizerAdministrationClient",
]  # Add all objects you want publicly available to users at this package level


def _authentication_policy(credential, **kwargs):
    if credential is None:
        raise ValueError("Parameter 'credential' must not be None.")
    if isinstance(credential, AzureKeyCredential):
        authentication_policy = AzureKeyCredentialPolicy(
            name="Ocp-Apim-Subscription-Key", credential=credential, **kwargs
        )
    elif hasattr(credential, "get_token"):
        authentication_policy = BearerTokenCredentialPolicy(
            credential, *kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]), **kwargs
        )
    else:
        raise TypeError(
            "Unsupported credential: {}. Use an instance of AzureKeyCredential "
            "or a token credential from azure.identity".format(type(credential))
        )
    return authentication_policy
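
# A hedged, illustrative sketch (not part of the shipped module): how the helper
# above resolves an authentication policy for the two supported credential kinds.
# ``DefaultAzureCredential`` comes from the separate ``azure-identity`` package and
# is used here purely as an example of a token credential:
#
#     from azure.core.credentials import AzureKeyCredential
#     from azure.identity import DefaultAzureCredential
#
#     # An API key maps to a header policy sending "Ocp-Apim-Subscription-Key".
#     key_policy = _authentication_policy(AzureKeyCredential("<api-key>"))
#
#     # Any credential exposing get_token() maps to a bearer-token policy scoped to
#     # "https://cognitiveservices.azure.com/.default" unless credential_scopes is passed.
#     aad_policy = _authentication_policy(DefaultAzureCredential())
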

class PersonalizerAdministrationClient:  # pylint: disable=too-many-public-methods
    """This client contains the operations that apply to Azure Personalizer. Operations allowed by
    the client are viewing and editing the properties, policy, and model, and running evaluations.

    :param endpoint: Supported Cognitive Services endpoint. Required.
    :type endpoint: str
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.AzureKeyCredential or
     ~azure.core.credentials.TokenCredential
    :keyword api_version: Api Version. Default value is "2022-09-01-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, TokenCredential], **kwargs: Any) -> None:
        self._client = PersonalizerClientGenerated(
            endpoint=endpoint.rstrip("/"),
            credential=credential,  # type: ignore
            authentication_policy=kwargs.pop("authentication_policy", _authentication_policy(credential, **kwargs)),
            **kwargs
        )

    @distributed_trace
    def export_model(self, *, signed: bool = False, **kwargs: Any) -> Iterator[bytes]:
        """Model.

        Get the model file generated by Personalizer service.

        :keyword signed: True if requesting signed model zip archive, false otherwise. Default
         value is False.
        :paramtype signed: bool
        :return: Iterator of the response bytes
        :rtype: Iterator[bytes]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        return self._client.get_model(signed=signed, **kwargs)

    @distributed_trace
    def import_model(self, body: IO, **kwargs: Any) -> None:  # pylint: disable=inconsistent-return-statements
        """Model File.

        Replace the existing model file for the Personalizer service.

        :param body: The digitally signed model file obtained from getting the model. Required.
        :type body: IO
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        return self._client.import_model(body, **kwargs)

    @distributed_trace
    def reset_model(self, **kwargs: Any) -> None:  # pylint: disable=inconsistent-return-statements
        """Reset Model.

        Resets the model file generated by Personalizer service.

        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        return self._client.reset_model(**kwargs)

    @distributed_trace
    def get_model_properties(self, **kwargs: Any) -> JSON:
        """Model Properties.

        Get properties of the model file generated by Personalizer service.

        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "creationTime": "2020-02-20 00:00:00",  # Optional. Creation time of the model.
                    "lastModifiedTime": "2020-02-20 00:00:00"  # Optional. Last time the model was modified.
                }
        """
        return self._client.get_model_properties(**kwargs)

    @distributed_trace
    def get_log_properties(self, **kwargs: Any) -> JSON:
        """Log Properties.

        Get properties of the Personalizer logs.

        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "dateRange": {
                        "from": "2020-02-20 00:00:00",  # Optional. Start date for the range.
                        "to": "2020-02-20 00:00:00"  # Optional. End date for the range.
                    }
                }
        """
        return self._client.get_log_properties(**kwargs)

    @distributed_trace
    def delete_logs(self, **kwargs: Any) -> None:  # pylint: disable=inconsistent-return-statements
        """Logs.

        Delete all logs of Rank and Reward calls stored by Personalizer.

        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        return self._client.delete_log(**kwargs)

    @distributed_trace
    def get_policy(self, **kwargs: Any) -> JSON:
        """Policy.

        Get the Learning Settings currently used by the Personalizer service.

        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "arguments": "str",  # Arguments of the learning settings. Required.
                    "name": "str"  # Name of the learning settings. Required.
                }
        """
        return self._client.get_policy(**kwargs)

    @distributed_trace
    def update_policy(self, policy: Union[JSON, IO], **kwargs: Any) -> JSON:
        """Update Policy.

        Update the Learning Settings that the Personalizer service will use to train models.

        :param policy: The learning settings. Required.
        :type policy: JSON or IO
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                policy = {
                    "arguments": "str",  # Arguments of the learning settings. Required.
                    "name": "str"  # Name of the learning settings. Required.
                }

                # response body for status code(s): 200
                response == {
                    "arguments": "str",  # Arguments of the learning settings. Required.
                    "name": "str"  # Name of the learning settings. Required.
                }
        """
        return self._client.update_policy(policy, **kwargs)

    @distributed_trace
    def reset_policy(self, **kwargs: Any) -> JSON:
        """Reset Policy.

        Resets the learning settings of the Personalizer service to default.

        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "arguments": "str",  # Arguments of the learning settings. Required.
                    "name": "str"  # Name of the learning settings. Required.
                }
        """
        return self._client.reset_policy(**kwargs)

    @distributed_trace
    def get_service_configuration(self, **kwargs: Any) -> JSON:
        """Service Configuration.

        Get the Personalizer service configuration.

        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "defaultReward": 0.0,  # The reward given if a reward is not received within the specified wait time. Required.
                    "explorationPercentage": 0.0,  # The percentage of rank responses that will use exploration. Required.
                    "logRetentionDays": 0,  # Number of days historical logs are to be maintained. -1 implies the logs will never be deleted. Required.
                    "modelExportFrequency": "1 day, 0:00:00",  # Personalizer will start using the most updated trained model for online ranks automatically every specified time period.\nFor example, PT5M (5 mins). For information about the time format,\nsee http://en.wikipedia.org/wiki/ISO_8601#Durations. Required.
                    "modelRetrainDays": 0,  # Default value is 0. Number of days of historical logs used when a model retrain is triggered. Required.
                    "rewardAggregation": "str",  # The function used to process rewards, if multiple reward scores are received before rewardWaitTime is over. Required.
                    "rewardWaitTime": "1 day, 0:00:00",  # The time span waited until a request is marked with the default reward\nand should be between 5 seconds and 2 days.\nFor example, PT5M (5 mins). For information about the time format,\nsee http://en.wikipedia.org/wiki/ISO_8601#Durations. Required.
                    "autoOptimizationFrequency": "1 day, 0:00:00",  # Optional. Frequency of automatic optimization. Only relevant if IsAutoOptimizationEnabled is true.\nFor example, PT5M (5 mins). For information about the time format,\n\r\nsee http://en.wikipedia.org/wiki/ISO_8601#Durations.
                    "autoOptimizationStartDate": "2020-02-20 00:00:00",  # Optional. Date when the first automatic optimization evaluation must be performed. Only relevant if IsAutoOptimizationEnabled is true.
                    "isAutoOptimizationEnabled": bool,  # Optional. Flag indicating whether Personalizer will automatically optimize Learning Settings by running Offline Evaluations periodically.
                    "lastConfigurationEditDate": "2020-02-20 00:00:00",  # Optional. Last time model training configuration was updated.
                    "latestApprenticeModeMetrics": {
                        "lastProcessedEventTime": "2020-02-20 00:00:00",  # Required.
                        "numberOfEvents": 0,  # Required.
                        "numberOfImitatedEvents": 0,  # Required.
                        "startTime": "2020-02-20 00:00:00",  # Required.
                        "sumOfImitatedRewards": 0.0,  # Required.
                        "sumOfRewards": 0.0,  # Required.
                        "lastBatchMetrics": {
                            "numberOfEvents": 0,  # Required.
                            "numberOfImitatedEvents": 0,  # Required.
                            "sumOfImitatedRewards": 0.0,  # Required.
                            "sumOfRewards": 0.0  # Required.
                        }
                    },
                    "learningMode": "str",  # Optional. Learning Modes for Personalizer. Known values are: "Online", "Apprentice", and "LoggingOnly".
                    "logMirrorEnabled": bool,  # Optional. Flag indicates whether log mirroring is enabled.
                    "logMirrorSasUri": "str"  # Optional. Azure storage account container SAS URI for log mirroring.
                }
        """
        return self._client.get_service_configuration(**kwargs)

    @distributed_trace
    def update_service_configuration(self, config: Union[JSON, IO], **kwargs: Any) -> JSON:
        """Update Service Configuration.

        Update the Personalizer service configuration.

        :param config: The personalizer service configuration. Required.
        :type config: JSON or IO
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                config = {
                    "defaultReward": 0.0,  # The reward given if a reward is not received within the specified wait time. Required.
                    "explorationPercentage": 0.0,  # The percentage of rank responses that will use exploration. Required.
                    "logRetentionDays": 0,  # Number of days historical logs are to be maintained. -1 implies the logs will never be deleted. Required.
                    "modelExportFrequency": "1 day, 0:00:00",  # Personalizer will start using the most updated trained model for online ranks automatically every specified time period.\nFor example, PT5M (5 mins). For information about the time format,\nsee http://en.wikipedia.org/wiki/ISO_8601#Durations. Required.
                    "modelRetrainDays": 0,  # Default value is 0. Number of days of historical logs used when a model retrain is triggered. Required.
                    "rewardAggregation": "str",  # The function used to process rewards, if multiple reward scores are received before rewardWaitTime is over. Required.
                    "rewardWaitTime": "1 day, 0:00:00",  # The time span waited until a request is marked with the default reward\nand should be between 5 seconds and 2 days.\nFor example, PT5M (5 mins). For information about the time format,\nsee http://en.wikipedia.org/wiki/ISO_8601#Durations. Required.
                    "autoOptimizationFrequency": "1 day, 0:00:00",  # Optional. Frequency of automatic optimization. Only relevant if IsAutoOptimizationEnabled is true.\nFor example, PT5M (5 mins). For information about the time format,\n\r\nsee http://en.wikipedia.org/wiki/ISO_8601#Durations.
                    "autoOptimizationStartDate": "2020-02-20 00:00:00",  # Optional. Date when the first automatic optimization evaluation must be performed. Only relevant if IsAutoOptimizationEnabled is true.
                    "isAutoOptimizationEnabled": bool,  # Optional. Flag indicating whether Personalizer will automatically optimize Learning Settings by running Offline Evaluations periodically.
                    "lastConfigurationEditDate": "2020-02-20 00:00:00",  # Optional. Last time model training configuration was updated.
                    "latestApprenticeModeMetrics": {
                        "lastProcessedEventTime": "2020-02-20 00:00:00",  # Required.
                        "numberOfEvents": 0,  # Required.
                        "numberOfImitatedEvents": 0,  # Required.
                        "startTime": "2020-02-20 00:00:00",  # Required.
                        "sumOfImitatedRewards": 0.0,  # Required.
                        "sumOfRewards": 0.0,  # Required.
                        "lastBatchMetrics": {
                            "numberOfEvents": 0,  # Required.
                            "numberOfImitatedEvents": 0,  # Required.
                            "sumOfImitatedRewards": 0.0,  # Required.
                            "sumOfRewards": 0.0  # Required.
                        }
                    },
                    "learningMode": "str",  # Optional. Learning Modes for Personalizer. Known values are: "Online", "Apprentice", and "LoggingOnly".
                    "logMirrorEnabled": bool,  # Optional. Flag indicates whether log mirroring is enabled.
                    "logMirrorSasUri": "str"  # Optional. Azure storage account container SAS URI for log mirroring.
                }

                # response body for status code(s): 200
                response == {
                    "defaultReward": 0.0,  # The reward given if a reward is not received within the specified wait time. Required.
                    "explorationPercentage": 0.0,  # The percentage of rank responses that will use exploration. Required.
                    "logRetentionDays": 0,  # Number of days historical logs are to be maintained. -1 implies the logs will never be deleted. Required.
                    "modelExportFrequency": "1 day, 0:00:00",  # Personalizer will start using the most updated trained model for online ranks automatically every specified time period.\nFor example, PT5M (5 mins). For information about the time format,\nsee http://en.wikipedia.org/wiki/ISO_8601#Durations. Required.
                    "modelRetrainDays": 0,  # Default value is 0. Number of days of historical logs used when a model retrain is triggered. Required.
                    "rewardAggregation": "str",  # The function used to process rewards, if multiple reward scores are received before rewardWaitTime is over. Required.
                    "rewardWaitTime": "1 day, 0:00:00",  # The time span waited until a request is marked with the default reward\nand should be between 5 seconds and 2 days.\nFor example, PT5M (5 mins). For information about the time format,\nsee http://en.wikipedia.org/wiki/ISO_8601#Durations. Required.
                    "autoOptimizationFrequency": "1 day, 0:00:00",  # Optional. Frequency of automatic optimization. Only relevant if IsAutoOptimizationEnabled is true.\nFor example, PT5M (5 mins). For information about the time format,\n\r\nsee http://en.wikipedia.org/wiki/ISO_8601#Durations.
                    "autoOptimizationStartDate": "2020-02-20 00:00:00",  # Optional. Date when the first automatic optimization evaluation must be performed. Only relevant if IsAutoOptimizationEnabled is true.
                    "isAutoOptimizationEnabled": bool,  # Optional. Flag indicating whether Personalizer will automatically optimize Learning Settings by running Offline Evaluations periodically.
                    "lastConfigurationEditDate": "2020-02-20 00:00:00",  # Optional. Last time model training configuration was updated.
                    "latestApprenticeModeMetrics": {
                        "lastProcessedEventTime": "2020-02-20 00:00:00",  # Required.
                        "numberOfEvents": 0,  # Required.
                        "numberOfImitatedEvents": 0,  # Required.
                        "startTime": "2020-02-20 00:00:00",  # Required.
                        "sumOfImitatedRewards": 0.0,  # Required.
                        "sumOfRewards": 0.0,  # Required.
                        "lastBatchMetrics": {
                            "numberOfEvents": 0,  # Required.
                            "numberOfImitatedEvents": 0,  # Required.
                            "sumOfImitatedRewards": 0.0,  # Required.
                            "sumOfRewards": 0.0  # Required.
                        }
                    },
                    "learningMode": "str",  # Optional. Learning Modes for Personalizer. Known values are: "Online", "Apprentice", and "LoggingOnly".
                    "logMirrorEnabled": bool,  # Optional. Flag indicates whether log mirroring is enabled.
                    "logMirrorSasUri": "str"  # Optional. Azure storage account container SAS URI for log mirroring.
                }
        """
        return self._client.update_service_configuration(config, **kwargs)

    @distributed_trace
    def apply_from_evaluation(  # pylint: disable=inconsistent-return-statements
        self, body: Union[JSON, IO], **kwargs: Any
    ) -> None:
        """Apply Learning Settings and Model.

        Apply Learning Settings and model from a pre-existing Offline Evaluation, making them the
        current online Learning Settings and model and replacing the previous ones.

        :param body: Reference to the policy within the evaluation. Required.
        :type body: JSON or IO
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                body = {
                    "evaluationId": "str",  # Evaluation Id of the evaluation. Required.
                    "policyName": "str"  # Name of the learning settings. Required.
                }
        """
        return self._client.apply_from_evaluation(body, **kwargs)

    @distributed_trace
    def create_evaluation(  # pylint: disable=inconsistent-return-statements
        self, evaluation_id: str, evaluation: Union[JSON, IO], **kwargs: Any
    ) -> None:
        """Create Offline Evaluation.

        Submit a new Offline Evaluation job.

        :param evaluation_id: Id of the Offline Evaluation to create. Required.
        :type evaluation_id: str
        :param evaluation: The Offline Evaluation job definition. Required.
        :type evaluation: JSON or IO
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                evaluation = {
                    "endTime": "2020-02-20 00:00:00",  # The end time of the evaluation. Required.
                    "name": "str",  # The name of the evaluation. Required.
                    "startTime": "2020-02-20 00:00:00",  # The start time of the evaluation. Required.
                    "enablePolicyOptimization": bool,  # Optional. True if the evaluation should explore for more optimal learning settings.
                    "policies": [
                        {
                            "arguments": "str",  # Arguments of the learning settings. Required.
                            "name": "str"  # Name of the learning settings. Required.
                        }
                    ]
                }
        """
        return self._client.create_evaluation(evaluation_id, evaluation, **kwargs)

    @distributed_trace
    def get_evaluation(
        self,
        evaluation_id: str,
        *,
        start_time: datetime.datetime,
        end_time: datetime.datetime,
        interval_in_minutes: Optional[int] = None,
        window: Optional[str] = None,
        **kwargs: Any
    ) -> JSON:
        """Offline Evaluation.

        Get the Offline Evaluation associated with the Id.

        :param evaluation_id: Id of the Offline Evaluation. Required.
        :type evaluation_id: str
        :keyword start_time: Start of aggregation time interval. Required.
        :paramtype start_time: ~datetime.datetime
        :keyword end_time: End of aggregation time interval. Required.
        :paramtype end_time: ~datetime.datetime
        :keyword interval_in_minutes: Time interval for aggregation of events in minutes. Allowed
         intervals: 5 minutes, 60 minutes, 360 minutes, 720 minutes and 1440 minutes. Defaults to 5
         minutes. Default value is None.
        :paramtype interval_in_minutes: int
        :keyword window: Rolling or Expanding time. Rolling compatible with 60 minutes, 360 minutes,
         720 minutes and 1440 minutes intervals. Expanding compatible with 5 minute time interval
         only. Defaults to Expanding. Known values are: "Expanding" and "Rolling". Default value is
         None.
        :paramtype window: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "creationTime": "2020-02-20 00:00:00",  # The creation time of the evaluation. Required.
                    "endTime": "2020-02-20 00:00:00",  # The end time of the evaluation. Required.
                    "evaluationType": "str",  # The type of evaluation. Required. Known values are: "Manual" and "Auto".
                    "id": "str",  # The ID of the evaluation. Required.
                    "name": "str",  # The name of the evaluation. Required.
                    "startTime": "2020-02-20 00:00:00",  # The start time of the evaluation. Required.
                    "status": "str",  # The status of the job processing the evaluation. Required. Known values are: "Succeeded", "Running", "Failed", "NotSubmitted", "Timeout", "OptimalPolicyApplied", "OnlinePolicyRetained", and "Canceled".
                    "content": {
                        "endTime": "2020-02-20 00:00:00",  # Optional. End time of the time series.
                        "intervalInMinutes": 0,  # Optional. Aggregation window.
                        "startTime": "2020-02-20 00:00:00",  # Optional. Start time of the time series.
                        "value": [
                            {
                                "arguments": "str",  # Optional. The arguments of the Learning Settings.
                                "name": "str",  # Optional. The name of the Learning Settings.
                                "policySource": "str",  # Optional. The source of the Learning Settings. Known values are: "Online", "Baseline", "Random", "Custom", and "OfflineExperimentation".
                                "slotId": "str",  # Optional. Slot id. Empty for overall results and single slot results.
                                "summary": {
                                    "confidenceInterval": [
                                        0.0  # Optional. Confidence interval.
                                    ],
                                    "expectedReward": 0.0  # Optional. Average reward.
                                },
                                "timeseries": {
                                    "data": [
                                        {
                                            "confidenceInterval": [
                                                0.0  # Optional. Confidence interval.
                                            ],
                                            "expectedReward": 0.0,  # Optional. Average reward.
                                            "timeStamp": "2020-02-20 00:00:00"  # Optional. Timestamp of the aggregation.
                                        }
                                    ]
                                }
                            }
                        ]
                    },
                    "description": "str",  # Optional. Description of the evaluation job.
                    "optimalPolicy": "str"  # Optional. Optimal policy found by the evaluation job.
                }
        """
        return self._client.get_evaluation(
            evaluation_id,
            start_time=start_time,
            end_time=end_time,
            interval_in_minutes=interval_in_minutes,
            window=window,
            **kwargs
        )

    @distributed_trace
    def list_evaluations(
        self, *, filter_expression: Optional[str] = None, top: Optional[int] = None, skip: int = 0, **kwargs: Any
    ) -> Iterable[JSON]:
        """All Offline Evaluations.

        List of all Offline Evaluations.

        :keyword filter_expression: An expression to filter the evaluations against evaluation
         metadata. Only evaluations where the expression evaluates to true are included in the
         response. Here is an example, metadata=evaluationType eq 'Manual'. Default value is None.
        :paramtype filter_expression: str
        :keyword top: The maximum number of resources to return from the collection. Defaults to
         maximum value of integer. Default value is None.
        :paramtype top: int
        :keyword skip: An offset into the collection of the first resource to be returned. Defaults
         to 0. Default value is 0.
        :paramtype skip: int
        :return: An iterator like instance of JSON object
        :rtype: ~azure.core.paging.ItemPaged[JSON]
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "creationTime": "2020-02-20 00:00:00",  # The creation time of the evaluation. Required.
                    "endTime": "2020-02-20 00:00:00",  # The end time of the evaluation. Required.
                    "evaluationType": "str",  # Type of evaluation. Required. Known values are: "Manual" and "Auto".
                    "id": "str",  # The ID of the evaluation. Required.
                    "name": "str",  # The name of the evaluation. Required.
                    "startTime": "2020-02-20 00:00:00",  # The start time of the evaluation. Required.
                    "status": "str"  # The status of the job processing the evaluation. Required. Known values are: "Succeeded", "Running", "Failed", "NotSubmitted", "Timeout", "OptimalPolicyApplied", "OnlinePolicyRetained", and "Canceled".
                }
        """
        return self._client.list_evaluations(filter_expression=filter_expression, top=top, skip=skip, **kwargs)

    @distributed_trace
    def delete_evaluation(
        self, evaluation_id: str, **kwargs: Any
    ) -> None:  # pylint: disable=inconsistent-return-statements
        """Offline Evaluation.

        Delete the Offline Evaluation associated with the Id.

        :param evaluation_id: Id of the Offline Evaluation to delete. Required.
        :type evaluation_id: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        return self._client.delete_evaluation(evaluation_id, **kwargs)
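
    # A minimal offline-evaluation workflow sketch tying the methods above together.
    # It assumes ``admin_client`` is an already-constructed
    # PersonalizerAdministrationClient; the id, name, and date values below are
    # hypothetical placeholders, not values from this module:
    #
    #     import datetime
    #     admin_client.create_evaluation("my-eval-id", {
    #         "name": "weekly check",
    #         "startTime": "2022-09-01T00:00:00Z",
    #         "endTime": "2022-09-07T00:00:00Z",
    #         "policies": [],
    #     })
    #     result = admin_client.get_evaluation(
    #         "my-eval-id",
    #         start_time=datetime.datetime(2022, 9, 1),
    #         end_time=datetime.datetime(2022, 9, 7),
    #     )
    #     if result["status"] == "Succeeded":
    #         admin_client.apply_from_evaluation(
    #             {"evaluationId": "my-eval-id", "policyName": result["optimalPolicy"]}
    #         )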

    @distributed_trace
    def create_feature_importance(  # pylint: disable=inconsistent-return-statements
        self, feature_importance_id: str, feature_importance: Union[JSON, IO], **kwargs: Any
    ) -> None:
        """Create Feature Importance.

        Submit a new Feature Importance job.

        :param feature_importance_id: Id of the Feature Importance to create. Required.
        :type feature_importance_id: str
        :param feature_importance: The Feature Importance job definition. Required.
        :type feature_importance: JSON or IO
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                feature_importance = {
                    "endTime": "2020-02-20 00:00:00",  # The end time of the feature importance. Required.
                    "name": "str",  # The name of the feature importance. Required.
                    "startTime": "2020-02-20 00:00:00"  # The start time of the feature importance. Required.
                }
        """
        return self._client.create_feature_importance(feature_importance_id, feature_importance, **kwargs)

    @distributed_trace
    def get_feature_importance(self, feature_importance_id: str, **kwargs: Any) -> JSON:
        """Feature Importance.

        Get the Feature Importance associated with the Id.

        :param feature_importance_id: Id of the Feature Importance. Required.
        :type feature_importance_id: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "creationTime": "2020-02-20 00:00:00",  # Optional. The creation time of the feature importance.
                    "description": "str",  # Optional. Description of the feature importance job.
                    "endTime": "2020-02-20 00:00:00",  # Optional. The end time of the feature importance.
                    "featureScores": [
                        {
                            "featureName": "str",  # Optional. Feature name.
                            "namespace": "str",  # Optional. Feature namespace.
                            "score": 0.0  # Optional. Feature score.
                        }
                    ],
                    "id": "str",  # Optional. The ID of the feature importance.
                    "name": "str",  # Optional. The name of the feature importance.
                    "startTime": "2020-02-20 00:00:00",  # Optional. The start time of the feature importance.
                    "status": "str"  # Optional. The status of the job processing the feature importance. Known values are: "Succeeded", "Running", "Failed", "NotSubmitted", "Timeout", and "Canceled".
                }
        """
        return self._client.get_feature_importance(feature_importance_id, **kwargs)

    @distributed_trace
    def list_feature_importances(self, *, top: Optional[int] = None, skip: int = 0, **kwargs: Any) -> Iterable[JSON]:
        """All Feature Importances.

        List of all Feature Importances.

        :keyword top: The maximum number of resources to return from the collection. Defaults to
         maximum value of integer. Default value is None.
        :paramtype top: int
        :keyword skip: An offset into the collection of the first resource to be returned. Defaults
         to 0. Default value is 0.
        :paramtype skip: int
        :return: An iterator like instance of JSON object
        :rtype: ~azure.core.paging.ItemPaged[JSON]
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "creationTime": "2020-02-20 00:00:00",  # Required.
                    "endTime": "2020-02-20 00:00:00",  # Required.
                    "id": "str",  # Required.
                    "name": "str",  # Required.
                    "startTime": "2020-02-20 00:00:00",  # Required.
                    "status": "str"  # The status of the feature importance job. Required. Known values are: "Succeeded", "Running", "Failed", "NotSubmitted", "Timeout", and "Canceled".
                }
        """
        return self._client.list_feature_importances(top=top, skip=skip, **kwargs)

    @distributed_trace
    def delete_feature_importance(  # pylint: disable=inconsistent-return-statements
        self, feature_importance_id: str, **kwargs: Any
    ) -> None:
        """Feature Importance.

        Delete the Feature Importance associated with the Id.

        :param feature_importance_id: Id of the Feature Importance to delete. Required.
        :type feature_importance_id: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        return self._client.delete_feature_importance(feature_importance_id, **kwargs)

    def send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = client.send_request(request)
        <HttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.HttpResponse
        """
        return self._client.send_request(request, **kwargs)

    def close(self):
        # type: () -> None
        self._client.close()

    def __enter__(self):
        # type: () -> PersonalizerAdministrationClient
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details):
        # type: (Any) -> None
        self._client.__exit__(*exc_details)
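
# A minimal usage sketch for the administration client above, assuming a valid
# Personalizer endpoint and API key (both placeholders below are hypothetical):
#
#     admin_client = PersonalizerAdministrationClient(
#         "https://<resource-name>.cognitiveservices.azure.com",
#         AzureKeyCredential("<api-key>"),
#     )
#     configuration = admin_client.get_service_configuration()
#     policy = admin_client.get_policy()
#     # Both calls return plain JSON mappings, e.g. policy["name"] and
#     # configuration["rewardWaitTime"].
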

class PersonalizerClient:
    """Personalizer Service is an Azure Cognitive Service that makes it easy to target content and
    experiences without complex pre-analysis or cleanup of past data. Given a context and
    featurized content, the Personalizer Service returns which content item to show to users in
    rewardActionId. As rewards are sent in response to the use of rewardActionId, the reinforcement
    learning algorithm will improve the model and improve performance of future rank calls.

    :param endpoint: Supported Cognitive Services endpoint. Required.
    :type endpoint: str
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.AzureKeyCredential or
     ~azure.core.credentials.TokenCredential
    :keyword api_version: Api Version. Default value is "2022-09-01-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, TokenCredential], **kwargs: Any) -> None:
        self._client = PersonalizerClientGenerated(
            endpoint=endpoint.rstrip("/"),
            credential=credential,  # type: ignore
            authentication_policy=kwargs.pop("authentication_policy", _authentication_policy(credential, **kwargs)),
            **kwargs
        )

    @distributed_trace
    def rank(self, rank_request: Union[JSON, IO], **kwargs: Any) -> JSON:
        """Rank.

        Submit a Personalizer rank request. Receives a context and a list of actions. Returns which
        of the provided actions should be used by your application, in rewardActionId.

        :param rank_request: A Personalizer Rank request. Required.
        :type rank_request: JSON or IO
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                rank_request = {
                    "actions": [
                        {
                            "features": [
                                {}  # List of dictionaries containing features. Required.
                            ],
                            "id": "str"  # Id of the action. Required.
                        }
                    ],
                    "contextFeatures": [
                        {}  # Optional. Features of the context used for Personalizer as a\ndictionary of dictionaries. This is determined by your application, and\ntypically includes features about the current user, their\ndevice, profile information, aggregated data about time and date, etc.\nFeatures should not include personally identifiable information (PII),\nunique UserIDs, or precise timestamps.
                    ],
                    "deferActivation": False,  # Optional. Default value is False. Send false if it is certain the rewardActionId in rank results will be shown to the user, therefore\nPersonalizer will expect a Reward call, otherwise it will assign the default\nReward to the event. Send true if it is possible the user will not see the action specified in the rank results,\n(e.g. because the page is rendering later, or the Rank results may be overridden by code further downstream).\nYou must call the Activate Event API if the event output is shown to users, otherwise Rewards will be ignored.
                    "eventId": "str",  # Optional. Optionally pass an eventId that uniquely identifies this Rank event.\nIf null, the service generates a unique eventId. The eventId will be used for\nassociating this request with its reward, as well as seeding the pseudo-random\ngenerator when making a Personalizer call.
                    "excludedActions": [
                        "str"  # Optional. The set of action ids to exclude from ranking.\nPersonalizer will consider the first non-excluded item in the array as the Baseline action when performing Offline Evaluations.
                    ]
                }

                # response body for status code(s): 201
                response == {
                    "eventId": "str",  # The eventId for the round trip from request to response. Required.
                    "ranking": [
                        {
                            "id": "str",  # Optional. Id of the action.
                            "probability": 0.0  # Optional. Probability of the action.
                        }
                    ],
                    "rewardActionId": "str"  # Optional. The action chosen by the Personalizer service.\nThis is the action your application should display, and for which to report the reward.\nThis might not be the first found in 'ranking'.
                }
        """
        return self._client.rank_single_slot(rank_request, **kwargs)

    @distributed_trace
    def reward(  # pylint: disable=inconsistent-return-statements
        self, event_id: str, reward: Union[JSON, IO], **kwargs: Any
    ) -> None:
        """Reward.

        Report a reward between 0 and 1 that resulted from using the action specified in
        rewardActionId, for the specified event.

        :param event_id: The event id this reward applies to. Required.
        :type event_id: str
        :param reward: The reward should be a floating point number, typically between 0 and 1.
         Required.
        :type reward: JSON or IO
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                reward = {
                    "value": 0.0  # Reward to be assigned to an action. Value is a float calculated by your application, typically between 0 and 1, and must be between -1 and 1. Required.
                }
        """
        return self._client.reward_single_slot_event(event_id, reward, **kwargs)

    @distributed_trace
    def activate(self, event_id: str, **kwargs: Any) -> None:  # pylint: disable=inconsistent-return-statements
        """Activate Event.

        Report that the specified event was actually used (e.g. by being displayed to the user) and
        a reward should be expected for it.

        :param event_id: The event ID to be activated. Required.
        :type event_id: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        return self._client.activate_single_slot_event(event_id, **kwargs)

    @distributed_trace
    def rank_multi_slot(self, body: Union[JSON, IO], **kwargs: Any) -> JSON:
        """Rank (MultiSlot).

        Submit a Personalizer multi-slot rank request. Receives a context, a list of actions, and a
        list of slots. Returns which of the provided actions should be used in each slot, in each
        rewardActionId.

        :param body: A Personalizer multi-slot Rank request. Required.
        :type body: JSON or IO
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: JSON object
        :rtype: JSON
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                body = {
                    "actions": [
                        {
                            "features": [
                                {}  # List of dictionaries containing features. Required.
                            ],
                            "id": "str"  # Id of the action. Required.
                        }
                    ],
                    "slots": [
                        {
                            "baselineAction": "str",  # The 'baseline action' ID for the slot.\nThe BaselineAction is the Id of the Action your application would use in that slot if Personalizer didn't exist.\nBaselineAction must be defined for every slot.\nBaselineAction should never be part of ExcludedActions.\nEach slot must have a unique BaselineAction which corresponds to an action from the event's Actions list. Required.
                            "id": "str",  # Slot ID. Required.
                            "excludedActions": [
                                "str"  # Optional. List of excluded action Ids.
                            ],
                            "features": [
                                {}  # Optional. List of dictionaries containing slot features.
                            ]
                        }
                    ],
                    "contextFeatures": [
                        {}  # Optional. Features of the context used for Personalizer as a\ndictionary of dictionaries. This is determined by your application, and\ntypically includes features about the current user, their\ndevice, profile information, aggregated data about time and date, etc.\nFeatures should not include personally identifiable information (PII),\nunique UserIDs, or precise timestamps.
                    ],
                    "deferActivation": False,  # Optional. Default value is False. Send false if it is certain the rewardActionId in rank results will be shown to the user, therefore\nPersonalizer will expect a Reward call, otherwise it will assign the default\nReward to the event. Send true if it is possible the user will not see the action specified in the rank results,\n(e.g. because the page is rendering later, or the Rank results may be overridden by code further downstream).\nYou must call the Activate Event API if the event output is shown to users, otherwise Rewards will be ignored.
                    "eventId": "str"  # Optional. Optionally pass an eventId that uniquely identifies this Rank event.\nIf null, the service generates a unique eventId. The eventId will be used for\nassociating this request with its reward, as well as seeding the pseudo-random\ngenerator when making a Personalizer call.
                }

                # response body for status code(s): 201
                response == {
                    "eventId": "str",  # The eventId for the round trip from request to response. Required.
                    "slots": [
                        {
                            "id": "str",  # Id is the slot ID. Required.
                            "rewardActionId": "str"  # Optional. RewardActionID is the action ID recommended by Personalizer.
                        }
                    ]
                }
        """
        return self._client.rank_multi_slot(body, **kwargs)

    @distributed_trace
    def reward_multi_slot(  # pylint: disable=inconsistent-return-statements
        self, event_id: str, body: Union[JSON, IO], **kwargs: Any
    ) -> None:
        """Reward (MultiSlot).

        Report a reward that resulted from using the action specified in rewardActionId for the
        slot.

        :param event_id: The event id this reward applies to. Required.
        :type event_id: str
        :param body: List of slot id and reward values. The reward should be a floating point
         number, typically between 0 and 1. Required.
        :type body: JSON or IO
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                body = {
                    "reward": [
                        {
                            "slotId": "str",  # Slot id for which we are sending the reward. Required.
                            "value": 0.0  # Reward to be assigned to slotId. Value should be between -1 and 1 inclusive. Required.
                        }
                    ]
                }
        """
        return self._client.reward_multi_slot_event(event_id, body, **kwargs)

    @distributed_trace
    def activate_multi_slot(
        self, event_id: str, **kwargs: Any
    ) -> None:  # pylint: disable=inconsistent-return-statements
        """Activate Event (MultiSlot).

        Report that the specified event was actually used or displayed to the user and a reward
        should be expected for it.

        :param event_id: The event ID this activation applies to. Required.
        :type event_id: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        return self._client.activate_multi_slot_event(event_id, **kwargs)

    def send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = client.send_request(request)
        <HttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.HttpResponse
        """
        return self._client.send_request(request, **kwargs)

    def close(self):
        # type: () -> None
        self._client.close()

    def __enter__(self):
        # type: () -> PersonalizerClient
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details):
        # type: (Any) -> None
        self._client.__exit__(*exc_details)
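
# A minimal single-slot rank-and-reward sketch, assuming a hypothetical endpoint,
# API key, and feature payloads shaped like the templates in the docstrings above:
#
#     client = PersonalizerClient(
#         "https://<resource-name>.cognitiveservices.azure.com",
#         AzureKeyCredential("<api-key>"),
#     )
#     response = client.rank({
#         "actions": [{"id": "article-1", "features": [{"topic": "sports"}]}],
#         "contextFeatures": [{"device": "mobile"}],
#     })
#     # Report how well the action the service chose (rewardActionId) performed.
#     client.reward(response["eventId"], {"value": 1.0})
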

def patch_sdk():
    """Do not remove from this file.

    `patch_sdk` is a last resort escape hatch that allows you to do customizations
    you can't accomplish using the techniques described in
    https://aka.ms/azsdk/python/dpcodegen/python/customize
    """