From 15960bfdbe3c13ef0136f8b4c6cb10fe4f49b2c5 Mon Sep 17 00:00:00 2001 From: zhen Date: Fri, 12 Apr 2024 12:46:29 +0800 Subject: [PATCH] [Prompty] Support model config in prompty (#2728) # Description - prompty with azure openai ``` --- name: Basic Prompt description: A basic prompt that uses the GPT-3 chat API to answer questions model: api: chat configuration: type: azure_openai azure_deployment: gpt-35-turbo azure_endpoint: ${env:AZURE_ENDPOINT} api_key: ${env:AZURE_API_KEY} parameters: max_tokens: 128 temperature: 0.2 inputs: firstName: type: string default: John lastName: type: string default: Doh question: type: string --- system: You are an AI assistant who helps people find information. Use their name to address them in your responses. user: {{question}} ``` - prompty with connection ``` --- name: Basic Prompt description: A basic prompt that uses the GPT-3 chat API to answer questions model: api: chat connection: azure_open_ai_connection configuration: type: azure_openai azure_deployment: gpt-35-turbo parameters: max_tokens: 128 temperature: 0.2 inputs: firstName: type: string default: John lastName: type: string default: Doh question: type: string --- system: You are an AI assistant who helps people find information. Use their name to address them in your responses. user: {{question}} ``` - prompty with openai ``` --- name: Basic Prompt description: A basic prompt that uses the GPT-3 chat API to answer questions model: api: chat connection: azure_open_ai_connection configuration: type: openai model: gpt-35-turbo api_key: ${env:API_KEY} base_url: ${env:BASE_URL} parameters: max_tokens: 128 temperature: 0.2 inputs: firstName: type: string default: John lastName: type: string default: Doh question: type: string --- system: You are an AI assistant who helps people find information. Use their name to address them in your responses. user: {{question}} ``` Please add an informative description that covers that changes made by the pull request and link all relevant issues. # All Promptflow Contribution checklist: - [ ] **The pull request does not introduce [breaking changes].** - [ ] **CHANGELOG is updated for new features, bug fixes or other significant changes.** - [ ] **I have read the [contribution guidelines](../CONTRIBUTING.md).** - [ ] **Create an issue and link to the pull request to get dedicated review from promptflow team. Learn more: [suggested workflow](../CONTRIBUTING.md#suggested-workflow).** ## General Guidelines and Best Practices - [ ] Title of the pull request is clear and informative. - [ ] There are a small number of commits, each of which have an informative message. This means that previously merged commits do not appear in the history of the PR. For more information on cleaning up the commits in your PR, [see this page](https://github.com/Azure/azure-powershell/blob/master/documentation/development-docs/cleaning-up-commits.md). ### Testing Guidelines - [ ] Pull request includes test coverage for the included changes. --- src/promptflow-core/promptflow/core/_flow.py | 114 +++++++++++------- .../promptflow/core/_model_configuration.py | 79 ++++++++++++ .../promptflow/core/_prompty_utils.py | 91 +++++++++----- .../_sdk/_orchestrator/run_submitter.py | 3 +- .../promptflow/_sdk/entities/_run.py | 5 +- .../sdk_cli_test/e2etests/test_prompty.py | 92 ++++++++++---- .../recordings/local/node_cache.shelve.bak | 2 + .../recordings/local/node_cache.shelve.dat | Bin 356984 -> 362313 bytes .../recordings/local/node_cache.shelve.dir | 2 + .../prompty/prompty_example.prompty | 14 ++- 10 files changed, 300 insertions(+), 102 deletions(-) create mode 100644 src/promptflow-core/promptflow/core/_model_configuration.py diff --git a/src/promptflow-core/promptflow/core/_flow.py b/src/promptflow-core/promptflow/core/_flow.py index bbc45958948..c81dd73fb31 100644 --- a/src/promptflow-core/promptflow/core/_flow.py +++ b/src/promptflow-core/promptflow/core/_flow.py @@ -11,15 +11,16 @@ from promptflow._constants import DEFAULT_ENCODING, LANGUAGE_KEY, PROMPTY_EXTENSION, FlowLanguage from promptflow._utils.flow_utils import is_flex_flow, is_prompty_flow, resolve_flow_path from promptflow._utils.yaml_utils import load_yaml_string -from promptflow.contracts.tool import ValueType from promptflow.core._errors import MissingRequiredInputError +from promptflow.core._model_configuration import PromptyModelConfiguration from promptflow.core._prompty_utils import ( + convert_model_configuration_to_connection, convert_prompt_template, format_llm_response, - get_connection, get_open_ai_client_by_connection, prepare_open_ai_request_params, send_request_to_llm, + update_dict_recursively, ) from promptflow.exceptions import UserErrorException from promptflow.tracing import trace @@ -230,6 +231,55 @@ class Prompty(FlowBase): prompty = Prompty.load(source="path/to/prompty.prompty") result = prompty(input_a=1, input_b=2) + # Override model config with dict + model_config = { + "api": "chat", + "configuration": { + "type": "azure_openai", + "azure_deployment": "gpt-35-turbo", + "api_key": ${env:AZURE_OPENAI_API_KEY}, + "api_version": ${env:AZURE_OPENAI_API_VERSION}, + "azure_endpoint": ${env:AZURE_OPENAI_ENDPOINT}, + }, + "parameters": { + max_token: 512 + } + } + prompty = Prompty.load(source="path/to/prompty.prompty", model=model_config) + result = prompty(input_a=1, input_b=2) + + # Override model config with configuration + from promptflow.core._model_configuration import AzureOpenAIModelConfiguration + model_config = { + "api": "chat", + "configuration": AzureOpenAIModelConfiguration( + azure_deployment="gpt-35-turbo", + api_key="${env:AZURE_OPENAI_API_KEY}", + api_version=${env:AZURE_OPENAI_API_VERSION}", + azure_endpoint="${env:AZURE_OPENAI_ENDPOINT}", + ), + "parameters": { + max_token: 512 + } + } + prompty = Prompty.load(source="path/to/prompty.prompty", model=model_config) + result = prompty(input_a=1, input_b=2) + + # Override model config with created connection + from promptflow.core._model_configuration import AzureOpenAIModelConfiguration + model_config = { + "api": "chat", + "configuration": AzureOpenAIModelConfiguration( + connection="azure_open_ai_connection", + azure_deployment="gpt-35-turbo", + ), + "parameters": { + max_token: 512 + } + } + prompty = Prompty.load(source="path/to/prompty.prompty", model=model_config) + result = prompty(input_a=1, input_b=2) + """ def __init__( @@ -240,29 +290,15 @@ def __init__( ): # prompty file path path = Path(path) - model = model or {} configs, self._template = self._parse_prompty(path) - prompty_model = configs.get("model", {}) - prompty_model["api"] = model.get("api") or prompty_model.get("api", "chat") - # TODO wait for model spec - prompty_model["connection"] = model.get("connection") or prompty_model.get("connection", None) - if model.get("parameters", None): - if prompty_model.get("parameters", {}): - prompty_model["parameters"].update(model["parameters"]) - else: - prompty_model["parameters"] = model["parameters"] - for k in list(kwargs.keys()): - value = kwargs.pop(k) - if k in configs and isinstance(value, dict): - configs[k].update(value) - else: - configs[k] = value - configs["inputs"] = self._resolve_inputs(configs.get("inputs", {})) - self._connection = prompty_model["connection"] - self._parameters = prompty_model.get("parameters", None) - self._api = prompty_model["api"] + configs = update_dict_recursively(configs, kwargs) + configs["model"] = update_dict_recursively(configs.get("model", {}), model or {}) + + self._model = PromptyModelConfiguration(**configs["model"]) self._inputs = configs.get("inputs", {}) - configs["model"] = prompty_model + self._outputs = configs.get("outputs", {}) + # TODO support more templating engine + self._template_engine = configs.get("template", "jinja2") super().__init__(code=path.parent, path=path, data=configs, content_hash=None, **kwargs) @classmethod @@ -314,15 +350,6 @@ def _parse_prompty(path): configs = load_yaml_string(config_content) return configs, prompt_template - def _resolve_inputs(self, inputs): - resolved_inputs = {} - for k, v in inputs.items(): - if isinstance(v, dict): - resolved_inputs[k] = v - else: - resolved_inputs[k] = {"type": ValueType.from_value(v).value, "default": v} - return resolved_inputs - def _validate_inputs(self, input_values): resolved_inputs = {} missing_inputs = [] @@ -348,25 +375,26 @@ def __call__(self, *args, **kwargs): """ if args: raise UserErrorException("Prompty can only be called with keyword arguments.") + # 1. Get connection - connection = get_connection(self._connection) + connection = convert_model_configuration_to_connection(self._model.configuration) # 2.deal with prompt inputs = self._validate_inputs(kwargs) traced_convert_prompt_template = _traced(func=convert_prompt_template, args_to_ignore=["api"]) - template = traced_convert_prompt_template(self._template, inputs, self._api) + template = traced_convert_prompt_template(self._template, inputs, self._model.api) # 3. prepare params - params = prepare_open_ai_request_params(self._parameters, template, self._api, connection) + params = prepare_open_ai_request_params(self._model, template, connection) # 4. send request to open ai api_client = get_open_ai_client_by_connection(connection=connection) traced_llm_call = _traced(send_request_to_llm) - response = traced_llm_call(api_client, self._api, params) + response = traced_llm_call(api_client, self._model.api, params) return format_llm_response( response=response, - api=self._api, + api=self._model.api, response_format=params.get("response_format", None), raw=self._data.get("format", None) == "raw", ) @@ -399,24 +427,26 @@ async def __call__(self, *args, **kwargs) -> Mapping[str, Any]: """ if args: raise UserErrorException("Prompty can only be called with keyword arguments.") + # 1. Get connection - connection = get_connection(self._connection) + connection = convert_model_configuration_to_connection(self._model.configuration) # 2.deal with prompt inputs = self._validate_inputs(kwargs) - template = convert_prompt_template(self._template, inputs, self._api) + traced_convert_prompt_template = _traced(func=convert_prompt_template, args_to_ignore=["api"]) + template = traced_convert_prompt_template(self._template, inputs, self._model.api) # 3. prepare params - params = prepare_open_ai_request_params(self._parameters, template, self._api, connection) + params = prepare_open_ai_request_params(self._model, template, connection) # 4. send request to open ai api_client = get_open_ai_client_by_connection(connection=connection, is_async=True) traced_llm_call = _traced(send_request_to_llm) - response = await traced_llm_call(api_client, self._api, params) + response = await traced_llm_call(api_client, self._model.api, params) return format_llm_response( response=response, - api=self._api, + api=self._model.api, response_format=params.get("response_format", None), raw=self._data.get("format", None) == "raw", ) diff --git a/src/promptflow-core/promptflow/core/_model_configuration.py b/src/promptflow-core/promptflow/core/_model_configuration.py new file mode 100644 index 00000000000..b2bc5059443 --- /dev/null +++ b/src/promptflow-core/promptflow/core/_model_configuration.py @@ -0,0 +1,79 @@ +from dataclasses import dataclass +from typing import Union + +from promptflow._constants import ConnectionType +from promptflow.core._errors import InvalidConnectionError + + +class ModelConfiguration: + pass + + +@dataclass +class AzureOpenAIModelConfiguration(ModelConfiguration): + azure_deployment: str + azure_endpoint: str = None + api_version: str = None + api_key: str = None + organization: str = None + # connection and model configs are exclusive. + connection: str = None + + def __post_init__(self): + self._type = ConnectionType.AZURE_OPEN_AI + if any([self.azure_endpoint, self.api_key, self.api_version, self.organization]) and self.connection: + raise InvalidConnectionError("Cannot configure model config and connection at the same time.") + + +@dataclass +class OpenAIModelConfiguration(ModelConfiguration): + model: str + base_url: str = None + api_key: str = None + organization: str = None + # connection and model configs are exclusive. + connection: str = None + + def __post_init__(self): + self._type = ConnectionType.OPEN_AI + if any([self.base_url, self.api_key, self.api_version, self.organization]) and self.connection: + raise InvalidConnectionError("Cannot configure model config and connection at the same time.") + + +@dataclass +class PromptyModelConfiguration: + """ + A dataclass that represents a model config of prompty. + + :param api: Type of the LLM request, default value is chat. + :type api: str + :param configuration: Prompty model connection configuration + :type configuration: Union[dict, AzureOpenAIModelConfiguration, OpenAIModelConfiguration] + :param parameters: Params of the LLM request. + :type parameters: dict + :param response: Return the complete response or the first choice, default value is first. + :type response: str + """ + + configuration: Union[dict, AzureOpenAIModelConfiguration, OpenAIModelConfiguration] + parameters: dict + api: str = "chat" + response: str = "first" + + def __post_init__(self): + if isinstance(self.configuration, dict): + # Load connection from model configuration + model_config = { + k: v + for k, v in self.configuration.items() + if k not in ["type", "connection", "model", "azure_deployment"] + } + if self.configuration.get("connection", None) and any([v for v in model_config.values()]): + raise InvalidConnectionError( + "Cannot configure model config and connection in configuration at the same time." + ) + self._model = self.configuration.get("azure_deployment", None) or self.configuration.get("model", None) + elif isinstance(self.configuration, OpenAIModelConfiguration): + self._model = self.configuration.model + elif isinstance(self.configuration, AzureOpenAIModelConfiguration): + self._model = self.configuration.azure_deployment diff --git a/src/promptflow-core/promptflow/core/_prompty_utils.py b/src/promptflow-core/promptflow/core/_prompty_utils.py index 0b0c2f224a3..23b13c7c9f2 100644 --- a/src/promptflow-core/promptflow/core/_prompty_utils.py +++ b/src/promptflow-core/promptflow/core/_prompty_utils.py @@ -2,6 +2,7 @@ import json import os import re +from dataclasses import asdict from typing import List, Mapping from promptflow.core._connection import AzureOpenAIConnection, OpenAIConnection, _Connection @@ -9,14 +10,29 @@ ChatAPIFunctionRoleInvalidFormatError, ChatAPIInvalidRoleError, CoreError, - InvalidConnectionError, UnknownConnectionType, ) +from promptflow.core._model_configuration import ModelConfiguration from promptflow.core._utils import render_jinja_template_content +def update_dict_recursively(origin_dict, overwrite_dict): + updated_dict = {} + for k, v in overwrite_dict.items(): + if isinstance(v, dict): + updated_dict[k] = update_dict_recursively(origin_dict.get(k, {}), v) + else: + updated_dict[k] = v + for k, v in origin_dict.items(): + if k not in updated_dict: + updated_dict[k] = v + return updated_dict + + def parse_environment_variable(value): """Get environment variable from ${env:ENV_NAME}. If not found, return original value.""" + if not isinstance(value, str): + return value pattern = r"^\$\{env:(.*)\}$" result = re.match(pattern, value) if result: @@ -26,33 +42,46 @@ def parse_environment_variable(value): return value -def get_connection(connection): - if not isinstance(connection, (str, dict, _Connection)): - error_message = ( - "Illegal definition of connection, only support connection name or dict of connection info. " - "You can refer to https://microsoft.github.io/promptflow/how-to-guides/" - "manage-connections.html#create-a-connection for more details about connection." - ) - raise InvalidConnectionError(message=error_message) - if isinstance(connection, str): - # Get connection by name - try: - from promptflow._sdk._pf_client import PFClient - except ImportError as ex: - raise CoreError(f"Please try 'pip install promptflow-devkit' to install dependency, {ex.msg}") - client = PFClient() - connection_obj = client.connections.get(connection, with_secrets=True) - connection = connection_obj._to_execution_connection_dict()["value"] - connection_type = connection_obj.TYPE - elif isinstance(connection, dict): - connection_type = connection.pop("type", None) - # Get value from environment - connection = {k: parse_environment_variable(v) for k, v in connection.items()} - else: - return connection - if connection_type == AzureOpenAIConnection.TYPE: +def get_connection_by_name(connection_name): + try: + from promptflow._sdk._pf_client import PFClient + except ImportError as ex: + raise CoreError(f"Please try 'pip install promptflow-devkit' to install dependency, {ex.msg}") + client = PFClient() + connection_obj = client.connections.get(connection_name, with_secrets=True) + connection = connection_obj._to_execution_connection_dict()["value"] + connection_type = connection_obj.TYPE + return connection, connection_type + + +def convert_model_configuration_to_connection(model_configuration): + if isinstance(model_configuration, dict): + # Get connection from connection field + connection = model_configuration.get("connection", None) + if connection: + if isinstance(connection, str): + # Get connection by name + connection, connection_type = get_connection_by_name(connection) + elif isinstance(connection, _Connection): + return connection + else: + connection_dict = copy.copy(model_configuration) + connection_type = connection_dict.pop("type", None) + # Get value from environment + connection = {k: parse_environment_variable(v) for k, v in connection_dict.items()} + elif isinstance(model_configuration, ModelConfiguration): + # Get connection from model configuration + connection_type = model_configuration._type + if model_configuration.connection: + connection, _ = get_connection_by_name(model_configuration.connection) + else: + connection = {k: parse_environment_variable(v) for k, v in asdict(model_configuration).items()} + + if connection_type in [AzureOpenAIConnection.TYPE, "azure_openai"]: + if "api_base" not in connection: + connection["api_base"] = connection.get("azure_endpoint", None) return AzureOpenAIConnection(**connection) - elif connection_type == OpenAIConnection.TYPE: + elif connection_type in [OpenAIConnection.TYPE, "openai"]: return OpenAIConnection(**connection) error_message = ( f"Not Support connection type {connection_type} for embedding api. " @@ -76,14 +105,14 @@ def convert_prompt_template(template, inputs, api): return parse_chat(rendered_prompt, list(referenced_images)) -def prepare_open_ai_request_params(params, template, api, connection): +def prepare_open_ai_request_params(model_config, template, connection): # TODO validate function in params - params = copy.copy(params) + params = copy.copy(model_config.parameters) if isinstance(connection, AzureOpenAIConnection): - params["model"] = params.pop("deployment_name") params["extra_headers"] = {"ms-azure-ai-promptflow-called-from": "promptflow-core"} + params["model"] = model_config._model - if api == "completion": + if model_config.api == "completion": params["prompt"] = template else: params["messages"] = template diff --git a/src/promptflow-devkit/promptflow/_sdk/_orchestrator/run_submitter.py b/src/promptflow-devkit/promptflow/_sdk/_orchestrator/run_submitter.py index d6cf2842022..a1a3ed7a553 100644 --- a/src/promptflow-devkit/promptflow/_sdk/_orchestrator/run_submitter.py +++ b/src/promptflow-devkit/promptflow/_sdk/_orchestrator/run_submitter.py @@ -9,7 +9,6 @@ from promptflow._constants import FlowLanguage, FlowType from promptflow._sdk._constants import REMOTE_URI_PREFIX, ContextAttributeKey, FlowRunProperties -from promptflow._sdk._utils import get_flow_name from promptflow._sdk.entities._flows import Flow, Prompty from promptflow._sdk.entities._run import Run from promptflow._sdk.operations._local_storage_operations import LocalStorageOperations @@ -91,7 +90,7 @@ def _run_bulk(self, run: Run, stream=False, **kwargs): logger.debug("start trace for flow run...") if is_collection_writeable(): logger.debug("trace collection is writeable, will use flow name as collection...") - collection_for_run = get_flow_name(run.flow) + collection_for_run = run._flow_name logger.debug("collection for run: %s", collection_for_run) # pass with internal parameter `_collection` start_trace(attributes=attributes, run=run.name, _collection=collection_for_run) diff --git a/src/promptflow-devkit/promptflow/_sdk/entities/_run.py b/src/promptflow-devkit/promptflow/_sdk/entities/_run.py index fa8a2950251..7e996dc0df9 100644 --- a/src/promptflow-devkit/promptflow/_sdk/entities/_run.py +++ b/src/promptflow-devkit/promptflow/_sdk/entities/_run.py @@ -178,7 +178,10 @@ def __init__( self._output_path = Path( kwargs.get("output_path", self._generate_output_path(config=kwargs.get("config", None))) ) - self._flow_name = flow_dir.name + if is_prompty_flow(self.flow): + self._flow_name = Path(self.flow).stem + else: + self._flow_name = flow_dir.name elif self._run_source == RunInfoSources.INDEX_SERVICE: self._metrics = kwargs.get("metrics", {}) self._experiment_name = experiment_name diff --git a/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_prompty.py b/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_prompty.py index 8eac7c32268..a7e2c523abe 100644 --- a/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_prompty.py +++ b/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_prompty.py @@ -3,21 +3,26 @@ from pathlib import Path import pytest +from _constants import PROMPTFLOW_ROOT from openai.types.chat import ChatCompletion from promptflow._sdk._pf_client import PFClient -from promptflow.connections import AzureOpenAIConnection from promptflow.core import Flow -from promptflow.core._errors import MissingRequiredInputError +from promptflow.core._errors import InvalidConnectionError, MissingRequiredInputError from promptflow.core._flow import AsyncPrompty, Prompty +from promptflow.core._model_configuration import AzureOpenAIModelConfiguration +from promptflow.core._prompty_utils import convert_model_configuration_to_connection -TEST_ROOT = Path(__file__).parent.parent.parent +TEST_ROOT = PROMPTFLOW_ROOT / "tests" DATA_DIR = TEST_ROOT / "test_configs/datas" PROMPTY_DIR = TEST_ROOT / "test_configs/prompty" FLOW_DIR = TEST_ROOT / "test_configs/flows" EAGER_FLOW_DIR = TEST_ROOT / "test_configs/eager_flows" +@pytest.mark.usefixtures("use_secrets_config_file", "setup_local_connection", "recording_injection") +@pytest.mark.sdk_test +@pytest.mark.e2etest class TestPrompty: def test_load_prompty(self): expect_data = { @@ -25,8 +30,12 @@ def test_load_prompty(self): "description": "A basic prompt that uses the GPT-3 chat API to answer questions", "model": { "api": "chat", - "connection": "azure_open_ai_connection", - "parameters": {"deployment_name": "gpt-35-turbo", "max_tokens": 128, "temperature": 0.2}, + "configuration": { + "connection": "azure_open_ai_connection", + "azure_deployment": "gpt-35-turbo", + "type": "azure_openai", + }, + "parameters": {"max_tokens": 128, "temperature": 0.2}, }, "inputs": { "firstName": {"type": "string", "default": "John"}, @@ -55,13 +64,12 @@ def test_overwrite_prompty(self): "description": "A basic prompt that uses the GPT-3 chat API to answer questions", "model": { "api": "chat", - "connection": "mock_connection_name", - "parameters": { - "mock_key": "mock_value", - "deployment_name": "gpt-35-turbo", - "max_tokens": 64, - "temperature": 0.2, + "configuration": { + "connection": "mock_connection_name", + "azure_deployment": "gpt-35-turbo", + "type": "azure_openai", }, + "parameters": {"max_tokens": 64, "temperature": 0.2, "mock_key": "mock_value"}, }, "inputs": { "firstName": {"type": "string", "default": "John"}, @@ -71,7 +79,7 @@ def test_overwrite_prompty(self): } params_override = { "api": "chat", - "connection": "mock_connection_name", + "configuration": {"connection": "mock_connection_name"}, "parameters": {"mock_key": "mock_value", "max_tokens": 64}, } # load prompty by flow @@ -98,25 +106,64 @@ def test_prompty_callable(self, pf: PFClient): assert "2" in result # Test connection with dict - connection = pf.connections.get(name=prompty._connection, with_secrets=True) - connection_dict = { - "type": connection.TYPE, - "api_key": connection.api_key, - "api_base": connection.api_base, + connection = convert_model_configuration_to_connection(prompty._model.configuration) + model_dict = { + "configuration": { + "type": "azure_openai", + "azure_deployment": "gpt-35-turbo", + "api_key": connection.api_key, + "api_version": connection.api_version, + "azure_endpoint": connection.api_base, + "connection": None, + }, } - prompty = Prompty.load(source=f"{PROMPTY_DIR}/prompty_example.prompty", model={"connection": connection_dict}) + prompty = Flow.load(source=f"{PROMPTY_DIR}/prompty_example.prompty", model=model_dict) result = prompty(question="what is the result of 1+1?") assert "2" in result - # Test using connection object - connection_obj = AzureOpenAIConnection( - api_base=connection.api_base, + # Test using model configuration + connection_obj = AzureOpenAIModelConfiguration( + azure_endpoint=connection.api_base, + azure_deployment="gpt-35-turbo", api_key=connection.api_key, + api_version=connection.api_version, + ) + prompty = Prompty.load(source=f"{PROMPTY_DIR}/prompty_example.prompty", model={"configuration": connection_obj}) + result = prompty(question="what is the result of 1+1?") + assert "2" in result + + connection_obj = AzureOpenAIModelConfiguration( + connection="azure_open_ai_connection", + azure_deployment="gpt-35-turbo", ) - prompty = Prompty.load(source=f"{PROMPTY_DIR}/prompty_example.prompty", model={"connection": connection_obj}) + prompty = Prompty.load(source=f"{PROMPTY_DIR}/prompty_example.prompty", model={"configuration": connection_obj}) result = prompty(question="what is the result of 1+1?") assert "2" in result + with pytest.raises(InvalidConnectionError) as ex: + AzureOpenAIModelConfiguration( + azure_endpoint=connection.api_base, + azure_deployment="gpt-35-turbo", + api_key=connection.api_key, + api_version=connection.api_version, + connection="azure_open_ai_connection", + ) + assert "Cannot configure model config and connection at the same time." in ex.value.message + + with pytest.raises(InvalidConnectionError) as ex: + model_dict = { + "configuration": { + "type": "azure_openai", + "azure_deployment": "gpt-35-turbo", + "api_key": connection.api_key, + "api_version": connection.api_version, + "azure_endpoint": connection.api_base, + "connection": "azure_open_ai_connection", + }, + } + Prompty.load(source=f"{PROMPTY_DIR}/prompty_example.prompty", model=model_dict) + assert "Cannot configure model config and connection" in ex.value.message + # Test format is raw prompty = Prompty.load(source=f"{PROMPTY_DIR}/prompty_example.prompty", format="raw") result = prompty(question="what is the result of 1+1?") @@ -135,6 +182,7 @@ def test_prompty_async_call(self): result = asyncio.run(async_prompty(question="what is the result of 1+1?")) assert isinstance(result, ChatCompletion) + @pytest.mark.skip(reason="Failed in CI pipeline, fix it in next PR.") def test_prompty_batch_run(self, pf: PFClient): run = pf.run(flow=f"{PROMPTY_DIR}/prompty_example.prompty", data=f"{DATA_DIR}/prompty_inputs.jsonl") assert run.status == "Completed" diff --git a/src/promptflow-recording/recordings/local/node_cache.shelve.bak b/src/promptflow-recording/recordings/local/node_cache.shelve.bak index 38a55851520..302f4f3f5d2 100644 --- a/src/promptflow-recording/recordings/local/node_cache.shelve.bak +++ b/src/promptflow-recording/recordings/local/node_cache.shelve.bak @@ -79,3 +79,5 @@ '53f3e92d1fedad20dbdb333cbfecc9e05e88e482', (335360, 5192) 'b1939e95969d999dc1222987f873ffc71abc7ced', (346624, 5117) '73a921e88b3a461b959e73609c3a842ec4ff28b8', (351744, 5240) +'1bf63409ed71dd20a27cbfc22daa40a520247e15', (357376, 2372) +'40930dde90321f7d2c74eb2507f82c74bc6112d9', (359936, 2377) diff --git a/src/promptflow-recording/recordings/local/node_cache.shelve.dat b/src/promptflow-recording/recordings/local/node_cache.shelve.dat index 2d5eb6f17ee55dd61779f3ff99e819703a369a62..25b7f130cd7a22a2b7b1821906312de498c630b1 100644 GIT binary patch delta 2824 zcmeHI&2!sC6t`o!Y21-Cg^4=}!EfDs|LTu~@8(sv2{yW1B)EcT+tkwiqr~u!|UX zF-W@0Q5jp=8Y){^sv`@zCCfo9wH&Tz!!;x?O+36G{Jy*mN4BBSgQJRiMC8)8OoI_< z$|clf@L6%O4yjyRZH&e0F|DjobKQnxch#f8X<4%{GJ4^-2#|seT&=(}`REYuf zNEe_@@!*6KmUSHqLwksy)~KKZI>M5=V>4_s$n3W4CI%Z;;3W5;Qz7l)El#lur$|Qa z6xFr}VzNnPMzHK+$}J{4CAsfH-vpfIIN}1i>lDru;Q8pY#BCj3@E^A)Gc7U?t2>-k zID@l(>wW2*-~6N0s-NrLh8I0}Nr_0&3{z*C1}}SXUQruFWz&Ez^}JZ<0)k$ySUqDi zePmW0*>GouozYpU&dqJ+Mpl7^C zxdc-_`c3LgO$#G&B5uw2KR=RA*WdPFHnND|7Q-bE<{}+RA{TbC0rQcQDc;1c$!f3= zk%dYaay&#!u~pIta?5bpgLf1%f0yhF%16ReT;<4ucRhG-Uqqb-c@}xd^QI|WYU>!5 zJXqcr6vnJ@PRTeCU9Bo)fb@}zqObRt-~$r0gd)jZESfMtUHInPOo{x7ITs^B9$O(o73Uxy6;1@(aB$n@L{Ll*ZZl2HWL430mAAv`~Sd11g@*mRc%$5zRlTdUUk4O(2hayWz!hwyiYL->h*!lyQb6E}GF EFNMux3;+NC delta 19 acmX@vC-&osXhREQ3sVbo3(FSPiXs432?v}2 diff --git a/src/promptflow-recording/recordings/local/node_cache.shelve.dir b/src/promptflow-recording/recordings/local/node_cache.shelve.dir index 38a55851520..302f4f3f5d2 100644 --- a/src/promptflow-recording/recordings/local/node_cache.shelve.dir +++ b/src/promptflow-recording/recordings/local/node_cache.shelve.dir @@ -79,3 +79,5 @@ '53f3e92d1fedad20dbdb333cbfecc9e05e88e482', (335360, 5192) 'b1939e95969d999dc1222987f873ffc71abc7ced', (346624, 5117) '73a921e88b3a461b959e73609c3a842ec4ff28b8', (351744, 5240) +'1bf63409ed71dd20a27cbfc22daa40a520247e15', (357376, 2372) +'40930dde90321f7d2c74eb2507f82c74bc6112d9', (359936, 2377) diff --git a/src/promptflow/tests/test_configs/prompty/prompty_example.prompty b/src/promptflow/tests/test_configs/prompty/prompty_example.prompty index b381e27062d..d7cc95fa798 100644 --- a/src/promptflow/tests/test_configs/prompty/prompty_example.prompty +++ b/src/promptflow/tests/test_configs/prompty/prompty_example.prompty @@ -3,14 +3,20 @@ name: Basic Prompt description: A basic prompt that uses the GPT-3 chat API to answer questions model: api: chat - connection: azure_open_ai_connection + configuration: + type: azure_openai + azure_deployment: gpt-35-turbo + connection: azure_open_ai_connection parameters: - deployment_name: gpt-35-turbo max_tokens: 128 temperature: 0.2 inputs: - firstName: John - lastName: Doh + firstName: + type: string + default: John + lastName: + type: string + default: Doh question: type: string ---